path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acorn/char/pcf8583.c | 3
-rw-r--r--  drivers/block/cciss.c | 5
-rw-r--r--  drivers/block/ll_rw_blk.c | 38
-rw-r--r--  drivers/block/paride/pf.c | 22
-rw-r--r--  drivers/block/scsi_ioctl.c | 1
-rw-r--r--  drivers/char/agp/hp-agp.c | 2
-rw-r--r--  drivers/char/epca.c | 84
-rw-r--r--  drivers/char/epca.h | 12
-rw-r--r--  drivers/char/hvc_console.c | 6
-rw-r--r--  drivers/char/vt.c | 5
-rw-r--r--  drivers/char/watchdog/mpcore_wdt.c | 2
-rw-r--r--  drivers/firmware/dell_rbu.c | 241
-rw-r--r--  drivers/hwmon/hdaps.c | 356
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 12
-rw-r--r--  drivers/ide/ide-taskfile.c | 1
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 19
-rw-r--r--  drivers/infiniband/core/user_mad.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 16
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 51
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 25
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 13
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 1
-rw-r--r--  drivers/isdn/hisax/sedlbauer_cs.c | 2
-rw-r--r--  drivers/isdn/hisax/st5481.h | 4
-rw-r--r--  drivers/isdn/hisax/st5481_b.c | 22
-rw-r--r--  drivers/isdn/hisax/st5481_d.c | 26
-rw-r--r--  drivers/isdn/hisax/st5481_init.c | 4
-rw-r--r--  drivers/isdn/hisax/st5481_usb.c | 70
-rw-r--r--  drivers/md/raid6.h | 4
-rw-r--r--  drivers/md/raid6algos.c | 1
-rw-r--r--  drivers/md/raid6altivec.uc | 18
-rw-r--r--  drivers/md/raid6test/Makefile | 27
-rw-r--r--  drivers/message/fusion/Kconfig | 17
-rw-r--r--  drivers/message/fusion/Makefile | 1
-rw-r--r--  drivers/message/fusion/mptbase.c | 963
-rw-r--r--  drivers/message/fusion/mptbase.h | 56
-rw-r--r--  drivers/message/fusion/mptctl.c | 4
-rw-r--r--  drivers/message/fusion/mptfc.c | 2
-rw-r--r--  drivers/message/fusion/mptlan.c | 7
-rw-r--r--  drivers/message/fusion/mptsas.c | 1235
-rw-r--r--  drivers/message/fusion/mptscsih.c | 463
-rw-r--r--  drivers/message/fusion/mptscsih.h | 7
-rw-r--r--  drivers/message/fusion/mptspi.c | 2
-rw-r--r--  drivers/message/i2o/config-osm.c | 5
-rw-r--r--  drivers/mtd/maps/sharpsl-flash.c | 2
-rw-r--r--  drivers/mtd/nand/sharpsl.c | 10
-rw-r--r--  drivers/net/8139cp.c | 46
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 11
-rw-r--r--  drivers/net/e100.c | 4
-rw-r--r--  drivers/net/e1000/e1000_main.c | 1
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 2
-rw-r--r--  drivers/net/s2io.c | 9
-rw-r--r--  drivers/net/sk98lin/skge.c | 12
-rw-r--r--  drivers/net/skge.c | 98
-rw-r--r--  drivers/net/skge.h | 2
-rw-r--r--  drivers/net/spider_net.c | 4
-rw-r--r--  drivers/net/tg3.c | 108
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 2
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 2
-rw-r--r--  drivers/net/wireless/airo.c | 5
-rw-r--r--  drivers/pci/hotplug/ibmphp_pci.c | 2
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 4
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 3
-rw-r--r--  drivers/pci/pci.c | 16
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 13
-rw-r--r--  drivers/s390/cio/blacklist.c | 4
-rw-r--r--  drivers/s390/crypto/z90main.c | 3
-rw-r--r--  drivers/s390/net/ctcmain.c | 41
-rw-r--r--  drivers/s390/net/qeth.h | 4
-rw-r--r--  drivers/s390/net/qeth_main.c | 133
-rw-r--r--  drivers/s390/net/qeth_sys.c | 17
-rw-r--r--  drivers/s390/scsi/Makefile | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 184
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 995
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 307
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 135
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 30
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 769
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 54
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 30
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 297
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_adapter.c | 14
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 9
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 8
-rw-r--r--  drivers/scsi/atp870u.c | 6
-rw-r--r--  drivers/scsi/atp870u.h | 5
-rw-r--r--  drivers/scsi/fd_mcs.c | 2
-rw-r--r--  drivers/scsi/hosts.c | 35
-rw-r--r--  drivers/scsi/ibmmca.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 10
-rw-r--r--  drivers/scsi/libata-core.c | 37
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 21
-rw-r--r--  drivers/scsi/sata_sis.c | 4
-rw-r--r--  drivers/scsi/scsi.c | 5
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_error.c | 78
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 12
-rw-r--r--  drivers/scsi/scsi_scan.c | 20
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 17
-rw-r--r--  drivers/scsi/sd.c | 1
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sr.c | 1
-rw-r--r--  drivers/scsi/st.c | 1
-rw-r--r--  drivers/serial/21285.c | 2
-rw-r--r--  drivers/serial/amba-pl010.c | 2
-rw-r--r--  drivers/serial/amba-pl011.c | 2
-rw-r--r--  drivers/serial/clps711x.c | 2
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 2
-rw-r--r--  drivers/serial/pxa.c | 4
-rw-r--r--  drivers/serial/sa1100.c | 2
-rw-r--r--  drivers/serial/serial_lh7a40x.c | 2
-rw-r--r--  drivers/video/Kconfig | 1
-rw-r--r--  drivers/video/aty/xlinit.c | 5
-rw-r--r--  drivers/video/backlight/corgi_bl.c | 9
-rw-r--r--  drivers/video/console/vgacon.c | 4
-rw-r--r--  drivers/video/fbcvt.c | 8
-rw-r--r--  drivers/video/imxfb.c | 2
-rw-r--r--  drivers/video/nvidia/nv_i2c.c | 11
-rw-r--r--  drivers/video/nvidia/nvidia.c | 5
-rw-r--r--  drivers/video/savage/savagefb-i2c.c | 11
-rw-r--r--  drivers/video/savage/savagefb.h | 4
-rw-r--r--  drivers/video/savage/savagefb_driver.c | 11
129 files changed, 5137 insertions, 2488 deletions
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
index 141b4c237a50..2b850e5860a0 100644
--- a/drivers/acorn/char/pcf8583.c
+++ b/drivers/acorn/char/pcf8583.c
@@ -23,12 +23,13 @@ static struct i2c_driver pcf8583_driver;
 
 static unsigned short ignore[] = { I2C_CLIENT_END };
 static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END };
+static unsigned short *forces[] = { NULL };
 
 static struct i2c_client_address_data addr_data = {
 	.normal_i2c = normal_addr,
 	.probe = ignore,
 	.ignore = ignore,
-	.force = ignore,
+	.forces = forces,
 };
 
 #define DAT(x) ((unsigned int)(x->dev.driver_data))
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c56f995aadad..486b6e1c7dfb 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -483,9 +483,6 @@ static int cciss_open(struct inode *inode, struct file *filep)
 	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
 #endif /* CCISS_DEBUG */
 
-	if (host->busy_initializing)
-		return -EBUSY;
-
 	if (host->busy_initializing || drv->busy_configuring)
 		return -EBUSY;
 	/*
@@ -2991,6 +2988,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
 
 	cciss_procinit(i);
+	hba[i]->busy_initializing = 0;
 
 	for(j=0; j < NWD; j++) { /* mfm */
 		drive_info_struct *drv = &(hba[i]->drv[j]);
@@ -3033,7 +3031,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 		add_disk(disk);
 	}
 
-	hba[i]->busy_initializing = 0;
 	return(1);
 
 clean4:
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 483d71b10cf9..baedac522945 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -2373,44 +2373,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-/**
- * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
- * @q: device queue
- * @disk: gendisk
- * @error_sector: error offset
- *
- * Description:
- *    Devices understanding the SCSI command set, can use this function as
- *    a helper for issuing a cache flush. Note: driver is required to store
- *    the error offset (in case of error flushing) in ->sector of struct
- *    request.
- */
-int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
-			       sector_t *error_sector)
-{
-	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	int ret;
-
-	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
-	rq->sector = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->cmd[0] = 0x35;
-	rq->cmd_len = 12;
-	rq->data = NULL;
-	rq->data_len = 0;
-	rq->timeout = 60 * HZ;
-
-	ret = blk_execute_rq(q, disk, rq, 0);
-
-	if (ret && error_sector)
-		*error_sector = rq->sector;
-
-	blk_put_request(rq);
-	return ret;
-}
-
-EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
-
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
 	int rw = rq_data_dir(rq);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 711d2f314ac3..94af920465b5 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,6 +750,14 @@ static int pf_ready(void)
 
 static struct request_queue *pf_queue;
 
+static void pf_end_request(int uptodate)
+{
+	if (pf_req) {
+		end_request(pf_req, uptodate);
+		pf_req = NULL;
+	}
+}
+
 static void do_pf_request(request_queue_t * q)
 {
 	if (pf_busy)
@@ -765,7 +773,7 @@ repeat:
 	pf_count = pf_req->current_nr_sectors;
 
 	if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
-		end_request(pf_req, 0);
+		pf_end_request(0);
 		goto repeat;
 	}
 
@@ -780,7 +788,7 @@ repeat:
 		pi_do_claimed(pf_current->pi, do_pf_write);
 	else {
 		pf_busy = 0;
-		end_request(pf_req, 0);
+		pf_end_request(0);
 		goto repeat;
 	}
 }
@@ -798,9 +806,11 @@ static int pf_next_buf(void)
 	if (!pf_count)
 		return 1;
 	spin_lock_irqsave(&pf_spin_lock, saved_flags);
-	end_request(pf_req, 1);
-	pf_count = pf_req->current_nr_sectors;
-	pf_buf = pf_req->buffer;
+	pf_end_request(1);
+	if (pf_req) {
+		pf_count = pf_req->current_nr_sectors;
+		pf_buf = pf_req->buffer;
+	}
 	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
 	return 1;
 }
@@ -810,7 +820,7 @@ static inline void next_request(int success)
 	unsigned long saved_flags;
 
 	spin_lock_irqsave(&pf_spin_lock, saved_flags);
-	end_request(pf_req, success);
+	pf_end_request(success);
 	pf_busy = 0;
 	do_pf_request(pf_queue);
 	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index 856c2278e9d0..079ec344eb47 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -168,6 +168,7 @@ static int verify_command(struct file *file, unsigned char *cmd)
 	safe_for_write(WRITE_VERIFY_12),
 	safe_for_write(WRITE_16),
 	safe_for_write(WRITE_LONG),
+	safe_for_write(WRITE_LONG_2),
 	safe_for_write(ERASE),
 	safe_for_write(GPCMD_MODE_SELECT_10),
 	safe_for_write(MODE_SELECT),
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 99762b6c19ae..de5d6d212674 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -252,7 +252,7 @@ hp_zx1_configure (void)
 	readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
 	writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
 	readl(hp->ioc_regs+HP_ZX1_TCNFG);
-	writel(~(HP_ZX1_IOVA_SIZE-1), hp->ioc_regs+HP_ZX1_IMASK);
+	writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
 	readl(hp->ioc_regs+HP_ZX1_IMASK);
 	writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
 	readl(hp->ioc_regs+HP_ZX1_IBASE);
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 58d3738a2b7f..407708a001e4 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -534,7 +534,7 @@ static void shutdown(struct channel *ch)
 
 	unsigned long flags;
 	struct tty_struct *tty;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 
 	if (!(ch->asyncflags & ASYNC_INITIALIZED))
 		return;
@@ -618,7 +618,7 @@ static int pc_write(struct tty_struct * tty,
 	struct channel *ch;
 	unsigned long flags;
 	int remain;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 
 	/* ----------------------------------------------------------------
 		pc_write is primarily called directly by the kernel routine
@@ -685,7 +685,7 @@ static int pc_write(struct tty_struct * tty,
 	------------------------------------------------------------------- */
 
 	dataLen = min(bytesAvailable, dataLen);
-	memcpy(ch->txptr + head, buf, dataLen);
+	memcpy_toio(ch->txptr + head, buf, dataLen);
 	buf += dataLen;
 	head += dataLen;
 	amountCopied += dataLen;
@@ -726,7 +726,7 @@ static int pc_write_room(struct tty_struct *tty)
 	struct channel *ch;
 	unsigned long flags;
 	unsigned int head, tail;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 
 	remain = 0;
 
@@ -773,7 +773,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
 	int remain;
 	unsigned long flags;
 	struct channel *ch;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 
 	/* ---------------------------------------------------------
 		verifyChannel returns the channel from the tty struct
@@ -830,7 +830,7 @@ static void pc_flush_buffer(struct tty_struct *tty)
 	unsigned int tail;
 	unsigned long flags;
 	struct channel *ch;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	/* ---------------------------------------------------------
 		verifyChannel returns the channel from the tty struct
 		if it is valid.  This serves as a sanity check.
@@ -976,7 +976,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
 	struct channel *ch;
 	unsigned long flags;
 	int line, retval, boardnum;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	unsigned int head;
 
 	line = tty->index;
@@ -1041,7 +1041,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
 	ch->statusflags = 0;
 
 	/* Save boards current modem status */
-	ch->imodem = bc->mstat;
+	ch->imodem = readb(&bc->mstat);
 
 	/* ----------------------------------------------------------------
 		Set receive head and tail ptrs to each other.  This indicates
@@ -1399,10 +1399,10 @@ static void post_fep_init(unsigned int crd)
 { /* Begin post_fep_init */
 
 	int i;
-	unsigned char *memaddr;
-	struct global_data *gd;
+	void __iomem *memaddr;
+	struct global_data __iomem *gd;
 	struct board_info *bd;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	struct channel *ch;
 	int shrinkmem = 0, lowwater ;
 
@@ -1461,7 +1461,7 @@ static void post_fep_init(unsigned int crd)
 		8 and 64 of these structures.
 	-------------------------------------------------------------------- */
 
-	bc = (struct board_chan *)(memaddr + CHANSTRUCT);
+	bc = memaddr + CHANSTRUCT;
 
 	/* -------------------------------------------------------------------
 		The below assignment will set gd to point at the BEGINING of
@@ -1470,7 +1470,7 @@ static void post_fep_init(unsigned int crd)
 		pointer begins at 0xd10.
 	---------------------------------------------------------------------- */
 
-	gd = (struct global_data *)(memaddr + GLOBAL);
+	gd = memaddr + GLOBAL;
 
 	/* --------------------------------------------------------------------
 		XEPORTS (address 0xc22) points at the number of channels the
@@ -1493,6 +1493,7 @@ static void post_fep_init(unsigned int crd)
 
 	for (i = 0; i < bd->numports; i++, ch++, bc++) { /* Begin for each port */
 		unsigned long flags;
+		u16 tseg, rseg;
 
 		ch->brdchan = bc;
 		ch->mailbox = gd;
@@ -1553,50 +1554,53 @@ static void post_fep_init(unsigned int crd)
 			shrinkmem = 0;
 		}
 
+		tseg = readw(&bc->tseg);
+		rseg = readw(&bc->rseg);
+
 		switch (bd->type) {
 
 		case PCIXEM:
 		case PCIXRJ:
 		case PCIXR:
 			/* Cover all the 2MEG cards */
-			ch->txptr = memaddr + (((bc->tseg) << 4) & 0x1fffff);
-			ch->rxptr = memaddr + (((bc->rseg) << 4) & 0x1fffff);
-			ch->txwin = FEPWIN | ((bc->tseg) >> 11);
-			ch->rxwin = FEPWIN | ((bc->rseg) >> 11);
+			ch->txptr = memaddr + ((tseg << 4) & 0x1fffff);
+			ch->rxptr = memaddr + ((rseg << 4) & 0x1fffff);
+			ch->txwin = FEPWIN | (tseg >> 11);
+			ch->rxwin = FEPWIN | (rseg >> 11);
 			break;
 
 		case PCXEM:
 		case EISAXEM:
 			/* Cover all the 32K windowed cards */
 			/* Mask equal to window size - 1 */
-			ch->txptr = memaddr + (((bc->tseg) << 4) & 0x7fff);
-			ch->rxptr = memaddr + (((bc->rseg) << 4) & 0x7fff);
-			ch->txwin = FEPWIN | ((bc->tseg) >> 11);
-			ch->rxwin = FEPWIN | ((bc->rseg) >> 11);
+			ch->txptr = memaddr + ((tseg << 4) & 0x7fff);
+			ch->rxptr = memaddr + ((rseg << 4) & 0x7fff);
+			ch->txwin = FEPWIN | (tseg >> 11);
+			ch->rxwin = FEPWIN | (rseg >> 11);
 			break;
 
 		case PCXEVE:
 		case PCXE:
-			ch->txptr = memaddr + (((bc->tseg - bd->memory_seg) << 4) & 0x1fff);
-			ch->txwin = FEPWIN | ((bc->tseg - bd->memory_seg) >> 9);
-			ch->rxptr = memaddr + (((bc->rseg - bd->memory_seg) << 4) & 0x1fff);
-			ch->rxwin = FEPWIN | ((bc->rseg - bd->memory_seg) >>9 );
+			ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff);
+			ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
+			ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff);
+			ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 );
 			break;
 
 		case PCXI:
 		case PC64XE:
-			ch->txptr = memaddr + ((bc->tseg - bd->memory_seg) << 4);
-			ch->rxptr = memaddr + ((bc->rseg - bd->memory_seg) << 4);
+			ch->txptr = memaddr + ((tseg - bd->memory_seg) << 4);
+			ch->rxptr = memaddr + ((rseg - bd->memory_seg) << 4);
 			ch->txwin = ch->rxwin = 0;
 			break;
 
 		} /* End switch bd->type */
 
 		ch->txbufhead = 0;
-		ch->txbufsize = bc->tmax + 1;
+		ch->txbufsize = readw(&bc->tmax) + 1;
 
 		ch->rxbufhead = 0;
-		ch->rxbufsize = bc->rmax + 1;
+		ch->rxbufsize = readw(&bc->rmax) + 1;
 
 		lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2);
 
@@ -1718,11 +1722,11 @@ static void epcapoll(unsigned long ignored)
 static void doevent(int crd)
 { /* Begin doevent */
 
-	void *eventbuf;
+	void __iomem *eventbuf;
 	struct channel *ch, *chan0;
 	static struct tty_struct *tty;
 	struct board_info *bd;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	unsigned int tail, head;
 	int event, channel;
 	int mstat, lstat;
@@ -1817,7 +1821,7 @@ static void doevent(int crd)
 static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
 		   int byte2, int ncmds, int bytecmd)
 { /* Begin fepcmd */
-	unchar *memaddr;
+	unchar __iomem *memaddr;
 	unsigned int head, cmdTail, cmdStart, cmdMax;
 	long count;
 	int n;
@@ -2000,7 +2004,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
 
 	unsigned int cmdHead;
 	struct termios *ts;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	unsigned mval, hflow, cflag, iflag;
 
 	bc = ch->brdchan;
@@ -2010,7 +2014,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
 	ts = tty->termios;
 	if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */
 		cmdHead = readw(&bc->rin);
-		bc->rout = cmdHead;
+		writew(cmdHead, &bc->rout);
 		cmdHead = readw(&bc->tin);
 		/* Changing baud in mid-stream transmission can be wonderful */
 		/* ---------------------------------------------------------------
@@ -2116,7 +2120,7 @@ static void receive_data(struct channel *ch)
 	unchar *rptr;
 	struct termios *ts = NULL;
 	struct tty_struct *tty;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	int dataToRead, wrapgap, bytesAvailable;
 	unsigned int tail, head;
 	unsigned int wrapmask;
@@ -2154,7 +2158,7 @@ static void receive_data(struct channel *ch)
 	--------------------------------------------------------------------- */
 
 	if (!tty || !ts || !(ts->c_cflag & CREAD)) {
-		bc->rout = head;
+		writew(head, &bc->rout);
 		return;
 	}
 
@@ -2270,7 +2274,7 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
 static int pc_tiocmget(struct tty_struct *tty, struct file *file)
 {
 	struct channel *ch = (struct channel *) tty->driver_data;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	unsigned int mstat, mflag = 0;
 	unsigned long flags;
 
@@ -2351,7 +2355,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
 	unsigned long flags;
 	unsigned int mflag, mstat;
 	unsigned char startc, stopc;
-	struct board_chan *bc;
+	struct board_chan __iomem *bc;
 	struct channel *ch = (struct channel *) tty->driver_data;
 	void __user *argp = (void __user *)arg;
 
@@ -2633,7 +2637,7 @@ static void pc_start(struct tty_struct *tty)
 	spin_lock_irqsave(&epca_lock, flags);
 	/* Just in case output was resumed because of a change in Digi-flow */
 	if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */
-		struct board_chan *bc;
+		struct board_chan __iomem *bc;
 		globalwinon(ch);
 		bc = ch->brdchan;
 		if (ch->statusflags & LOWWAIT)
@@ -2727,7 +2731,7 @@ void digi_send_break(struct channel *ch, int msec)
 static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
 { /* Begin setup_empty_event */
 
-	struct board_chan *bc = ch->brdchan;
+	struct board_chan __iomem *bc = ch->brdchan;
 
 	globalwinon(ch);
 	ch->statusflags |= EMPTYWAIT;
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index 20eeb5a70e1a..456d6c8f94a8 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -128,17 +128,17 @@ struct channel
 	unsigned long c_cflag;
 	unsigned long c_lflag;
 	unsigned long c_oflag;
-	unsigned char *txptr;
-	unsigned char *rxptr;
+	unsigned char __iomem *txptr;
+	unsigned char __iomem *rxptr;
 	unsigned char *tmp_buf;
 	struct board_info *board;
-	struct board_chan *brdchan;
+	struct board_chan __iomem *brdchan;
 	struct digi_struct digiext;
 	struct tty_struct *tty;
 	wait_queue_head_t open_wait;
 	wait_queue_head_t close_wait;
 	struct work_struct tqueue;
-	struct global_data *mailbox;
+	struct global_data __iomem *mailbox;
 };
 
 struct board_info
@@ -149,8 +149,8 @@ struct board_info
 	unsigned short numports;
 	unsigned long port;
 	unsigned long membase;
-	unsigned char __iomem *re_map_port;
-	unsigned char *re_map_membase;
+	void __iomem *re_map_port;
+	void __iomem *re_map_membase;
 	unsigned long memory_seg;
 	void ( * memwinon )	(struct board_info *, unsigned int) ;
 	void ( * memwinoff )	(struct board_info *, unsigned int) ;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index cddb789902db..f92177634677 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -839,9 +839,6 @@ int __init hvc_init(void)
 	hvc_driver->flags = TTY_DRIVER_REAL_RAW;
 	tty_set_operations(hvc_driver, &hvc_ops);
 
-	if (tty_register_driver(hvc_driver))
-		panic("Couldn't register hvc console driver\n");
-
 	/* Always start the kthread because there can be hotplug vty adapters
 	 * added later. */
 	hvc_task = kthread_run(khvcd, NULL, "khvcd");
@@ -851,6 +848,9 @@ int __init hvc_init(void)
 		return -EIO;
 	}
 
+	if (tty_register_driver(hvc_driver))
+		panic("Couldn't register hvc console driver\n");
+
 	return 0;
 }
 module_init(hvc_init);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1e33cb032e07..e91268e86833 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -810,13 +810,14 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
 		 * from the top and bottom of cursor position
 		 */
 		old_origin += (vc->vc_y - new_rows/2) * old_row_size;
-		end = old_origin + new_screen_size;
+		end = old_origin + (old_row_size * new_rows);
 	    }
 	} else
 		/*
 		 * Cursor near the top, copy contents from the top of buffer
 		 */
-		end = (old_rows > new_rows) ? old_origin + new_screen_size :
+		end = (old_rows > new_rows) ? old_origin +
+			(old_row_size * new_rows) :
 			vc->vc_scr_end;
 
 	update_attr(vc);
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index c694eee1fb24..75ca84ed4adf 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -30,6 +30,8 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
+
+#include <asm/hardware/arm_twd.h>
 #include <asm/uaccess.h>
 
 struct mpcore_wdt {
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 3b865f34a095..b66782398258 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -50,7 +50,7 @@
 MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
 MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0");
+MODULE_VERSION("2.0");
 
 #define BIOS_SCAN_LIMIT 0xffffffff
 #define MAX_IMAGE_LENGTH 16
@@ -65,10 +65,11 @@ static struct _rbu_data {
 	unsigned long packet_write_count;
 	unsigned long num_packets;
 	unsigned long packetsize;
+	int entry_created;
 } rbu_data;
 
-static char image_type[MAX_IMAGE_LENGTH] = "mono";
-module_param_string(image_type, image_type, sizeof(image_type), 0);
+static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
+module_param_string(image_type, image_type, sizeof (image_type), 0);
 MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet");
 
 struct packet_data {
@@ -114,7 +115,7 @@ static int fill_last_packet(void *data, size_t length)
 
 	if ((rbu_data.packet_write_count + length) > rbu_data.packetsize) {
 		pr_debug("dell_rbu:%s: packet size data "
-				"overrun\n", __FUNCTION__);
+			"overrun\n", __FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -146,12 +147,14 @@ static int create_packet(size_t length)
 		pr_debug("create_packet: packetsize not specified\n");
 		return -EINVAL;
 	}
+	spin_unlock(&rbu_data.lock);
+	newpacket = kmalloc(sizeof (struct packet_data), GFP_KERNEL);
+	spin_lock(&rbu_data.lock);
 
-	newpacket = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
 	if (!newpacket) {
 		printk(KERN_WARNING
 			"dell_rbu:%s: failed to allocate new "
 			"packet\n", __FUNCTION__);
 		return -ENOMEM;
 	}
 
@@ -160,15 +163,17 @@ static int create_packet(size_t length)
 	 * there is no upper limit on memory
 	 * address for packetized mechanism
 	 */
-	newpacket->data = (unsigned char *)__get_free_pages(GFP_KERNEL,
-					ordernum);
+	spin_unlock(&rbu_data.lock);
+	newpacket->data = (unsigned char *) __get_free_pages(GFP_KERNEL,
+		ordernum);
+	spin_lock(&rbu_data.lock);
 
 	pr_debug("create_packet: newpacket %p\n", newpacket->data);
 
 	if (!newpacket->data) {
 		printk(KERN_WARNING
 			"dell_rbu:%s: failed to allocate new "
 			"packet\n", __FUNCTION__);
 		kfree(newpacket);
 		return -ENOMEM;
 	}
@@ -204,9 +209,8 @@ static int packetize_data(void *data, size_t length)
 	return rc;
 }
 
-static int
-do_packet_read(char *data, struct list_head *ptemp_list,
-	int length, int bytes_read, int *list_read_count)
+static int do_packet_read(char *data, struct list_head *ptemp_list,
+	int length, int bytes_read, int *list_read_count)
 {
 	void *ptemp_buf;
 	struct packet_data *newpacket = NULL;
@@ -239,7 +243,7 @@ do_packet_read(char *data, struct list_head *ptemp_list,
 	return bytes_copied;
 }
 
-static int packet_read_list(char *data, size_t * pread_length)
+static int packet_read_list(char *data, size_t *pread_length)
 {
 	struct list_head *ptemp_list;
 	int temp_count = 0;
@@ -258,8 +262,7 @@ static int packet_read_list(char *data, size_t * pread_length)
 	ptemp_list = (&packet_data_head.list)->next;
 	while (!list_empty(ptemp_list)) {
 		bytes_copied = do_packet_read(pdest, ptemp_list,
-			remaining_bytes, bytes_read,
-			&temp_count);
+			remaining_bytes, bytes_read, &temp_count);
 		remaining_bytes -= bytes_copied;
 		bytes_read += bytes_copied;
 		pdest += bytes_copied;
@@ -287,7 +290,7 @@ static void packet_empty_list(void)
 	ptemp_list = (&packet_data_head.list)->next;
 	while (!list_empty(ptemp_list)) {
 		newpacket =
-		    list_entry(ptemp_list, struct packet_data, list);
+			list_entry(ptemp_list, struct packet_data, list);
 		pnext_list = ptemp_list->next;
 		list_del(ptemp_list);
 		ptemp_list = pnext_list;
@@ -296,8 +299,8 @@ static void packet_empty_list(void)
 		 * to make sure there are no stale RBU packets left in memory
 		 */
 		memset(newpacket->data, 0, rbu_data.packetsize);
-		free_pages((unsigned long)newpacket->data,
-			newpacket->ordernum);
+		free_pages((unsigned long) newpacket->data,
+			newpacket->ordernum);
 		kfree(newpacket);
 	}
 	rbu_data.packet_write_count = 0;
@@ -319,14 +322,13 @@ static void img_update_free(void)
 	 * BIOS image copied in memory.
 	 */
 	memset(rbu_data.image_update_buffer, 0,
-			rbu_data.image_update_buffer_size);
+		rbu_data.image_update_buffer_size);
 	if (rbu_data.dma_alloc == 1)
 		dma_free_coherent(NULL, rbu_data.bios_image_size,
-				rbu_data.image_update_buffer,
-				dell_rbu_dmaaddr);
+			rbu_data.image_update_buffer, dell_rbu_dmaaddr);
 	else
-		free_pages((unsigned long)rbu_data.image_update_buffer,
-				rbu_data.image_update_ordernum);
+		free_pages((unsigned long) rbu_data.image_update_buffer,
+			rbu_data.image_update_ordernum);
 
 	/*
 	 * Re-initialize the rbu_data variables after a free
@@ -366,7 +368,7 @@ static int img_update_realloc(unsigned long size)
 	 */
 	if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
 		printk(KERN_ERR "dell_rbu:%s: corruption "
-				"check failed\n", __FUNCTION__);
+			"check failed\n", __FUNCTION__);
 		return -EINVAL;
 	}
 	/*
@@ -385,17 +387,16 @@ static int img_update_realloc(unsigned long size)
 
 	ordernum = get_order(size);
 	image_update_buffer =
-	    (unsigned char *)__get_free_pages(GFP_KERNEL, ordernum);
+		(unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);
 
 	img_buf_phys_addr =
-	    (unsigned long)virt_to_phys(image_update_buffer);
+		(unsigned long) virt_to_phys(image_update_buffer);
 
 	if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
-		free_pages((unsigned long)image_update_buffer, ordernum);
+		free_pages((unsigned long) image_update_buffer, ordernum);
 		ordernum = -1;
 		image_update_buffer = dma_alloc_coherent(NULL, size,
-							 &dell_rbu_dmaaddr,
-							 GFP_KERNEL);
+			&dell_rbu_dmaaddr, GFP_KERNEL);
 		dma_alloc = 1;
 	}
 
@@ -405,13 +406,13 @@ static int img_update_realloc(unsigned long size)
 		rbu_data.image_update_buffer = image_update_buffer;
 		rbu_data.image_update_buffer_size = size;
 		rbu_data.bios_image_size =
-		    rbu_data.image_update_buffer_size;
+			rbu_data.image_update_buffer_size;
 		rbu_data.image_update_ordernum = ordernum;
 		rbu_data.dma_alloc = dma_alloc;
 		rc = 0;
 	} else {
 		pr_debug("Not enough memory for image update:"
-			 "size = %ld\n", size);
+			"size = %ld\n", size);
 		rc = -ENOMEM;
 	}
 
@@ -438,7 +439,7 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
 	if (pos > imagesize) {
 		retval = 0;
 		printk(KERN_WARNING "dell_rbu:read_packet_data: "
-		       "data underrun\n");
+			"data underrun\n");
 		goto read_rbu_data_exit;
 	}
 
@@ -468,11 +469,11 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
 
 	/* check to see if we have something to return */
 	if ((rbu_data.image_update_buffer == NULL) ||
-	    (rbu_data.bios_image_size == 0)) {
+		(rbu_data.bios_image_size == 0)) {
 		pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
-			 "bios_image_size %lu\n",
-			 rbu_data.image_update_buffer,
-			 rbu_data.bios_image_size);
+			"bios_image_size %lu\n",
+			rbu_data.image_update_buffer,
+			rbu_data.bios_image_size);
 		ret_count = -ENOMEM;
 		goto read_rbu_data_exit;
 	}
@@ -497,8 +498,8 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
 	return ret_count;
 }
 
-static ssize_t
-read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count)
+static ssize_t read_rbu_data(struct kobject *kobj, char *buffer,
+	loff_t pos, size_t count)
 {
 	ssize_t ret_count = 0;
 
@@ -515,62 +516,20 @@ read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count)
 	return ret_count;
 }
 
-static ssize_t
-read_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
-		size_t count)
-{
-	int size = 0;
-	if (!pos)
-		size = sprintf(buffer, "%s\n", image_type);
-	return size;
-}
-
-static ssize_t
-write_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
-		size_t count)
-{
-	int rc = count;
-	spin_lock(&rbu_data.lock);
-
-	if (strlen(buffer) < MAX_IMAGE_LENGTH)
-		sscanf(buffer, "%s", image_type);
-	else
-		printk(KERN_WARNING "dell_rbu: image_type is invalid"
-			"max chars = %d, \n incoming str--%s-- \n",
-			MAX_IMAGE_LENGTH, buffer);
-
-	/* we must free all previous allocations */
-	packet_empty_list();
-	img_update_free();
-
-	spin_unlock(&rbu_data.lock);
-	return rc;
-
-}
-
-static struct bin_attribute rbu_data_attr = {
-	.attr = {.name = "data",.owner = THIS_MODULE,.mode = 0444},
-	.read = read_rbu_data,
-};
-
-static struct bin_attribute rbu_image_type_attr = {
-	.attr = {.name = "image_type",.owner = THIS_MODULE,.mode = 0644},
-	.read = read_rbu_image_type,
-	.write = write_rbu_image_type,
-};
-
 static void callbackfn_rbu(const struct firmware *fw, void *context)
 {
 	int rc = 0;
 
-	if (!fw || !fw->size)
+	if (!fw || !fw->size) {
+		rbu_data.entry_created = 0;
 		return;
+	}
 
 	spin_lock(&rbu_data.lock);
 	if (!strcmp(image_type, "mono")) {
 		if (!img_update_realloc(fw->size))
 			memcpy(rbu_data.image_update_buffer,
 				fw->data, fw->size);
 	} else if (!strcmp(image_type, "packet")) {
 		if (!rbu_data.packetsize)
 			rbu_data.packetsize = fw->size;
@@ -584,14 +543,103 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
 	spin_unlock(&rbu_data.lock);
 
 	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
-		"dell_rbu", &rbu_device->dev,
-		&context, callbackfn_rbu);
+		"dell_rbu", &rbu_device->dev, &context, callbackfn_rbu);
 	if (rc)
 		printk(KERN_ERR
 			"dell_rbu:%s request_firmware_nowait failed"
 			" %d\n", __FUNCTION__, rc);
+	else
+		rbu_data.entry_created = 1;
+}
+
+static ssize_t read_rbu_image_type(struct kobject *kobj, char *buffer,
+	loff_t pos, size_t count)
+{
+	int size = 0;
+	if (!pos)
+		size = sprintf(buffer, "%s\n", image_type);
+	return size;
+}
+
+static ssize_t write_rbu_image_type(struct kobject *kobj, char *buffer,
+	loff_t pos, size_t count)
+{
+	int rc = count;
+	int req_firm_rc = 0;
+	int i;
+	spin_lock(&rbu_data.lock);
+	/*
+	 * Find the first newline or space
+	 */
+	for (i = 0; i < count; ++i)
+		if (buffer[i] == '\n' || buffer[i] == ' ') {
+			buffer[i] = '\0';
+			break;
+		}
+	if (i == count)
+		buffer[count] = '\0';
+
+	if (strstr(buffer, "mono"))
+		strcpy(image_type, "mono");
+	else if (strstr(buffer, "packet"))
+		strcpy(image_type, "packet");
+	else if (strstr(buffer, "init")) {
+		/*
+		 * If due to the user error the driver gets in a bad
+		 * state where even though it is loaded , the
+		 * /sys/class/firmware/dell_rbu entries are missing.
+		 * to cover this situation the user can recreate entries
+		 * by writing init to image_type.
+		 */
+		if (!rbu_data.entry_created) {
+			spin_unlock(&rbu_data.lock);
+			req_firm_rc = request_firmware_nowait(THIS_MODULE,
+				FW_ACTION_NOHOTPLUG, "dell_rbu",
+				&rbu_device->dev, &context,
+				callbackfn_rbu);
+			if (req_firm_rc) {
+				printk(KERN_ERR
+					"dell_rbu:%s request_firmware_nowait"
+					" failed %d\n", __FUNCTION__, rc);
+				rc = -EIO;
+			} else
+				rbu_data.entry_created = 1;
+
+			spin_lock(&rbu_data.lock);
+		}
+	} else {
+		printk(KERN_WARNING "dell_rbu: image_type is invalid\n");
+		spin_unlock(&rbu_data.lock);
+		return -EINVAL;
+	}
+
+	/* we must free all previous allocations */
+	packet_empty_list();
+	img_update_free();
+	spin_unlock(&rbu_data.lock);
+
+	return rc;
 }
 
+static struct bin_attribute rbu_data_attr = {
+	.attr = {
+		.name = "data",
+		.owner = THIS_MODULE,
+		.mode = 0444,
+	},
+	.read = read_rbu_data,
+};
+
+static struct bin_attribute rbu_image_type_attr = {
+	.attr = {
+		.name = "image_type",
+		.owner = THIS_MODULE,
+		.mode = 0644,
+	},
+	.read = read_rbu_image_type,
+	.write = write_rbu_image_type,
+};
+
 static int __init dcdrbu_init(void)
 {
 	int rc = 0;
@@ -599,11 +647,11 @@ static int __init dcdrbu_init(void)
 
 	init_packet_head();
 	rbu_device =
-	    platform_device_register_simple("dell_rbu", -1, NULL, 0);
+		platform_device_register_simple("dell_rbu", -1, NULL, 0);
 	if (!rbu_device) {
 		printk(KERN_ERR
-		       "dell_rbu:%s:platform_device_register_simple "
-		       "failed\n", __FUNCTION__);
+			"dell_rbu:%s:platform_device_register_simple "
+			"failed\n", __FUNCTION__);
 		return -EIO;
 	}
 
@@ -611,11 +659,12 @@ static int __init dcdrbu_init(void)
 	sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
 
 	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
-		"dell_rbu", &rbu_device->dev,
-		&context, callbackfn_rbu);
+		"dell_rbu", &rbu_device->dev, &context, callbackfn_rbu);
 	if (rc)
 		printk(KERN_ERR "dell_rbu:%s:request_firmware_nowait"
 			" failed %d\n", __FUNCTION__, rc);
+	else
+		rbu_data.entry_created = 1;
 
 	return rc;
 
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index eaebfc14c933..4c56411f3993 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -5,10 +5,10 @@
  * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
  *
  * The HardDisk Active Protection System (hdaps) is present in the IBM ThinkPad
- * T41, T42, T43, R51, and X40, at least. It provides a basic two-axis
- * accelerometer and other data, such as the device's temperature.
+ * T41, T42, T43, R50, R50p, R51, and X40, at least. It provides a basic
+ * two-axis accelerometer and other data, such as the device's temperature.
  *
- * Based on the document by Mark A. Smith available at
+ * This driver is based on the document by Mark A. Smith available at
  * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
  * and error.
  *
@@ -36,12 +36,7 @@
 #include <asm/io.h>
 
 #define HDAPS_LOW_PORT		0x1600	/* first port used by hdaps */
-#define HDAPS_NR_PORTS		0x30	/* 0x1600 - 0x162f */
-
-#define STATE_FRESH		0x50	/* accelerometer data is fresh */
-
-#define REFRESH_ASYNC		0x00	/* do asynchronous refresh */
-#define REFRESH_SYNC		0x01	/* do synchronous refresh */
+#define HDAPS_NR_PORTS		0x30	/* number of ports: 0x1600 - 0x162f */
 
 #define HDAPS_PORT_STATE	0x1611	/* device state */
 #define HDAPS_PORT_YPOS		0x1612	/* y-axis position */
@@ -53,7 +48,7 @@
 #define HDAPS_PORT_UNKNOWN	0x161c	/* what is this? */
 #define HDAPS_PORT_KMACT	0x161d	/* keyboard or mouse activity */
 
-#define HDAPS_READ_MASK		0xff	/* some reads have the low 8 bits set */
+#define STATE_FRESH		0x50	/* accelerometer data is fresh */
 
 #define KEYBD_MASK		0x20	/* set if keyboard activity */
 #define MOUSE_MASK		0x40	/* set if mouse activity */
@@ -63,12 +58,11 @@
 #define INIT_TIMEOUT_MSECS	4000	/* wait up to 4s for device init ... */
 #define INIT_WAIT_MSECS		200	/* ... in 200ms increments */
 
-static struct platform_device *pdev;
-static struct input_dev hdaps_idev;
+#define HDAPS_POLL_PERIOD	(HZ/20)	/* poll for input every 1/20s */
+#define HDAPS_INPUT_FUZZ	4	/* input event threshold */
+
 static struct timer_list hdaps_timer;
-static unsigned int hdaps_mousedev_threshold = 4;
-static unsigned long hdaps_poll_ms = 50;
-static unsigned int hdaps_mousedev;
+static struct platform_device *pdev;
 static unsigned int hdaps_invert;
 static u8 km_activity;
 static int rest_x;
@@ -81,14 +75,14 @@ static DECLARE_MUTEX(hdaps_sem);
  */
 static inline u8 __get_latch(u16 port)
 {
-	return inb(port) & HDAPS_READ_MASK;
+	return inb(port) & 0xff;
 }
 
 /*
- * __check_latch - Check a port latch for a given value. Callers must hold
- * hdaps_sem. Returns zero if the port contains the given value.
+ * __check_latch - Check a port latch for a given value. Returns zero if the
+ * port contains the given value. Callers must hold hdaps_sem.
  */
-static inline unsigned int __check_latch(u16 port, u8 val)
+static inline int __check_latch(u16 port, u8 val)
 {
 	if (__get_latch(port) == val)
 		return 0;
@@ -99,7 +93,7 @@ static inline unsigned int __check_latch(u16 port, u8 val)
  * __wait_latch - Wait up to 100us for a port latch to get a certain value,
  * returning zero if the value is obtained. Callers must hold hdaps_sem.
  */
-static unsigned int __wait_latch(u16 port, u8 val)
+static int __wait_latch(u16 port, u8 val)
 {
 	unsigned int i;
 
@@ -109,59 +103,42 @@ static unsigned int __wait_latch(u16 port, u8 val)
 		udelay(5);
 	}
 
-	return -EINVAL;
+	return -EIO;
 }
 
 /*
- * __device_refresh - Request a refresh from the accelerometer.
- *
- * If sync is REFRESH_SYNC, we perform a synchronous refresh and will wait.
- * Returns zero if successful and nonzero on error.
- *
- * If sync is REFRESH_ASYNC, we merely kick off a new refresh if the device is
- * not up-to-date. Always returns zero.
- *
- * Callers must hold hdaps_sem.
+ * __device_refresh - request a refresh from the accelerometer. Does not wait
+ * for refresh to complete. Callers must hold hdaps_sem.
  */
-static int __device_refresh(unsigned int sync)
+static void __device_refresh(void)
 {
-	u8 state;
-
-	udelay(100);
-
-	state = inb(0x1604);
-	if (state == STATE_FRESH)
-		return 0;
-
-	outb(0x11, 0x1610);
-	outb(0x01, 0x161f);
-	if (sync == REFRESH_ASYNC)
-		return 0;
+	udelay(200);
+	if (inb(0x1604) != STATE_FRESH) {
+		outb(0x11, 0x1610);
+		outb(0x01, 0x161f);
+	}
+}
 
+/*
+ * __device_refresh_sync - request a synchronous refresh from the
+ * accelerometer. We wait for the refresh to complete. Returns zero if
+ * successful and nonzero on error. Callers must hold hdaps_sem.
+ */
+static int __device_refresh_sync(void)
+{
+	__device_refresh();
 	return __wait_latch(0x1604, STATE_FRESH);
 }
 
 /*
- * __device_complete - Indicate to the accelerometer that we are done reading
+ * __device_complete - indicate to the accelerometer that we are done reading
  * data, and then initiate an async refresh. Callers must hold hdaps_sem.
  */
 static inline void __device_complete(void)
 {
 	inb(0x161f);
 	inb(0x1604);
-	__device_refresh(REFRESH_ASYNC);
-}
-
-static int __hdaps_readb_one(unsigned int port, u8 *val)
-{
-	/* do a sync refresh -- we need to be sure that we read fresh data */
-	if (__device_refresh(REFRESH_SYNC))
-		return -EIO;
-
-	*val = inb(port);
-	__device_complete();
-
-	return 0;
+	__device_refresh();
 }
 
 /*
@@ -174,17 +151,26 @@ static int hdaps_readb_one(unsigned int port, u8 *val)
174 int ret; 151 int ret;
175 152
176 down(&hdaps_sem); 153 down(&hdaps_sem);
177 ret = __hdaps_readb_one(port, val);
178 up(&hdaps_sem);
179 154
155 /* do a sync refresh -- we need to be sure that we read fresh data */
156 ret = __device_refresh_sync();
157 if (ret)
158 goto out;
159
160 *val = inb(port);
161 __device_complete();
162
163out:
164 up(&hdaps_sem);
180 return ret; 165 return ret;
181} 166}
182 167
168/* __hdaps_read_pair - internal lockless helper for hdaps_read_pair(). */
183static int __hdaps_read_pair(unsigned int port1, unsigned int port2, 169static int __hdaps_read_pair(unsigned int port1, unsigned int port2,
184 int *x, int *y) 170 int *x, int *y)
185{ 171{
186 /* do a sync refresh -- we need to be sure that we read fresh data */ 172 /* do a sync refresh -- we need to be sure that we read fresh data */
187 if (__device_refresh(REFRESH_SYNC)) 173 if (__device_refresh_sync())
188 return -EIO; 174 return -EIO;
189 175
190 *y = inw(port2); 176 *y = inw(port2);
@@ -217,11 +203,13 @@ static int hdaps_read_pair(unsigned int port1, unsigned int port2,
217 return ret; 203 return ret;
218} 204}
219 205
220/* initialize the accelerometer */ 206/*
207 * hdaps_device_init - initialize the accelerometer. Returns zero on success
208 * and negative error code on failure. Can sleep.
209 */
221static int hdaps_device_init(void) 210static int hdaps_device_init(void)
222{ 211{
223 unsigned int total_msecs = INIT_TIMEOUT_MSECS; 212 int total, ret = -ENXIO;
224 int ret = -ENXIO;
225 213
226 down(&hdaps_sem); 214 down(&hdaps_sem);
227 215
@@ -231,8 +219,10 @@ static int hdaps_device_init(void)
231 goto out; 219 goto out;
232 220
233 /* 221 /*
234 * The 0x03 value appears to only work on some thinkpads, such as the 222 * Most ThinkPads return 0x01.
235 * T42p. Others return 0x01. 223 *
224 * Others--namely the R50p, T41p, and T42p--return 0x03. These laptops
 225 * have "inverted" axes.
236 * 226 *
237 * The 0x02 value occurs when the chip has been previously initialized. 227 * The 0x02 value occurs when the chip has been previously initialized.
238 */ 228 */
@@ -267,24 +257,23 @@ static int hdaps_device_init(void)
267 outb(0x01, 0x161f); 257 outb(0x01, 0x161f);
268 if (__wait_latch(0x161f, 0x00)) 258 if (__wait_latch(0x161f, 0x00))
269 goto out; 259 goto out;
270 if (__device_refresh(REFRESH_SYNC)) 260 if (__device_refresh_sync())
271 goto out; 261 goto out;
272 if (__wait_latch(0x1611, 0x00)) 262 if (__wait_latch(0x1611, 0x00))
273 goto out; 263 goto out;
274 264
275 /* we have done our dance, now let's wait for the applause */ 265 /* we have done our dance, now let's wait for the applause */
276 while (total_msecs > 0) { 266 for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
277 u8 ignored; 267 int x, y;
278 268
279 /* a read of the device helps push it into action */ 269 /* a read of the device helps push it into action */
280 __hdaps_readb_one(HDAPS_PORT_UNKNOWN, &ignored); 270 __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
281 if (!__wait_latch(0x1611, 0x02)) { 271 if (!__wait_latch(0x1611, 0x02)) {
282 ret = 0; 272 ret = 0;
283 break; 273 break;
284 } 274 }
285 275
286 msleep(INIT_WAIT_MSECS); 276 msleep(INIT_WAIT_MSECS);
287 total_msecs -= INIT_WAIT_MSECS;
288 } 277 }
289 278
290out: 279out:
@@ -293,96 +282,6 @@ out:
293} 282}
294 283
295 284
296/* Input class stuff */
297
298/*
299 * hdaps_calibrate - Zero out our "resting" values. Callers must hold hdaps_sem.
300 */
301static void hdaps_calibrate(void)
302{
303 int x, y;
304
305 if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
306 return;
307
308 rest_x = x;
309 rest_y = y;
310}
311
312static void hdaps_mousedev_poll(unsigned long unused)
313{
314 int x, y;
315
316 /* Cannot sleep. Try nonblockingly. If we fail, try again later. */
317 if (down_trylock(&hdaps_sem)) {
318 mod_timer(&hdaps_timer,jiffies+msecs_to_jiffies(hdaps_poll_ms));
319 return;
320 }
321
322 if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
323 goto out;
324
325 x -= rest_x;
326 y -= rest_y;
327 if (abs(x) > hdaps_mousedev_threshold)
328 input_report_rel(&hdaps_idev, REL_X, x);
329 if (abs(y) > hdaps_mousedev_threshold)
330 input_report_rel(&hdaps_idev, REL_Y, y);
331 input_sync(&hdaps_idev);
332
333 mod_timer(&hdaps_timer, jiffies + msecs_to_jiffies(hdaps_poll_ms));
334
335out:
336 up(&hdaps_sem);
337}
338
339/*
340 * hdaps_mousedev_enable - enable the input class device. Can sleep.
341 */
342static void hdaps_mousedev_enable(void)
343{
344 down(&hdaps_sem);
345
346 /* calibrate the device before enabling */
347 hdaps_calibrate();
348
349 /* initialize the input class */
350 init_input_dev(&hdaps_idev);
351 hdaps_idev.dev = &pdev->dev;
352 hdaps_idev.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
353 hdaps_idev.relbit[0] = BIT(REL_X) | BIT(REL_Y);
354 hdaps_idev.keybit[LONG(BTN_LEFT)] = BIT(BTN_LEFT);
355 input_register_device(&hdaps_idev);
356
357 /* start up our timer */
358 init_timer(&hdaps_timer);
359 hdaps_timer.function = hdaps_mousedev_poll;
360 hdaps_timer.expires = jiffies + msecs_to_jiffies(hdaps_poll_ms);
361 add_timer(&hdaps_timer);
362
363 hdaps_mousedev = 1;
364
365 up(&hdaps_sem);
366
367 printk(KERN_INFO "hdaps: input device enabled.\n");
368}
369
370/*
371 * hdaps_mousedev_disable - disable the input class device. Caller must hold
372 * hdaps_sem.
373 */
374static void hdaps_mousedev_disable(void)
375{
376 down(&hdaps_sem);
377 if (hdaps_mousedev) {
378 hdaps_mousedev = 0;
379 del_timer_sync(&hdaps_timer);
380 input_unregister_device(&hdaps_idev);
381 }
382 up(&hdaps_sem);
383}
384
385
386/* Device model stuff */ 285/* Device model stuff */
387 286
388static int hdaps_probe(struct device *dev) 287static int hdaps_probe(struct device *dev)
@@ -412,6 +311,49 @@ static struct device_driver hdaps_driver = {
412 .resume = hdaps_resume 311 .resume = hdaps_resume
413}; 312};
414 313
314/* Input class stuff */
315
316static struct input_dev hdaps_idev = {
317 .name = "hdaps",
318 .evbit = { BIT(EV_ABS) },
319 .absbit = { BIT(ABS_X) | BIT(ABS_Y) },
320 .absmin = { [ABS_X] = -256, [ABS_Y] = -256 },
321 .absmax = { [ABS_X] = 256, [ABS_Y] = 256 },
322 .absfuzz = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
323 .absflat = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
324};
325
326/*
327 * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_sem.
328 */
329static void hdaps_calibrate(void)
330{
331 __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
332}
333
334static void hdaps_mousedev_poll(unsigned long unused)
335{
336 int x, y;
337
338 /* Cannot sleep. Try nonblockingly. If we fail, try again later. */
339 if (down_trylock(&hdaps_sem)) {
340 mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD);
341 return;
342 }
343
344 if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
345 goto out;
346
347 input_report_abs(&hdaps_idev, ABS_X, x - rest_x);
348 input_report_abs(&hdaps_idev, ABS_Y, y - rest_y);
349 input_sync(&hdaps_idev);
350
351 mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
352
353out:
354 up(&hdaps_sem);
355}
356
415 357
416/* Sysfs Files */ 358/* Sysfs Files */
417 359
@@ -517,69 +459,6 @@ static ssize_t hdaps_invert_store(struct device *dev,
517 return count; 459 return count;
518} 460}
519 461
520static ssize_t hdaps_mousedev_show(struct device *dev,
521 struct device_attribute *attr, char *buf)
522{
523 return sprintf(buf, "%d\n", hdaps_mousedev);
524}
525
526static ssize_t hdaps_mousedev_store(struct device *dev,
527 struct device_attribute *attr,
528 const char *buf, size_t count)
529{
530 int enable;
531
532 if (sscanf(buf, "%d", &enable) != 1)
533 return -EINVAL;
534
535 if (enable == 1)
536 hdaps_mousedev_enable();
537 else if (enable == 0)
538 hdaps_mousedev_disable();
539 else
540 return -EINVAL;
541
542 return count;
543}
544
545static ssize_t hdaps_poll_show(struct device *dev,
546 struct device_attribute *attr, char *buf)
547{
548 return sprintf(buf, "%lu\n", hdaps_poll_ms);
549}
550
551static ssize_t hdaps_poll_store(struct device *dev,
552 struct device_attribute *attr,
553 const char *buf, size_t count)
554{
555 unsigned int poll;
556
557 if (sscanf(buf, "%u", &poll) != 1 || poll == 0)
558 return -EINVAL;
559 hdaps_poll_ms = poll;
560
561 return count;
562}
563
564static ssize_t hdaps_threshold_show(struct device *dev,
565 struct device_attribute *attr, char *buf)
566{
567 return sprintf(buf, "%u\n", hdaps_mousedev_threshold);
568}
569
570static ssize_t hdaps_threshold_store(struct device *dev,
571 struct device_attribute *attr,
572 const char *buf, size_t count)
573{
574 unsigned int threshold;
575
576 if (sscanf(buf, "%u", &threshold) != 1 || threshold == 0)
577 return -EINVAL;
578 hdaps_mousedev_threshold = threshold;
579
580 return count;
581}
582
583static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL); 462static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL);
584static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL); 463static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL);
585static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL); 464static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL);
@@ -588,10 +467,6 @@ static DEVICE_ATTR(keyboard_activity, 0444, hdaps_keyboard_activity_show, NULL);
588static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL); 467static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL);
589static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show,hdaps_calibrate_store); 468static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show,hdaps_calibrate_store);
590static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store); 469static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store);
591static DEVICE_ATTR(mousedev, 0644, hdaps_mousedev_show, hdaps_mousedev_store);
592static DEVICE_ATTR(mousedev_poll_ms, 0644, hdaps_poll_show, hdaps_poll_store);
593static DEVICE_ATTR(mousedev_threshold, 0644, hdaps_threshold_show,
594 hdaps_threshold_store);
595 470
596static struct attribute *hdaps_attributes[] = { 471static struct attribute *hdaps_attributes[] = {
597 &dev_attr_position.attr, 472 &dev_attr_position.attr,
@@ -601,9 +476,6 @@ static struct attribute *hdaps_attributes[] = {
601 &dev_attr_keyboard_activity.attr, 476 &dev_attr_keyboard_activity.attr,
602 &dev_attr_mouse_activity.attr, 477 &dev_attr_mouse_activity.attr,
603 &dev_attr_calibrate.attr, 478 &dev_attr_calibrate.attr,
604 &dev_attr_mousedev.attr,
605 &dev_attr_mousedev_threshold.attr,
606 &dev_attr_mousedev_poll_ms.attr,
607 &dev_attr_invert.attr, 479 &dev_attr_invert.attr,
608 NULL, 480 NULL,
609}; 481};
@@ -619,7 +491,7 @@ static struct attribute_group hdaps_attribute_group = {
619 * XXX: We should be able to return nonzero and halt the detection process. 491 * XXX: We should be able to return nonzero and halt the detection process.
620 * But there is a bug in dmi_check_system() where a nonzero return from the 492 * But there is a bug in dmi_check_system() where a nonzero return from the
621 * first match will result in a return of failure from dmi_check_system(). 493 * first match will result in a return of failure from dmi_check_system().
 622 * I fixed this; the patch is in 2.6-mm. Once in Linus's tree we can make 494 * I fixed this; the patch is in 2.6-git. Once in a released tree, we can make
623 * hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1. 495 * hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1.
624 */ 496 */
625static int hdaps_dmi_match(struct dmi_system_id *id) 497static int hdaps_dmi_match(struct dmi_system_id *id)
@@ -668,6 +540,7 @@ static int __init hdaps_init(void)
668 HDAPS_DMI_MATCH_NORMAL("ThinkPad T42"), 540 HDAPS_DMI_MATCH_NORMAL("ThinkPad T42"),
669 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"), 541 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"),
670 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"), 542 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"),
543 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"),
671 { .ident = NULL } 544 { .ident = NULL }
672 }; 545 };
673 546
@@ -696,8 +569,18 @@ static int __init hdaps_init(void)
696 if (ret) 569 if (ret)
697 goto out_device; 570 goto out_device;
698 571
699 if (hdaps_mousedev) 572 /* initial calibrate for the input device */
700 hdaps_mousedev_enable(); 573 hdaps_calibrate();
574
575 /* initialize the input class */
576 hdaps_idev.dev = &pdev->dev;
577 input_register_device(&hdaps_idev);
578
579 /* start up our timer for the input device */
580 init_timer(&hdaps_timer);
581 hdaps_timer.function = hdaps_mousedev_poll;
582 hdaps_timer.expires = jiffies + HDAPS_POLL_PERIOD;
583 add_timer(&hdaps_timer);
701 584
702 printk(KERN_INFO "hdaps: driver successfully loaded.\n"); 585 printk(KERN_INFO "hdaps: driver successfully loaded.\n");
703 return 0; 586 return 0;
@@ -715,8 +598,8 @@ out:
715 598
716static void __exit hdaps_exit(void) 599static void __exit hdaps_exit(void)
717{ 600{
718 hdaps_mousedev_disable(); 601 del_timer_sync(&hdaps_timer);
719 602 input_unregister_device(&hdaps_idev);
720 sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group); 603 sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
721 platform_device_unregister(pdev); 604 platform_device_unregister(pdev);
722 driver_unregister(&hdaps_driver); 605 driver_unregister(&hdaps_driver);
@@ -728,9 +611,6 @@ static void __exit hdaps_exit(void)
728module_init(hdaps_init); 611module_init(hdaps_init);
729module_exit(hdaps_exit); 612module_exit(hdaps_exit);
730 613
731module_param_named(mousedev, hdaps_mousedev, bool, 0);
732MODULE_PARM_DESC(mousedev, "enable the input class device");
733
734module_param_named(invert, hdaps_invert, bool, 0); 614module_param_named(invert, hdaps_invert, bool, 0);
735MODULE_PARM_DESC(invert, "invert data along each axis"); 615MODULE_PARM_DESC(invert, "invert data along each axis");
736 616
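
The hdaps changes above drop the optional relative-motion mousedev (and its threshold/poll sysfs knobs) in favour of an always-registered absolute-axis input device polled at a fixed HDAPS_POLL_PERIOD. A minimal userspace sketch of consuming those events over evdev follows; the event node path is an assumption, the real one can be found via /proc/bus/input/devices.

/*
 * Sketch only: read the ABS_X/ABS_Y events generated by hdaps_mousedev_poll().
 * /dev/input/event5 is a placeholder; the actual node varies per system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event5", O_RDONLY);

	if (fd < 0)
		return 1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_ABS && ev.code == ABS_X)
			printf("x = %d\n", ev.value);
		else if (ev.type == EV_ABS && ev.code == ABS_Y)
			printf("y = %d\n", ev.value);
	}
	close(fd);
	return 0;
}
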
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index fdf53ce04248..44b595d90a4a 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -914,19 +914,23 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num
914 return ret; 914 return ret;
915} 915}
916 916
917static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
918{
919 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
920}
921
917static struct i2c_algorithm i2c_pxa_algorithm = { 922static struct i2c_algorithm i2c_pxa_algorithm = {
918 .name = "PXA-I2C-Algorithm",
919 .id = I2C_ALGO_PXA,
920 .master_xfer = i2c_pxa_xfer, 923 .master_xfer = i2c_pxa_xfer,
924 .functionality = i2c_pxa_functionality,
921}; 925};
922 926
923static struct pxa_i2c i2c_pxa = { 927static struct pxa_i2c i2c_pxa = {
924 .lock = SPIN_LOCK_UNLOCKED, 928 .lock = SPIN_LOCK_UNLOCKED,
925 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_pxa.wait), 929 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_pxa.wait),
926 .adap = { 930 .adap = {
927 .name = "pxa2xx-i2c", 931 .owner = THIS_MODULE,
928 .id = I2C_ALGO_PXA,
929 .algo = &i2c_pxa_algorithm, 932 .algo = &i2c_pxa_algorithm,
933 .name = "pxa2xx-i2c",
930 .retries = 5, 934 .retries = 5,
931 }, 935 },
932}; 936};
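
With i2c_pxa_functionality() now advertising I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL, client drivers can test the adapter before issuing transfers. A minimal sketch with a hypothetical client callback (example_attach() is not part of this patch):

/*
 * Sketch only: how the functionality mask exported above is typically
 * consumed by a client driver.
 */
static int example_attach(struct i2c_adapter *adap)
{
	if (!i2c_check_functionality(adap, I2C_FUNC_I2C))
		return -ENODEV;			/* no raw i2c_transfer() */

	if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;			/* part of I2C_FUNC_SMBUS_EMUL */

	/* safe to use i2c_transfer() and i2c_smbus_read_byte_data() */
	return 0;
}
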
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index d04f62ab5de1..ace8edad6e96 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -500,6 +500,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
500 } 500 }
501 501
502 rq.special = args; 502 rq.special = args;
503 args->rq = &rq;
503 return ide_do_drive_cmd(drive, &rq, ide_wait); 504 return ide_do_drive_cmd(drive, &rq, ide_wait);
504} 505}
505 506
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 2bd8b1cc57c4..e23836d0e21b 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -412,8 +412,8 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
412 412
413 hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class); 413 hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
414 data_size = sizeof(struct ib_rmpp_mad) - hdr_size; 414 data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
415 pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 415 pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
416 if (pad > data_size || pad < 0) 416 if (pad > IB_MGMT_RMPP_DATA || pad < 0)
417 pad = 0; 417 pad = 0;
418 418
419 return hdr_size + rmpp_recv->seg_num * data_size - pad; 419 return hdr_size + rmpp_recv->seg_num * data_size - pad;
@@ -583,6 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
583{ 583{
584 struct ib_rmpp_mad *rmpp_mad; 584 struct ib_rmpp_mad *rmpp_mad;
585 int timeout; 585 int timeout;
586 u32 paylen;
586 587
587 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; 588 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
588 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 589 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
@@ -590,11 +591,9 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
590 591
591 if (mad_send_wr->seg_num == 1) { 592 if (mad_send_wr->seg_num == 1) {
592 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; 593 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
593 rmpp_mad->rmpp_hdr.paylen_newwin = 594 paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
594 cpu_to_be32(mad_send_wr->total_seg * 595 mad_send_wr->pad;
595 (sizeof(struct ib_rmpp_mad) - 596 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
596 offsetof(struct ib_rmpp_mad, data)) -
597 mad_send_wr->pad);
598 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad); 597 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
599 } else { 598 } else {
600 mad_send_wr->send_wr.num_sge = 2; 599 mad_send_wr->send_wr.num_sge = 2;
@@ -608,10 +607,8 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
608 607
609 if (mad_send_wr->seg_num == mad_send_wr->total_seg) { 608 if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
610 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; 609 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
611 rmpp_mad->rmpp_hdr.paylen_newwin = 610 paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
612 cpu_to_be32(sizeof(struct ib_rmpp_mad) - 611 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
613 offsetof(struct ib_rmpp_mad, data) -
614 mad_send_wr->pad);
615 } 612 }
616 613
617 /* 2 seconds for an ACK until we can find the packet lifetime */ 614 /* 2 seconds for an ACK until we can find the packet lifetime */
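
The rewrite above folds the repeated sizeof/offsetof arithmetic into the IB_MGMT_RMPP_DATA constant. The PayloadLength rules it implements can be condensed into one illustrative helper; this is a sketch, not a function that exists in the driver:

/*
 * Illustrative only: how paylen_newwin is filled for outgoing data segments.
 * The first segment advertises the whole payload, the last segment only its
 * own payload; middle data segments are not touched here.
 */
static u32 rmpp_data_paylen(int seg_num, int total_seg, int pad)
{
	if (seg_num == 1)
		return total_seg * IB_MGMT_RMPP_DATA - pad;
	if (seg_num == total_seg)
		return IB_MGMT_RMPP_DATA - pad;
	return 0;
}
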
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7c2f03057ddb..a64d6b4dcc16 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -334,10 +334,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
334 ret = -EINVAL; 334 ret = -EINVAL;
335 goto err_ah; 335 goto err_ah;
336 } 336 }
337 /* Validate that management class can support RMPP */ 337
338 /* Validate that the management class can support RMPP */
338 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { 339 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
339 hdr_len = offsetof(struct ib_sa_mad, data); 340 hdr_len = offsetof(struct ib_sa_mad, data);
340 data_len = length; 341 data_len = length - hdr_len;
341 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 342 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
342 (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) { 343 (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
343 hdr_len = offsetof(struct ib_vendor_mad, data); 344 hdr_len = offsetof(struct ib_vendor_mad, data);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 18f0981eb0c1..78152a8ad17d 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
476 int i; 476 int i;
477 u8 status; 477 u8 status;
478 478
479 /* Make sure EQ size is aligned to a power of 2 size. */ 479 eq->dev = dev;
480 for (i = 1; i < nent; i <<= 1) 480 eq->nent = roundup_pow_of_two(max(nent, 2));
481 ; /* nothing */
482 nent = i;
483
484 eq->dev = dev;
485 481
486 eq->page_list = kmalloc(npages * sizeof *eq->page_list, 482 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
487 GFP_KERNEL); 483 GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
512 memset(eq->page_list[i].buf, 0, PAGE_SIZE); 508 memset(eq->page_list[i].buf, 0, PAGE_SIZE);
513 } 509 }
514 510
515 for (i = 0; i < nent; ++i) 511 for (i = 0; i < eq->nent; ++i)
516 set_eqe_hw(get_eqe(eq, i)); 512 set_eqe_hw(get_eqe(eq, i));
517 513
518 eq->eqn = mthca_alloc(&dev->eq_table.alloc); 514 eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
528 if (err) 524 if (err)
529 goto err_out_free_eq; 525 goto err_out_free_eq;
530 526
531 eq->nent = nent;
532
533 memset(eq_context, 0, sizeof *eq_context); 527 memset(eq_context, 0, sizeof *eq_context);
534 eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK | 528 eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
535 MTHCA_EQ_OWNER_HW | 529 MTHCA_EQ_OWNER_HW |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
538 if (mthca_is_memfree(dev)) 532 if (mthca_is_memfree(dev))
539 eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL); 533 eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
540 534
541 eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); 535 eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
542 if (mthca_is_memfree(dev)) { 536 if (mthca_is_memfree(dev)) {
543 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num); 537 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
544 } else { 538 } else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
569 dev->eq_table.arm_mask |= eq->eqn_mask; 563 dev->eq_table.arm_mask |= eq->eqn_mask;
570 564
571 mthca_dbg(dev, "Allocated EQ %d with %d entries\n", 565 mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
572 eq->eqn, nent); 566 eq->eqn, eq->nent);
573 567
574 return err; 568 return err;
575 569
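
The open-coded rounding loop is replaced by roundup_pow_of_two(), with max(nent, 2) enforcing a floor of two EQ entries. For reference, this is what the removed loop computed (sketch only):

/* Sketch only: equivalent of the loop removed above. */
static unsigned int old_eq_round_up(unsigned int nent)
{
	unsigned int i;

	for (i = 1; i < nent; i <<= 1)
		; /* nothing */
	return i;	/* 0 and 1 both yield 1, 5 yields 8, 8 stays 8 */
}

/*
 * The replacement, roundup_pow_of_two(max(nent, 2)), differs only in that
 * nent values of 0 or 1 now produce 2 entries instead of 1.
 */
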
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bcef06bf15e7..5fa00669f9b8 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
227 wq->last_comp = wq->max - 1; 227 wq->last_comp = wq->max - 1;
228 wq->head = 0; 228 wq->head = 0;
229 wq->tail = 0; 229 wq->tail = 0;
230 wq->last = NULL;
231} 230}
232 231
233void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 232void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
687 } 686 }
688 687
689 if (attr_mask & IB_QP_TIMEOUT) { 688 if (attr_mask & IB_QP_TIMEOUT) {
690 qp_context->pri_path.ackto = attr->timeout; 689 qp_context->pri_path.ackto = attr->timeout << 3;
691 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); 690 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
692 } 691 }
693 692
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1103 } 1102 }
1104 } 1103 }
1105 1104
1105 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
1106 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
1107
1106 return 0; 1108 return 0;
1107} 1109}
1108 1110
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1583 goto out; 1585 goto out;
1584 } 1586 }
1585 1587
1586 if (prev_wqe) { 1588 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1587 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1589 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1588 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1590 qp->send_wqe_offset) |
1589 qp->send_wqe_offset) | 1591 mthca_opcode[wr->opcode]);
1590 mthca_opcode[wr->opcode]); 1592 wmb();
1591 wmb(); 1593 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1592 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1594 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
1593 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
1594 }
1595 1595
1596 if (!size0) { 1596 if (!size0) {
1597 size0 = size; 1597 size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1688 1688
1689 qp->wrid[ind] = wr->wr_id; 1689 qp->wrid[ind] = wr->wr_id;
1690 1690
1691 if (likely(prev_wqe)) { 1691 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1692 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1692 cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
1693 cpu_to_be32((ind << qp->rq.wqe_shift) | 1); 1693 wmb();
1694 wmb(); 1694 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1695 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1695 cpu_to_be32(MTHCA_NEXT_DBD | size);
1696 cpu_to_be32(MTHCA_NEXT_DBD | size);
1697 }
1698 1696
1699 if (!size0) 1697 if (!size0)
1700 size0 = size; 1698 size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1905 goto out; 1903 goto out;
1906 } 1904 }
1907 1905
1908 if (likely(prev_wqe)) { 1906 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1909 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1907 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1910 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1908 qp->send_wqe_offset) |
1911 qp->send_wqe_offset) | 1909 mthca_opcode[wr->opcode]);
1912 mthca_opcode[wr->opcode]); 1910 wmb();
1913 wmb(); 1911 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1914 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1912 cpu_to_be32(MTHCA_NEXT_DBD | size);
1915 cpu_to_be32(MTHCA_NEXT_DBD | size);
1916 }
1917 1913
1918 if (!size0) { 1914 if (!size0) {
1919 size0 = size; 1915 size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
2127 for (i = 0; i < 2; ++i) 2123 for (i = 0; i < 2; ++i)
2128 mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2124 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
2129 2125
2126 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2130 mthca_alloc_cleanup(&dev->qp_table.alloc); 2127 mthca_alloc_cleanup(&dev->qp_table.alloc);
2131} 2128}
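
Two behavioural fixes sit in this hunk: sq.last and rq.last are initialized to the final WQE so prev_wqe is always valid (hence the dropped NULL checks around the chaining writes), and the IB_QP_TIMEOUT value is shifted left by three before landing in pri_path.ackto, presumably because the five-bit timeout occupies the top bits of that byte. The timeout value itself uses the usual InfiniBand encoding; a worked sketch:

/*
 * Sketch only: the IBA local ACK timeout is 4.096 usec * 2^timeout,
 * i.e. 4096 ns shifted left by the 5-bit timeout value.
 */
static unsigned long long ack_timeout_ns(u8 timeout)
{
	return 4096ULL << (timeout & 0x1f);
}

/* ack_timeout_ns(14) == 67108864 ns, roughly 67 ms */
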
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef12..18998d48c53e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
173 } 173 }
174 174
175 srq->last = get_wqe(srq, srq->max - 1);
176
175 return 0; 177 return 0;
176} 178}
177 179
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
189 191
190 srq->max = attr->max_wr; 192 srq->max = attr->max_wr;
191 srq->max_gs = attr->max_sge; 193 srq->max_gs = attr->max_sge;
192 srq->last = NULL;
193 srq->counter = 0; 194 srq->counter = 0;
194 195
195 if (mthca_is_memfree(dev)) 196 if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
409 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 410 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
410 err = -ENOMEM; 411 err = -ENOMEM;
411 *bad_wr = wr; 412 *bad_wr = wr;
412 return nreq; 413 break;
413 } 414 }
414 415
415 wqe = get_wqe(srq, ind); 416 wqe = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
427 err = -EINVAL; 428 err = -EINVAL;
428 *bad_wr = wr; 429 *bad_wr = wr;
429 srq->last = prev_wqe; 430 srq->last = prev_wqe;
430 return nreq; 431 break;
431 } 432 }
432 433
433 for (i = 0; i < wr->num_sge; ++i) { 434 for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
446 ((struct mthca_data_seg *) wqe)->addr = 0; 447 ((struct mthca_data_seg *) wqe)->addr = 0;
447 } 448 }
448 449
449 if (likely(prev_wqe)) { 450 ((struct mthca_next_seg *) prev_wqe)->nda_op =
450 ((struct mthca_next_seg *) prev_wqe)->nda_op = 451 cpu_to_be32((ind << srq->wqe_shift) | 1);
451 cpu_to_be32((ind << srq->wqe_shift) | 1); 452 wmb();
452 wmb(); 453 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
453 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 454 cpu_to_be32(MTHCA_NEXT_DBD);
454 cpu_to_be32(MTHCA_NEXT_DBD);
455 }
456 455
457 srq->wrid[ind] = wr->wr_id; 456 srq->wrid[ind] = wr->wr_id;
458 srq->first_free = next_ind; 457 srq->first_free = next_ind;
459 } 458 }
460 459
461 return nreq;
462
463 if (likely(nreq)) { 460 if (likely(nreq)) {
464 __be32 doorbell[2]; 461 __be32 doorbell[2];
465 462
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
503 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 500 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
504 err = -ENOMEM; 501 err = -ENOMEM;
505 *bad_wr = wr; 502 *bad_wr = wr;
506 return nreq; 503 break;
507 } 504 }
508 505
509 wqe = get_wqe(srq, ind); 506 wqe = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
519 if (unlikely(wr->num_sge > srq->max_gs)) { 516 if (unlikely(wr->num_sge > srq->max_gs)) {
520 err = -EINVAL; 517 err = -EINVAL;
521 *bad_wr = wr; 518 *bad_wr = wr;
522 return nreq; 519 break;
523 } 520 }
524 521
525 for (i = 0; i < wr->num_sge; ++i) { 522 for (i = 0; i < wr->num_sge; ++i) {
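
Returning early on a full SRQ used to skip the doorbell write at the bottom of the function, so work requests already copied into the queue were never made visible to the hardware. Breaking out of the loop keeps that path reachable. The shape of the fix, as a sketch with hypothetical helpers (srq_full, copy_wqe, ring_doorbell), not the driver's exact code:

/* Sketch only: post-receive error handling after this change. */
static int example_post_srq_recv(void *srq, struct ib_recv_wr *wr,
				 struct ib_recv_wr **bad_wr)
{
	int err = 0, nreq = 0;

	for (; wr; wr = wr->next) {
		if (srq_full(srq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;			/* was: return nreq; */
		}
		copy_wqe(srq, wr);
		++nreq;
	}

	if (nreq)
		ring_doorbell(srq, nreq);	/* still runs after an error */

	return err;
}
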
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bea960b8191f..4ea1c1ca85bc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -257,7 +257,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
257 257
258void ipoib_mcast_restart_task(void *dev_ptr); 258void ipoib_mcast_restart_task(void *dev_ptr);
259int ipoib_mcast_start_thread(struct net_device *dev); 259int ipoib_mcast_start_thread(struct net_device *dev);
260int ipoib_mcast_stop_thread(struct net_device *dev); 260int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
261 261
262void ipoib_mcast_dev_down(struct net_device *dev); 262void ipoib_mcast_dev_down(struct net_device *dev);
263void ipoib_mcast_dev_flush(struct net_device *dev); 263void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ef0e3894863c..f7440096b5ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -432,7 +432,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
432 flush_workqueue(ipoib_workqueue); 432 flush_workqueue(ipoib_workqueue);
433 } 433 }
434 434
435 ipoib_mcast_stop_thread(dev); 435 ipoib_mcast_stop_thread(dev, 1);
436 436
437 /* 437 /*
438 * Flush the multicast groups first so we stop any multicast joins. The 438 * Flush the multicast groups first so we stop any multicast joins. The
@@ -599,7 +599,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
599 599
600 ipoib_dbg(priv, "cleaning up ib_dev\n"); 600 ipoib_dbg(priv, "cleaning up ib_dev\n");
601 601
602 ipoib_mcast_stop_thread(dev); 602 ipoib_mcast_stop_thread(dev, 1);
603 603
604 /* Delete the broadcast address and the local address */ 604 /* Delete the broadcast address and the local address */
605 ipoib_mcast_dev_down(dev); 605 ipoib_mcast_dev_down(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 49d120d2b92c..704f48e0b6a7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1005,6 +1005,7 @@ debug_failed:
1005 1005
1006register_failed: 1006register_failed:
1007 ib_unregister_event_handler(&priv->event_handler); 1007 ib_unregister_event_handler(&priv->event_handler);
1008 flush_scheduled_work();
1008 1009
1009event_failed: 1010event_failed:
1010 ipoib_dev_cleanup(priv->dev); 1011 ipoib_dev_cleanup(priv->dev);
@@ -1057,6 +1058,7 @@ static void ipoib_remove_one(struct ib_device *device)
1057 1058
1058 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1059 list_for_each_entry_safe(priv, tmp, dev_list, list) {
1059 ib_unregister_event_handler(&priv->event_handler); 1060 ib_unregister_event_handler(&priv->event_handler);
1061 flush_scheduled_work();
1060 1062
1061 unregister_netdev(priv->dev); 1063 unregister_netdev(priv->dev);
1062 ipoib_dev_cleanup(priv->dev); 1064 ipoib_dev_cleanup(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aca7aea18a69..36ce29836bf2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -145,7 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
145 145
146 mcast->dev = dev; 146 mcast->dev = dev;
147 mcast->created = jiffies; 147 mcast->created = jiffies;
148 mcast->backoff = HZ; 148 mcast->backoff = 1;
149 mcast->logcount = 0; 149 mcast->logcount = 0;
150 150
151 INIT_LIST_HEAD(&mcast->list); 151 INIT_LIST_HEAD(&mcast->list);
@@ -396,7 +396,7 @@ static void ipoib_mcast_join_complete(int status,
396 IPOIB_GID_ARG(mcast->mcmember.mgid), status); 396 IPOIB_GID_ARG(mcast->mcmember.mgid), status);
397 397
398 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) { 398 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
399 mcast->backoff = HZ; 399 mcast->backoff = 1;
400 down(&mcast_mutex); 400 down(&mcast_mutex);
401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
402 queue_work(ipoib_workqueue, &priv->mcast_task); 402 queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -496,7 +496,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
496 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 496 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
497 queue_delayed_work(ipoib_workqueue, 497 queue_delayed_work(ipoib_workqueue,
498 &priv->mcast_task, 498 &priv->mcast_task,
499 mcast->backoff); 499 mcast->backoff * HZ);
500 up(&mcast_mutex); 500 up(&mcast_mutex);
501 } else 501 } else
502 mcast->query_id = ret; 502 mcast->query_id = ret;
@@ -598,7 +598,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
598 return 0; 598 return 0;
599} 599}
600 600
601int ipoib_mcast_stop_thread(struct net_device *dev) 601int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
602{ 602{
603 struct ipoib_dev_priv *priv = netdev_priv(dev); 603 struct ipoib_dev_priv *priv = netdev_priv(dev);
604 struct ipoib_mcast *mcast; 604 struct ipoib_mcast *mcast;
@@ -610,7 +610,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
610 cancel_delayed_work(&priv->mcast_task); 610 cancel_delayed_work(&priv->mcast_task);
611 up(&mcast_mutex); 611 up(&mcast_mutex);
612 612
613 flush_workqueue(ipoib_workqueue); 613 if (flush)
614 flush_workqueue(ipoib_workqueue);
614 615
615 if (priv->broadcast && priv->broadcast->query) { 616 if (priv->broadcast && priv->broadcast->query) {
616 ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query); 617 ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
@@ -832,7 +833,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
832 833
833 ipoib_dbg_mcast(priv, "restarting multicast task\n"); 834 ipoib_dbg_mcast(priv, "restarting multicast task\n");
834 835
835 ipoib_mcast_stop_thread(dev); 836 ipoib_mcast_stop_thread(dev, 0);
836 837
837 spin_lock_irqsave(&priv->lock, flags); 838 spin_lock_irqsave(&priv->lock, flags);
838 839
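
mcast->backoff is now stored in seconds rather than jiffies, and the conversion happens only where the delayed work is queued. A hedged sketch of the retry pattern; the doubling and the 16 second cap are assumptions about code outside this hunk, and example_schedule_join_retry() is hypothetical:

/* Sketch only: exponential join backoff kept in seconds. */
static void example_schedule_join_retry(struct ipoib_dev_priv *priv,
					struct ipoib_mcast *mcast)
{
	queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
			   mcast->backoff * HZ);	/* seconds to jiffies */

	mcast->backoff *= 2;		/* assumed: doubled on each failure */
	if (mcast->backoff > 16)	/* assumed cap, in seconds */
		mcast->backoff = 16;
}
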
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8337b0f26cc4..4866fc32d8d9 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -61,6 +61,7 @@ static const PCI_ENTRY id_list[] =
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"}, 61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"}, 62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"}, 63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
64 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
64 {0, 0, NULL, NULL}, 65 {0, 0, NULL, NULL},
65}; 66};
66 67
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index c6b5bf7d2aca..dc334aab433e 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -611,7 +611,7 @@ static int sedlbauer_event(event_t event, int priority,
611} /* sedlbauer_event */ 611} /* sedlbauer_event */
612 612
613static struct pcmcia_device_id sedlbauer_ids[] = { 613static struct pcmcia_device_id sedlbauer_ids[] = {
614 PCMCIA_DEVICE_PROD_ID1234("SEDLBAUER", "speed star II", "V 3.1", "(c) 93 - 98 cb ", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a, 0x50d4149c), 614 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a),
615 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90), 615 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90),
616 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce), 616 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce),
617 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK", 0x81fb79f5, 0xe4e9bc12, 0x8db143fe), 617 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK", 0x81fb79f5, 0xe4e9bc12, 0x8db143fe),
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 0fda5c89429b..9ffaae7c657a 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -466,10 +466,10 @@ void st5481_stop(struct st5481_adapter *adapter);
466#define __debug_variable st5481_debug 466#define __debug_variable st5481_debug
467#include "hisax_debug.h" 467#include "hisax_debug.h"
468 468
469#ifdef CONFIG_HISAX_DEBUG
470
471extern int st5481_debug; 469extern int st5481_debug;
472 470
471#ifdef CONFIG_HISAX_DEBUG
472
473#define DBG_ISO_PACKET(level,urb) \ 473#define DBG_ISO_PACKET(level,urb) \
474 if (level & __debug_variable) dump_iso_packet(__FUNCTION__,urb) 474 if (level & __debug_variable) dump_iso_packet(__FUNCTION__,urb)
475 475
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 2fcd093921d8..0a2536d62402 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -172,14 +172,18 @@ static void usb_b_out_complete(struct urb *urb, struct pt_regs *regs)
172 test_and_clear_bit(buf_nr, &b_out->busy); 172 test_and_clear_bit(buf_nr, &b_out->busy);
173 173
174 if (unlikely(urb->status < 0)) { 174 if (unlikely(urb->status < 0)) {
175 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 175 switch (urb->status) {
176 WARN("urb status %d",urb->status); 176 case -ENOENT:
177 if (b_out->busy == 0) { 177 case -ESHUTDOWN:
178 st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL); 178 case -ECONNRESET:
179 } 179 DBG(4,"urb killed status %d", urb->status);
180 } else { 180 return; // Give up
181 DBG(1,"urb killed"); 181 default:
182 return; // Give up 182 WARN("urb status %d",urb->status);
183 if (b_out->busy == 0) {
184 st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL);
185 }
186 break;
183 } 187 }
184 } 188 }
185 189
@@ -205,7 +209,9 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
205 bcs->mode = mode; 209 bcs->mode = mode;
206 210
207 // Cancel all USB transfers on this B channel 211 // Cancel all USB transfers on this B channel
212 b_out->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
208 usb_unlink_urb(b_out->urb[0]); 213 usb_unlink_urb(b_out->urb[0]);
214 b_out->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
209 usb_unlink_urb(b_out->urb[1]); 215 usb_unlink_urb(b_out->urb[1]);
210 b_out->busy = 0; 216 b_out->busy = 0;
211 217
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index 071b1d31999f..941f7022ada1 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -382,16 +382,20 @@ static void usb_d_out_complete(struct urb *urb, struct pt_regs *regs)
382 test_and_clear_bit(buf_nr, &d_out->busy); 382 test_and_clear_bit(buf_nr, &d_out->busy);
383 383
384 if (unlikely(urb->status < 0)) { 384 if (unlikely(urb->status < 0)) {
385 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 385 switch (urb->status) {
386 WARN("urb status %d",urb->status); 386 case -ENOENT:
387 if (d_out->busy == 0) { 387 case -ESHUTDOWN:
388 st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter); 388 case -ECONNRESET:
389 } 389 DBG(1,"urb killed status %d", urb->status);
390 return; 390 break;
391 } else { 391 default:
392 DBG(1,"urb killed"); 392 WARN("urb status %d",urb->status);
393 return; // Give up 393 if (d_out->busy == 0) {
394 st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
395 }
396 break;
394 } 397 }
398 return; // Give up
395 } 399 }
396 400
397 FsmEvent(&adapter->d_out.fsm, EV_DOUT_COMPLETE, (void *) buf_nr); 401 FsmEvent(&adapter->d_out.fsm, EV_DOUT_COMPLETE, (void *) buf_nr);
@@ -709,14 +713,14 @@ int st5481_setup_d(struct st5481_adapter *adapter)
709 713
710 adapter->l1m.fsm = &l1fsm; 714 adapter->l1m.fsm = &l1fsm;
711 adapter->l1m.state = ST_L1_F3; 715 adapter->l1m.state = ST_L1_F3;
712 adapter->l1m.debug = 1; 716 adapter->l1m.debug = st5481_debug & 0x100;
713 adapter->l1m.userdata = adapter; 717 adapter->l1m.userdata = adapter;
714 adapter->l1m.printdebug = l1m_debug; 718 adapter->l1m.printdebug = l1m_debug;
715 FsmInitTimer(&adapter->l1m, &adapter->timer); 719 FsmInitTimer(&adapter->l1m, &adapter->timer);
716 720
717 adapter->d_out.fsm.fsm = &dout_fsm; 721 adapter->d_out.fsm.fsm = &dout_fsm;
718 adapter->d_out.fsm.state = ST_DOUT_NONE; 722 adapter->d_out.fsm.state = ST_DOUT_NONE;
719 adapter->d_out.fsm.debug = 1; 723 adapter->d_out.fsm.debug = st5481_debug & 0x100;
720 adapter->d_out.fsm.userdata = adapter; 724 adapter->d_out.fsm.userdata = adapter;
721 adapter->d_out.fsm.printdebug = dout_debug; 725 adapter->d_out.fsm.printdebug = dout_debug;
722 726
diff --git a/drivers/isdn/hisax/st5481_init.c b/drivers/isdn/hisax/st5481_init.c
index 7aa810d5d333..2cf5d1a6df6c 100644
--- a/drivers/isdn/hisax/st5481_init.c
+++ b/drivers/isdn/hisax/st5481_init.c
@@ -43,10 +43,10 @@ static int number_of_leds = 2; /* 2 LEDs on the adpater default */
43module_param(number_of_leds, int, 0); 43module_param(number_of_leds, int, 0);
44 44
45#ifdef CONFIG_HISAX_DEBUG 45#ifdef CONFIG_HISAX_DEBUG
46static int debug = 0x1; 46static int debug = 0;
47module_param(debug, int, 0); 47module_param(debug, int, 0);
48int st5481_debug;
49#endif 48#endif
49int st5481_debug;
50 50
51static LIST_HEAD(adapter_list); 51static LIST_HEAD(adapter_list);
52 52
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ab62223297a5..ffd5b2d45552 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -132,11 +132,15 @@ static void usb_ctrl_complete(struct urb *urb, struct pt_regs *regs)
132 struct ctrl_msg *ctrl_msg; 132 struct ctrl_msg *ctrl_msg;
133 133
134 if (unlikely(urb->status < 0)) { 134 if (unlikely(urb->status < 0)) {
135 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 135 switch (urb->status) {
136 WARN("urb status %d",urb->status); 136 case -ENOENT:
137 } else { 137 case -ESHUTDOWN:
138 DBG(1,"urb killed"); 138 case -ECONNRESET:
139 return; // Give up 139 DBG(1,"urb killed status %d", urb->status);
140 return; // Give up
141 default:
142 WARN("urb status %d",urb->status);
143 break;
140 } 144 }
141 } 145 }
142 146
@@ -184,22 +188,22 @@ static void usb_int_complete(struct urb *urb, struct pt_regs *regs)
184 int status; 188 int status;
185 189
186 switch (urb->status) { 190 switch (urb->status) {
187 case 0: 191 case 0:
188 /* success */ 192 /* success */
189 break; 193 break;
190 case -ECONNRESET: 194 case -ECONNRESET:
191 case -ENOENT: 195 case -ENOENT:
192 case -ESHUTDOWN: 196 case -ESHUTDOWN:
193 /* this urb is terminated, clean up */ 197 /* this urb is terminated, clean up */
194 DBG(1, "urb shutting down with status: %d", urb->status); 198 DBG(2, "urb shutting down with status: %d", urb->status);
195 return; 199 return;
196 default: 200 default:
197 WARN("nonzero urb status received: %d", urb->status); 201 WARN("nonzero urb status received: %d", urb->status);
198 goto exit; 202 goto exit;
199 } 203 }
200 204
201 205
202 DBG_PACKET(1, data, INT_PKT_SIZE); 206 DBG_PACKET(2, data, INT_PKT_SIZE);
203 207
204 if (urb->actual_length == 0) { 208 if (urb->actual_length == 0) {
205 goto exit; 209 goto exit;
@@ -250,7 +254,7 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
250 struct urb *urb; 254 struct urb *urb;
251 u8 *buf; 255 u8 *buf;
252 256
253 DBG(1,""); 257 DBG(2,"");
254 258
255 if ((status = usb_reset_configuration (dev)) < 0) { 259 if ((status = usb_reset_configuration (dev)) < 0) {
256 WARN("reset_configuration failed,status=%d",status); 260 WARN("reset_configuration failed,status=%d",status);
@@ -330,15 +334,17 @@ void st5481_release_usb(struct st5481_adapter *adapter)
330 DBG(1,""); 334 DBG(1,"");
331 335
332 // Stop and free Control and Interrupt URBs 336 // Stop and free Control and Interrupt URBs
333 usb_unlink_urb(ctrl->urb); 337 usb_kill_urb(ctrl->urb);
334 if (ctrl->urb->transfer_buffer) 338 if (ctrl->urb->transfer_buffer)
335 kfree(ctrl->urb->transfer_buffer); 339 kfree(ctrl->urb->transfer_buffer);
336 usb_free_urb(ctrl->urb); 340 usb_free_urb(ctrl->urb);
341 ctrl->urb = NULL;
337 342
338 usb_unlink_urb(intr->urb); 343 usb_kill_urb(intr->urb);
339 if (intr->urb->transfer_buffer) 344 if (intr->urb->transfer_buffer)
340 kfree(intr->urb->transfer_buffer); 345 kfree(intr->urb->transfer_buffer);
341 usb_free_urb(intr->urb); 346 usb_free_urb(intr->urb);
 347 intr->urb = NULL;
342} 348}
343 349
344/* 350/*
@@ -406,6 +412,7 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev,
406 spin_lock_init(&urb->lock); 412 spin_lock_init(&urb->lock);
407 urb->dev=dev; 413 urb->dev=dev;
408 urb->pipe=pipe; 414 urb->pipe=pipe;
415 urb->interval = 1;
409 urb->transfer_buffer=buf; 416 urb->transfer_buffer=buf;
410 urb->number_of_packets = num_packets; 417 urb->number_of_packets = num_packets;
411 urb->transfer_buffer_length=num_packets*packet_size; 418 urb->transfer_buffer_length=num_packets*packet_size;
@@ -452,7 +459,9 @@ st5481_setup_isocpipes(struct urb* urb[2], struct usb_device *dev,
452 if (urb[j]) { 459 if (urb[j]) {
453 if (urb[j]->transfer_buffer) 460 if (urb[j]->transfer_buffer)
454 kfree(urb[j]->transfer_buffer); 461 kfree(urb[j]->transfer_buffer);
462 urb[j]->transfer_buffer = NULL;
455 usb_free_urb(urb[j]); 463 usb_free_urb(urb[j]);
464 urb[j] = NULL;
456 } 465 }
457 } 466 }
458 return retval; 467 return retval;
@@ -463,10 +472,11 @@ void st5481_release_isocpipes(struct urb* urb[2])
463 int j; 472 int j;
464 473
465 for (j = 0; j < 2; j++) { 474 for (j = 0; j < 2; j++) {
466 usb_unlink_urb(urb[j]); 475 usb_kill_urb(urb[j]);
467 if (urb[j]->transfer_buffer) 476 if (urb[j]->transfer_buffer)
468 kfree(urb[j]->transfer_buffer); 477 kfree(urb[j]->transfer_buffer);
469 usb_free_urb(urb[j]); 478 usb_free_urb(urb[j]);
479 urb[j] = NULL;
470 } 480 }
471} 481}
472 482
@@ -485,11 +495,15 @@ static void usb_in_complete(struct urb *urb, struct pt_regs *regs)
485 int len, count, status; 495 int len, count, status;
486 496
487 if (unlikely(urb->status < 0)) { 497 if (unlikely(urb->status < 0)) {
488 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 498 switch (urb->status) {
489 WARN("urb status %d",urb->status); 499 case -ENOENT:
490 } else { 500 case -ESHUTDOWN:
491 DBG(1,"urb killed"); 501 case -ECONNRESET:
492 return; // Give up 502 DBG(1,"urb killed status %d", urb->status);
503 return; // Give up
504 default:
505 WARN("urb status %d",urb->status);
506 break;
493 } 507 }
494 } 508 }
495 509
@@ -631,7 +645,9 @@ void st5481_in_mode(struct st5481_in *in, int mode)
631 645
632 in->mode = mode; 646 in->mode = mode;
633 647
648 in->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
634 usb_unlink_urb(in->urb[0]); 649 usb_unlink_urb(in->urb[0]);
650 in->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
635 usb_unlink_urb(in->urb[1]); 651 usb_unlink_urb(in->urb[1]);
636 652
637 if (in->mode != L1_MODE_NULL) { 653 if (in->mode != L1_MODE_NULL) {
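
The same handling is applied across usb_ctrl_complete(), usb_int_complete(), usb_in_complete(), usb_b_out_complete() and usb_d_out_complete(): -ENOENT, -ESHUTDOWN and -ECONNRESET mean the URB was killed or the device went away, so the handler gives up quietly, while anything else is a genuine error that gets a warning and, on the out pipes, a pipe reset. A condensed sketch of the idiom (example_complete() is hypothetical; WARN is the driver's own debug macro):

/* Sketch only: the URB completion idiom this patch converges on. */
static void example_complete(struct urb *urb, struct pt_regs *regs)
{
	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
		case -ENOENT:
		case -ESHUTDOWN:
		case -ECONNRESET:
			/* unlinked or disconnected: give up */
			return;
		default:
			WARN("urb status %d", urb->status);
			break;		/* fall through and try to recover */
		}
	}

	/* normal completion processing would continue here */
}
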
diff --git a/drivers/md/raid6.h b/drivers/md/raid6.h
index f80ee6350edf..31cbee71365f 100644
--- a/drivers/md/raid6.h
+++ b/drivers/md/raid6.h
@@ -69,9 +69,13 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
69#define __init 69#define __init
70#define __exit 70#define __exit
71#define __attribute_const__ __attribute__((const)) 71#define __attribute_const__ __attribute__((const))
72#define noinline __attribute__((noinline))
72 73
73#define preempt_enable() 74#define preempt_enable()
74#define preempt_disable() 75#define preempt_disable()
76#define cpu_has_feature(x) 1
77#define enable_kernel_altivec()
78#define disable_kernel_altivec()
75 79
76#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
77 81
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index acf386fc4b4f..51c63c0cf1c9 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -19,6 +19,7 @@
19#include "raid6.h" 19#include "raid6.h"
20#ifndef __KERNEL__ 20#ifndef __KERNEL__
21#include <sys/mman.h> 21#include <sys/mman.h>
22#include <stdio.h>
22#endif 23#endif
23 24
24struct raid6_calls raid6_call; 25struct raid6_calls raid6_call;
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
index 1de8f030eee0..b9afd35b8812 100644
--- a/drivers/md/raid6altivec.uc
+++ b/drivers/md/raid6altivec.uc
@@ -27,16 +27,20 @@
27#ifdef CONFIG_ALTIVEC 27#ifdef CONFIG_ALTIVEC
28 28
29#include <altivec.h> 29#include <altivec.h>
30#include <asm/system.h> 30#ifdef __KERNEL__
31#include <asm/cputable.h> 31# include <asm/system.h>
32# include <asm/cputable.h>
33#endif
32 34
33/* 35/*
34 * This is the C data type to use 36 * This is the C data type to use. We use a vector of
37 * signed char so vec_cmpgt() will generate the right
38 * instruction.
35 */ 39 */
36 40
37typedef vector unsigned char unative_t; 41typedef vector signed char unative_t;
38 42
39#define NBYTES(x) ((vector unsigned char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) 43#define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
40#define NSIZE sizeof(unative_t) 44#define NSIZE sizeof(unative_t)
41 45
42/* 46/*
@@ -108,7 +112,11 @@ int raid6_have_altivec(void);
108int raid6_have_altivec(void) 112int raid6_have_altivec(void)
109{ 113{
110 /* This assumes either all CPUs have Altivec or none does */ 114 /* This assumes either all CPUs have Altivec or none does */
115# ifdef __KERNEL__
111 return cpu_has_feature(CPU_FTR_ALTIVEC); 116 return cpu_has_feature(CPU_FTR_ALTIVEC);
117# else
118 return 1;
119# endif
112} 120}
113#endif 121#endif
114 122
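
Switching unative_t to vector signed char matters because the mask of "bytes with the top bit set" is built with vec_cmpgt() against zero, and that comparison has to be signed for the high bit to register as negative. That mask selects which bytes get reduced by the RAID-6 generator polynomial when multiplying by two in GF(2^8). A scalar sketch of that step, for illustration only:

/*
 * Sketch only: scalar version of the GF(2^8) multiply-by-two that the
 * AltiVec code vectorizes. The polynomial is 0x11d, so bytes whose top
 * bit was set are XORed with 0x1d after the shift.
 */
static inline unsigned char gf256_mul2(unsigned char v)
{
	unsigned char mask = (v & 0x80) ? 0xff : 0x00;	/* vec_cmpgt analogue */

	return (unsigned char)((v << 1) ^ (mask & 0x1d));
}
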
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
index 557806728609..78e0396adf2a 100644
--- a/drivers/md/raid6test/Makefile
+++ b/drivers/md/raid6test/Makefile
@@ -8,6 +8,8 @@ OPTFLAGS = -O2 # Adjust as desired
8CFLAGS = -I.. -g $(OPTFLAGS) 8CFLAGS = -I.. -g $(OPTFLAGS)
9LD = ld 9LD = ld
10PERL = perl 10PERL = perl
11AR = ar
12RANLIB = ranlib
11 13
12.c.o: 14.c.o:
13 $(CC) $(CFLAGS) -c -o $@ $< 15 $(CC) $(CFLAGS) -c -o $@ $<
@@ -18,18 +20,33 @@ PERL = perl
18%.uc: ../%.uc 20%.uc: ../%.uc
19 cp -f $< $@ 21 cp -f $< $@
20 22
21all: raid6.o raid6test 23all: raid6.a raid6test
22 24
23raid6.o: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \ 25raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
24 raid6int32.o \ 26 raid6int32.o \
25 raid6mmx.o raid6sse1.o raid6sse2.o \ 27 raid6mmx.o raid6sse1.o raid6sse2.o \
28 raid6altivec1.o raid6altivec2.o raid6altivec4.o raid6altivec8.o \
26 raid6recov.o raid6algos.o \ 29 raid6recov.o raid6algos.o \
27 raid6tables.o 30 raid6tables.o
28 $(LD) -r -o $@ $^ 31 rm -f $@
32 $(AR) cq $@ $^
33 $(RANLIB) $@
29 34
30raid6test: raid6.o test.c 35raid6test: test.c raid6.a
31 $(CC) $(CFLAGS) -o raid6test $^ 36 $(CC) $(CFLAGS) -o raid6test $^
32 37
38raid6altivec1.c: raid6altivec.uc ../unroll.pl
39 $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
40
41raid6altivec2.c: raid6altivec.uc ../unroll.pl
42 $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
43
44raid6altivec4.c: raid6altivec.uc ../unroll.pl
45 $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
46
47raid6altivec8.c: raid6altivec.uc ../unroll.pl
48 $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
49
33raid6int1.c: raid6int.uc ../unroll.pl 50raid6int1.c: raid6int.uc ../unroll.pl
34 $(PERL) ../unroll.pl 1 < raid6int.uc > $@ 51 $(PERL) ../unroll.pl 1 < raid6int.uc > $@
35 52
@@ -52,7 +69,7 @@ raid6tables.c: mktables
52 ./mktables > raid6tables.c 69 ./mktables > raid6tables.c
53 70
54clean: 71clean:
55 rm -f *.o mktables mktables.c raid6int.uc raid6*.c raid6test 72 rm -f *.o *.a mktables mktables.c raid6int.uc raid6*.c raid6test
56 73
57spotless: clean 74spotless: clean
58 rm -f *~ 75 rm -f *~
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 33f209a39cb6..1883d22cffeb 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -35,6 +35,23 @@ config FUSION_FC
35 LSIFC929X 35 LSIFC929X
36 LSIFC929XL 36 LSIFC929XL
37 37
38config FUSION_SAS
39 tristate "Fusion MPT ScsiHost drivers for SAS"
40 depends on PCI && SCSI
41 select FUSION
42 select SCSI_SAS_ATTRS
43 ---help---
 44 SCSI HOST support for SAS host adapters.
45
46 List of supported controllers:
47
48 LSISAS1064
49 LSISAS1066
50 LSISAS1068
51 LSISAS1064E
52 LSISAS1066E
53 LSISAS1068E
54
38config FUSION_MAX_SGE 55config FUSION_MAX_SGE
39 int "Maximum number of scatter gather entries (16 - 128)" 56 int "Maximum number of scatter gather entries (16 - 128)"
40 depends on FUSION 57 depends on FUSION
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index 1d2f9db813c1..8a2e2657f4c2 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -34,5 +34,6 @@
34 34
35obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o 35obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o
36obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o 36obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o
37obj-$(CONFIG_FUSION_SAS) += mptbase.o mptscsih.o mptsas.o
37obj-$(CONFIG_FUSION_CTL) += mptctl.o 38obj-$(CONFIG_FUSION_CTL) += mptctl.o
38obj-$(CONFIG_FUSION_LAN) += mptlan.o 39obj-$(CONFIG_FUSION_LAN) += mptlan.o
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index f517d0692d5f..790a2932ded9 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -135,13 +135,12 @@ static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
135 135
136static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc); 136static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
137static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag); 137static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
138//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
139static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason); 138static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
140static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag); 139static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
141static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag); 140static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
142static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag); 141static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
143static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag); 142static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
144static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag); 143static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
145static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag); 144static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
146static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag); 145static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
147static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag); 146static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
@@ -152,6 +151,7 @@ static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
152static int GetLanConfigPages(MPT_ADAPTER *ioc); 151static int GetLanConfigPages(MPT_ADAPTER *ioc);
153static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum); 152static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
154static int GetIoUnitPage2(MPT_ADAPTER *ioc); 153static int GetIoUnitPage2(MPT_ADAPTER *ioc);
154int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
155static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); 155static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
156static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); 156static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
157static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); 157static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
@@ -159,6 +159,8 @@ static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
159static void mpt_timer_expired(unsigned long data); 159static void mpt_timer_expired(unsigned long data);
160static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); 160static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
161static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); 161static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
162static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
163static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
162 164
163#ifdef CONFIG_PROC_FS 165#ifdef CONFIG_PROC_FS
164static int procmpt_summary_read(char *buf, char **start, off_t offset, 166static int procmpt_summary_read(char *buf, char **start, off_t offset,
@@ -175,6 +177,7 @@ static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *
175static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); 177static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
176static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); 178static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
177static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info); 179static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
180static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info);
178 181
179/* module entry point */ 182/* module entry point */
180static int __init fusion_init (void); 183static int __init fusion_init (void);
@@ -206,6 +209,144 @@ pci_enable_io_access(struct pci_dev *pdev)
206 pci_write_config_word(pdev, PCI_COMMAND, command_reg); 209 pci_write_config_word(pdev, PCI_COMMAND, command_reg);
207} 210}
208 211
212/*
213 * Process turbo (context) reply...
214 */
215static void
216mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
217{
218 MPT_FRAME_HDR *mf = NULL;
219 MPT_FRAME_HDR *mr = NULL;
220 int req_idx = 0;
221 int cb_idx;
222
223 dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n",
224 ioc->name, pa));
225
226 switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
227 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
228 req_idx = pa & 0x0000FFFF;
229 cb_idx = (pa & 0x00FF0000) >> 16;
230 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
231 break;
232 case MPI_CONTEXT_REPLY_TYPE_LAN:
233 cb_idx = mpt_lan_index;
234 /*
235 * Blind set of mf to NULL here was fatal
236 * after lan_reply says "freeme"
237 * Fix sort of combined with an optimization here;
238 * added explicit check for case where lan_reply
239 * was just returning 1 and doing nothing else.
240 * For this case skip the callback, but set up
241 * proper mf value first here:-)
242 */
243 if ((pa & 0x58000000) == 0x58000000) {
244 req_idx = pa & 0x0000FFFF;
245 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
246 mpt_free_msg_frame(ioc, mf);
247 mb();
248 return;
249 break;
250 }
251 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
252 break;
253 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
254 cb_idx = mpt_stm_index;
255 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
256 break;
257 default:
258 cb_idx = 0;
259 BUG();
260 }
261
262 /* Check for (valid) IO callback! */
263 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
264 MptCallbacks[cb_idx] == NULL) {
265 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
266 __FUNCTION__, ioc->name, cb_idx);
267 goto out;
268 }
269
270 if (MptCallbacks[cb_idx](ioc, mf, mr))
271 mpt_free_msg_frame(ioc, mf);
272 out:
273 mb();
274}
275
276static void
277mpt_reply(MPT_ADAPTER *ioc, u32 pa)
278{
279 MPT_FRAME_HDR *mf;
280 MPT_FRAME_HDR *mr;
281 int req_idx;
282 int cb_idx;
283 int freeme;
284
285 u32 reply_dma_low;
286 u16 ioc_stat;
287
288 /* non-TURBO reply! Hmmm, something may be up...
289 * Newest turbo reply mechanism; get address
290 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
291 */
292
293 /* Map DMA address of reply header to cpu address.
294 * pa is 32 bits - but the dma address may be 32 or 64 bits
295	 * get offset based only on the low addresses
296 */
297
298 reply_dma_low = (pa <<= 1);
299 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
300 (reply_dma_low - ioc->reply_frames_low_dma));
301
302 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
303 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
304 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
305
306 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
307 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
308 DBG_DUMP_REPLY_FRAME(mr)
309
310 /* Check/log IOC log info
311 */
312 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
313 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
314 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
315 if (ioc->bus_type == FC)
316 mpt_fc_log_info(ioc, log_info);
317 else if (ioc->bus_type == SCSI)
318 mpt_sp_log_info(ioc, log_info);
319 else if (ioc->bus_type == SAS)
320 mpt_sas_log_info(ioc, log_info);
321 }
322 if (ioc_stat & MPI_IOCSTATUS_MASK) {
323 if (ioc->bus_type == SCSI &&
324 cb_idx != mpt_stm_index &&
325 cb_idx != mpt_lan_index)
326 mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
327 }
328
329
330 /* Check for (valid) IO callback! */
331 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
332 MptCallbacks[cb_idx] == NULL) {
333 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
334 __FUNCTION__, ioc->name, cb_idx);
335 freeme = 0;
336 goto out;
337 }
338
339 freeme = MptCallbacks[cb_idx](ioc, mf, mr);
340
341 out:
342 /* Flush (non-TURBO) reply with a WRITE! */
343 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
344
345 if (freeme)
346 mpt_free_msg_frame(ioc, mf);
347 mb();
348}
349
209/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 350/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
210/* 351/*
211 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler. 352 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
@@ -227,164 +368,21 @@ pci_enable_io_access(struct pci_dev *pdev)
227static irqreturn_t 368static irqreturn_t
228mpt_interrupt(int irq, void *bus_id, struct pt_regs *r) 369mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
229{ 370{
230 MPT_ADAPTER *ioc; 371 MPT_ADAPTER *ioc = bus_id;
231 MPT_FRAME_HDR *mf; 372 u32 pa;
232 MPT_FRAME_HDR *mr;
233 u32 pa;
234 int req_idx;
235 int cb_idx;
236 int type;
237 int freeme;
238
239 ioc = (MPT_ADAPTER *)bus_id;
240 373
241 /* 374 /*
242 * Drain the reply FIFO! 375 * Drain the reply FIFO!
243 *
244 * NOTES: I've seen up to 10 replies processed in this loop, so far...
245 * Update: I've seen up to 9182 replies processed in this loop! ??
246 * Update: Limit ourselves to processing max of N replies
247 * (bottom of loop).
248 */ 376 */
249 while (1) { 377 while (1) {
250 378 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
251 if ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) == 0xFFFFFFFF) 379 if (pa == 0xFFFFFFFF)
252 return IRQ_HANDLED; 380 return IRQ_HANDLED;
253 381 else if (pa & MPI_ADDRESS_REPLY_A_BIT)
254 cb_idx = 0; 382 mpt_reply(ioc, pa);
255 freeme = 0; 383 else
256 384 mpt_turbo_reply(ioc, pa);
257 /* 385 }
258 * Check for non-TURBO reply!
259 */
260 if (pa & MPI_ADDRESS_REPLY_A_BIT) {
261 u32 reply_dma_low;
262 u16 ioc_stat;
263
264 /* non-TURBO reply! Hmmm, something may be up...
265 * Newest turbo reply mechanism; get address
266 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
267 */
268
269 /* Map DMA address of reply header to cpu address.
270 * pa is 32 bits - but the dma address may be 32 or 64 bits
271 * get offset based only only the low addresses
272 */
273 reply_dma_low = (pa = (pa << 1));
274 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
275 (reply_dma_low - ioc->reply_frames_low_dma));
276
277 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
278 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
279 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
280
281 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
282 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
283 DBG_DUMP_REPLY_FRAME(mr)
284
285 /* Check/log IOC log info
286 */
287 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
288 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
289 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
290 if (ioc->bus_type == FC)
291 mpt_fc_log_info(ioc, log_info);
292 else if (ioc->bus_type == SCSI)
293 mpt_sp_log_info(ioc, log_info);
294 }
295 if (ioc_stat & MPI_IOCSTATUS_MASK) {
296 if (ioc->bus_type == SCSI)
297 mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
298 }
299 } else {
300 /*
301 * Process turbo (context) reply...
302 */
303 dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n", ioc->name, pa));
304 type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT);
305 if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) {
306 cb_idx = mpt_stm_index;
307 mf = NULL;
308 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
309 } else if (type == MPI_CONTEXT_REPLY_TYPE_LAN) {
310 cb_idx = mpt_lan_index;
311 /* Blind set of mf to NULL here was fatal
312 * after lan_reply says "freeme"
313 * Fix sort of combined with an optimization here;
314 * added explicit check for case where lan_reply
315 * was just returning 1 and doing nothing else.
316 * For this case skip the callback, but set up
317 * proper mf value first here:-)
318 */
319 if ((pa & 0x58000000) == 0x58000000) {
320 req_idx = pa & 0x0000FFFF;
321 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
322 freeme = 1;
323 /*
324 * IMPORTANT! Invalidate the callback!
325 */
326 cb_idx = 0;
327 } else {
328 mf = NULL;
329 }
330 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
331 } else {
332 req_idx = pa & 0x0000FFFF;
333 cb_idx = (pa & 0x00FF0000) >> 16;
334 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
335 mr = NULL;
336 }
337 pa = 0; /* No reply flush! */
338 }
339
340#ifdef MPT_DEBUG_IRQ
341 if (ioc->bus_type == SCSI) {
342 /* Verify mf, mr are reasonable.
343 */
344 if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
345 || (mf < ioc->req_frames)) ) {
346 printk(MYIOC_s_WARN_FMT
347 "mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
348 cb_idx = 0;
349 pa = 0;
350 freeme = 0;
351 }
352 if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth))
353 || (mr < ioc->reply_frames)) ) {
354 printk(MYIOC_s_WARN_FMT
355 "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, (void *)mr);
356 cb_idx = 0;
357 pa = 0;
358 freeme = 0;
359 }
360 if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) {
361 printk(MYIOC_s_WARN_FMT
362 "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx);
363 cb_idx = 0;
364 pa = 0;
365 freeme = 0;
366 }
367 }
368#endif
369
370 /* Check for (valid) IO callback! */
371 if (cb_idx) {
372 /* Do the callback! */
373 freeme = (*(MptCallbacks[cb_idx]))(ioc, mf, mr);
374 }
375
376 if (pa) {
377 /* Flush (non-TURBO) reply with a WRITE! */
378 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
379 }
380
381 if (freeme) {
382 /* Put Request back on FreeQ! */
383 mpt_free_msg_frame(ioc, mf);
384 }
385
386 mb();
387 } /* drain reply FIFO */
388 386
389 return IRQ_HANDLED; 387 return IRQ_HANDLED;
390} 388}
@@ -509,6 +507,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
509 pCfg->wait_done = 1; 507 pCfg->wait_done = 1;
510 wake_up(&mpt_waitq); 508 wake_up(&mpt_waitq);
511 } 509 }
510 } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
511 /* we should be always getting a reply frame */
512 memcpy(ioc->persist_reply_frame, reply,
513 min(MPT_DEFAULT_FRAME_SIZE,
514 4*reply->u.reply.MsgLength));
515 del_timer(&ioc->persist_timer);
516 ioc->persist_wait_done = 1;
517 wake_up(&mpt_waitq);
512 } else { 518 } else {
513 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", 519 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
514 ioc->name, func); 520 ioc->name, func);
@@ -750,6 +756,7 @@ mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc)
750 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR, 756 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
751 u.frame.linkage.list); 757 u.frame.linkage.list);
752 list_del(&mf->u.frame.linkage.list); 758 list_del(&mf->u.frame.linkage.list);
759 mf->u.frame.linkage.arg1 = 0;
753 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */ 760 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */
754 req_offset = (u8 *)mf - (u8 *)ioc->req_frames; 761 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
755 /* u16! */ 762 /* u16! */
@@ -845,6 +852,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
845 852
846 /* Put Request back on FreeQ! */ 853 /* Put Request back on FreeQ! */
847 spin_lock_irqsave(&ioc->FreeQlock, flags); 854 spin_lock_irqsave(&ioc->FreeQlock, flags);
855 mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
848 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); 856 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
849#ifdef MFCNT 857#ifdef MFCNT
850 ioc->mfcnt--; 858 ioc->mfcnt--;
@@ -971,12 +979,123 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
971 979
972 /* Make sure there are no doorbells */ 980 /* Make sure there are no doorbells */
973 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 981 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
974 982
975 return r; 983 return r;
976} 984}
977 985
978/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 986/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
979/** 987/**
988 * mpt_host_page_access_control - provides mechanism for the host
989 * driver to control the IOC's Host Page Buffer access.
990 * @ioc: Pointer to MPT adapter structure
991 * @access_control_value: define bits below
992 *
993 * Access Control Value - bits[15:12]
994 * 0h Reserved
995 * 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
996 * 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
997 * 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
998 *
999 * Returns 0 for success, non-zero for failure.
1000 */
1001
1002static int
1003mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
1004{
1005 int r = 0;
1006
1007 /* return if in use */
1008 if (CHIPREG_READ32(&ioc->chip->Doorbell)
1009 & MPI_DOORBELL_ACTIVE)
1010 return -1;
1011
1012 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1013
1014 CHIPREG_WRITE32(&ioc->chip->Doorbell,
1015 ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
1016 <<MPI_DOORBELL_FUNCTION_SHIFT) |
1017 (access_control_value<<12)));
1018
1019 /* Wait for IOC to clear Doorbell Status bit */
1020 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1021 return -2;
1022	} else
1023 return 0;
1024}
1025
1026/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1027/**
1028 * mpt_host_page_alloc - allocate system memory for the fw
1029 * If we already allocated memory in past, then resend the same pointer.
1030 * @ioc: Pointer to MPT adapter structure
1031 * @ioc_init: Pointer to ioc init config page
1032 *
1033 * Returns 0 for success, non-zero for failure.
1034 */
1035static int
1036mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1037{
1038 char *psge;
1039 int flags_length;
1040 u32 host_page_buffer_sz=0;
1041
1042 if(!ioc->HostPageBuffer) {
1043
1044 host_page_buffer_sz =
1045 le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
1046
1047 if(!host_page_buffer_sz)
1048 return 0; /* fw doesn't need any host buffers */
1049
1050 /* spin till we get enough memory */
1051 while(host_page_buffer_sz > 0) {
1052
1053 if((ioc->HostPageBuffer = pci_alloc_consistent(
1054 ioc->pcidev,
1055 host_page_buffer_sz,
1056 &ioc->HostPageBuffer_dma)) != NULL) {
1057
1058 dinitprintk((MYIOC_s_INFO_FMT
1059 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1060 ioc->name,
1061 ioc->HostPageBuffer,
1062 ioc->HostPageBuffer_dma,
1063 host_page_buffer_sz));
1064 ioc->alloc_total += host_page_buffer_sz;
1065 ioc->HostPageBuffer_sz = host_page_buffer_sz;
1066 break;
1067 }
1068
1069 host_page_buffer_sz -= (4*1024);
1070 }
1071 }
1072
1073 if(!ioc->HostPageBuffer) {
1074 printk(MYIOC_s_ERR_FMT
1075 "Failed to alloc memory for host_page_buffer!\n",
1076 ioc->name);
1077 return -999;
1078 }
1079
1080 psge = (char *)&ioc_init->HostPageBufferSGE;
1081 flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1082 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
1083 MPI_SGE_FLAGS_32_BIT_ADDRESSING |
1084 MPI_SGE_FLAGS_HOST_TO_IOC |
1085 MPI_SGE_FLAGS_END_OF_BUFFER;
1086 if (sizeof(dma_addr_t) == sizeof(u64)) {
1087 flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1088 }
1089 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1090 flags_length |= ioc->HostPageBuffer_sz;
1091 mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1092 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1093
1094	return 0;
1095}
1096
1097/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1098/**
980 * mpt_verify_adapter - Given a unique IOC identifier, set pointer to 1099 * mpt_verify_adapter - Given a unique IOC identifier, set pointer to
981 * the associated MPT adapter structure. 1100 * the associated MPT adapter structure.
982 * @iocid: IOC unique identifier (integer) 1101 * @iocid: IOC unique identifier (integer)
@@ -1084,7 +1203,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1084 1203
1085 /* Initilize SCSI Config Data structure 1204 /* Initilize SCSI Config Data structure
1086 */ 1205 */
1087 memset(&ioc->spi_data, 0, sizeof(ScsiCfgData)); 1206 memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1088 1207
1089 /* Initialize the running configQ head. 1208 /* Initialize the running configQ head.
1090 */ 1209 */
@@ -1213,6 +1332,33 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1213 ioc->prod_name = "LSI53C1035"; 1332 ioc->prod_name = "LSI53C1035";
1214 ioc->bus_type = SCSI; 1333 ioc->bus_type = SCSI;
1215 } 1334 }
1335 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
1336 ioc->prod_name = "LSISAS1064";
1337 ioc->bus_type = SAS;
1338 ioc->errata_flag_1064 = 1;
1339 }
1340 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066) {
1341 ioc->prod_name = "LSISAS1066";
1342 ioc->bus_type = SAS;
1343 ioc->errata_flag_1064 = 1;
1344 }
1345 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
1346 ioc->prod_name = "LSISAS1068";
1347 ioc->bus_type = SAS;
1348 ioc->errata_flag_1064 = 1;
1349 }
1350 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
1351 ioc->prod_name = "LSISAS1064E";
1352 ioc->bus_type = SAS;
1353 }
1354 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066E) {
1355 ioc->prod_name = "LSISAS1066E";
1356 ioc->bus_type = SAS;
1357 }
1358 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
1359 ioc->prod_name = "LSISAS1068E";
1360 ioc->bus_type = SAS;
1361 }
1216 1362
1217 if (ioc->errata_flag_1064) 1363 if (ioc->errata_flag_1064)
1218 pci_disable_io_access(pdev); 1364 pci_disable_io_access(pdev);
@@ -1604,8 +1750,23 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1604 */ 1750 */
1605 if (ret == 0) { 1751 if (ret == 0) {
1606 rc = mpt_do_upload(ioc, sleepFlag); 1752 rc = mpt_do_upload(ioc, sleepFlag);
1607 if (rc != 0) 1753 if (rc == 0) {
1754 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
1755 /*
1756 * Maintain only one pointer to FW memory
1757 * so there will not be two attempt to
1758 * downloadboot onboard dual function
1759 * chips (mpt_adapter_disable,
1760 * mpt_diag_reset)
1761 */
1762 ioc->cached_fw = NULL;
1763 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
1764 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
1765 }
1766 } else {
1608 printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); 1767 printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
1768 ret = -5;
1769 }
1609 } 1770 }
1610 } 1771 }
1611 } 1772 }
@@ -1640,7 +1801,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1640 * and we try GetLanConfigPages again... 1801 * and we try GetLanConfigPages again...
1641 */ 1802 */
1642 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) { 1803 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
1643 if (ioc->bus_type == FC) { 1804 if (ioc->bus_type == SAS) {
1805
1806 /* clear persistency table */
1807 if(ioc->facts.IOCExceptions &
1808 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
1809 ret = mptbase_sas_persist_operation(ioc,
1810 MPI_SAS_OP_CLEAR_NOT_PRESENT);
1811 if(ret != 0)
1812 return -1;
1813 }
1814
1815 /* Find IM volumes
1816 */
1817 mpt_findImVolumes(ioc);
1818
1819 } else if (ioc->bus_type == FC) {
1644 /* 1820 /*
1645 * Pre-fetch FC port WWN and stuff... 1821 * Pre-fetch FC port WWN and stuff...
1646 * (FCPortPage0_t stuff) 1822 * (FCPortPage0_t stuff)
@@ -1783,7 +1959,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1783 1959
1784 if (ioc->cached_fw != NULL) { 1960 if (ioc->cached_fw != NULL) {
1785 ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n")); 1961 ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n"));
1786 if ((ret = mpt_downloadboot(ioc, NO_SLEEP)) < 0) { 1962 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)ioc->cached_fw, NO_SLEEP)) < 0) {
1787 printk(KERN_WARNING MYNAM 1963 printk(KERN_WARNING MYNAM
1788 ": firmware downloadboot failure (%d)!\n", ret); 1964 ": firmware downloadboot failure (%d)!\n", ret);
1789 } 1965 }
@@ -1831,9 +2007,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1831 } 2007 }
1832 2008
1833 kfree(ioc->spi_data.nvram); 2009 kfree(ioc->spi_data.nvram);
1834 kfree(ioc->spi_data.pIocPg3); 2010 kfree(ioc->raid_data.pIocPg3);
1835 ioc->spi_data.nvram = NULL; 2011 ioc->spi_data.nvram = NULL;
1836 ioc->spi_data.pIocPg3 = NULL; 2012 ioc->raid_data.pIocPg3 = NULL;
1837 2013
1838 if (ioc->spi_data.pIocPg4 != NULL) { 2014 if (ioc->spi_data.pIocPg4 != NULL) {
1839 sz = ioc->spi_data.IocPg4Sz; 2015 sz = ioc->spi_data.IocPg4Sz;
@@ -1852,6 +2028,23 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1852 2028
1853 kfree(ioc->ChainToChain); 2029 kfree(ioc->ChainToChain);
1854 ioc->ChainToChain = NULL; 2030 ioc->ChainToChain = NULL;
2031
2032 if (ioc->HostPageBuffer != NULL) {
2033 if((ret = mpt_host_page_access_control(ioc,
2034 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
2035 printk(KERN_ERR MYNAM
2036 ": %s: host page buffers free failed (%d)!\n",
2037 __FUNCTION__, ret);
2038 }
2039 dexitprintk((KERN_INFO MYNAM ": %s HostPageBuffer free @ %p, sz=%d bytes\n",
2040 ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
2041 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2042 ioc->HostPageBuffer,
2043 ioc->HostPageBuffer_dma);
2044 ioc->HostPageBuffer = NULL;
2045 ioc->HostPageBuffer_sz = 0;
2046 ioc->alloc_total -= ioc->HostPageBuffer_sz;
2047 }
1855} 2048}
1856 2049
1857/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2050/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2034,7 +2227,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2034 * Loop here waiting for IOC to come READY. 2227 * Loop here waiting for IOC to come READY.
2035 */ 2228 */
2036 ii = 0; 2229 ii = 0;
2037 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */ 2230 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
2038 2231
2039 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { 2232 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
2040 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) { 2233 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
@@ -2212,6 +2405,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2212 le32_to_cpu(facts->CurrentSenseBufferHighAddr); 2405 le32_to_cpu(facts->CurrentSenseBufferHighAddr);
2213 facts->CurReplyFrameSize = 2406 facts->CurReplyFrameSize =
2214 le16_to_cpu(facts->CurReplyFrameSize); 2407 le16_to_cpu(facts->CurReplyFrameSize);
2408 facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
2215 2409
2216 /* 2410 /*
2217 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx 2411 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
@@ -2383,13 +2577,25 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2383 ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n", 2577 ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n",
2384 ioc->name, ioc->upload_fw, ioc->facts.Flags)); 2578 ioc->name, ioc->upload_fw, ioc->facts.Flags));
2385 2579
2386 if (ioc->bus_type == FC) 2580 if(ioc->bus_type == SAS)
2581 ioc_init.MaxDevices = ioc->facts.MaxDevices;
2582 else if(ioc->bus_type == FC)
2387 ioc_init.MaxDevices = MPT_MAX_FC_DEVICES; 2583 ioc_init.MaxDevices = MPT_MAX_FC_DEVICES;
2388 else 2584 else
2389 ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES; 2585 ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES;
2390
2391 ioc_init.MaxBuses = MPT_MAX_BUS; 2586 ioc_init.MaxBuses = MPT_MAX_BUS;
2392 2587 dinitprintk((MYIOC_s_INFO_FMT "facts.MsgVersion=%x\n",
2588 ioc->name, ioc->facts.MsgVersion));
2589 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
2590 // set MsgVersion and HeaderVersion host driver was built with
2591 ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
2592 ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
2593
2594 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
2595 ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
2596 } else if(mpt_host_page_alloc(ioc, &ioc_init))
2597 return -99;
2598 }
2393 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 2599 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
2394 2600
2395 if (sizeof(dma_addr_t) == sizeof(u64)) { 2601 if (sizeof(dma_addr_t) == sizeof(u64)) {
@@ -2403,17 +2609,21 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2403 ioc_init.HostMfaHighAddr = cpu_to_le32(0); 2609 ioc_init.HostMfaHighAddr = cpu_to_le32(0);
2404 ioc_init.SenseBufferHighAddr = cpu_to_le32(0); 2610 ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
2405 } 2611 }
2406 2612
2407 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr; 2613 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
2408 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr; 2614 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
2615 ioc->facts.MaxDevices = ioc_init.MaxDevices;
2616 ioc->facts.MaxBuses = ioc_init.MaxBuses;
2409 2617
2410 dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n", 2618 dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n",
2411 ioc->name, &ioc_init)); 2619 ioc->name, &ioc_init));
2412 2620
2413 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, 2621 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
2414 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag); 2622 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
2415 if (r != 0) 2623 if (r != 0) {
2624 printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
2416 return r; 2625 return r;
2626 }
2417 2627
2418 /* No need to byte swap the multibyte fields in the reply 2628 /* No need to byte swap the multibyte fields in the reply
2419 * since we don't even look at it's contents. 2629 * since we don't even look at it's contents.
@@ -2472,7 +2682,7 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2472{ 2682{
2473 PortEnable_t port_enable; 2683 PortEnable_t port_enable;
2474 MPIDefaultReply_t reply_buf; 2684 MPIDefaultReply_t reply_buf;
2475 int ii; 2685 int rc;
2476 int req_sz; 2686 int req_sz;
2477 int reply_sz; 2687 int reply_sz;
2478 2688
@@ -2494,22 +2704,15 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2494 2704
2495 /* RAID FW may take a long time to enable 2705 /* RAID FW may take a long time to enable
2496 */ 2706 */
2497 if (ioc->bus_type == FC) { 2707 if ( (ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
2498 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable, 2708 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI ) {
2499 reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag); 2709 rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2500 } else {
2501 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2502 reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag); 2710 reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
2711 } else {
2712 rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2713 reply_sz, (u16*)&reply_buf, 30 /*seconds*/, sleepFlag);
2503 } 2714 }
2504 2715 return rc;
2505 if (ii != 0)
2506 return ii;
2507
2508 /* We do not even look at the reply, so we need not
2509 * swap the multi-byte fields.
2510 */
2511
2512 return 0;
2513} 2716}
2514 2717
2515/* 2718/*
@@ -2666,9 +2869,8 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
2666 * <0 for fw upload failure. 2869 * <0 for fw upload failure.
2667 */ 2870 */
2668static int 2871static int
2669mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag) 2872mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
2670{ 2873{
2671 MpiFwHeader_t *pFwHeader;
2672 MpiExtImageHeader_t *pExtImage; 2874 MpiExtImageHeader_t *pExtImage;
2673 u32 fwSize; 2875 u32 fwSize;
2674 u32 diag0val; 2876 u32 diag0val;
@@ -2679,18 +2881,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2679 u32 load_addr; 2881 u32 load_addr;
2680 u32 ioc_state=0; 2882 u32 ioc_state=0;
2681 2883
2682 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x, ioc FW Ptr %p\n", 2884 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
2683 ioc->name, ioc->facts.FWImageSize, ioc->cached_fw)); 2885 ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
2684
2685 if ( ioc->facts.FWImageSize == 0 )
2686 return -1;
2687
2688 if (ioc->cached_fw == NULL)
2689 return -2;
2690
2691 /* prevent a second downloadboot and memory free with alt_ioc */
2692 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
2693 ioc->alt_ioc->cached_fw = NULL;
2694 2886
2695 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 2887 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2696 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); 2888 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
@@ -2718,16 +2910,17 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2718 ioc->name, count)); 2910 ioc->name, count));
2719 break; 2911 break;
2720 } 2912 }
2721 /* wait 1 sec */ 2913 /* wait .1 sec */
2722 if (sleepFlag == CAN_SLEEP) { 2914 if (sleepFlag == CAN_SLEEP) {
2723 msleep_interruptible (1000); 2915 msleep_interruptible (100);
2724 } else { 2916 } else {
2725 mdelay (1000); 2917 mdelay (100);
2726 } 2918 }
2727 } 2919 }
2728 2920
2729 if ( count == 30 ) { 2921 if ( count == 30 ) {
2730 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! Unable to RESET_ADAPTER diag0val=%x\n", 2922 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! "
2923 "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
2731 ioc->name, diag0val)); 2924 ioc->name, diag0val));
2732 return -3; 2925 return -3;
2733 } 2926 }
@@ -2742,7 +2935,6 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2742 /* Set the DiagRwEn and Disable ARM bits */ 2935 /* Set the DiagRwEn and Disable ARM bits */
2743 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM)); 2936 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
2744 2937
2745 pFwHeader = (MpiFwHeader_t *) ioc->cached_fw;
2746 fwSize = (pFwHeader->ImageSize + 3)/4; 2938 fwSize = (pFwHeader->ImageSize + 3)/4;
2747 ptrFw = (u32 *) pFwHeader; 2939 ptrFw = (u32 *) pFwHeader;
2748 2940
@@ -2792,19 +2984,38 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2792 /* Clear the internal flash bad bit - autoincrementing register, 2984 /* Clear the internal flash bad bit - autoincrementing register,
2793 * so must do two writes. 2985 * so must do two writes.
2794 */ 2986 */
2795 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 2987 if (ioc->bus_type == SCSI) {
2796 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData); 2988 /*
2797 diagRwData |= 0x4000000; 2989 * 1030 and 1035 H/W errata, workaround to access
2798 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 2990 * the ClearFlashBadSignatureBit
2799 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData); 2991 */
2992 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2993 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
2994 diagRwData |= 0x40000000;
2995 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2996 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
2997
2998 } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
2999 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3000 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
3001 MPI_DIAG_CLEAR_FLASH_BAD_SIG);
3002
3003 /* wait 1 msec */
3004 if (sleepFlag == CAN_SLEEP) {
3005 msleep_interruptible (1);
3006 } else {
3007 mdelay (1);
3008 }
3009 }
2800 3010
2801 if (ioc->errata_flag_1064) 3011 if (ioc->errata_flag_1064)
2802 pci_disable_io_access(ioc->pcidev); 3012 pci_disable_io_access(ioc->pcidev);
2803 3013
2804 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3014 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
2805 ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, turning off PREVENT_IOC_BOOT, DISABLE_ARM\n", 3015 ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, "
3016 "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
2806 ioc->name, diag0val)); 3017 ioc->name, diag0val));
2807 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM); 3018 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
2808 ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n", 3019 ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n",
2809 ioc->name, diag0val)); 3020 ioc->name, diag0val));
2810 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); 3021 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
@@ -2812,10 +3023,23 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2812 /* Write 0xFF to reset the sequencer */ 3023 /* Write 0xFF to reset the sequencer */
2813 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 3024 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2814 3025
3026 if (ioc->bus_type == SAS) {
3027 ioc_state = mpt_GetIocState(ioc, 0);
3028 if ( (GetIocFacts(ioc, sleepFlag,
3029 MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
3030 ddlprintk((MYIOC_s_INFO_FMT "GetIocFacts failed: IocState=%x\n",
3031 ioc->name, ioc_state));
3032 return -EFAULT;
3033 }
3034 }
3035
2815 for (count=0; count<HZ*20; count++) { 3036 for (count=0; count<HZ*20; count++) {
2816 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) { 3037 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
2817 ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n", 3038 ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n",
2818 ioc->name, count, ioc_state)); 3039 ioc->name, count, ioc_state));
3040 if (ioc->bus_type == SAS) {
3041 return 0;
3042 }
2819 if ((SendIocInit(ioc, sleepFlag)) != 0) { 3043 if ((SendIocInit(ioc, sleepFlag)) != 0) {
2820 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n", 3044 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n",
2821 ioc->name)); 3045 ioc->name));
@@ -3049,12 +3273,13 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3049 3273
3050 /* wait 1 sec */ 3274 /* wait 1 sec */
3051 if (sleepFlag == CAN_SLEEP) { 3275 if (sleepFlag == CAN_SLEEP) {
3052 ssleep(1); 3276 msleep_interruptible (1000);
3053 } else { 3277 } else {
3054 mdelay (1000); 3278 mdelay (1000);
3055 } 3279 }
3056 } 3280 }
3057 if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) { 3281 if ((count = mpt_downloadboot(ioc,
3282 (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) {
3058 printk(KERN_WARNING MYNAM 3283 printk(KERN_WARNING MYNAM
3059 ": firmware downloadboot failure (%d)!\n", count); 3284 ": firmware downloadboot failure (%d)!\n", count);
3060 } 3285 }
@@ -3637,7 +3862,7 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3637 int count = 0; 3862 int count = 0;
3638 u32 intstat=0; 3863 u32 intstat=0;
3639 3864
3640 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong; 3865 cntdn = 1000 * howlong;
3641 3866
3642 if (sleepFlag == CAN_SLEEP) { 3867 if (sleepFlag == CAN_SLEEP) {
3643 while (--cntdn) { 3868 while (--cntdn) {
@@ -3687,7 +3912,7 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3687 int count = 0; 3912 int count = 0;
3688 u32 intstat=0; 3913 u32 intstat=0;
3689 3914
3690 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong; 3915 cntdn = 1000 * howlong;
3691 if (sleepFlag == CAN_SLEEP) { 3916 if (sleepFlag == CAN_SLEEP) {
3692 while (--cntdn) { 3917 while (--cntdn) {
3693 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3918 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
@@ -4001,6 +4226,85 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
4001 4226
4002/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4227/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4003/* 4228/*
4229 *	mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
4230 * @ioc: Pointer to MPT_ADAPTER structure
4231 * @sas_address: 64bit SAS Address for operation.
4232 * @target_id: specified target for operation
4233 * @bus: specified bus for operation
4234 * @persist_opcode: see below
4235 *
4236 * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
4237 * devices not currently present.
4238 *	MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear all persist TargetID mappings
4239 *
4240 *	NOTE: Don't use this function during interrupt time.
4241 *
4242 * Returns: 0 for success, non-zero error
4243 */
4244
4245/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4246int
4247mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4248{
4249 SasIoUnitControlRequest_t *sasIoUnitCntrReq;
4250 SasIoUnitControlReply_t *sasIoUnitCntrReply;
4251 MPT_FRAME_HDR *mf = NULL;
4252 MPIHeader_t *mpi_hdr;
4253
4254
4255	/* ensure garbage is not sent to fw */
4256 switch(persist_opcode) {
4257
4258 case MPI_SAS_OP_CLEAR_NOT_PRESENT:
4259 case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
4260 break;
4261
4262 default:
4263 return -1;
4264 break;
4265 }
4266
4267 printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
4268
4269 /* Get a MF for this command.
4270 */
4271 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4272 printk("%s: no msg frames!\n",__FUNCTION__);
4273 return -1;
4274 }
4275
4276 mpi_hdr = (MPIHeader_t *) mf;
4277 sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
4278 memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
4279 sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
4280 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
4281 sasIoUnitCntrReq->Operation = persist_opcode;
4282
4283 init_timer(&ioc->persist_timer);
4284 ioc->persist_timer.data = (unsigned long) ioc;
4285 ioc->persist_timer.function = mpt_timer_expired;
4286 ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
4287 ioc->persist_wait_done=0;
4288 add_timer(&ioc->persist_timer);
4289 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4290 wait_event(mpt_waitq, ioc->persist_wait_done);
4291
4292 sasIoUnitCntrReply =
4293 (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
4294 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4295 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4296 __FUNCTION__,
4297 sasIoUnitCntrReply->IOCStatus,
4298 sasIoUnitCntrReply->IOCLogInfo);
4299 return -1;
4300 }
4301
4302 printk("%s: success\n",__FUNCTION__);
4303 return 0;
4304}
4305
4306/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4307/*
4004 * GetIoUnitPage2 - Retrieve BIOS version and boot order information. 4308 * GetIoUnitPage2 - Retrieve BIOS version and boot order information.
4005 * @ioc: Pointer to MPT_ADAPTER structure 4309 * @ioc: Pointer to MPT_ADAPTER structure
4006 * 4310 *
@@ -4340,10 +4644,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4340 if (mpt_config(ioc, &cfg) != 0) 4644 if (mpt_config(ioc, &cfg) != 0)
4341 goto done_and_free; 4645 goto done_and_free;
4342 4646
4343 if ( (mem = (u8 *)ioc->spi_data.pIocPg2) == NULL ) { 4647 if ( (mem = (u8 *)ioc->raid_data.pIocPg2) == NULL ) {
4344 mem = kmalloc(iocpage2sz, GFP_ATOMIC); 4648 mem = kmalloc(iocpage2sz, GFP_ATOMIC);
4345 if (mem) { 4649 if (mem) {
4346 ioc->spi_data.pIocPg2 = (IOCPage2_t *) mem; 4650 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
4347 } else { 4651 } else {
4348 goto done_and_free; 4652 goto done_and_free;
4349 } 4653 }
@@ -4360,7 +4664,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4360 /* At least 1 RAID Volume 4664 /* At least 1 RAID Volume
4361 */ 4665 */
4362 pIocRv = pIoc2->RaidVolume; 4666 pIocRv = pIoc2->RaidVolume;
4363 ioc->spi_data.isRaid = 0; 4667 ioc->raid_data.isRaid = 0;
4364 for (jj = 0; jj < nVols; jj++, pIocRv++) { 4668 for (jj = 0; jj < nVols; jj++, pIocRv++) {
4365 vid = pIocRv->VolumeID; 4669 vid = pIocRv->VolumeID;
4366 vbus = pIocRv->VolumeBus; 4670 vbus = pIocRv->VolumeBus;
@@ -4369,7 +4673,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4369 /* find the match 4673 /* find the match
4370 */ 4674 */
4371 if (vbus == 0) { 4675 if (vbus == 0) {
4372 ioc->spi_data.isRaid |= (1 << vid); 4676 ioc->raid_data.isRaid |= (1 << vid);
4373 } else { 4677 } else {
4374 /* Error! Always bus 0 4678 /* Error! Always bus 0
4375 */ 4679 */
@@ -4404,8 +4708,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4404 4708
4405 /* Free the old page 4709 /* Free the old page
4406 */ 4710 */
4407 kfree(ioc->spi_data.pIocPg3); 4711 kfree(ioc->raid_data.pIocPg3);
4408 ioc->spi_data.pIocPg3 = NULL; 4712 ioc->raid_data.pIocPg3 = NULL;
4409 4713
4410 /* There is at least one physical disk. 4714 /* There is at least one physical disk.
4411 * Read and save IOC Page 3 4715 * Read and save IOC Page 3
@@ -4442,7 +4746,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4442 mem = kmalloc(iocpage3sz, GFP_ATOMIC); 4746 mem = kmalloc(iocpage3sz, GFP_ATOMIC);
4443 if (mem) { 4747 if (mem) {
4444 memcpy(mem, (u8 *)pIoc3, iocpage3sz); 4748 memcpy(mem, (u8 *)pIoc3, iocpage3sz);
4445 ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem; 4749 ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
4446 } 4750 }
4447 } 4751 }
4448 4752
@@ -5366,8 +5670,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
5366} 5670}
5367 5671
5368/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5672/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5369static char * 5673static void
5370EventDescriptionStr(u8 event, u32 evData0) 5674EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5371{ 5675{
5372 char *ds; 5676 char *ds;
5373 5677
@@ -5420,8 +5724,95 @@ EventDescriptionStr(u8 event, u32 evData0)
5420 ds = "Events(OFF) Change"; 5724 ds = "Events(OFF) Change";
5421 break; 5725 break;
5422 case MPI_EVENT_INTEGRATED_RAID: 5726 case MPI_EVENT_INTEGRATED_RAID:
5423 ds = "Integrated Raid"; 5727 {
5728 u8 ReasonCode = (u8)(evData0 >> 16);
5729 switch (ReasonCode) {
5730 case MPI_EVENT_RAID_RC_VOLUME_CREATED :
5731 ds = "Integrated Raid: Volume Created";
5732 break;
5733 case MPI_EVENT_RAID_RC_VOLUME_DELETED :
5734 ds = "Integrated Raid: Volume Deleted";
5735 break;
5736 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
5737 ds = "Integrated Raid: Volume Settings Changed";
5738 break;
5739 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
5740 ds = "Integrated Raid: Volume Status Changed";
5741 break;
5742 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
5743 ds = "Integrated Raid: Volume Physdisk Changed";
5744 break;
5745 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
5746 ds = "Integrated Raid: Physdisk Created";
5747 break;
5748 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
5749 ds = "Integrated Raid: Physdisk Deleted";
5750 break;
5751 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
5752 ds = "Integrated Raid: Physdisk Settings Changed";
5753 break;
5754 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
5755 ds = "Integrated Raid: Physdisk Status Changed";
5756 break;
5757 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
5758 ds = "Integrated Raid: Domain Validation Needed";
5759 break;
5760 case MPI_EVENT_RAID_RC_SMART_DATA :
5761			ds = "Integrated Raid: Smart Data";
5762 break;
5763 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
5764 ds = "Integrated Raid: Replace Action Started";
5765 break;
5766 default:
5767 ds = "Integrated Raid";
5768 break;
5769 }
5770 break;
5771 }
5772 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
5773 ds = "SCSI Device Status Change";
5774 break;
5775 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
5776 {
5777 u8 ReasonCode = (u8)(evData0 >> 16);
5778 switch (ReasonCode) {
5779 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
5780 ds = "SAS Device Status Change: Added";
5781 break;
5782 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
5783 ds = "SAS Device Status Change: Deleted";
5784 break;
5785 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
5786 ds = "SAS Device Status Change: SMART Data";
5787 break;
5788 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
5789			ds = "SAS Device Status Change: No Persistency Added";
5790 break;
5791 default:
5792 ds = "SAS Device Status Change: Unknown";
5793 break;
5794 }
5795 break;
5796 }
5797 case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
5798 ds = "Bus Timer Expired";
5799 break;
5800 case MPI_EVENT_QUEUE_FULL:
5801 ds = "Queue Full";
5802 break;
5803 case MPI_EVENT_SAS_SES:
5804 ds = "SAS SES Event";
5805 break;
5806 case MPI_EVENT_PERSISTENT_TABLE_FULL:
5807 ds = "Persistent Table Full";
5808 break;
5809 case MPI_EVENT_SAS_PHY_LINK_STATUS:
5810 ds = "SAS PHY Link Status";
5811 break;
5812 case MPI_EVENT_SAS_DISCOVERY_ERROR:
5813 ds = "SAS Discovery Error";
5424 break; 5814 break;
5815
5425 /* 5816 /*
5426 * MPT base "custom" events may be added here... 5817 * MPT base "custom" events may be added here...
5427 */ 5818 */
@@ -5429,7 +5820,7 @@ EventDescriptionStr(u8 event, u32 evData0)
5429 ds = "Unknown"; 5820 ds = "Unknown";
5430 break; 5821 break;
5431 } 5822 }
5432 return ds; 5823 strcpy(evStr,ds);
5433} 5824}
5434 5825
5435/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5826/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5451,7 +5842,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5451 int ii; 5842 int ii;
5452 int r = 0; 5843 int r = 0;
5453 int handlers = 0; 5844 int handlers = 0;
5454 char *evStr; 5845 char evStr[100];
5455 u8 event; 5846 u8 event;
5456 5847
5457 /* 5848 /*
@@ -5464,7 +5855,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5464 evData0 = le32_to_cpu(pEventReply->Data[0]); 5855 evData0 = le32_to_cpu(pEventReply->Data[0]);
5465 } 5856 }
5466 5857
5467 evStr = EventDescriptionStr(event, evData0); 5858 EventDescriptionStr(event, evData0, evStr);
5468 devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n", 5859 devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n",
5469 ioc->name, 5860 ioc->name,
5470 evStr, 5861 evStr,
@@ -5481,20 +5872,6 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5481 * Do general / base driver event processing 5872 * Do general / base driver event processing
5482 */ 5873 */
5483 switch(event) { 5874 switch(event) {
5484 case MPI_EVENT_NONE: /* 00 */
5485 case MPI_EVENT_LOG_DATA: /* 01 */
5486 case MPI_EVENT_STATE_CHANGE: /* 02 */
5487 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
5488 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
5489 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
5490 case MPI_EVENT_RESCAN: /* 06 */
5491 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
5492 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
5493 case MPI_EVENT_LOGOUT: /* 09 */
5494 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
5495 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */
5496 default:
5497 break;
5498 case MPI_EVENT_EVENT_CHANGE: /* 0A */ 5875 case MPI_EVENT_EVENT_CHANGE: /* 0A */
5499 if (evDataLen) { 5876 if (evDataLen) {
5500 u8 evState = evData0 & 0xFF; 5877 u8 evState = evData0 & 0xFF;
@@ -5507,6 +5884,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5507 } 5884 }
5508 } 5885 }
5509 break; 5886 break;
5887 default:
5888 break;
5510 } 5889 }
5511 5890
5512 /* 5891 /*
@@ -5653,6 +6032,111 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
5653 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc); 6032 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
5654} 6033}
5655 6034
6035/* strings for sas loginfo */
6036 static char *originator_str[] = {
6037 "IOP", /* 00h */
6038 "PL", /* 01h */
6039 "IR" /* 02h */
6040 };
6041 static char *iop_code_str[] = {
6042 NULL, /* 00h */
6043 "Invalid SAS Address", /* 01h */
6044 NULL, /* 02h */
6045 "Invalid Page", /* 03h */
6046 NULL, /* 04h */
6047 "Task Terminated" /* 05h */
6048 };
6049 static char *pl_code_str[] = {
6050 NULL, /* 00h */
6051 "Open Failure", /* 01h */
6052 "Invalid Scatter Gather List", /* 02h */
6053 "Wrong Relative Offset or Frame Length", /* 03h */
6054 "Frame Transfer Error", /* 04h */
6055 "Transmit Frame Connected Low", /* 05h */
6056 "SATA Non-NCQ RW Error Bit Set", /* 06h */
6057 "SATA Read Log Receive Data Error", /* 07h */
6058 "SATA NCQ Fail All Commands After Error", /* 08h */
6059 "SATA Error in Receive Set Device Bit FIS", /* 09h */
6060 "Receive Frame Invalid Message", /* 0Ah */
6061 "Receive Context Message Valid Error", /* 0Bh */
6062 "Receive Frame Current Frame Error", /* 0Ch */
6063 "SATA Link Down", /* 0Dh */
6064 "Discovery SATA Init W IOS", /* 0Eh */
6065 "Config Invalid Page", /* 0Fh */
6066 "Discovery SATA Init Timeout", /* 10h */
6067 "Reset", /* 11h */
6068 "Abort", /* 12h */
6069 "IO Not Yet Executed", /* 13h */
6070 "IO Executed", /* 14h */
6071 NULL, /* 15h */
6072 NULL, /* 16h */
6073 NULL, /* 17h */
6074 NULL, /* 18h */
6075 NULL, /* 19h */
6076 NULL, /* 1Ah */
6077 NULL, /* 1Bh */
6078 NULL, /* 1Ch */
6079 NULL, /* 1Dh */
6080 NULL, /* 1Eh */
6081 NULL, /* 1Fh */
6082 "Enclosure Management" /* 20h */
6083 };
6084
6085/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6086/*
6087 * mpt_sas_log_info - Log information returned from SAS IOC.
6088 * @ioc: Pointer to MPT_ADAPTER structure
6089 * @log_info: U32 LogInfo reply word from the IOC
6090 *
6091 * Refer to lsi/mpi_log_sas.h.
6092 */
6093static void
6094mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info)
6095{
6096union loginfo_type {
6097 u32 loginfo;
6098 struct {
6099 u32 subcode:16;
6100 u32 code:8;
6101 u32 originator:4;
6102 u32 bus_type:4;
6103 }dw;
6104};
6105 union loginfo_type sas_loginfo;
6106 char *code_desc = NULL;
6107
6108 sas_loginfo.loginfo = log_info;
6109 if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) &&
6110 (sas_loginfo.dw.originator < sizeof(originator_str)/sizeof(char*)))
6111 return;
6112 if ((sas_loginfo.dw.originator == 0 /*IOP*/) &&
6113 (sas_loginfo.dw.code < sizeof(iop_code_str)/sizeof(char*))) {
6114 code_desc = iop_code_str[sas_loginfo.dw.code];
6115 }else if ((sas_loginfo.dw.originator == 1 /*PL*/) &&
6116 (sas_loginfo.dw.code < sizeof(pl_code_str)/sizeof(char*) )) {
6117 code_desc = pl_code_str[sas_loginfo.dw.code];
6118 }
6119
6120 if (code_desc != NULL)
6121 printk(MYIOC_s_INFO_FMT
6122 "LogInfo(0x%08x): Originator={%s}, Code={%s},"
6123 " SubCode(0x%04x)\n",
6124 ioc->name,
6125 log_info,
6126 originator_str[sas_loginfo.dw.originator],
6127 code_desc,
6128 sas_loginfo.dw.subcode);
6129 else
6130 printk(MYIOC_s_INFO_FMT
6131 "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
6132 " SubCode(0x%04x)\n",
6133 ioc->name,
6134 log_info,
6135 originator_str[sas_loginfo.dw.originator],
6136 sas_loginfo.dw.code,
6137 sas_loginfo.dw.subcode);
6138}
6139
5656/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6140/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5657/* 6141/*
5658 * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC. 6142 * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
@@ -5814,6 +6298,7 @@ EXPORT_SYMBOL(mpt_findImVolumes);
5814EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6298EXPORT_SYMBOL(mpt_read_ioc_pg_3);
5815EXPORT_SYMBOL(mpt_alloc_fw_memory); 6299EXPORT_SYMBOL(mpt_alloc_fw_memory);
5816EXPORT_SYMBOL(mpt_free_fw_memory); 6300EXPORT_SYMBOL(mpt_free_fw_memory);
6301EXPORT_SYMBOL(mptbase_sas_persist_operation);
5817 6302
5818 6303
5819/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6304/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f4827d923731..75105277e22f 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -65,6 +65,7 @@
65#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */ 65#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */
66#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */ 66#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */
67#include "lsi/mpi_tool.h" /* Tools support */ 67#include "lsi/mpi_tool.h" /* Tools support */
68#include "lsi/mpi_sas.h" /* SAS support */
68 69
69/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 70/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
70 71
@@ -76,8 +77,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 77#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
77#endif 78#endif
78 79
79#define MPT_LINUX_VERSION_COMMON "3.03.02" 80#define MPT_LINUX_VERSION_COMMON "3.03.03"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.02" 81#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.03"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 82#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 83
83#define show_mptmod_ver(s,ver) \ 84#define show_mptmod_ver(s,ver) \
@@ -423,7 +424,7 @@ typedef struct _MPT_IOCTL {
423/* 424/*
424 * Event Structure and define 425 * Event Structure and define
425 */ 426 */
426#define MPTCTL_EVENT_LOG_SIZE (0x0000000A) 427#define MPTCTL_EVENT_LOG_SIZE (0x000000032)
427typedef struct _mpt_ioctl_events { 428typedef struct _mpt_ioctl_events {
428 u32 event; /* Specified by define above */ 429 u32 event; /* Specified by define above */
429 u32 eventContext; /* Index or counter */ 430 u32 eventContext; /* Index or counter */
@@ -451,16 +452,13 @@ typedef struct _mpt_ioctl_events {
451#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */ 452#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
452/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */ 453/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
453 454
454typedef struct _ScsiCfgData { 455typedef struct _SpiCfgData {
455 u32 PortFlags; 456 u32 PortFlags;
456 int *nvram; /* table of device NVRAM values */ 457 int *nvram; /* table of device NVRAM values */
457 IOCPage2_t *pIocPg2; /* table of Raid Volumes */
458 IOCPage3_t *pIocPg3; /* table of physical disks */
459 IOCPage4_t *pIocPg4; /* SEP devices addressing */ 458 IOCPage4_t *pIocPg4; /* SEP devices addressing */
460 dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */ 459 dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
461 int IocPg4Sz; /* IOCPage4 size */ 460 int IocPg4Sz; /* IOCPage4 size */
462 u8 dvStatus[MPT_MAX_SCSI_DEVICES]; 461 u8 dvStatus[MPT_MAX_SCSI_DEVICES];
463 int isRaid; /* bit field, 1 if RAID */
464 u8 minSyncFactor; /* 0xFF if async */ 462 u8 minSyncFactor; /* 0xFF if async */
465 u8 maxSyncOffset; /* 0 if async */ 463 u8 maxSyncOffset; /* 0 if async */
466 u8 maxBusWidth; /* 0 if narrow, 1 if wide */ 464 u8 maxBusWidth; /* 0 if narrow, 1 if wide */
@@ -472,10 +470,28 @@ typedef struct _ScsiCfgData {
472 u8 dvScheduled; /* 1 if scheduled */ 470 u8 dvScheduled; /* 1 if scheduled */
473 u8 forceDv; /* 1 to force DV scheduling */ 471 u8 forceDv; /* 1 to force DV scheduling */
474 u8 noQas; /* Disable QAS for this adapter */ 472 u8 noQas; /* Disable QAS for this adapter */
475 u8 Saf_Te; /* 1 to force all Processors as SAF-TE if Inquiry data length is too short to check for SAF-TE */ 473 u8 Saf_Te; /* 1 to force all Processors as
474 * SAF-TE if Inquiry data length
475 * is too short to check for SAF-TE
476 */
476 u8 mpt_dv; /* command line option: enhanced=1, basic=0 */ 477 u8 mpt_dv; /* command line option: enhanced=1, basic=0 */
478 u8 bus_reset; /* 1 to allow bus reset */
477 u8 rsvd[1]; 479 u8 rsvd[1];
478} ScsiCfgData; 480}SpiCfgData;
481
482typedef struct _SasCfgData {
483 u8 ptClear; /* 1 to automatically clear the
484 * persistent table.
485 * 0 to disable
486 * automatic clearing.
487 */
488}SasCfgData;
489
490typedef struct _RaidCfgData {
491 IOCPage2_t *pIocPg2; /* table of Raid Volumes */
492 IOCPage3_t *pIocPg3; /* table of physical disks */
493 int isRaid; /* bit field, 1 if RAID */
494}RaidCfgData;
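Splitting the old ScsiCfgData into SpiCfgData, SasCfgData, and RaidCfgData moves the RAID page pointers out of ioc->spi_data; callers switch from ioc->spi_data.pIocPg2/pIocPg3 to ioc->raid_data.pIocPg2/pIocPg3, exactly as the mptctl.c hunk further down does. A minimal before/after sketch (pIoc2 is an IOCPage2_t pointer, as in mptctl_gettargetinfo):

	/* before this patch */
	pIoc2 = ioc->spi_data.pIocPg2;	/* RAID volume table */
	/* after this patch */
	pIoc2 = ioc->raid_data.pIocPg2;	/* same table, new home */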
479 495
480/* 496/*
481 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 497 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -530,11 +546,16 @@ typedef struct _MPT_ADAPTER
530 u8 *sense_buf_pool; 546 u8 *sense_buf_pool;
531 dma_addr_t sense_buf_pool_dma; 547 dma_addr_t sense_buf_pool_dma;
532 u32 sense_buf_low_dma; 548 u32 sense_buf_low_dma;
549 u8 *HostPageBuffer; /* SAS - host page buffer support */
550 u32 HostPageBuffer_sz;
551 dma_addr_t HostPageBuffer_dma;
533 int mtrr_reg; 552 int mtrr_reg;
534 struct pci_dev *pcidev; /* struct pci_dev pointer */ 553 struct pci_dev *pcidev; /* struct pci_dev pointer */
535 u8 __iomem *memmap; /* mmap address */ 554 u8 __iomem *memmap; /* mmap address */
536 struct Scsi_Host *sh; /* Scsi Host pointer */ 555 struct Scsi_Host *sh; /* Scsi Host pointer */
537 ScsiCfgData spi_data; /* Scsi config. data */ 556 SpiCfgData spi_data; /* Scsi config. data */
557 RaidCfgData raid_data; /* Raid config. data */
558 SasCfgData sas_data; /* Sas config. data */
538 MPT_IOCTL *ioctl; /* ioctl data pointer */ 559 MPT_IOCTL *ioctl; /* ioctl data pointer */
539 struct proc_dir_entry *ioc_dentry; 560 struct proc_dir_entry *ioc_dentry;
540 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ 561 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
@@ -554,31 +575,35 @@ typedef struct _MPT_ADAPTER
554#else 575#else
555 u32 mfcnt; 576 u32 mfcnt;
556#endif 577#endif
557 u32 NB_for_64_byte_frame; 578 u32 NB_for_64_byte_frame;
558 u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)]; 579 u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
559 u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)]; 580 u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
560 IOCFactsReply_t facts; 581 IOCFactsReply_t facts;
561 PortFactsReply_t pfacts[2]; 582 PortFactsReply_t pfacts[2];
562 FCPortPage0_t fc_port_page0[2]; 583 FCPortPage0_t fc_port_page0[2];
584 struct timer_list persist_timer; /* persist table timer */
585 int persist_wait_done; /* persist completion flag */
586 u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
563 LANPage0_t lan_cnfg_page0; 587 LANPage0_t lan_cnfg_page0;
564 LANPage1_t lan_cnfg_page1; 588 LANPage1_t lan_cnfg_page1;
565 /* 589 /*
566 * Description: errata_flag_1064 590 * Description: errata_flag_1064
567 * If a PCIX read occurs within 1 or 2 cycles after the chip receives 591 * If a PCIX read occurs within 1 or 2 cycles after the chip receives
568 * a split completion for a read data, an internal address pointer incorrectly 592 * a split completion for a read data, an internal address pointer incorrectly
569 * increments by 32 bytes 593 * increments by 32 bytes
570 */ 594 */
571 int errata_flag_1064; 595 int errata_flag_1064;
572 u8 FirstWhoInit; 596 u8 FirstWhoInit;
573 u8 upload_fw; /* If set, do a fw upload */ 597 u8 upload_fw; /* If set, do a fw upload */
574 u8 reload_fw; /* Force a FW Reload on next reset */ 598 u8 reload_fw; /* Force a FW Reload on next reset */
575 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ 599 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
576 u8 pad1[4]; 600 u8 pad1[4];
577 int DoneCtx; 601 int DoneCtx;
578 int TaskCtx; 602 int TaskCtx;
579 int InternalCtx; 603 int InternalCtx;
580 struct list_head list; 604 struct list_head list;
581 struct net_device *netdev; 605 struct net_device *netdev;
606 struct list_head sas_topology;
582} MPT_ADAPTER; 607} MPT_ADAPTER;
583 608
584/* 609/*
@@ -964,6 +989,7 @@ extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
964extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); 989extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
965extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 990extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
966extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc); 991extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
992extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
967 993
968/* 994/*
969 * Public data decl's... 995 * Public data decl's...
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7577c2417e2e..cb2d59d5f5af 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1326,7 +1326,7 @@ mptctl_gettargetinfo (unsigned long arg)
1326 */ 1326 */
1327 if (hd && hd->Targets) { 1327 if (hd && hd->Targets) {
1328 mpt_findImVolumes(ioc); 1328 mpt_findImVolumes(ioc);
1329 pIoc2 = ioc->spi_data.pIocPg2; 1329 pIoc2 = ioc->raid_data.pIocPg2;
1330 for ( id = 0; id <= max_id; ) { 1330 for ( id = 0; id <= max_id; ) {
1331 if ( pIoc2 && pIoc2->NumActiveVolumes ) { 1331 if ( pIoc2 && pIoc2->NumActiveVolumes ) {
1332 if ( id == pIoc2->RaidVolume[0].VolumeID ) { 1332 if ( id == pIoc2->RaidVolume[0].VolumeID ) {
@@ -1348,7 +1348,7 @@ mptctl_gettargetinfo (unsigned long arg)
1348 --maxWordsLeft; 1348 --maxWordsLeft;
1349 goto next_id; 1349 goto next_id;
1350 } else { 1350 } else {
1351 pIoc3 = ioc->spi_data.pIocPg3; 1351 pIoc3 = ioc->raid_data.pIocPg3;
1352 for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) { 1352 for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) {
1353 if ( pIoc3->PhysDisk[jj].PhysDiskID == id ) 1353 if ( pIoc3->PhysDisk[jj].PhysDiskID == id )
1354 goto next_id; 1354 goto next_id;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 13771abea13f..a628be9bbbad 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -189,7 +189,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
189 printk(MYIOC_s_WARN_FMT 189 printk(MYIOC_s_WARN_FMT
190 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", 190 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
191 ioc->name, ioc); 191 ioc->name, ioc);
192 return -ENODEV; 192 return 0;
193 } 193 }
194 194
195 sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); 195 sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 52794be5a95c..ed3c891e388f 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -312,7 +312,12 @@ static int
312mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 312mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
313{ 313{
314 struct net_device *dev = ioc->netdev; 314 struct net_device *dev = ioc->netdev;
315 struct mpt_lan_priv *priv = netdev_priv(dev); 315 struct mpt_lan_priv *priv;
316
317 if (dev == NULL)
318 return(1);
319 else
320 priv = netdev_priv(dev);
316 321
317 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n", 322 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
318 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( 323 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
new file mode 100644
index 000000000000..429820e48c69
--- /dev/null
+++ b/drivers/message/fusion/mptsas.c
@@ -0,0 +1,1235 @@
1/*
2 * linux/drivers/message/fusion/mptsas.c
3 * For use with LSI Logic PCI chip/adapter(s)
4 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
5 *
6 * Copyright (c) 1999-2005 LSI Logic Corporation
7 * (mailto:mpt_linux_developer@lsil.com)
8 * Copyright (c) 2005 Dell
9 */
10/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11/*
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; version 2 of the License.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 NO WARRANTY
22 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26 solely responsible for determining the appropriateness of using and
27 distributing the Program and assumes all risks associated with its
28 exercise of rights under this Agreement, including but not limited to
29 the risks and costs of program errors, damage to or loss of data,
30 programs or equipment, and unavailability or interruption of operations.
31
32 DISCLAIMER OF LIABILITY
33 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40
41 You should have received a copy of the GNU General Public License
42 along with this program; if not, write to the Free Software
43 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
44*/
45/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
46
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/errno.h>
51#include <linux/sched.h>
52#include <linux/workqueue.h>
53
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport_sas.h>
58
59#include "mptbase.h"
60#include "mptscsih.h"
61
62
63#define my_NAME "Fusion MPT SAS Host driver"
64#define my_VERSION MPT_LINUX_VERSION_COMMON
65#define MYNAM "mptsas"
66
67MODULE_AUTHOR(MODULEAUTHOR);
68MODULE_DESCRIPTION(my_NAME);
69MODULE_LICENSE("GPL");
70
71static int mpt_pq_filter;
72module_param(mpt_pq_filter, int, 0);
73MODULE_PARM_DESC(mpt_pq_filter,
74 "Enable peripheral qualifier filter: enable=1 "
75 "(default=0)");
76
77static int mpt_pt_clear;
78module_param(mpt_pt_clear, int, 0);
79MODULE_PARM_DESC(mpt_pt_clear,
80 "Clear persistency table: enable=1 "
81 "(default=MPTSCSIH_PT_CLEAR=0)");
82
83static int mptsasDoneCtx = -1;
84static int mptsasTaskCtx = -1;
85static int mptsasInternalCtx = -1; /* Used only for internal commands */
86
87
88/*
89 * SAS topology structures
90 *
91 * The MPT Fusion firmware interface spreads information about the
 92 * SAS topology over several configuration pages, so we need data
 93 * structures to collect it and process it for the SAS transport class.
94 */
95
96struct mptsas_devinfo {
97 u16 handle; /* unique id to address this device */
98 u8 phy_id; /* phy number of parent device */
99 u8 port_id; /* sas physical port this device
100 is assoc'd with */
101 u8 target; /* logical target id of this device */
102 u8 bus; /* logical bus number of this device */
 103	u64	sas_address;	    /* WWN of this device; for SATA it
 104					       is assigned by the HBA or expander */
105 u32 device_info; /* bitfield detailed info about this device */
106};
107
108struct mptsas_phyinfo {
109 u8 phy_id; /* phy index */
110 u8 port_id; /* port number this phy is part of */
111 u8 negotiated_link_rate; /* nego'd link rate for this phy */
112 u8 hw_link_rate; /* hardware max/min phys link rate */
113 u8 programmed_link_rate; /* programmed max/min phy link rate */
114 struct mptsas_devinfo identify; /* point to phy device info */
115 struct mptsas_devinfo attached; /* point to attached device info */
116 struct sas_rphy *rphy;
117};
118
119struct mptsas_portinfo {
120 struct list_head list;
121 u16 handle; /* unique id to address this */
122 u8 num_phys; /* number of phys */
123 struct mptsas_phyinfo *phy_info;
124};
125
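As a rough illustration of how these three structures hang together, the sketch below walks ioc->sas_topology to find the phy whose attached end device matches a given SAS address; mptsas_slave_alloc() just below performs essentially this walk to bind a scsi_device to its VirtDevice. The helper name is hypothetical and not part of the patch.

static struct mptsas_phyinfo *
mptsas_find_phy_by_attached_address(MPT_ADAPTER *ioc, u64 sas_address)
{
	struct mptsas_portinfo *p;
	int i;

	list_for_each_entry(p, &ioc->sas_topology, list) {
		for (i = 0; i < p->num_phys; i++) {
			/* attached describes the device hanging off this phy */
			if (p->phy_info[i].attached.sas_address == sas_address)
				return &p->phy_info[i];
		}
	}
	return NULL;	/* no device with that WWN in the topology */
}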
126/*
127 * This is pretty ugly. We will be able to seriously clean it up
128 * once the DV code in mptscsih goes away and we can properly
129 * implement ->target_alloc.
130 */
131static int
132mptsas_slave_alloc(struct scsi_device *device)
133{
134 struct Scsi_Host *host = device->host;
135 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
136 struct sas_rphy *rphy;
137 struct mptsas_portinfo *p;
138 VirtDevice *vdev;
139 uint target = device->id;
140 int i;
141
142 if ((vdev = hd->Targets[target]) != NULL)
143 goto out;
144
145 vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
146 if (!vdev) {
147 printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
148 hd->ioc->name, sizeof(VirtDevice));
149 return -ENOMEM;
150 }
151
152 memset(vdev, 0, sizeof(VirtDevice));
153 vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
154 vdev->ioc_id = hd->ioc->id;
155
156 rphy = dev_to_rphy(device->sdev_target->dev.parent);
157 list_for_each_entry(p, &hd->ioc->sas_topology, list) {
158 for (i = 0; i < p->num_phys; i++) {
159 if (p->phy_info[i].attached.sas_address ==
160 rphy->identify.sas_address) {
161 vdev->target_id =
162 p->phy_info[i].attached.target;
163 vdev->bus_id = p->phy_info[i].attached.bus;
164 hd->Targets[device->id] = vdev;
165 goto out;
166 }
167 }
168 }
169
170 printk("No matching SAS device found!!\n");
171 kfree(vdev);
172 return -ENODEV;
173
174 out:
175 vdev->num_luns++;
176 device->hostdata = vdev;
177 return 0;
178}
179
180static struct scsi_host_template mptsas_driver_template = {
181 .proc_name = "mptsas",
182 .proc_info = mptscsih_proc_info,
 183	.name				= "MPT SAS Host",
184 .info = mptscsih_info,
185 .queuecommand = mptscsih_qcmd,
186 .slave_alloc = mptsas_slave_alloc,
187 .slave_configure = mptscsih_slave_configure,
188 .slave_destroy = mptscsih_slave_destroy,
189 .change_queue_depth = mptscsih_change_queue_depth,
190 .eh_abort_handler = mptscsih_abort,
191 .eh_device_reset_handler = mptscsih_dev_reset,
192 .eh_bus_reset_handler = mptscsih_bus_reset,
193 .eh_host_reset_handler = mptscsih_host_reset,
194 .bios_param = mptscsih_bios_param,
195 .can_queue = MPT_FC_CAN_QUEUE,
196 .this_id = -1,
197 .sg_tablesize = MPT_SCSI_SG_DEPTH,
198 .max_sectors = 8192,
199 .cmd_per_lun = 7,
200 .use_clustering = ENABLE_CLUSTERING,
201};
202
203static struct sas_function_template mptsas_transport_functions = {
204};
205
206static struct scsi_transport_template *mptsas_transport_template;
207
208#ifdef SASDEBUG
209static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
210{
211 printk("---- IO UNIT PAGE 0 ------------\n");
212 printk("Handle=0x%X\n",
213 le16_to_cpu(phy_data->AttachedDeviceHandle));
214 printk("Controller Handle=0x%X\n",
215 le16_to_cpu(phy_data->ControllerDevHandle));
216 printk("Port=0x%X\n", phy_data->Port);
217 printk("Port Flags=0x%X\n", phy_data->PortFlags);
218 printk("PHY Flags=0x%X\n", phy_data->PhyFlags);
219 printk("Negotiated Link Rate=0x%X\n", phy_data->NegotiatedLinkRate);
220 printk("Controller PHY Device Info=0x%X\n",
221 le32_to_cpu(phy_data->ControllerPhyDeviceInfo));
222 printk("DiscoveryStatus=0x%X\n",
223 le32_to_cpu(phy_data->DiscoveryStatus));
224 printk("\n");
225}
226
227static void mptsas_print_phy_pg0(SasPhyPage0_t *pg0)
228{
229 __le64 sas_address;
230
231 memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
232
233 printk("---- SAS PHY PAGE 0 ------------\n");
234 printk("Attached Device Handle=0x%X\n",
235 le16_to_cpu(pg0->AttachedDevHandle));
236 printk("SAS Address=0x%llX\n",
237 (unsigned long long)le64_to_cpu(sas_address));
238 printk("Attached PHY Identifier=0x%X\n", pg0->AttachedPhyIdentifier);
239 printk("Attached Device Info=0x%X\n",
240 le32_to_cpu(pg0->AttachedDeviceInfo));
241 printk("Programmed Link Rate=0x%X\n", pg0->ProgrammedLinkRate);
242 printk("Change Count=0x%X\n", pg0->ChangeCount);
243 printk("PHY Info=0x%X\n", le32_to_cpu(pg0->PhyInfo));
244 printk("\n");
245}
246
247static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
248{
249 __le64 sas_address;
250
251 memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
252
253 printk("---- SAS DEVICE PAGE 0 ---------\n");
254 printk("Handle=0x%X\n" ,le16_to_cpu(pg0->DevHandle));
255 printk("Enclosure Handle=0x%X\n", le16_to_cpu(pg0->EnclosureHandle));
256 printk("Slot=0x%X\n", le16_to_cpu(pg0->Slot));
257 printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
258 printk("Target ID=0x%X\n", pg0->TargetID);
259 printk("Bus=0x%X\n", pg0->Bus);
260 printk("PhyNum=0x%X\n", pg0->PhyNum);
261 printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
262 printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
263 printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
264 printk("Physical Port=0x%X\n", pg0->PhysicalPort);
265 printk("\n");
266}
267
268static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
269{
270 printk("---- SAS EXPANDER PAGE 1 ------------\n");
271
272 printk("Physical Port=0x%X\n", pg1->PhysicalPort);
273 printk("PHY Identifier=0x%X\n", pg1->Phy);
274 printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
275 printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
276 printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
277 printk("Owner Device Handle=0x%X\n",
278 le16_to_cpu(pg1->OwnerDevHandle));
279 printk("Attached Device Handle=0x%X\n",
280 le16_to_cpu(pg1->AttachedDevHandle));
281}
282#else
283#define mptsas_print_phy_data(phy_data) do { } while (0)
284#define mptsas_print_phy_pg0(pg0) do { } while (0)
285#define mptsas_print_device_pg0(pg0) do { } while (0)
286#define mptsas_print_expander_pg1(pg1) do { } while (0)
287#endif
288
289static int
290mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
291{
292 ConfigExtendedPageHeader_t hdr;
293 CONFIGPARMS cfg;
294 SasIOUnitPage0_t *buffer;
295 dma_addr_t dma_handle;
296 int error, i;
297
298 hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
299 hdr.ExtPageLength = 0;
300 hdr.PageNumber = 0;
301 hdr.Reserved1 = 0;
302 hdr.Reserved2 = 0;
303 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
304 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
305
306 cfg.cfghdr.ehdr = &hdr;
307 cfg.physAddr = -1;
308 cfg.pageAddr = 0;
309 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
310 cfg.dir = 0; /* read */
311 cfg.timeout = 10;
312
313 error = mpt_config(ioc, &cfg);
314 if (error)
315 goto out;
316 if (!hdr.ExtPageLength) {
317 error = -ENXIO;
318 goto out;
319 }
320
321 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
322 &dma_handle);
323 if (!buffer) {
324 error = -ENOMEM;
325 goto out;
326 }
327
328 cfg.physAddr = dma_handle;
329 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
330
331 error = mpt_config(ioc, &cfg);
332 if (error)
333 goto out_free_consistent;
334
335 port_info->num_phys = buffer->NumPhys;
336 port_info->phy_info = kcalloc(port_info->num_phys,
337 sizeof(struct mptsas_phyinfo),GFP_KERNEL);
338 if (!port_info->phy_info) {
339 error = -ENOMEM;
340 goto out_free_consistent;
341 }
342
343 for (i = 0; i < port_info->num_phys; i++) {
344 mptsas_print_phy_data(&buffer->PhyData[i]);
345 port_info->phy_info[i].phy_id = i;
346 port_info->phy_info[i].port_id =
347 buffer->PhyData[i].Port;
348 port_info->phy_info[i].negotiated_link_rate =
349 buffer->PhyData[i].NegotiatedLinkRate;
350 }
351
352 out_free_consistent:
353 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
354 buffer, dma_handle);
355 out:
356 return error;
357}
358
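Every page reader in this file (SAS IO unit, phy, device, and expander pages) repeats the same two-step mpt_config() sequence: issue MPI_CONFIG_ACTION_PAGE_HEADER to learn ExtPageLength, then allocate a DMA-coherent buffer of ExtPageLength * 4 bytes and re-issue the request as MPI_CONFIG_ACTION_PAGE_READ_CURRENT. A stripped-down sketch of that shared pattern follows; the helper name and its exact parameters are hypothetical, not part of the patch.

/* Sketch of the shared read pattern. ext_page_type and page_addr are
 * whatever the specific reader needs (e.g. MPI_CONFIG_EXTPAGETYPE_SAS_PHY);
 * callers would also fill PageVersion and PageNumber as in the code above.
 */
static int mptsas_read_ext_page(MPT_ADAPTER *ioc, u8 ext_page_type,
				u32 page_addr, void **out, int *out_sz,
				dma_addr_t *out_dma)
{
	ConfigExtendedPageHeader_t hdr;
	CONFIGPARMS cfg;
	void *buffer;
	int error;

	memset(&hdr, 0, sizeof(hdr));
	hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	hdr.ExtPageType = ext_page_type;

	cfg.cfghdr.ehdr = &hdr;
	cfg.physAddr = -1;
	cfg.pageAddr = page_addr;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;			/* read */
	cfg.timeout = 10;

	error = mpt_config(ioc, &cfg);	/* step 1: fetch the header */
	if (error)
		return error;
	if (!hdr.ExtPageLength)
		return -ENXIO;

	*out_sz = hdr.ExtPageLength * 4;	/* length is in 32-bit words */
	buffer = pci_alloc_consistent(ioc->pcidev, *out_sz, out_dma);
	if (!buffer)
		return -ENOMEM;

	cfg.physAddr = *out_dma;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	error = mpt_config(ioc, &cfg);	/* step 2: read the page proper */
	if (error) {
		pci_free_consistent(ioc->pcidev, *out_sz, buffer, *out_dma);
		return error;
	}
	*out = buffer;
	return 0;
}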
359static int
360mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
361 u32 form, u32 form_specific)
362{
363 ConfigExtendedPageHeader_t hdr;
364 CONFIGPARMS cfg;
365 SasPhyPage0_t *buffer;
366 dma_addr_t dma_handle;
367 int error;
368
369 hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
370 hdr.ExtPageLength = 0;
371 hdr.PageNumber = 0;
372 hdr.Reserved1 = 0;
373 hdr.Reserved2 = 0;
374 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
375 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
376
377 cfg.cfghdr.ehdr = &hdr;
378 cfg.dir = 0; /* read */
379 cfg.timeout = 10;
380
381 /* Get Phy Pg 0 for each Phy. */
382 cfg.physAddr = -1;
383 cfg.pageAddr = form + form_specific;
384 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
385
386 error = mpt_config(ioc, &cfg);
387 if (error)
388 goto out;
389
390 if (!hdr.ExtPageLength) {
391 error = -ENXIO;
392 goto out;
393 }
394
395 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
396 &dma_handle);
397 if (!buffer) {
398 error = -ENOMEM;
399 goto out;
400 }
401
402 cfg.physAddr = dma_handle;
403 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
404
405 error = mpt_config(ioc, &cfg);
406 if (error)
407 goto out_free_consistent;
408
409 mptsas_print_phy_pg0(buffer);
410
411 phy_info->hw_link_rate = buffer->HwLinkRate;
412 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
413 phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
414 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
415
416 out_free_consistent:
417 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
418 buffer, dma_handle);
419 out:
420 return error;
421}
422
423static int
424mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
425 u32 form, u32 form_specific)
426{
427 ConfigExtendedPageHeader_t hdr;
428 CONFIGPARMS cfg;
429 SasDevicePage0_t *buffer;
430 dma_addr_t dma_handle;
431 __le64 sas_address;
432 int error;
433
434 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
435 hdr.ExtPageLength = 0;
436 hdr.PageNumber = 0;
437 hdr.Reserved1 = 0;
438 hdr.Reserved2 = 0;
439 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
440 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
441
442 cfg.cfghdr.ehdr = &hdr;
443 cfg.pageAddr = form + form_specific;
444 cfg.physAddr = -1;
445 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
446 cfg.dir = 0; /* read */
447 cfg.timeout = 10;
448
449 error = mpt_config(ioc, &cfg);
450 if (error)
451 goto out;
452 if (!hdr.ExtPageLength) {
453 error = -ENXIO;
454 goto out;
455 }
456
457 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
458 &dma_handle);
459 if (!buffer) {
460 error = -ENOMEM;
461 goto out;
462 }
463
464 cfg.physAddr = dma_handle;
465 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
466
467 error = mpt_config(ioc, &cfg);
468 if (error)
469 goto out_free_consistent;
470
471 mptsas_print_device_pg0(buffer);
472
473 device_info->handle = le16_to_cpu(buffer->DevHandle);
474 device_info->phy_id = buffer->PhyNum;
475 device_info->port_id = buffer->PhysicalPort;
476 device_info->target = buffer->TargetID;
477 device_info->bus = buffer->Bus;
478 memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
479 device_info->sas_address = le64_to_cpu(sas_address);
480 device_info->device_info =
481 le32_to_cpu(buffer->DeviceInfo);
482
483 out_free_consistent:
484 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
485 buffer, dma_handle);
486 out:
487 return error;
488}
489
490static int
491mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
492 u32 form, u32 form_specific)
493{
494 ConfigExtendedPageHeader_t hdr;
495 CONFIGPARMS cfg;
496 SasExpanderPage0_t *buffer;
497 dma_addr_t dma_handle;
498 int error;
499
500 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
501 hdr.ExtPageLength = 0;
502 hdr.PageNumber = 0;
503 hdr.Reserved1 = 0;
504 hdr.Reserved2 = 0;
505 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
506 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
507
508 cfg.cfghdr.ehdr = &hdr;
509 cfg.physAddr = -1;
510 cfg.pageAddr = form + form_specific;
511 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
512 cfg.dir = 0; /* read */
513 cfg.timeout = 10;
514
515 error = mpt_config(ioc, &cfg);
516 if (error)
517 goto out;
518
519 if (!hdr.ExtPageLength) {
520 error = -ENXIO;
521 goto out;
522 }
523
524 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
525 &dma_handle);
526 if (!buffer) {
527 error = -ENOMEM;
528 goto out;
529 }
530
531 cfg.physAddr = dma_handle;
532 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
533
534 error = mpt_config(ioc, &cfg);
535 if (error)
536 goto out_free_consistent;
537
538 /* save config data */
539 port_info->num_phys = buffer->NumPhys;
540 port_info->handle = le16_to_cpu(buffer->DevHandle);
541 port_info->phy_info = kcalloc(port_info->num_phys,
542 sizeof(struct mptsas_phyinfo),GFP_KERNEL);
543 if (!port_info->phy_info) {
544 error = -ENOMEM;
545 goto out_free_consistent;
546 }
547
548 out_free_consistent:
549 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
550 buffer, dma_handle);
551 out:
552 return error;
553}
554
555static int
556mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
557 u32 form, u32 form_specific)
558{
559 ConfigExtendedPageHeader_t hdr;
560 CONFIGPARMS cfg;
561 SasExpanderPage1_t *buffer;
562 dma_addr_t dma_handle;
563 int error;
564
565 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
566 hdr.ExtPageLength = 0;
567 hdr.PageNumber = 1;
568 hdr.Reserved1 = 0;
569 hdr.Reserved2 = 0;
570 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
571 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
572
573 cfg.cfghdr.ehdr = &hdr;
574 cfg.physAddr = -1;
575 cfg.pageAddr = form + form_specific;
576 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
577 cfg.dir = 0; /* read */
578 cfg.timeout = 10;
579
580 error = mpt_config(ioc, &cfg);
581 if (error)
582 goto out;
583
584 if (!hdr.ExtPageLength) {
585 error = -ENXIO;
586 goto out;
587 }
588
589 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
590 &dma_handle);
591 if (!buffer) {
592 error = -ENOMEM;
593 goto out;
594 }
595
596 cfg.physAddr = dma_handle;
597 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
598
599 error = mpt_config(ioc, &cfg);
600 if (error)
601 goto out_free_consistent;
602
603
604 mptsas_print_expander_pg1(buffer);
605
606 /* save config data */
607 phy_info->phy_id = buffer->Phy;
608 phy_info->port_id = buffer->PhysicalPort;
609 phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
610 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
611 phy_info->hw_link_rate = buffer->HwLinkRate;
612 phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
613 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
614
615
616 out_free_consistent:
617 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
618 buffer, dma_handle);
619 out:
620 return error;
621}
622
623static void
624mptsas_parse_device_info(struct sas_identify *identify,
625 struct mptsas_devinfo *device_info)
626{
627 u16 protocols;
628
629 identify->sas_address = device_info->sas_address;
630 identify->phy_identifier = device_info->phy_id;
631
632 /*
633 * Fill in Phy Initiator Port Protocol.
634 * Bits 6:3, more than one bit can be set, fall through cases.
635 */
636 protocols = device_info->device_info & 0x78;
637 identify->initiator_port_protocols = 0;
638 if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
639 identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
640 if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
641 identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
642 if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
643 identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
644 if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
645 identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
646
647 /*
648 * Fill in Phy Target Port Protocol.
649 * Bits 10:7, more than one bit can be set, fall through cases.
650 */
651 protocols = device_info->device_info & 0x780;
652 identify->target_port_protocols = 0;
653 if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
654 identify->target_port_protocols |= SAS_PROTOCOL_SSP;
655 if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
656 identify->target_port_protocols |= SAS_PROTOCOL_STP;
657 if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
658 identify->target_port_protocols |= SAS_PROTOCOL_SMP;
659 if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
660 identify->target_port_protocols |= SAS_PROTOCOL_SATA;
661
662 /*
663 * Fill in Attached device type.
664 */
665 switch (device_info->device_info &
666 MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
667 case MPI_SAS_DEVICE_INFO_NO_DEVICE:
668 identify->device_type = SAS_PHY_UNUSED;
669 break;
670 case MPI_SAS_DEVICE_INFO_END_DEVICE:
671 identify->device_type = SAS_END_DEVICE;
672 break;
673 case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
674 identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
675 break;
676 case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
677 identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
678 break;
679 }
680}
681
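To make the bit-slicing above concrete, here is a worked decode with illustrative values only: a DeviceInfo word carrying just the SSP-target protocol bit and a device-type field of END_DEVICE resolves to a plain SAS disk. The WWN below is made up.

	/* Illustrative only: decode an assumed SSP end device */
	struct mptsas_devinfo dev = {
		.sas_address = 0x5000c50000000001ULL,	/* made-up WWN */
		.device_info = MPI_SAS_DEVICE_INFO_SSP_TARGET |
			       MPI_SAS_DEVICE_INFO_END_DEVICE,
	};
	struct sas_identify id;

	mptsas_parse_device_info(&id, &dev);
	/* id.target_port_protocols == SAS_PROTOCOL_SSP,
	 * id.initiator_port_protocols == 0,
	 * id.device_type == SAS_END_DEVICE
	 */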
682static int mptsas_probe_one_phy(struct device *dev,
683 struct mptsas_phyinfo *phy_info, int index)
684{
685 struct sas_phy *port;
686 int error;
687
688 port = sas_phy_alloc(dev, index);
689 if (!port)
690 return -ENOMEM;
691
692 port->port_identifier = phy_info->port_id;
693 mptsas_parse_device_info(&port->identify, &phy_info->identify);
694
695 /*
696 * Set Negotiated link rate.
697 */
698 switch (phy_info->negotiated_link_rate) {
699 case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
700 port->negotiated_linkrate = SAS_PHY_DISABLED;
701 break;
702 case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
703 port->negotiated_linkrate = SAS_LINK_RATE_FAILED;
704 break;
705 case MPI_SAS_IOUNIT0_RATE_1_5:
706 port->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
707 break;
708 case MPI_SAS_IOUNIT0_RATE_3_0:
709 port->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
710 break;
711 case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
712 case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
713 default:
714 port->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
715 break;
716 }
717
718 /*
719 * Set Max hardware link rate.
720 */
721 switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
722 case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
723 port->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
724 break;
725 case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
726 port->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
727 break;
728 default:
729 break;
730 }
731
732 /*
733 * Set Max programmed link rate.
734 */
735 switch (phy_info->programmed_link_rate &
736 MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
737 case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
738 port->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
739 break;
740 case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
741 port->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
742 break;
743 default:
744 break;
745 }
746
747 /*
748 * Set Min hardware link rate.
749 */
750 switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
751 case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
752 port->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
753 break;
754 case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
755 port->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
756 break;
757 default:
758 break;
759 }
760
761 /*
762 * Set Min programmed link rate.
763 */
764 switch (phy_info->programmed_link_rate &
765 MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
766 case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
767 port->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
768 break;
769 case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
770 port->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
771 break;
772 default:
773 break;
774 }
775
776 error = sas_phy_add(port);
777 if (error) {
778 sas_phy_free(port);
779 return error;
780 }
781
782 if (phy_info->attached.handle) {
783 struct sas_rphy *rphy;
784
785 rphy = sas_rphy_alloc(port);
786 if (!rphy)
787 return 0; /* non-fatal: an rphy can be added later */
788
789 mptsas_parse_device_info(&rphy->identify, &phy_info->attached);
790 error = sas_rphy_add(rphy);
791 if (error) {
792 sas_rphy_free(rphy);
793 return error;
794 }
795
796 phy_info->rphy = rphy;
797 }
798
799 return 0;
800}
801
802static int
803mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
804{
805 struct mptsas_portinfo *port_info;
806 u32 handle = 0xFFFF;
807 int error = -ENOMEM, i;
808
809 port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
810 if (!port_info)
811 goto out;
812 memset(port_info, 0, sizeof(*port_info));
813
814 error = mptsas_sas_io_unit_pg0(ioc, port_info);
815 if (error)
816 goto out_free_port_info;
817
818 list_add_tail(&port_info->list, &ioc->sas_topology);
819
820 for (i = 0; i < port_info->num_phys; i++) {
821 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
822 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
823 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
824
825 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
826 (MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
827 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
828 handle = port_info->phy_info[i].identify.handle;
829
830 if (port_info->phy_info[i].attached.handle) {
831 mptsas_sas_device_pg0(ioc,
832 &port_info->phy_info[i].attached,
833 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
834 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
835 port_info->phy_info[i].attached.handle);
836 }
837
838 mptsas_probe_one_phy(&ioc->sh->shost_gendev,
839 &port_info->phy_info[i], *index);
840 (*index)++;
841 }
842
843 return 0;
844
845 out_free_port_info:
846 kfree(port_info);
847 out:
848 return error;
849}
850
851static int
852mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
853{
854 struct mptsas_portinfo *port_info, *p;
855 int error = -ENOMEM, i, j;
856
857 port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
858 if (!port_info)
859 goto out;
860 memset(port_info, 0, sizeof(*port_info));
861
862 error = mptsas_sas_expander_pg0(ioc, port_info,
863 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
864 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
865 if (error)
866 goto out_free_port_info;
867
868 *handle = port_info->handle;
869
870 list_add_tail(&port_info->list, &ioc->sas_topology);
871 for (i = 0; i < port_info->num_phys; i++) {
872 struct device *parent;
873
874 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
875 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
876 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
877
878 if (port_info->phy_info[i].identify.handle) {
879 mptsas_sas_device_pg0(ioc,
880 &port_info->phy_info[i].identify,
881 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
882 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
883 port_info->phy_info[i].identify.handle);
884 }
885
886 if (port_info->phy_info[i].attached.handle) {
887 mptsas_sas_device_pg0(ioc,
888 &port_info->phy_info[i].attached,
889 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
890 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
891 port_info->phy_info[i].attached.handle);
892 }
893
894 /*
 895		 * If we find a parent port handle, this expander is
 896		 * attached to another expander; otherwise it hangs off
 897		 * the HBA phys.
898 */
899 parent = &ioc->sh->shost_gendev;
900 list_for_each_entry(p, &ioc->sas_topology, list) {
901 for (j = 0; j < p->num_phys; j++) {
902 if (port_info->phy_info[i].identify.handle ==
903 p->phy_info[j].attached.handle)
904 parent = &p->phy_info[j].rphy->dev;
905 }
906 }
907
908 mptsas_probe_one_phy(parent, &port_info->phy_info[i], *index);
909 (*index)++;
910 }
911
912 return 0;
913
914 out_free_port_info:
915 kfree(port_info);
916 out:
917 return error;
918}
919
920static void
921mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
922{
923 u32 handle = 0xFFFF;
924 int index = 0;
925
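	/*
	 * Note on the loop below: handle starts at 0xFFFF so the first
	 * GET_NEXT_HANDLE expander read returns the first expander;
	 * each successful mptsas_probe_expander_phys() call overwrites
	 * handle with the one just read, and the loop ends on the first
	 * mpt_config() failure, i.e. when no further expanders exist.
	 */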
926 mptsas_probe_hba_phys(ioc, &index);
927 while (!mptsas_probe_expander_phys(ioc, &handle, &index))
928 ;
929}
930
931static int
932mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
933{
934 struct Scsi_Host *sh;
935 MPT_SCSI_HOST *hd;
936 MPT_ADAPTER *ioc;
937 unsigned long flags;
938 int sz, ii;
939 int numSGE = 0;
940 int scale;
941 int ioc_cap;
942 u8 *mem;
943 int error=0;
944 int r;
945
946 r = mpt_attach(pdev,id);
947 if (r)
948 return r;
949
950 ioc = pci_get_drvdata(pdev);
951 ioc->DoneCtx = mptsasDoneCtx;
952 ioc->TaskCtx = mptsasTaskCtx;
953 ioc->InternalCtx = mptsasInternalCtx;
954
955 /* Added sanity check on readiness of the MPT adapter.
956 */
957 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
958 printk(MYIOC_s_WARN_FMT
959 "Skipping because it's not operational!\n",
960 ioc->name);
961 return -ENODEV;
962 }
963
964 if (!ioc->active) {
965 printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
966 ioc->name);
967 return -ENODEV;
968 }
969
970 /* Sanity check - ensure at least 1 port is INITIATOR capable
971 */
972 ioc_cap = 0;
973 for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
974 if (ioc->pfacts[ii].ProtocolFlags &
975 MPI_PORTFACTS_PROTOCOL_INITIATOR)
976 ioc_cap++;
977 }
978
979 if (!ioc_cap) {
980 printk(MYIOC_s_WARN_FMT
981 "Skipping ioc=%p because SCSI Initiator mode "
982 "is NOT enabled!\n", ioc->name, ioc);
983 return 0;
984 }
985
986 sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
987 if (!sh) {
988 printk(MYIOC_s_WARN_FMT
989 "Unable to register controller with SCSI subsystem\n",
990 ioc->name);
991 return -1;
992 }
993
994 spin_lock_irqsave(&ioc->FreeQlock, flags);
995
996 /* Attach the SCSI Host to the IOC structure
997 */
998 ioc->sh = sh;
999
1000 sh->io_port = 0;
1001 sh->n_io_port = 0;
1002 sh->irq = 0;
1003
1004 /* set 16 byte cdb's */
1005 sh->max_cmd_len = 16;
1006
1007 sh->max_id = ioc->pfacts->MaxDevices + 1;
1008
1009 sh->transportt = mptsas_transport_template;
1010
1011 sh->max_lun = MPT_LAST_LUN + 1;
1012 sh->max_channel = 0;
1013 sh->this_id = ioc->pfacts[0].PortSCSIID;
1014
1015 /* Required entry.
1016 */
1017 sh->unique_id = ioc->id;
1018
1019 INIT_LIST_HEAD(&ioc->sas_topology);
1020
1021 /* Verify that we won't exceed the maximum
1022 * number of chain buffers
1023 * We can optimize: ZZ = req_sz/sizeof(SGE)
1024 * For 32bit SGE's:
1025 * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
1026 * + (req_sz - 64)/sizeof(SGE)
1027 * A slightly different algorithm is required for
1028 * 64bit SGEs.
1029 */
1030 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
1031 if (sizeof(dma_addr_t) == sizeof(u64)) {
1032 numSGE = (scale - 1) *
1033 (ioc->facts.MaxChainDepth-1) + scale +
1034 (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
1035 sizeof(u32));
1036 } else {
1037 numSGE = 1 + (scale - 1) *
1038 (ioc->facts.MaxChainDepth-1) + scale +
1039 (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
1040 sizeof(u32));
1041 }
1042
1043 if (numSGE < sh->sg_tablesize) {
1044 /* Reset this value */
1045 dprintk((MYIOC_s_INFO_FMT
1046 "Resetting sg_tablesize to %d from %d\n",
1047 ioc->name, numSGE, sh->sg_tablesize));
1048 sh->sg_tablesize = numSGE;
1049 }
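	/* Worked example (illustrative numbers, not from the patch) for the
	 * 64-bit SGE branch above. Assuming req_sz = 128 and
	 * facts.MaxChainDepth = 64:
	 *   scale  = 128 / (8 + 4)                 = 10
	 *   numSGE = (10 - 1) * (64 - 1) + 10
	 *          + (128 - 60) / (8 + 4)          = 567 + 10 + 5 = 582
	 * so sg_tablesize is only clamped when its default exceeds 582.
	 */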
1050
1051 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1052
1053 hd = (MPT_SCSI_HOST *) sh->hostdata;
1054 hd->ioc = ioc;
1055
1056 /* SCSI needs scsi_cmnd lookup table!
1057 * (with size equal to req_depth*PtrSz!)
1058 */
1059 sz = ioc->req_depth * sizeof(void *);
1060 mem = kmalloc(sz, GFP_ATOMIC);
1061 if (mem == NULL) {
1062 error = -ENOMEM;
1063 goto mptsas_probe_failed;
1064 }
1065
1066 memset(mem, 0, sz);
1067 hd->ScsiLookup = (struct scsi_cmnd **) mem;
1068
1069 dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n",
1070 ioc->name, hd->ScsiLookup, sz));
1071
1072 /* Allocate memory for the device structures.
1073 * A non-Null pointer at an offset
1074 * indicates a device exists.
1075 * max_id = 1 + maximum id (hosts.h)
1076 */
1077 sz = sh->max_id * sizeof(void *);
1078 mem = kmalloc(sz, GFP_ATOMIC);
1079 if (mem == NULL) {
1080 error = -ENOMEM;
1081 goto mptsas_probe_failed;
1082 }
1083
1084 memset(mem, 0, sz);
1085 hd->Targets = (VirtDevice **) mem;
1086
1087 dprintk((KERN_INFO
1088 " Targets @ %p, sz=%d\n", hd->Targets, sz));
1089
1090 /* Clear the TM flags
1091 */
1092 hd->tmPending = 0;
1093 hd->tmState = TM_STATE_NONE;
1094 hd->resetPending = 0;
1095 hd->abortSCpnt = NULL;
1096
1097 /* Clear the pointer used to store
1098 * single-threaded commands, i.e., those
 1099	 * issued during a bus scan, DV, and
 1100	 * configuration page reads.
1101 */
1102 hd->cmdPtr = NULL;
1103
 1104	/* Initialize this SCSI host's timer.
 1105	 * To use, set the timer's expires field
 1106	 * and call add_timer().
1107 */
1108 init_timer(&hd->timer);
1109 hd->timer.data = (unsigned long) hd;
1110 hd->timer.function = mptscsih_timer_expired;
1111
1112 hd->mpt_pq_filter = mpt_pq_filter;
1113 ioc->sas_data.ptClear = mpt_pt_clear;
1114
1115 if (ioc->sas_data.ptClear==1) {
1116 mptbase_sas_persist_operation(
1117 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
1118 }
1119
1120 ddvprintk((MYIOC_s_INFO_FMT
1121 "mpt_pq_filter %x mpt_pq_filter %x\n",
1122 ioc->name,
1123 mpt_pq_filter,
1124 mpt_pq_filter));
1125
1126 init_waitqueue_head(&hd->scandv_waitq);
1127 hd->scandv_wait_done = 0;
1128 hd->last_queue_full = 0;
1129
1130 error = scsi_add_host(sh, &ioc->pcidev->dev);
1131 if (error) {
1132 dprintk((KERN_ERR MYNAM
1133 "scsi_add_host failed\n"));
1134 goto mptsas_probe_failed;
1135 }
1136
1137 mptsas_scan_sas_topology(ioc);
1138
1139 return 0;
1140
1141mptsas_probe_failed:
1142
1143 mptscsih_remove(pdev);
1144 return error;
1145}
1146
1147static void __devexit mptsas_remove(struct pci_dev *pdev)
1148{
1149 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1150 struct mptsas_portinfo *p, *n;
1151
1152 sas_remove_host(ioc->sh);
1153
1154 list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
1155 list_del(&p->list);
1156 kfree(p);
1157 }
1158
1159 mptscsih_remove(pdev);
1160}
1161
1162static struct pci_device_id mptsas_pci_table[] = {
1163 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064,
1164 PCI_ANY_ID, PCI_ANY_ID },
1165 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066,
1166 PCI_ANY_ID, PCI_ANY_ID },
1167 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068,
1168 PCI_ANY_ID, PCI_ANY_ID },
1169 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064E,
1170 PCI_ANY_ID, PCI_ANY_ID },
1171 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066E,
1172 PCI_ANY_ID, PCI_ANY_ID },
1173 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068E,
1174 PCI_ANY_ID, PCI_ANY_ID },
1175 {0} /* Terminating entry */
1176};
1177MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
1178
1179
1180static struct pci_driver mptsas_driver = {
1181 .name = "mptsas",
1182 .id_table = mptsas_pci_table,
1183 .probe = mptsas_probe,
1184 .remove = __devexit_p(mptsas_remove),
1185 .shutdown = mptscsih_shutdown,
1186#ifdef CONFIG_PM
1187 .suspend = mptscsih_suspend,
1188 .resume = mptscsih_resume,
1189#endif
1190};
1191
1192static int __init
1193mptsas_init(void)
1194{
1195 show_mptmod_ver(my_NAME, my_VERSION);
1196
1197 mptsas_transport_template =
1198 sas_attach_transport(&mptsas_transport_functions);
1199 if (!mptsas_transport_template)
1200 return -ENODEV;
1201
1202 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
1203 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
1204 mptsasInternalCtx =
1205 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
1206
1207 if (mpt_event_register(mptsasDoneCtx, mptscsih_event_process) == 0) {
1208 devtprintk((KERN_INFO MYNAM
1209 ": Registered for IOC event notifications\n"));
1210 }
1211
1212 if (mpt_reset_register(mptsasDoneCtx, mptscsih_ioc_reset) == 0) {
1213 dprintk((KERN_INFO MYNAM
1214 ": Registered for IOC reset notifications\n"));
1215 }
1216
1217 return pci_register_driver(&mptsas_driver);
1218}
1219
1220static void __exit
1221mptsas_exit(void)
1222{
1223 pci_unregister_driver(&mptsas_driver);
1224 sas_release_transport(mptsas_transport_template);
1225
1226 mpt_reset_deregister(mptsasDoneCtx);
1227 mpt_event_deregister(mptsasDoneCtx);
1228
1229 mpt_deregister(mptsasInternalCtx);
1230 mpt_deregister(mptsasTaskCtx);
1231 mpt_deregister(mptsasDoneCtx);
1232}
1233
1234module_init(mptsas_init);
1235module_exit(mptsas_exit);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 4a003dc5fde8..5cb07eb224d7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -62,6 +62,7 @@
62#include <scsi/scsi_device.h> 62#include <scsi/scsi_device.h>
63#include <scsi/scsi_host.h> 63#include <scsi/scsi_host.h>
64#include <scsi/scsi_tcq.h> 64#include <scsi/scsi_tcq.h>
65#include <scsi/scsi_dbg.h>
65 66
66#include "mptbase.h" 67#include "mptbase.h"
67#include "mptscsih.h" 68#include "mptscsih.h"
@@ -93,8 +94,9 @@ typedef struct _BIG_SENSE_BUF {
93 94
94#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */ 95#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */
95#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */ 96#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */
96#define MPT_ICFLAG_PHYS_DISK 0x04 /* Any SCSI IO but do Phys Disk Format */ 97#define MPT_ICFLAG_EBOS 0x04 /* ReadBuffer Echo buffer has EBOS */
97#define MPT_ICFLAG_TAGGED_CMD 0x08 /* Do tagged IO */ 98#define MPT_ICFLAG_PHYS_DISK 0x08 /* Any SCSI IO but do Phys Disk Format */
99#define MPT_ICFLAG_TAGGED_CMD 0x10 /* Do tagged IO */
98#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */ 100#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */
99#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */ 101#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */
100 102
@@ -159,6 +161,8 @@ int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR
159static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); 161static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
160static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum); 162static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
161 163
164static struct work_struct mptscsih_persistTask;
165
162#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 166#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
163static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io); 167static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
164static void mptscsih_domainValidation(void *hd); 168static void mptscsih_domainValidation(void *hd);
@@ -167,6 +171,7 @@ static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
167static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target); 171static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
168static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage); 172static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
169static void mptscsih_fillbuf(char *buffer, int size, int index, int width); 173static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
174static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
170#endif 175#endif
171 176
172void mptscsih_remove(struct pci_dev *); 177void mptscsih_remove(struct pci_dev *);
@@ -606,11 +611,24 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
606 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); 611 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
607 sc->resid = sc->request_bufflen - xfer_cnt; 612 sc->resid = sc->request_bufflen - xfer_cnt;
608 613
614 /*
615 * if we get a data underrun indication, yet no data was
616 * transferred and the SCSI status indicates that the
617 * command was never started, change the data underrun
618 * to success
619 */
620 if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
621 (scsi_status == MPI_SCSI_STATUS_BUSY ||
622 scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
623 scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
624 status = MPI_IOCSTATUS_SUCCESS;
625 }
626
609 dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n" 627 dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
610 "IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n" 628 "IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
611 "resid=%d bufflen=%d xfer_cnt=%d\n", 629 "resid=%d bufflen=%d xfer_cnt=%d\n",
612 ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1], 630 ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
613 status, scsi_state, scsi_status, sc->resid, 631 status, scsi_state, scsi_status, sc->resid,
614 sc->request_bufflen, xfer_cnt)); 632 sc->request_bufflen, xfer_cnt));
615 633
616 if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) 634 if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
@@ -619,8 +637,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
619 /* 637 /*
620 * Look for + dump FCP ResponseInfo[]! 638 * Look for + dump FCP ResponseInfo[]!
621 */ 639 */
622 if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) { 640 if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
623 printk(KERN_NOTICE " FCP_ResponseInfo=%08xh\n", 641 pScsiReply->ResponseInfo) {
642 printk(KERN_NOTICE "ha=%d id=%d lun=%d: "
643 "FCP_ResponseInfo=%08xh\n",
644 ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
624 le32_to_cpu(pScsiReply->ResponseInfo)); 645 le32_to_cpu(pScsiReply->ResponseInfo));
625 } 646 }
626 647
@@ -661,23 +682,13 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
661 break; 682 break;
662 683
663 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ 684 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
664 if ( xfer_cnt >= sc->underflow ) { 685 sc->resid = sc->request_bufflen - xfer_cnt;
665 /* Sufficient data transfer occurred */ 686 if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
687 sc->result=DID_SOFT_ERROR << 16;
688 else /* Sufficient data transfer occurred */
666 sc->result = (DID_OK << 16) | scsi_status; 689 sc->result = (DID_OK << 16) | scsi_status;
667 } else if ( xfer_cnt == 0 ) { 690 dreplyprintk((KERN_NOTICE
668 /* A CRC Error causes this condition; retry */ 691 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
669 sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
670 (CHECK_CONDITION << 1);
671 sc->sense_buffer[0] = 0x70;
672 sc->sense_buffer[2] = NO_SENSE;
673 sc->sense_buffer[12] = 0;
674 sc->sense_buffer[13] = 0;
675 } else {
676 sc->result = DID_SOFT_ERROR << 16;
677 }
678 dreplyprintk((KERN_NOTICE
679 "RESIDUAL_MISMATCH: result=%x on id=%d\n",
680 sc->result, sc->device->id));
681 break; 692 break;
682 693
683 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ 694 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
@@ -692,7 +703,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
692 ; 703 ;
693 } else { 704 } else {
694 if (xfer_cnt < sc->underflow) { 705 if (xfer_cnt < sc->underflow) {
695 sc->result = DID_SOFT_ERROR << 16; 706 if (scsi_status == SAM_STAT_BUSY)
707 sc->result = SAM_STAT_BUSY;
708 else
709 sc->result = DID_SOFT_ERROR << 16;
696 } 710 }
697 if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) { 711 if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
698 /* What to do? 712 /* What to do?
@@ -717,8 +731,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
717 731
718 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ 732 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
719 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ 733 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
720 scsi_status = pScsiReply->SCSIStatus; 734 if (scsi_status == MPI_SCSI_STATUS_BUSY)
721 sc->result = (DID_OK << 16) | scsi_status; 735 sc->result = (DID_BUS_BUSY << 16) | scsi_status;
736 else
737 sc->result = (DID_OK << 16) | scsi_status;
722 if (scsi_state == 0) { 738 if (scsi_state == 0) {
723 ; 739 ;
724 } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) { 740 } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
@@ -890,12 +906,13 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
890 SCSIIORequest_t *mf = NULL; 906 SCSIIORequest_t *mf = NULL;
891 int ii; 907 int ii;
892 int max = hd->ioc->req_depth; 908 int max = hd->ioc->req_depth;
909 struct scsi_cmnd *sc;
893 910
894 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n", 911 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
895 target, lun, max)); 912 target, lun, max));
896 913
897 for (ii=0; ii < max; ii++) { 914 for (ii=0; ii < max; ii++) {
898 if (hd->ScsiLookup[ii] != NULL) { 915 if ((sc = hd->ScsiLookup[ii]) != NULL) {
899 916
900 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); 917 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
901 918
@@ -910,9 +927,22 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
910 hd->ScsiLookup[ii] = NULL; 927 hd->ScsiLookup[ii] = NULL;
911 mptscsih_freeChainBuffers(hd->ioc, ii); 928 mptscsih_freeChainBuffers(hd->ioc, ii);
912 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 929 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
930 if (sc->use_sg) {
931 pci_unmap_sg(hd->ioc->pcidev,
932 (struct scatterlist *) sc->request_buffer,
933 sc->use_sg,
934 sc->sc_data_direction);
935 } else if (sc->request_bufflen) {
936 pci_unmap_single(hd->ioc->pcidev,
937 sc->SCp.dma_handle,
938 sc->request_bufflen,
939 sc->sc_data_direction);
940 }
941 sc->host_scribble = NULL;
942 sc->result = DID_NO_CONNECT << 16;
943 sc->scsi_done(sc);
913 } 944 }
914 } 945 }
915
916 return; 946 return;
917} 947}
918 948
@@ -967,8 +997,10 @@ mptscsih_remove(struct pci_dev *pdev)
967 unsigned long flags; 997 unsigned long flags;
968 int sz1; 998 int sz1;
969 999
970 if(!host) 1000 if(!host) {
1001 mpt_detach(pdev);
971 return; 1002 return;
1003 }
972 1004
973 scsi_remove_host(host); 1005 scsi_remove_host(host);
974 1006
@@ -1256,8 +1288,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1256 MPT_SCSI_HOST *hd; 1288 MPT_SCSI_HOST *hd;
1257 MPT_FRAME_HDR *mf; 1289 MPT_FRAME_HDR *mf;
1258 SCSIIORequest_t *pScsiReq; 1290 SCSIIORequest_t *pScsiReq;
1259 VirtDevice *pTarget; 1291 VirtDevice *pTarget = SCpnt->device->hostdata;
1260 int target;
1261 int lun; 1292 int lun;
1262 u32 datalen; 1293 u32 datalen;
1263 u32 scsictl; 1294 u32 scsictl;
@@ -1267,12 +1298,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1267 int ii; 1298 int ii;
1268 1299
1269 hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata; 1300 hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
1270 target = SCpnt->device->id;
1271 lun = SCpnt->device->lun; 1301 lun = SCpnt->device->lun;
1272 SCpnt->scsi_done = done; 1302 SCpnt->scsi_done = done;
1273 1303
1274 pTarget = hd->Targets[target];
1275
1276 dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n", 1304 dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n",
1277 (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done)); 1305 (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done));
1278 1306
@@ -1315,7 +1343,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1315 /* Default to untagged. Once a target structure has been allocated, 1343 /* Default to untagged. Once a target structure has been allocated,
1316 * use the Inquiry data to determine if device supports tagged. 1344 * use the Inquiry data to determine if device supports tagged.
1317 */ 1345 */
1318 if ( pTarget 1346 if (pTarget
1319 && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES) 1347 && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
1320 && (SCpnt->device->tagged_supported)) { 1348 && (SCpnt->device->tagged_supported)) {
1321 scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ; 1349 scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
@@ -1325,8 +1353,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1325 1353
1326 /* Use the above information to set up the message frame 1354 /* Use the above information to set up the message frame
1327 */ 1355 */
1328 pScsiReq->TargetID = (u8) target; 1356 pScsiReq->TargetID = (u8) pTarget->target_id;
1329 pScsiReq->Bus = (u8) SCpnt->device->channel; 1357 pScsiReq->Bus = pTarget->bus_id;
1330 pScsiReq->ChainOffset = 0; 1358 pScsiReq->ChainOffset = 0;
1331 pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 1359 pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1332 pScsiReq->CDBLength = SCpnt->cmd_len; 1360 pScsiReq->CDBLength = SCpnt->cmd_len;
@@ -1378,7 +1406,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1378 1406
1379#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 1407#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
1380 if (hd->ioc->bus_type == SCSI) { 1408 if (hd->ioc->bus_type == SCSI) {
1381 int dvStatus = hd->ioc->spi_data.dvStatus[target]; 1409 int dvStatus = hd->ioc->spi_data.dvStatus[pTarget->target_id];
1382 int issueCmd = 1; 1410 int issueCmd = 1;
1383 1411
1384 if (dvStatus || hd->ioc->spi_data.forceDv) { 1412 if (dvStatus || hd->ioc->spi_data.forceDv) {
@@ -1426,6 +1454,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1426 return 0; 1454 return 0;
1427 1455
1428 fail: 1456 fail:
1457 hd->ScsiLookup[my_idx] = NULL;
1429 mptscsih_freeChainBuffers(hd->ioc, my_idx); 1458 mptscsih_freeChainBuffers(hd->ioc, my_idx);
1430 mpt_free_msg_frame(hd->ioc, mf); 1459 mpt_free_msg_frame(hd->ioc, mf);
1431 return SCSI_MLQUEUE_HOST_BUSY; 1460 return SCSI_MLQUEUE_HOST_BUSY;
@@ -1713,24 +1742,23 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1713 MPT_FRAME_HDR *mf; 1742 MPT_FRAME_HDR *mf;
1714 u32 ctx2abort; 1743 u32 ctx2abort;
1715 int scpnt_idx; 1744 int scpnt_idx;
1745 int retval;
1716 1746
1717 /* If we can't locate our host adapter structure, return FAILED status. 1747 /* If we can't locate our host adapter structure, return FAILED status.
1718 */ 1748 */
1719 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) { 1749 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
1720 SCpnt->result = DID_RESET << 16; 1750 SCpnt->result = DID_RESET << 16;
1721 SCpnt->scsi_done(SCpnt); 1751 SCpnt->scsi_done(SCpnt);
1722 dfailprintk((KERN_WARNING MYNAM ": mptscsih_abort: " 1752 dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: "
1723 "Can't locate host! (sc=%p)\n", 1753 "Can't locate host! (sc=%p)\n",
1724 SCpnt)); 1754 SCpnt));
1725 return FAILED; 1755 return FAILED;
1726 } 1756 }
1727 1757
1728 ioc = hd->ioc; 1758 ioc = hd->ioc;
1729 if (hd->resetPending) 1759 if (hd->resetPending) {
1730 return FAILED; 1760 return FAILED;
1731 1761 }
1732 printk(KERN_WARNING MYNAM ": %s: >> Attempting task abort! (sc=%p)\n",
1733 hd->ioc->name, SCpnt);
1734 1762
1735 if (hd->timeouts < -1) 1763 if (hd->timeouts < -1)
1736 hd->timeouts++; 1764 hd->timeouts++;
@@ -1738,16 +1766,20 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1738 /* Find this command 1766 /* Find this command
1739 */ 1767 */
1740 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { 1768 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
1741 /* Cmd not found in ScsiLookup. 1769 /* Cmd not found in ScsiLookup.
1742 * Do OS callback. 1770 * Do OS callback.
1743 */ 1771 */
1744 SCpnt->result = DID_RESET << 16; 1772 SCpnt->result = DID_RESET << 16;
1745 dtmprintk((KERN_WARNING MYNAM ": %s: mptscsih_abort: " 1773 dtmprintk((KERN_INFO MYNAM ": %s: mptscsih_abort: "
1746 "Command not in the active list! (sc=%p)\n", 1774 "Command not in the active list! (sc=%p)\n",
1747 hd->ioc->name, SCpnt)); 1775 hd->ioc->name, SCpnt));
1748 return SUCCESS; 1776 return SUCCESS;
1749 } 1777 }
1750 1778
1779 printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
1780 hd->ioc->name, SCpnt);
1781 scsi_print_command(SCpnt);
1782
1751 /* Most important! Set TaskMsgContext to SCpnt's MsgContext! 1783 /* Most important! Set TaskMsgContext to SCpnt's MsgContext!
1752 * (the IO to be ABORT'd) 1784 * (the IO to be ABORT'd)
1753 * 1785 *
@@ -1760,38 +1792,22 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1760 1792
1761 hd->abortSCpnt = SCpnt; 1793 hd->abortSCpnt = SCpnt;
1762 1794
1763 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1795 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
1764 SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun, 1796 SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
1765 ctx2abort, 2 /* 2 second timeout */) 1797 ctx2abort, 2 /* 2 second timeout */);
1766 < 0) {
1767 1798
1768 /* The TM request failed and the subsequent FW-reload failed! 1799 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
1769 * Fatal error case. 1800 hd->ioc->name,
1770 */ 1801 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1771 printk(MYIOC_s_WARN_FMT "Error issuing abort task! (sc=%p)\n",
1772 hd->ioc->name, SCpnt);
1773 1802
1774 /* We must clear our pending flag before clearing our state. 1803 if (retval == 0)
1775 */ 1804 return SUCCESS;
1805
1806 if(retval != FAILED ) {
1776 hd->tmPending = 0; 1807 hd->tmPending = 0;
1777 hd->tmState = TM_STATE_NONE; 1808 hd->tmState = TM_STATE_NONE;
1778
1779 /* Unmap the DMA buffers, if any. */
1780 if (SCpnt->use_sg) {
1781 pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
1782 SCpnt->use_sg, SCpnt->sc_data_direction);
1783 } else if (SCpnt->request_bufflen) {
1784 pci_unmap_single(ioc->pcidev, SCpnt->SCp.dma_handle,
1785 SCpnt->request_bufflen, SCpnt->sc_data_direction);
1786 }
1787 hd->ScsiLookup[scpnt_idx] = NULL;
1788 SCpnt->result = DID_RESET << 16;
1789 SCpnt->scsi_done(SCpnt); /* Issue the command callback */
1790 mptscsih_freeChainBuffers(ioc, scpnt_idx);
1791 mpt_free_msg_frame(ioc, mf);
1792 return FAILED;
1793 } 1809 }
1794 return SUCCESS; 1810 return FAILED;
1795} 1811}
1796 1812
1797/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1813/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1807,11 +1823,12 @@ int
1807mptscsih_dev_reset(struct scsi_cmnd * SCpnt) 1823mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1808{ 1824{
1809 MPT_SCSI_HOST *hd; 1825 MPT_SCSI_HOST *hd;
1826 int retval;
1810 1827
1811 /* If we can't locate our host adapter structure, return FAILED status. 1828 /* If we can't locate our host adapter structure, return FAILED status.
1812 */ 1829 */
1813 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ 1830 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
1814 dtmprintk((KERN_WARNING MYNAM ": mptscsih_dev_reset: " 1831 dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: "
1815 "Can't locate host! (sc=%p)\n", 1832 "Can't locate host! (sc=%p)\n",
1816 SCpnt)); 1833 SCpnt));
1817 return FAILED; 1834 return FAILED;
@@ -1820,24 +1837,26 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1820 if (hd->resetPending) 1837 if (hd->resetPending)
1821 return FAILED; 1838 return FAILED;
1822 1839
1823 printk(KERN_WARNING MYNAM ": %s: >> Attempting target reset! (sc=%p)\n", 1840 printk(KERN_WARNING MYNAM ": %s: attempting target reset! (sc=%p)\n",
1824 hd->ioc->name, SCpnt); 1841 hd->ioc->name, SCpnt);
1842 scsi_print_command(SCpnt);
1825 1843
1826 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 1844 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
1827 SCpnt->device->channel, SCpnt->device->id, 1845 SCpnt->device->channel, SCpnt->device->id,
1828 0, 0, 5 /* 5 second timeout */) 1846 0, 0, 5 /* 5 second timeout */);
1829 < 0){ 1847
1830 /* The TM request failed and the subsequent FW-reload failed! 1848 printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
1831 * Fatal error case. 1849 hd->ioc->name,
1832 */ 1850 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1833 printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", 1851
1834 hd->ioc->name, SCpnt); 1852 if (retval == 0)
1853 return SUCCESS;
1854
1855 if(retval != FAILED ) {
1835 hd->tmPending = 0; 1856 hd->tmPending = 0;
1836 hd->tmState = TM_STATE_NONE; 1857 hd->tmState = TM_STATE_NONE;
1837 return FAILED;
1838 } 1858 }
1839 1859 return FAILED;
1840 return SUCCESS;
1841} 1860}
1842 1861
1843/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1862/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1853,41 +1872,39 @@ int
1853mptscsih_bus_reset(struct scsi_cmnd * SCpnt) 1872mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
1854{ 1873{
1855 MPT_SCSI_HOST *hd; 1874 MPT_SCSI_HOST *hd;
1856 spinlock_t *host_lock = SCpnt->device->host->host_lock; 1875 int retval;
1857 1876
1858 /* If we can't locate our host adapter structure, return FAILED status. 1877 /* If we can't locate our host adapter structure, return FAILED status.
1859 */ 1878 */
1860 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ 1879 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
1861 dtmprintk((KERN_WARNING MYNAM ": mptscsih_bus_reset: " 1880 dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: "
1862 "Can't locate host! (sc=%p)\n", 1881 "Can't locate host! (sc=%p)\n",
1863 SCpnt ) ); 1882 SCpnt ) );
1864 return FAILED; 1883 return FAILED;
1865 } 1884 }
1866 1885
1867 printk(KERN_WARNING MYNAM ": %s: >> Attempting bus reset! (sc=%p)\n", 1886 printk(KERN_WARNING MYNAM ": %s: attempting bus reset! (sc=%p)\n",
1868 hd->ioc->name, SCpnt); 1887 hd->ioc->name, SCpnt);
1888 scsi_print_command(SCpnt);
1869 1889
1870 if (hd->timeouts < -1) 1890 if (hd->timeouts < -1)
1871 hd->timeouts++; 1891 hd->timeouts++;
1872 1892
1873 /* We are now ready to execute the task management request. */ 1893 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1874 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1894 SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */);
1875 SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */)
1876 < 0){
1877 1895
1878 /* The TM request failed and the subsequent FW-reload failed! 1896 printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
1879 * Fatal error case. 1897 hd->ioc->name,
1880 */ 1898 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1881 printk(MYIOC_s_WARN_FMT 1899
1882 "Error processing TaskMgmt request (sc=%p)\n", 1900 if (retval == 0)
1883 hd->ioc->name, SCpnt); 1901 return SUCCESS;
1902
1903 if(retval != FAILED ) {
1884 hd->tmPending = 0; 1904 hd->tmPending = 0;
1885 hd->tmState = TM_STATE_NONE; 1905 hd->tmState = TM_STATE_NONE;
1886 spin_lock_irq(host_lock);
1887 return FAILED;
1888 } 1906 }
1889 1907 return FAILED;
1890 return SUCCESS;
1891} 1908}
1892 1909
1893/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1910/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2169,7 +2186,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
2169 vdev->raidVolume = 0; 2186 vdev->raidVolume = 0;
2170 hd->Targets[device->id] = vdev; 2187 hd->Targets[device->id] = vdev;
2171 if (hd->ioc->bus_type == SCSI) { 2188 if (hd->ioc->bus_type == SCSI) {
2172 if (hd->ioc->spi_data.isRaid & (1 << device->id)) { 2189 if (hd->ioc->raid_data.isRaid & (1 << device->id)) {
2173 vdev->raidVolume = 1; 2190 vdev->raidVolume = 1;
2174 ddvtprintk((KERN_INFO 2191 ddvtprintk((KERN_INFO
2175 "RAID Volume @ id %d\n", device->id)); 2192 "RAID Volume @ id %d\n", device->id));
@@ -2180,22 +2197,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
2180 2197
2181 out: 2198 out:
2182 vdev->num_luns++; 2199 vdev->num_luns++;
2183 return 0; 2200 device->hostdata = vdev;
2184}
2185
2186static int
2187mptscsih_is_raid_volume(MPT_SCSI_HOST *hd, uint id)
2188{
2189 int i;
2190
2191 if (!hd->ioc->spi_data.isRaid || !hd->ioc->spi_data.pIocPg3)
2192 return 0;
2193
2194 for (i = 0; i < hd->ioc->spi_data.pIocPg3->NumPhysDisks; i++) {
2195 if (id == hd->ioc->spi_data.pIocPg3->PhysDisk[i].PhysDiskID)
2196 return 1;
2197 }
2198
2199 return 0; 2201 return 0;
2200} 2202}
2201 2203
@@ -2226,7 +2228,7 @@ mptscsih_slave_destroy(struct scsi_device *device)
2226 hd->Targets[target] = NULL; 2228 hd->Targets[target] = NULL;
2227 2229
2228 if (hd->ioc->bus_type == SCSI) { 2230 if (hd->ioc->bus_type == SCSI) {
2229 if (mptscsih_is_raid_volume(hd, target)) { 2231 if (mptscsih_is_phys_disk(hd->ioc, target)) {
2230 hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3; 2232 hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
2231 } else { 2233 } else {
2232 hd->ioc->spi_data.dvStatus[target] = 2234 hd->ioc->spi_data.dvStatus[target] =
@@ -2439,6 +2441,7 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2439{ 2441{
2440 MPT_SCSI_HOST *hd; 2442 MPT_SCSI_HOST *hd;
2441 unsigned long flags; 2443 unsigned long flags;
2444 int ii;
2442 2445
2443 dtmprintk((KERN_WARNING MYNAM 2446 dtmprintk((KERN_WARNING MYNAM
2444 ": IOC %s_reset routed to SCSI host driver!\n", 2447 ": IOC %s_reset routed to SCSI host driver!\n",
@@ -2496,11 +2499,8 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2496 2499
2497 /* ScsiLookup initialization 2500 /* ScsiLookup initialization
2498 */ 2501 */
2499 { 2502 for (ii=0; ii < hd->ioc->req_depth; ii++)
2500 int ii; 2503 hd->ScsiLookup[ii] = NULL;
2501 for (ii=0; ii < hd->ioc->req_depth; ii++)
2502 hd->ScsiLookup[ii] = NULL;
2503 }
2504 2504
2505 /* 2. Chain Buffer initialization 2505 /* 2. Chain Buffer initialization
2506 */ 2506 */
@@ -2549,6 +2549,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2549} 2549}
2550 2550
2551/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2551/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 2552/* work queue task to clear the persistence table */
2553static void
2554mptscsih_sas_persist_clear_table(void * arg)
2555{
2556 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
2557
2558 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
2559}
2560
2561/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2552int 2562int
2553mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 2563mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2554{ 2564{
@@ -2558,18 +2568,18 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2558 devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2568 devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
2559 ioc->name, event)); 2569 ioc->name, event));
2560 2570
2571 if (ioc->sh == NULL ||
2572 ((hd = (MPT_SCSI_HOST *)ioc->sh->hostdata) == NULL))
2573 return 1;
2574
2561 switch (event) { 2575 switch (event) {
2562 case MPI_EVENT_UNIT_ATTENTION: /* 03 */ 2576 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
2563 /* FIXME! */ 2577 /* FIXME! */
2564 break; 2578 break;
2565 case MPI_EVENT_IOC_BUS_RESET: /* 04 */ 2579 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
2566 case MPI_EVENT_EXT_BUS_RESET: /* 05 */ 2580 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
2567 hd = NULL; 2581 if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
2568 if (ioc->sh) { 2582 hd->soft_resets++;
2569 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2570 if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
2571 hd->soft_resets++;
2572 }
2573 break; 2583 break;
2574 case MPI_EVENT_LOGOUT: /* 09 */ 2584 case MPI_EVENT_LOGOUT: /* 09 */
2575 /* FIXME! */ 2585 /* FIXME! */
@@ -2588,69 +2598,24 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2588 break; 2598 break;
2589 2599
2590 case MPI_EVENT_INTEGRATED_RAID: /* 0B */ 2600 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
2601 {
2602 pMpiEventDataRaid_t pRaidEventData =
2603 (pMpiEventDataRaid_t) pEvReply->Data;
2591#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 2604#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
2592 /* negoNvram set to 0 if DV enabled and to USE_NVRAM if 2605 /* Domain Validation Needed */
2593 * if DV disabled. Need to check for target mode. 2606 if (ioc->bus_type == SCSI &&
2594 */ 2607 pRaidEventData->ReasonCode ==
2595 hd = NULL; 2608 MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
2596 if (ioc->sh) 2609 mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
2597 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2598
2599 if (hd && (ioc->bus_type == SCSI) && (hd->negoNvram == 0)) {
2600 ScsiCfgData *pSpi;
2601 Ioc3PhysDisk_t *pPDisk;
2602 int numPDisk;
2603 u8 reason;
2604 u8 physDiskNum;
2605
2606 reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
2607 if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
2608 /* New or replaced disk.
2609 * Set DV flag and schedule DV.
2610 */
2611 pSpi = &ioc->spi_data;
2612 physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
2613 ddvtprintk(("DV requested for phys disk id %d\n", physDiskNum));
2614 if (pSpi->pIocPg3) {
2615 pPDisk = pSpi->pIocPg3->PhysDisk;
2616 numPDisk =pSpi->pIocPg3->NumPhysDisks;
2617
2618 while (numPDisk) {
2619 if (physDiskNum == pPDisk->PhysDiskNum) {
2620 pSpi->dvStatus[pPDisk->PhysDiskID] = (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
2621 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
2622 ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
2623 break;
2624 }
2625 pPDisk++;
2626 numPDisk--;
2627 }
2628
2629 if (numPDisk == 0) {
2630 /* The physical disk that needs DV was not found
2631 * in the stored IOC Page 3. The driver must reload
2632 * this page. DV routine will set the NEED_DV flag for
2633 * all phys disks that have DV_NOT_DONE set.
2634 */
2635 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
2636 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", physDiskNum));
2637 }
2638 }
2639 }
2640 }
2641#endif 2610#endif
2611 break;
2612 }
2642 2613
2643#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) 2614 /* Persistent table is full. */
2644 printk("Raid Event RF: "); 2615 case MPI_EVENT_PERSISTENT_TABLE_FULL:
2645 { 2616 INIT_WORK(&mptscsih_persistTask,
2646 u32 *m = (u32 *)pEvReply; 2617 mptscsih_sas_persist_clear_table,(void *)ioc);
2647 int ii; 2618 schedule_work(&mptscsih_persistTask);
2648 int n = (int)pEvReply->MsgLength;
2649 for (ii=6; ii < n; ii++)
2650 printk(" %08x", le32_to_cpu(m[ii]));
2651 printk("\n");
2652 }
2653#endif
2654 break; 2619 break;
2655 2620
2656 case MPI_EVENT_NONE: /* 00 */ 2621 case MPI_EVENT_NONE: /* 00 */
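The new MPI_EVENT_PERSISTENT_TABLE_FULL case defers the table clear to process context through a work item instead of issuing it from the event callback, which must not sleep. A rough sketch of that pattern, assuming the three-argument INIT_WORK() of this kernel generation and an invented adapter type, could be:

#include <linux/workqueue.h>

struct my_adapter;				/* stand-in for the driver's adapter struct */

static struct work_struct clear_table_work;	/* one module-wide deferred task */

/* Runs later in process context, where a firmware request may block. */
static void clear_table_fn(void *arg)
{
	struct my_adapter *ioc = arg;

	/* issue the potentially sleeping "clear not-present entries" request here */
	(void)ioc;
}

/* Called from the event handler, which runs in a context that must not sleep. */
static void on_table_full(struct my_adapter *ioc)
{
	INIT_WORK(&clear_table_work, clear_table_fn, (void *)ioc);
	schedule_work(&clear_table_work);	/* keventd will call clear_table_fn(ioc) */
}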
@@ -2687,7 +2652,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2687{ 2652{
2688 int indexed_lun, lun_index; 2653 int indexed_lun, lun_index;
2689 VirtDevice *vdev; 2654 VirtDevice *vdev;
2690 ScsiCfgData *pSpi; 2655 SpiCfgData *pSpi;
2691 char data_56; 2656 char data_56;
2692 2657
2693 dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n", 2658 dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
@@ -2794,7 +2759,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2794static void 2759static void
2795mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56) 2760mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2796{ 2761{
2797 ScsiCfgData *pspi_data = &hd->ioc->spi_data; 2762 SpiCfgData *pspi_data = &hd->ioc->spi_data;
2798 int id = (int) target->target_id; 2763 int id = (int) target->target_id;
2799 int nvram; 2764 int nvram;
2800 VirtDevice *vdev; 2765 VirtDevice *vdev;
@@ -2973,11 +2938,13 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2973static void 2938static void
2974mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq) 2939mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
2975{ 2940{
2941 MPT_ADAPTER *ioc = hd->ioc;
2976 u8 cmd; 2942 u8 cmd;
2977 ScsiCfgData *pSpi; 2943 SpiCfgData *pSpi;
2978 2944
2979 ddvtprintk((" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n", 2945 ddvtprintk((MYIOC_s_NOTE_FMT
2980 pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0])); 2946 " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
2947 hd->ioc->name, pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
2981 2948
2982 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0)) 2949 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
2983 return; 2950 return;
@@ -2985,12 +2952,12 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
2985 cmd = pReq->CDB[0]; 2952 cmd = pReq->CDB[0];
2986 2953
2987 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) { 2954 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
2988 pSpi = &hd->ioc->spi_data; 2955 pSpi = &ioc->spi_data;
2989 if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) { 2956 if ((ioc->raid_data.isRaid & (1 << pReq->TargetID)) && ioc->raid_data.pIocPg3) {
2990 /* Set NEED_DV for all hidden disks 2957 /* Set NEED_DV for all hidden disks
2991 */ 2958 */
2992 Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk; 2959 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2993 int numPDisk = pSpi->pIocPg3->NumPhysDisks; 2960 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2994 2961
2995 while (numPDisk) { 2962 while (numPDisk) {
2996 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV; 2963 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
@@ -3004,6 +2971,50 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
3004 } 2971 }
3005} 2972}
3006 2973
 2974/* mptscsih_set_dvflags_raid()
2975 *
2976 * New or replaced disk. Set DV flag and schedule DV.
2977 */
2978static void
2979mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
2980{
2981 MPT_ADAPTER *ioc = hd->ioc;
2982 SpiCfgData *pSpi = &ioc->spi_data;
2983 Ioc3PhysDisk_t *pPDisk;
2984 int numPDisk;
2985
2986 if (hd->negoNvram != 0)
2987 return;
2988
2989 ddvtprintk(("DV requested for phys disk id %d\n", id));
2990 if (ioc->raid_data.pIocPg3) {
2991 pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2992 numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2993 while (numPDisk) {
2994 if (id == pPDisk->PhysDiskNum) {
2995 pSpi->dvStatus[pPDisk->PhysDiskID] =
2996 (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
2997 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
2998 ddvtprintk(("NEED_DV set for phys disk id %d\n",
2999 pPDisk->PhysDiskID));
3000 break;
3001 }
3002 pPDisk++;
3003 numPDisk--;
3004 }
3005
3006 if (numPDisk == 0) {
3007 /* The physical disk that needs DV was not found
3008 * in the stored IOC Page 3. The driver must reload
3009 * this page. DV routine will set the NEED_DV flag for
3010 * all phys disks that have DV_NOT_DONE set.
3011 */
3012 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
3013 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
3014 }
3015 }
3016}
3017
3007/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3018/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3008/* 3019/*
3009 * If no Target, bus reset on 1st I/O. Set the flag to 3020 * If no Target, bus reset on 1st I/O. Set the flag to
@@ -3091,7 +3102,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3091 MPT_ADAPTER *ioc = hd->ioc; 3102 MPT_ADAPTER *ioc = hd->ioc;
3092 Config_t *pReq; 3103 Config_t *pReq;
3093 SCSIDevicePage1_t *pData; 3104 SCSIDevicePage1_t *pData;
3094 VirtDevice *pTarget; 3105 VirtDevice *pTarget=NULL;
3095 MPT_FRAME_HDR *mf; 3106 MPT_FRAME_HDR *mf;
3096 dma_addr_t dataDma; 3107 dma_addr_t dataDma;
3097 u16 req_idx; 3108 u16 req_idx;
@@ -3190,7 +3201,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3190#endif 3201#endif
3191 3202
3192 if (flags & MPT_SCSICFG_BLK_NEGO) 3203 if (flags & MPT_SCSICFG_BLK_NEGO)
3193 negoFlags = MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC; 3204 negoFlags |= MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
3194 3205
3195 mptscsih_setDevicePage1Flags(width, factor, offset, 3206 mptscsih_setDevicePage1Flags(width, factor, offset,
3196 &requested, &configuration, negoFlags); 3207 &requested, &configuration, negoFlags);
@@ -4011,7 +4022,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
4011 4022
4012 /* If target Ptr NULL or if this target is NOT a disk, skip. 4023 /* If target Ptr NULL or if this target is NOT a disk, skip.
4013 */ 4024 */
4014 if ((pTarget) && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)){ 4025 if ((pTarget) && (pTarget->inq_data[0] == TYPE_DISK)){
4015 for (lun=0; lun <= MPT_LAST_LUN; lun++) { 4026 for (lun=0; lun <= MPT_LAST_LUN; lun++) {
4016 /* If LUN present, issue the command 4027 /* If LUN present, issue the command
4017 */ 4028 */
@@ -4106,9 +4117,9 @@ mptscsih_domainValidation(void *arg)
4106 4117
4107 if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) { 4118 if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
4108 mpt_read_ioc_pg_3(ioc); 4119 mpt_read_ioc_pg_3(ioc);
4109 if (ioc->spi_data.pIocPg3) { 4120 if (ioc->raid_data.pIocPg3) {
4110 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; 4121 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
4111 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; 4122 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
4112 4123
4113 while (numPDisk) { 4124 while (numPDisk) {
4114 if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE) 4125 if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
@@ -4147,7 +4158,7 @@ mptscsih_domainValidation(void *arg)
4147 isPhysDisk = mptscsih_is_phys_disk(ioc, id); 4158 isPhysDisk = mptscsih_is_phys_disk(ioc, id);
4148 if (isPhysDisk) { 4159 if (isPhysDisk) {
4149 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 4160 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4150 if (hd->ioc->spi_data.isRaid & (1 << ii)) { 4161 if (hd->ioc->raid_data.isRaid & (1 << ii)) {
4151 hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING; 4162 hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
4152 } 4163 }
4153 } 4164 }
@@ -4166,7 +4177,7 @@ mptscsih_domainValidation(void *arg)
4166 4177
4167 if (isPhysDisk) { 4178 if (isPhysDisk) {
4168 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 4179 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4169 if (hd->ioc->spi_data.isRaid & (1 << ii)) { 4180 if (hd->ioc->raid_data.isRaid & (1 << ii)) {
4170 hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING; 4181 hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
4171 } 4182 }
4172 } 4183 }
@@ -4188,21 +4199,21 @@ mptscsih_domainValidation(void *arg)
4188 4199
4189/* Search IOC page 3 to determine if this is hidden physical disk 4200/* Search IOC page 3 to determine if this is hidden physical disk
4190 */ 4201 */
4191static int 4202/* Search IOC page 3 to determine if this is hidden physical disk
4203 */
4204static int
4192mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id) 4205mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
4193{ 4206{
4194 if (ioc->spi_data.pIocPg3) { 4207 int i;
4195 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
4196 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
4197 4208
4198 while (numPDisk) { 4209 if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
4199 if (pPDisk->PhysDiskID == id) { 4210 return 0;
4200 return 1; 4211
4201 } 4212 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
4202 pPDisk++; 4213 if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
4203 numPDisk--; 4214 return 1;
4204 }
4205 } 4215 }
4216
4206 return 0; 4217 return 0;
4207} 4218}
4208 4219
@@ -4408,7 +4419,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4408 /* Skip this ID? Set cfg.cfghdr.hdr to force config page write 4419 /* Skip this ID? Set cfg.cfghdr.hdr to force config page write
4409 */ 4420 */
4410 { 4421 {
4411 ScsiCfgData *pspi_data = &hd->ioc->spi_data; 4422 SpiCfgData *pspi_data = &hd->ioc->spi_data;
4412 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) { 4423 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
4413 /* Set the factor from nvram */ 4424 /* Set the factor from nvram */
4414 nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8; 4425 nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
@@ -4438,11 +4449,11 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4438 } 4449 }
4439 4450
4440 /* Finish iocmd initialization - hidden or visible disk? */ 4451 /* Finish iocmd initialization - hidden or visible disk? */
4441 if (ioc->spi_data.pIocPg3) { 4452 if (ioc->raid_data.pIocPg3) {
4442 /* Search IOC page 3 for matching id 4453 /* Search IOC page 3 for matching id
4443 */ 4454 */
4444 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; 4455 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
4445 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; 4456 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
4446 4457
4447 while (numPDisk) { 4458 while (numPDisk) {
4448 if (pPDisk->PhysDiskID == id) { 4459 if (pPDisk->PhysDiskID == id) {
@@ -4466,7 +4477,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4466 /* RAID Volume ID's may double for a physical device. If RAID but 4477 /* RAID Volume ID's may double for a physical device. If RAID but
4467 * not a physical ID as well, skip DV. 4478 * not a physical ID as well, skip DV.
4468 */ 4479 */
4469 if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK)) 4480 if ((hd->ioc->raid_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
4470 goto target_done; 4481 goto target_done;
4471 4482
4472 4483
@@ -4815,6 +4826,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4815 notDone = 0; 4826 notDone = 0;
4816 if (iocmd.flags & MPT_ICFLAG_ECHO) { 4827 if (iocmd.flags & MPT_ICFLAG_ECHO) {
4817 bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3]; 4828 bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
4829 if (pbuf1[0] & 0x01)
4830 iocmd.flags |= MPT_ICFLAG_EBOS;
4818 } else { 4831 } else {
4819 bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3]; 4832 bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
4820 } 4833 }
@@ -4911,6 +4924,9 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4911 } 4924 }
4912 iocmd.flags &= ~MPT_ICFLAG_DID_RESET; 4925 iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
4913 4926
4927 if (iocmd.flags & MPT_ICFLAG_EBOS)
4928 goto skip_Reserve;
4929
4914 repeat = 5; 4930 repeat = 5;
4915 while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) { 4931 while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
4916 iocmd.cmd = RESERVE; 4932 iocmd.cmd = RESERVE;
@@ -4954,6 +4970,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4954 } 4970 }
4955 } 4971 }
4956 4972
4973skip_Reserve:
4957 mptscsih_fillbuf(pbuf1, sz, patt, 1); 4974 mptscsih_fillbuf(pbuf1, sz, patt, 1);
4958 iocmd.cmd = WRITE_BUFFER; 4975 iocmd.cmd = WRITE_BUFFER;
4959 iocmd.data_dma = buf1_dma; 4976 iocmd.data_dma = buf1_dma;
@@ -5198,11 +5215,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5198 * If not an LVD bus, the adapter minSyncFactor has been 5215 * If not an LVD bus, the adapter minSyncFactor has been
5199 * already throttled back. 5216 * already throttled back.
5200 */ 5217 */
5218 negoFlags = hd->ioc->spi_data.noQas;
5201 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) { 5219 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
5202 width = pTarget->maxWidth; 5220 width = pTarget->maxWidth;
5203 offset = pTarget->maxOffset; 5221 offset = pTarget->maxOffset;
5204 factor = pTarget->minSyncFactor; 5222 factor = pTarget->minSyncFactor;
5205 negoFlags = pTarget->negoFlags; 5223 negoFlags |= pTarget->negoFlags;
5206 } else { 5224 } else {
5207 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { 5225 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
5208 data = hd->ioc->spi_data.nvram[id]; 5226 data = hd->ioc->spi_data.nvram[id];
@@ -5223,7 +5241,6 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5223 } 5241 }
5224 5242
5225 /* Set the negotiation flags */ 5243 /* Set the negotiation flags */
5226 negoFlags = hd->ioc->spi_data.noQas;
5227 if (!width) 5244 if (!width)
5228 negoFlags |= MPT_TARGET_NO_NEGO_WIDE; 5245 negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
5229 5246
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 51c0255ac16e..971fda4b8b57 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/message/fusion/mptscsi.h 2 * linux/drivers/message/fusion/mptscsih.h
3 * High performance SCSI / Fibre Channel SCSI Host device driver. 3 * High performance SCSI / Fibre Channel SCSI Host device driver.
4 * For use with PCI chip/adapter(s): 4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel 5 * LSIFC9xx/LSI409xx Fibre Channel
@@ -53,8 +53,8 @@
53 * SCSI Public stuff... 53 * SCSI Public stuff...
54 */ 54 */
55 55
56#define MPT_SCSI_CMD_PER_DEV_HIGH 31 56#define MPT_SCSI_CMD_PER_DEV_HIGH 64
57#define MPT_SCSI_CMD_PER_DEV_LOW 7 57#define MPT_SCSI_CMD_PER_DEV_LOW 32
58 58
59#define MPT_SCSI_CMD_PER_LUN 7 59#define MPT_SCSI_CMD_PER_LUN 7
60 60
@@ -77,6 +77,7 @@
77#define MPTSCSIH_MAX_WIDTH 1 77#define MPTSCSIH_MAX_WIDTH 1
78#define MPTSCSIH_MIN_SYNC 0x08 78#define MPTSCSIH_MIN_SYNC 0x08
79#define MPTSCSIH_SAF_TE 0 79#define MPTSCSIH_SAF_TE 0
80#define MPTSCSIH_PT_CLEAR 0
80 81
81 82
82#endif 83#endif
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 587d1274fd74..5c0e307d1d5d 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -199,7 +199,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
199 printk(MYIOC_s_WARN_FMT 199 printk(MYIOC_s_WARN_FMT
200 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", 200 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
201 ioc->name, ioc); 201 ioc->name, ioc);
202 return -ENODEV; 202 return 0;
203 } 203 }
204 204
205 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST)); 205 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index af32ab4e90cd..10432f665201 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -56,8 +56,11 @@ static int __init i2o_config_init(void)
56 return -EBUSY; 56 return -EBUSY;
57 } 57 }
58#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL 58#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
59 if (i2o_config_old_init()) 59 if (i2o_config_old_init()) {
60 osm_err("old config handler initialization failed\n");
60 i2o_driver_unregister(&i2o_config_driver); 61 i2o_driver_unregister(&i2o_config_driver);
62 return -EBUSY;
63 }
61#endif 64#endif
62 65
63 return 0; 66 return 0;
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index d15da6fd84c1..b7f093fbf9b0 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -82,7 +82,7 @@ int __init init_sharpsl(void)
82 } else if (machine_is_tosa()) { 82 } else if (machine_is_tosa()) {
83 sharpsl_partitions[0].size=0x006a0000; 83 sharpsl_partitions[0].size=0x006a0000;
84 sharpsl_partitions[0].offset=0x00160000; 84 sharpsl_partitions[0].offset=0x00160000;
85 } else if (machine_is_spitz()) { 85 } else if (machine_is_spitz() || machine_is_akita() || machine_is_borzoi()) {
86 sharpsl_partitions[0].size=0x006b0000; 86 sharpsl_partitions[0].size=0x006b0000;
87 sharpsl_partitions[0].offset=0x00140000; 87 sharpsl_partitions[0].offset=0x00140000;
88 } else { 88 } else {
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 9853b87bb756..88b5b5b40b43 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -221,10 +221,16 @@ sharpsl_nand_init(void)
221 sharpsl_partition_info[1].size=25 * 1024 * 1024; 221 sharpsl_partition_info[1].size=25 * 1024 * 1024;
222 } else if (machine_is_husky()) { 222 } else if (machine_is_husky()) {
223 sharpsl_partition_info[1].size=53 * 1024 * 1024; 223 sharpsl_partition_info[1].size=53 * 1024 * 1024;
224 } 224 } else if (machine_is_spitz()) {
225 sharpsl_partition_info[1].size=5 * 1024 * 1024;
226 } else if (machine_is_akita()) {
227 sharpsl_partition_info[1].size=58 * 1024 * 1024;
228 } else if (machine_is_borzoi()) {
229 sharpsl_partition_info[1].size=32 * 1024 * 1024;
230 }
225 } 231 }
226 232
227 if (machine_is_husky()) { 233 if (machine_is_husky() || machine_is_borzoi()) {
228 /* Need to use small eraseblock size for backward compatibility */ 234 /* Need to use small eraseblock size for backward compatibility */
229 sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS; 235 sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS;
230 } 236 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index bd99c268e2da..f822cd3025ff 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -353,8 +353,6 @@ struct cp_private {
353 353
354 struct net_device_stats net_stats; 354 struct net_device_stats net_stats;
355 struct cp_extra_stats cp_stats; 355 struct cp_extra_stats cp_stats;
356 struct cp_dma_stats *nic_stats;
357 dma_addr_t nic_stats_dma;
358 356
359 unsigned rx_tail ____cacheline_aligned; 357 unsigned rx_tail ____cacheline_aligned;
360 struct cp_desc *rx_ring; 358 struct cp_desc *rx_ring;
@@ -1142,10 +1140,6 @@ static int cp_alloc_rings (struct cp_private *cp)
1142 cp->rx_ring = mem; 1140 cp->rx_ring = mem;
1143 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; 1141 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1144 1142
1145 mem += (CP_RING_BYTES - CP_STATS_SIZE);
1146 cp->nic_stats = mem;
1147 cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
1148
1149 return cp_init_rings(cp); 1143 return cp_init_rings(cp);
1150} 1144}
1151 1145
@@ -1186,7 +1180,6 @@ static void cp_free_rings (struct cp_private *cp)
1186 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); 1180 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1187 cp->rx_ring = NULL; 1181 cp->rx_ring = NULL;
1188 cp->tx_ring = NULL; 1182 cp->tx_ring = NULL;
1189 cp->nic_stats = NULL;
1190} 1183}
1191 1184
1192static int cp_open (struct net_device *dev) 1185static int cp_open (struct net_device *dev)
@@ -1515,13 +1508,17 @@ static void cp_get_ethtool_stats (struct net_device *dev,
1515 struct ethtool_stats *estats, u64 *tmp_stats) 1508 struct ethtool_stats *estats, u64 *tmp_stats)
1516{ 1509{
1517 struct cp_private *cp = netdev_priv(dev); 1510 struct cp_private *cp = netdev_priv(dev);
1511 struct cp_dma_stats *nic_stats;
1512 dma_addr_t dma;
1518 int i; 1513 int i;
1519 1514
1520 memset(cp->nic_stats, 0, sizeof(struct cp_dma_stats)); 1515 nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
1516 if (!nic_stats)
1517 return;
1521 1518
1522 /* begin NIC statistics dump */ 1519 /* begin NIC statistics dump */
1523 cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16); 1520 cpw32(StatsAddr + 4, (u64)dma >> 32);
1524 cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats); 1521 cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
1525 cpr32(StatsAddr); 1522 cpr32(StatsAddr);
1526 1523
1527 for (i = 0; i < 1000; i++) { 1524 for (i = 0; i < 1000; i++) {
@@ -1531,24 +1528,27 @@ static void cp_get_ethtool_stats (struct net_device *dev,
1531 } 1528 }
1532 cpw32(StatsAddr, 0); 1529 cpw32(StatsAddr, 0);
1533 cpw32(StatsAddr + 4, 0); 1530 cpw32(StatsAddr + 4, 0);
1531 cpr32(StatsAddr);
1534 1532
1535 i = 0; 1533 i = 0;
1536 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok); 1534 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1537 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok); 1535 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1538 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err); 1536 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1539 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err); 1537 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1540 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo); 1538 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1541 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align); 1539 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1542 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col); 1540 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1543 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol); 1541 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1544 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys); 1542 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1545 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast); 1543 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1546 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast); 1544 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1547 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort); 1545 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1548 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun); 1546 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1549 tmp_stats[i++] = cp->cp_stats.rx_frags; 1547 tmp_stats[i++] = cp->cp_stats.rx_frags;
1550 if (i != CP_NUM_STATS) 1548 if (i != CP_NUM_STATS)
1551 BUG(); 1549 BUG();
1550
1551 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
1552} 1552}
1553 1553
1554static struct ethtool_ops cp_ethtool_ops = { 1554static struct ethtool_ops cp_ethtool_ops = {
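The 8139cp change above drops the permanently mapped statistics buffer and instead allocates a coherent buffer only for the duration of the ethtool dump. A hedged sketch of that allocate/dump/poll/free shape, with invented register offsets standing in for StatsAddr and DumpStats:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <asm/io.h>

#define STATS_ADDR_LO	0x10		/* invented register offsets */
#define STATS_ADDR_HI	0x14
#define STATS_DUMP	0x8		/* invented "start dump" bit */

struct hw_dma_stats { __le64 tx_ok; /* ... more counters ... */ };

static void dump_hw_stats(struct pci_dev *pdev, void __iomem *regs)
{
	struct hw_dma_stats *st;
	dma_addr_t dma;
	int i;

	st = pci_alloc_consistent(pdev, sizeof(*st), &dma);
	if (!st)
		return;				/* just skip the dump on failure */

	/* point the NIC at the buffer and kick off the dump */
	writel((u64)dma >> 32, regs + STATS_ADDR_HI);
	writel(((u64)dma & DMA_32BIT_MASK) | STATS_DUMP, regs + STATS_ADDR_LO);

	for (i = 0; i < 1000; i++) {		/* poll until the dump bit clears */
		if (!(readl(regs + STATS_ADDR_LO) & STATS_DUMP))
			break;
		udelay(10);
	}

	/* ... copy counters out with le64_to_cpu()/le32_to_cpu() ... */

	pci_free_consistent(pdev, sizeof(*st), st, dma);
}

Allocating per call keeps the ring allocation smaller and avoids holding a DMA mapping that is only needed while ethtool statistics are being read.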
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 82603e419d76..ff3fccd7513b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1951,7 +1951,7 @@ config SKGE
1951 ---help--- 1951 ---help---
1952 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx 1952 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
1953 and related Gigabit Ethernet adapters. It is a new smaller driver 1953 and related Gigabit Ethernet adapters. It is a new smaller driver
1954 driver with better performance and more complete ethtool support. 1954 with better performance and more complete ethtool support.
1955 1955
1956 It does not support the link failover and network management 1956 It does not support the link failover and network management
1957 features that the "portable" vendor-supplied sk98lin driver does. 1957 features that the "portable" vendor-supplied sk98lin driver does.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 94c9f68dd16b..90449a0f2a6c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2879,6 +2879,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2879 * This target is not on a VLAN 2879 * This target is not on a VLAN
2880 */ 2880 */
2881 if (rt->u.dst.dev == bond->dev) { 2881 if (rt->u.dst.dev == bond->dev) {
2882 ip_rt_put(rt);
2882 dprintk("basa: rtdev == bond->dev: arp_send\n"); 2883 dprintk("basa: rtdev == bond->dev: arp_send\n");
2883 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2884 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2884 bond->master_ip, 0); 2885 bond->master_ip, 0);
@@ -2898,6 +2899,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2898 } 2899 }
2899 2900
2900 if (vlan_id) { 2901 if (vlan_id) {
2902 ip_rt_put(rt);
2901 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2903 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2902 vlan->vlan_ip, vlan_id); 2904 vlan->vlan_ip, vlan_id);
2903 continue; 2905 continue;
@@ -2909,6 +2911,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2909 bond->dev->name, NIPQUAD(fl.fl4_dst), 2911 bond->dev->name, NIPQUAD(fl.fl4_dst),
2910 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL"); 2912 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
2911 } 2913 }
2914 ip_rt_put(rt);
2912 } 2915 }
2913} 2916}
2914 2917
@@ -5036,6 +5039,14 @@ static int __init bonding_init(void)
5036 return 0; 5039 return 0;
5037 5040
5038out_err: 5041out_err:
5042 /*
5043 * rtnl_unlock() will run netdev_run_todo(), putting the
5044 * thus-far-registered bonding devices into a state which
 5045 * unregister_netdevice() will accept
5046 */
5047 rtnl_unlock();
5048 rtnl_lock();
5049
5039 /* free and unregister all bonds that were successfully added */ 5050 /* free and unregister all bonds that were successfully added */
5040 bond_free_all(); 5051 bond_free_all();
5041 5052
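The bonding hunks above add ip_rt_put() on every path that leaves the ARP-probe loop while a route from ip_route_output_key() is still held. A sketch of that reference-balancing pattern, assuming the 2.6-era two-argument ip_route_output_key() and simplified flow setup:

#include <linux/types.h>
#include <net/flow.h>
#include <net/route.h>

/* Sketch: every successful route lookup must be balanced by ip_rt_put(),
 * on the early-exit paths as well as the common one. */
static void send_probe(u32 target)
{
	struct rtable *rt;
	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = target } } };

	if (ip_route_output_key(&rt, &fl))
		return;				/* lookup failed, nothing is held */

	if (rt->u.dst.dev == NULL) {
		ip_rt_put(rt);			/* early exit: drop the reference */
		return;
	}

	/* ... build and send the ARP request via rt->u.dst.dev ... */

	ip_rt_put(rt);				/* common exit: drop the reference */
}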
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1c918309539f..c15406d46418 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1387,13 +1387,13 @@ static void e100_update_stats(struct nic *nic)
1387 ns->collisions += nic->tx_collisions; 1387 ns->collisions += nic->tx_collisions;
1388 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + 1388 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1389 le32_to_cpu(s->tx_lost_crs); 1389 le32_to_cpu(s->tx_lost_crs);
1390 ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
1391 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) + 1390 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1392 nic->rx_over_length_errors; 1391 nic->rx_over_length_errors;
1393 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); 1392 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1394 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); 1393 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1395 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); 1394 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1396 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); 1395 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1396 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1397 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + 1397 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1398 le32_to_cpu(s->rx_alignment_errors) + 1398 le32_to_cpu(s->rx_alignment_errors) +
1399 le32_to_cpu(s->rx_short_frame_errors) + 1399 le32_to_cpu(s->rx_short_frame_errors) +
@@ -1727,12 +1727,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1727 1727
1728 if(unlikely(!(rfd_status & cb_ok))) { 1728 if(unlikely(!(rfd_status & cb_ok))) {
1729 /* Don't indicate if hardware indicates errors */ 1729 /* Don't indicate if hardware indicates errors */
1730 nic->net_stats.rx_dropped++;
1731 dev_kfree_skb_any(skb); 1730 dev_kfree_skb_any(skb);
1732 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) { 1731 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1733 /* Don't indicate oversized frames */ 1732 /* Don't indicate oversized frames */
1734 nic->rx_over_length_errors++; 1733 nic->rx_over_length_errors++;
1735 nic->net_stats.rx_dropped++;
1736 dev_kfree_skb_any(skb); 1734 dev_kfree_skb_any(skb);
1737 } else { 1735 } else {
1738 nic->net_stats.rx_packets++; 1736 nic->net_stats.rx_packets++;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d02883dcc9b3..c062b0ad8262 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2545,7 +2545,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
2545 adapter->stats.crcerrs + adapter->stats.algnerrc + 2545 adapter->stats.crcerrs + adapter->stats.algnerrc +
2546 adapter->stats.rlec + adapter->stats.mpc + 2546 adapter->stats.rlec + adapter->stats.mpc +
2547 adapter->stats.cexterr; 2547 adapter->stats.cexterr;
2548 adapter->net_stats.rx_dropped = adapter->stats.mpc;
2549 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 2548 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2550 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 2549 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2551 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 2550 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 5015eaf4e20a..176680cb153e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1617,8 +1617,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1617 adapter->stats.icbc + 1617 adapter->stats.icbc +
1618 adapter->stats.ecbc + adapter->stats.mpc; 1618 adapter->stats.ecbc + adapter->stats.mpc;
1619 1619
1620 adapter->net_stats.rx_dropped = adapter->stats.mpc;
1621
1622 /* see above 1620 /* see above
1623 * adapter->net_stats.rx_length_errors = adapter->stats.rlec; 1621 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
1624 */ 1622 */
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c829e6a2e8a6..dd451e099a4c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -428,7 +428,7 @@ static int init_shared_mem(struct s2io_nic *nic)
428 DBG_PRINT(INIT_DBG, 428 DBG_PRINT(INIT_DBG,
429 "%s: Zero DMA address for TxDL. ", dev->name); 429 "%s: Zero DMA address for TxDL. ", dev->name);
430 DBG_PRINT(INIT_DBG, 430 DBG_PRINT(INIT_DBG,
431 "Virtual address %llx\n", (u64)tmp_v); 431 "Virtual address %p\n", tmp_v);
432 tmp_v = pci_alloc_consistent(nic->pdev, 432 tmp_v = pci_alloc_consistent(nic->pdev,
433 PAGE_SIZE, &tmp_p); 433 PAGE_SIZE, &tmp_p);
434 if (!tmp_v) { 434 if (!tmp_v) {
@@ -657,9 +657,10 @@ static void free_shared_mem(struct s2io_nic *nic)
657 mac_control->zerodma_virt_addr, 657 mac_control->zerodma_virt_addr,
658 (dma_addr_t)0); 658 (dma_addr_t)0);
659 DBG_PRINT(INIT_DBG, 659 DBG_PRINT(INIT_DBG,
660 "%s: Freeing TxDL with zero DMA addr. ", dev->name); 660 "%s: Freeing TxDL with zero DMA addr. ",
661 DBG_PRINT(INIT_DBG, "Virtual address %llx\n", 661 dev->name);
662 (u64)(mac_control->zerodma_virt_addr)); 662 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
663 mac_control->zerodma_virt_addr);
663 } 664 }
664 kfree(mac_control->fifos[i].list_info); 665 kfree(mac_control->fifos[i].list_info);
665 } 666 }
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 6ee4771addf1..2e72d79a143c 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -5216,17 +5216,15 @@ static struct pci_device_id skge_pci_tbl[] = {
5216 { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5216 { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5217 { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5217 { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5218 { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5218 { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5219 { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5219/* DLink card does not have valid VPD so this driver gags
5220 * { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5221 */
5220 { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5222 { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5221#if 0 /* don't handle Yukon2 cards at the moment -- mlindner@syskonnect.de */
5222 { PCI_VENDOR_ID_MARVELL, 0x4360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5223 { PCI_VENDOR_ID_MARVELL, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5224#endif
5225 { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5223 { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5226 { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5224 { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5227 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5225 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
5228 { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5226 { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5229 { 0, } 5227 { 0 }
5230}; 5228};
5231 5229
5232MODULE_DEVICE_TABLE(pci, skge_pci_tbl); 5230MODULE_DEVICE_TABLE(pci, skge_pci_tbl);
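The table edit above narrows the Linksys 0x1032 entry so that only subsystem device 0x0015 is claimed, leaving other boards to the newer skge driver. A minimal sketch of a PCI ID table that mixes a wildcard entry with a subsystem-specific one (the vendor/device values here are placeholders):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_pci_tbl[] = {
	/* claim any board with this vendor/device pair */
	{ 0x1148, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	/* claim only one specific subsystem device of another pair */
	{ 0x1737, 0x1032, PCI_ANY_ID, 0x0015, },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);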
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 757c83387a29..7ce734ec6ba8 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.9" 45#define DRV_VERSION "1.0"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -669,7 +669,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
669 PHY_M_LED_BLINK_RT(BLINK_84MS) | 669 PHY_M_LED_BLINK_RT(BLINK_84MS) |
670 PHY_M_LEDC_TX_CTRL | 670 PHY_M_LEDC_TX_CTRL |
671 PHY_M_LEDC_DP_CTRL); 671 PHY_M_LEDC_DP_CTRL);
672 672
673 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 673 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
674 PHY_M_LED_MO_RX(MO_LED_OFF) | 674 PHY_M_LED_MO_RX(MO_LED_OFF) |
675 (skge->speed == SPEED_100 ? 675 (skge->speed == SPEED_100 ?
@@ -877,7 +877,7 @@ static int skge_rx_fill(struct skge_port *skge)
877 877
878static void skge_link_up(struct skge_port *skge) 878static void skge_link_up(struct skge_port *skge)
879{ 879{
880 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), 880 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
881 LED_BLK_OFF|LED_SYNC_OFF|LED_ON); 881 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
882 882
883 netif_carrier_on(skge->netdev); 883 netif_carrier_on(skge->netdev);
@@ -988,6 +988,8 @@ static void genesis_reset(struct skge_hw *hw, int port)
988{ 988{
989 const u8 zero[8] = { 0 }; 989 const u8 zero[8] = { 0 };
990 990
991 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
992
991 /* reset the statistics module */ 993 /* reset the statistics module */
992 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 994 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
993 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ 995 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
@@ -1022,8 +1024,6 @@ static void bcom_check_link(struct skge_hw *hw, int port)
1022 (void) xm_phy_read(hw, port, PHY_BCOM_STAT); 1024 (void) xm_phy_read(hw, port, PHY_BCOM_STAT);
1023 status = xm_phy_read(hw, port, PHY_BCOM_STAT); 1025 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1024 1026
1025 pr_debug("bcom_check_link status=0x%x\n", status);
1026
1027 if ((status & PHY_ST_LSYNC) == 0) { 1027 if ((status & PHY_ST_LSYNC) == 0) {
1028 u16 cmd = xm_read16(hw, port, XM_MMU_CMD); 1028 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
1029 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1029 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
@@ -1107,8 +1107,6 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo)
1107 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1107 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1108 }; 1108 };
1109 1109
1110 pr_debug("bcom_phy_init\n");
1111
1112 /* read Id from external PHY (all have the same address) */ 1110 /* read Id from external PHY (all have the same address) */
1113 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); 1111 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1114 1112
@@ -1341,6 +1339,8 @@ static void genesis_stop(struct skge_port *skge)
1341 int port = skge->port; 1339 int port = skge->port;
1342 u32 reg; 1340 u32 reg;
1343 1341
1342 genesis_reset(hw, port);
1343
1344 /* Clear Tx packet arbiter timeout IRQ */ 1344 /* Clear Tx packet arbiter timeout IRQ */
1345 skge_write16(hw, B3_PA_CTRL, 1345 skge_write16(hw, B3_PA_CTRL,
1346 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); 1346 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
@@ -1466,7 +1466,6 @@ static void genesis_link_up(struct skge_port *skge)
1466 u16 cmd; 1466 u16 cmd;
1467 u32 mode, msk; 1467 u32 mode, msk;
1468 1468
1469 pr_debug("genesis_link_up\n");
1470 cmd = xm_read16(hw, port, XM_MMU_CMD); 1469 cmd = xm_read16(hw, port, XM_MMU_CMD);
1471 1470
1472 /* 1471 /*
@@ -1579,7 +1578,6 @@ static void yukon_init(struct skge_hw *hw, int port)
1579 struct skge_port *skge = netdev_priv(hw->dev[port]); 1578 struct skge_port *skge = netdev_priv(hw->dev[port]);
1580 u16 ctrl, ct1000, adv; 1579 u16 ctrl, ct1000, adv;
1581 1580
1582 pr_debug("yukon_init\n");
1583 if (skge->autoneg == AUTONEG_ENABLE) { 1581 if (skge->autoneg == AUTONEG_ENABLE) {
1584 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1582 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1585 1583
@@ -1678,9 +1676,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1678 1676
1679 /* WA code for COMA mode -- set PHY reset */ 1677 /* WA code for COMA mode -- set PHY reset */
1680 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1678 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1681 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1679 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1682 skge_write32(hw, B2_GP_IO, 1680 reg = skge_read32(hw, B2_GP_IO);
1683 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9)); 1681 reg |= GP_DIR_9 | GP_IO_9;
1682 skge_write32(hw, B2_GP_IO, reg);
1683 }
1684 1684
1685 /* hard reset */ 1685 /* hard reset */
1686 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1686 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
@@ -1688,10 +1688,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1688 1688
1689 /* WA code for COMA mode -- clear PHY reset */ 1689 /* WA code for COMA mode -- clear PHY reset */
1690 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1690 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1691 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1691 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1692 skge_write32(hw, B2_GP_IO, 1692 reg = skge_read32(hw, B2_GP_IO);
1693 (skge_read32(hw, B2_GP_IO) | GP_DIR_9) 1693 reg |= GP_DIR_9;
1694 & ~GP_IO_9); 1694 reg &= ~GP_IO_9;
1695 skge_write32(hw, B2_GP_IO, reg);
1696 }
1695 1697
1696 /* Set hardware config mode */ 1698 /* Set hardware config mode */
1697 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 1699 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
@@ -1730,7 +1732,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1730 } 1732 }
1731 1733
1732 gma_write16(hw, port, GM_GP_CTRL, reg); 1734 gma_write16(hw, port, GM_GP_CTRL, reg);
1733 skge_read16(hw, GMAC_IRQ_SRC); 1735 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
1734 1736
1735 yukon_init(hw, port); 1737 yukon_init(hw, port);
1736 1738
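
The two COMA-mode hunks above replace a nested read/or/write expression with an explicit read-modify-write of the B2_GP_IO register. A compilable sketch of that pattern, with illustrative bit definitions and I/O accessors standing in for the driver's real ones:

#include <stdint.h>

#define GP_DIR_9  (1u << 9)    /* illustrative: direction bit for pin 9 */
#define GP_IO_9   (1u << 25)   /* illustrative: data bit for pin 9      */

static inline uint32_t reg_read(volatile uint32_t *reg)  { return *reg; }
static inline void reg_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

/* "Set PHY reset": drive pin 9 and raise it. */
void gpio_pin9_assert(volatile uint32_t *gp_io)
{
        uint32_t reg = reg_read(gp_io);

        reg |= GP_DIR_9 | GP_IO_9;
        reg_write(gp_io, reg);
}

/* "Clear PHY reset": keep the pin driven, lower the data bit. */
void gpio_pin9_deassert(volatile uint32_t *gp_io)
{
        uint32_t reg = reg_read(gp_io);

        reg |= GP_DIR_9;
        reg &= ~GP_IO_9;
        reg_write(gp_io, reg);
}
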
@@ -1802,20 +1804,26 @@ static void yukon_stop(struct skge_port *skge)
1802 struct skge_hw *hw = skge->hw; 1804 struct skge_hw *hw = skge->hw;
1803 int port = skge->port; 1805 int port = skge->port;
1804 1806
1805 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1807 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
1806 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 1808 yukon_reset(hw, port);
1807 skge_write32(hw, B2_GP_IO,
1808 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1809 }
1810 1809
1811 gma_write16(hw, port, GM_GP_CTRL, 1810 gma_write16(hw, port, GM_GP_CTRL,
1812 gma_read16(hw, port, GM_GP_CTRL) 1811 gma_read16(hw, port, GM_GP_CTRL)
1813 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); 1812 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
1814 gma_read16(hw, port, GM_GP_CTRL); 1813 gma_read16(hw, port, GM_GP_CTRL);
1815 1814
1815 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1816 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1817 u32 io = skge_read32(hw, B2_GP_IO);
1818
1819 io |= GP_DIR_9 | GP_IO_9;
1820 skge_write32(hw, B2_GP_IO, io);
1821 skge_read32(hw, B2_GP_IO);
1822 }
1823
1816 /* set GPHY Control reset */ 1824 /* set GPHY Control reset */
1817 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1825 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1818 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 1826 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1819} 1827}
1820 1828
1821static void yukon_get_stats(struct skge_port *skge, u64 *data) 1829static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1874,10 +1882,8 @@ static void yukon_link_up(struct skge_port *skge)
1874 int port = skge->port; 1882 int port = skge->port;
1875 u16 reg; 1883 u16 reg;
1876 1884
1877 pr_debug("yukon_link_up\n");
1878
1879 /* Enable Transmit FIFO Underrun */ 1885 /* Enable Transmit FIFO Underrun */
1880 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); 1886 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1881 1887
1882 reg = gma_read16(hw, port, GM_GP_CTRL); 1888 reg = gma_read16(hw, port, GM_GP_CTRL);
1883 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 1889 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
@@ -1897,7 +1903,6 @@ static void yukon_link_down(struct skge_port *skge)
1897 int port = skge->port; 1903 int port = skge->port;
1898 u16 ctrl; 1904 u16 ctrl;
1899 1905
1900 pr_debug("yukon_link_down\n");
1901 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); 1906 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1902 1907
1903 ctrl = gma_read16(hw, port, GM_GP_CTRL); 1908 ctrl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2113,7 +2118,6 @@ static int skge_up(struct net_device *dev)
2113 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2118 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2114 skge_led(skge, LED_MODE_ON); 2119 skge_led(skge, LED_MODE_ON);
2115 2120
2116 pr_debug("skge_up completed\n");
2117 return 0; 2121 return 0;
2118 2122
2119 free_rx_ring: 2123 free_rx_ring:
@@ -2136,15 +2140,20 @@ static int skge_down(struct net_device *dev)
2136 2140
2137 netif_stop_queue(dev); 2141 netif_stop_queue(dev);
2138 2142
2143 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
2144 if (hw->chip_id == CHIP_ID_GENESIS)
2145 genesis_stop(skge);
2146 else
2147 yukon_stop(skge);
2148
2149 hw->intr_mask &= ~portirqmask[skge->port];
2150 skge_write32(hw, B0_IMSK, hw->intr_mask);
2151
2139 /* Stop transmitter */ 2152 /* Stop transmitter */
2140 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2153 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2141 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2154 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2142 RB_RST_SET|RB_DIS_OP_MD); 2155 RB_RST_SET|RB_DIS_OP_MD);
2143 2156
2144 if (hw->chip_id == CHIP_ID_GENESIS)
2145 genesis_stop(skge);
2146 else
2147 yukon_stop(skge);
2148 2157
2149 /* Disable Force Sync bit and Enable Alloc bit */ 2158 /* Disable Force Sync bit and Enable Alloc bit */
2150 skge_write8(hw, SK_REG(port, TXA_CTRL), 2159 skge_write8(hw, SK_REG(port, TXA_CTRL),
@@ -2368,8 +2377,6 @@ static void genesis_set_multicast(struct net_device *dev)
2368 u32 mode; 2377 u32 mode;
2369 u8 filter[8]; 2378 u8 filter[8];
2370 2379
2371 pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
2372
2373 mode = xm_read32(hw, port, XM_MODE); 2380 mode = xm_read32(hw, port, XM_MODE);
2374 mode |= XM_MD_ENA_HASH; 2381 mode |= XM_MD_ENA_HASH;
2375 if (dev->flags & IFF_PROMISC) 2382 if (dev->flags & IFF_PROMISC)
@@ -2531,8 +2538,6 @@ static int skge_poll(struct net_device *dev, int *budget)
2531 unsigned int to_do = min(dev->quota, *budget); 2538 unsigned int to_do = min(dev->quota, *budget);
2532 unsigned int work_done = 0; 2539 unsigned int work_done = 0;
2533 2540
2534 pr_debug("skge_poll\n");
2535
2536 for (e = ring->to_clean; work_done < to_do; e = e->next) { 2541 for (e = ring->to_clean; work_done < to_do; e = e->next) {
2537 struct skge_rx_desc *rd = e->desc; 2542 struct skge_rx_desc *rd = e->desc;
2538 struct sk_buff *skb; 2543 struct sk_buff *skb;
@@ -2673,9 +2678,9 @@ static void skge_error_irq(struct skge_hw *hw)
2673 if (hw->chip_id == CHIP_ID_GENESIS) { 2678 if (hw->chip_id == CHIP_ID_GENESIS) {
2674 /* clear xmac errors */ 2679 /* clear xmac errors */
2675 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 2680 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2676 skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); 2681 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
2677 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 2682 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2678 skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); 2683 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
2679 } else { 2684 } else {
2680 /* Timestamp (unused) overflow */ 2685 /* Timestamp (unused) overflow */
2681 if (hwstatus & IS_IRQ_TIST_OV) 2686 if (hwstatus & IS_IRQ_TIST_OV)
@@ -3001,9 +3006,6 @@ static int skge_reset(struct skge_hw *hw)
3001 3006
3002 skge_write32(hw, B0_IMSK, hw->intr_mask); 3007 skge_write32(hw, B0_IMSK, hw->intr_mask);
3003 3008
3004 if (hw->chip_id != CHIP_ID_GENESIS)
3005 skge_write8(hw, GMAC_IRQ_MSK, 0);
3006
3007 spin_lock_bh(&hw->phy_lock); 3009 spin_lock_bh(&hw->phy_lock);
3008 for (i = 0; i < hw->ports; i++) { 3010 for (i = 0; i < hw->ports; i++) {
3009 if (hw->chip_id == CHIP_ID_GENESIS) 3011 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -3232,6 +3234,11 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3232 dev0 = hw->dev[0]; 3234 dev0 = hw->dev[0];
3233 unregister_netdev(dev0); 3235 unregister_netdev(dev0);
3234 3236
3237 skge_write32(hw, B0_IMSK, 0);
3238 skge_write16(hw, B0_LED, LED_STAT_OFF);
3239 skge_pci_clear(hw);
3240 skge_write8(hw, B0_CTST, CS_RST_SET);
3241
3235 tasklet_kill(&hw->ext_tasklet); 3242 tasklet_kill(&hw->ext_tasklet);
3236 3243
3237 free_irq(pdev->irq, hw); 3244 free_irq(pdev->irq, hw);
@@ -3240,7 +3247,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3240 if (dev1) 3247 if (dev1)
3241 free_netdev(dev1); 3248 free_netdev(dev1);
3242 free_netdev(dev0); 3249 free_netdev(dev0);
3243 skge_write16(hw, B0_LED, LED_STAT_OFF); 3250
3244 iounmap(hw->regs); 3251 iounmap(hw->regs);
3245 kfree(hw); 3252 kfree(hw);
3246 pci_set_drvdata(pdev, NULL); 3253 pci_set_drvdata(pdev, NULL);
@@ -3259,7 +3266,10 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
3259 struct skge_port *skge = netdev_priv(dev); 3266 struct skge_port *skge = netdev_priv(dev);
3260 if (netif_running(dev)) { 3267 if (netif_running(dev)) {
3261 netif_carrier_off(dev); 3268 netif_carrier_off(dev);
3262 skge_down(dev); 3269 if (skge->wol)
3270 netif_stop_queue(dev);
3271 else
3272 skge_down(dev);
3263 } 3273 }
3264 netif_device_detach(dev); 3274 netif_device_detach(dev);
3265 wol |= skge->wol; 3275 wol |= skge->wol;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index f1680beb8e68..efbf98c675d2 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2008,7 +2008,7 @@ enum {
2008 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ 2008 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
2009 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 2009 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
2010 2010
2011#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | GM_IS_TX_FF_UR) 2011#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
2012 2012
2013/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ 2013/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
2014 /* Bits 15.. 2: reserved */ 2014 /* Bits 15.. 2: reserved */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 4e19220473d0..c796f41b4a52 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1817,6 +1817,10 @@ spider_net_setup_phy(struct spider_net_card *card)
1817 /* LEDs active in both modes, autosense prio = fiber */ 1817 /* LEDs active in both modes, autosense prio = fiber */
1818 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f); 1818 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
1819 1819
1820 /* switch off fibre autoneg */
1821 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
1822 spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
1823
1820 phy->def->ops->read_link(phy); 1824 phy->def->ops->read_link(phy);
1821 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, 1825 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
1822 phy->speed, phy->duplex==1 ? "Full" : "Half"); 1826 phy->speed, phy->duplex==1 ? "Full" : "Half");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7599f52e15b3..81f4aedf534c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
67 67
68#define DRV_MODULE_NAME "tg3" 68#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": " 69#define PFX DRV_MODULE_NAME ": "
70#define DRV_MODULE_VERSION "3.39" 70#define DRV_MODULE_VERSION "3.40"
71#define DRV_MODULE_RELDATE "September 5, 2005" 71#define DRV_MODULE_RELDATE "September 15, 2005"
72 72
73#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -3442,31 +3442,47 @@ static void tg3_tx_timeout(struct net_device *dev)
3442 schedule_work(&tp->reset_task); 3442 schedule_work(&tp->reset_task);
3443} 3443}
3444 3444
3445/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3446static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3447{
3448 u32 base = (u32) mapping & 0xffffffff;
3449
3450 return ((base > 0xffffdcc0) &&
3451 (base + len + 8 < base));
3452}
3453
3445static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); 3454static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3446 3455
3447static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, 3456static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3448 u32 guilty_entry, int guilty_len, 3457 u32 last_plus_one, u32 *start,
3449 u32 last_plus_one, u32 *start, u32 mss) 3458 u32 base_flags, u32 mss)
3450{ 3459{
3451 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC); 3460 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3452 dma_addr_t new_addr; 3461 dma_addr_t new_addr = 0;
3453 u32 entry = *start; 3462 u32 entry = *start;
3454 int i; 3463 int i, ret = 0;
3455 3464
3456 if (!new_skb) { 3465 if (!new_skb) {
3457 dev_kfree_skb(skb); 3466 ret = -1;
3458 return -1; 3467 } else {
3468 /* New SKB is guaranteed to be linear. */
3469 entry = *start;
3470 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3471 PCI_DMA_TODEVICE);
3472 /* Make sure new skb does not cross any 4G boundaries.
3473 * Drop the packet if it does.
3474 */
3475 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3476 ret = -1;
3477 dev_kfree_skb(new_skb);
3478 new_skb = NULL;
3479 } else {
3480 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3481 base_flags, 1 | (mss << 1));
3482 *start = NEXT_TX(entry);
3483 }
3459 } 3484 }
3460 3485
3461 /* New SKB is guaranteed to be linear. */
3462 entry = *start;
3463 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3464 PCI_DMA_TODEVICE);
3465 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3466 (skb->ip_summed == CHECKSUM_HW) ?
3467 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3468 *start = NEXT_TX(entry);
3469
3470 /* Now clean up the sw ring entries. */ 3486 /* Now clean up the sw ring entries. */
3471 i = 0; 3487 i = 0;
3472 while (entry != last_plus_one) { 3488 while (entry != last_plus_one) {
@@ -3491,7 +3507,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3491 3507
3492 dev_kfree_skb(skb); 3508 dev_kfree_skb(skb);
3493 3509
3494 return 0; 3510 return ret;
3495} 3511}
3496 3512
3497static void tg3_set_txd(struct tg3 *tp, int entry, 3513static void tg3_set_txd(struct tg3 *tp, int entry,
@@ -3517,19 +3533,10 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
3517 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; 3533 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3518} 3534}
3519 3535
3520static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3521{
3522 u32 base = (u32) mapping & 0xffffffff;
3523
3524 return ((base > 0xffffdcc0) &&
3525 (base + len + 8 < base));
3526}
3527
3528static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 3536static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3529{ 3537{
3530 struct tg3 *tp = netdev_priv(dev); 3538 struct tg3 *tp = netdev_priv(dev);
3531 dma_addr_t mapping; 3539 dma_addr_t mapping;
3532 unsigned int i;
3533 u32 len, entry, base_flags, mss; 3540 u32 len, entry, base_flags, mss;
3534 int would_hit_hwbug; 3541 int would_hit_hwbug;
3535 3542
@@ -3624,7 +3631,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3624 would_hit_hwbug = 0; 3631 would_hit_hwbug = 0;
3625 3632
3626 if (tg3_4g_overflow_test(mapping, len)) 3633 if (tg3_4g_overflow_test(mapping, len))
3627 would_hit_hwbug = entry + 1; 3634 would_hit_hwbug = 1;
3628 3635
3629 tg3_set_txd(tp, entry, mapping, len, base_flags, 3636 tg3_set_txd(tp, entry, mapping, len, base_flags,
3630 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 3637 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -3648,12 +3655,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3648 tp->tx_buffers[entry].skb = NULL; 3655 tp->tx_buffers[entry].skb = NULL;
3649 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); 3656 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3650 3657
3651 if (tg3_4g_overflow_test(mapping, len)) { 3658 if (tg3_4g_overflow_test(mapping, len))
3652 /* Only one should match. */ 3659 would_hit_hwbug = 1;
3653 if (would_hit_hwbug)
3654 BUG();
3655 would_hit_hwbug = entry + 1;
3656 }
3657 3660
3658 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 3661 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3659 tg3_set_txd(tp, entry, mapping, len, 3662 tg3_set_txd(tp, entry, mapping, len,
@@ -3669,34 +3672,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3669 if (would_hit_hwbug) { 3672 if (would_hit_hwbug) {
3670 u32 last_plus_one = entry; 3673 u32 last_plus_one = entry;
3671 u32 start; 3674 u32 start;
3672 unsigned int len = 0;
3673
3674 would_hit_hwbug -= 1;
3675 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3676 entry &= (TG3_TX_RING_SIZE - 1);
3677 start = entry;
3678 i = 0;
3679 while (entry != last_plus_one) {
3680 if (i == 0)
3681 len = skb_headlen(skb);
3682 else
3683 len = skb_shinfo(skb)->frags[i-1].size;
3684 3675
3685 if (entry == would_hit_hwbug) 3676 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3686 break; 3677 start &= (TG3_TX_RING_SIZE - 1);
3687
3688 i++;
3689 entry = NEXT_TX(entry);
3690
3691 }
3692 3678
3693 /* If the workaround fails due to memory/mapping 3679 /* If the workaround fails due to memory/mapping
3694 * failure, silently drop this packet. 3680 * failure, silently drop this packet.
3695 */ 3681 */
3696 if (tigon3_4gb_hwbug_workaround(tp, skb, 3682 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3697 entry, len, 3683 &start, base_flags, mss))
3698 last_plus_one,
3699 &start, mss))
3700 goto out_unlock; 3684 goto out_unlock;
3701 3685
3702 entry = start; 3686 entry = start;
@@ -9271,6 +9255,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9271 static struct pci_device_id write_reorder_chipsets[] = { 9255 static struct pci_device_id write_reorder_chipsets[] = {
9272 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9256 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9273 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9257 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9258 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9259 PCI_DEVICE_ID_AMD_K8_NB) },
9274 { }, 9260 { },
9275 }; 9261 };
9276 u32 misc_ctrl_reg; 9262 u32 misc_ctrl_reg;
@@ -9285,7 +9271,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9285 tp->tg3_flags2 |= TG3_FLG2_SUN_570X; 9271 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9286#endif 9272#endif
9287 9273
9288 /* If we have an AMD 762 chipset, write 9274 /* If we have an AMD 762 or K8 chipset, write
9289 * reordering to the mailbox registers done by the host 9275 * reordering to the mailbox registers done by the host
9290 * controller can cause major troubles. We read back from 9276 * controller can cause major troubles. We read back from
9291 * every mailbox register write to force the writes to be 9277 * every mailbox register write to force the writes to be
@@ -9532,7 +9518,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9532 tp->write32_rx_mbox = tg3_write_indirect_mbox; 9518 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9533 9519
9534 iounmap(tp->regs); 9520 iounmap(tp->regs);
9535 tp->regs = 0; 9521 tp->regs = NULL;
9536 9522
9537 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 9523 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9538 pci_cmd &= ~PCI_COMMAND_MEMORY; 9524 pci_cmd &= ~PCI_COMMAND_MEMORY;
@@ -10680,7 +10666,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10680err_out_iounmap: 10666err_out_iounmap:
10681 if (tp->regs) { 10667 if (tp->regs) {
10682 iounmap(tp->regs); 10668 iounmap(tp->regs);
10683 tp->regs = 0; 10669 tp->regs = NULL;
10684 } 10670 }
10685 10671
10686err_out_free_dev: 10672err_out_free_dev:
@@ -10705,7 +10691,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
10705 unregister_netdev(dev); 10691 unregister_netdev(dev);
10706 if (tp->regs) { 10692 if (tp->regs) {
10707 iounmap(tp->regs); 10693 iounmap(tp->regs);
10708 tp->regs = 0; 10694 tp->regs = NULL;
10709 } 10695 }
10710 free_netdev(dev); 10696 free_netdev(dev);
10711 pci_release_regions(pdev); 10697 pci_release_regions(pdev);
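
Several of the tg3 hunks above deal with posted-write ordering: on AMD 762/K8 host bridges the driver reads back every mailbox register write so the write actually reaches the chip before anything that depends on it. A minimal sketch of that flush-by-read pattern over memory-mapped I/O (the accessors are illustrative stand-ins for the driver's readl/writel wrappers):

#include <stdint.h>

static inline void mmio_write32(volatile uint32_t *addr, uint32_t val)
{
        *addr = val;
}

static inline uint32_t mmio_read32(volatile uint32_t *addr)
{
        return *addr;
}

/* Write a mailbox register, then read it back so the posted write is
 * forced out to the device instead of lingering in a bridge buffer.
 */
void mailbox_write_flush(volatile uint32_t *mbox, uint32_t val)
{
        mmio_write32(mbox, val);
        (void)mmio_read32(mbox);
}
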
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 26cc4f6378c7..60d1e05ab732 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -117,7 +117,7 @@ static int xircom_open(struct net_device *dev);
117static int xircom_close(struct net_device *dev); 117static int xircom_close(struct net_device *dev);
118static void xircom_up(struct xircom_private *card); 118static void xircom_up(struct xircom_private *card);
119static struct net_device_stats *xircom_get_stats(struct net_device *dev); 119static struct net_device_stats *xircom_get_stats(struct net_device *dev);
120#if CONFIG_NET_POLL_CONTROLLER 120#ifdef CONFIG_NET_POLL_CONTROLLER
121static void xircom_poll_controller(struct net_device *dev); 121static void xircom_poll_controller(struct net_device *dev);
122#endif 122#endif
123 123
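
The xircom_cb change swaps '#if CONFIG_NET_POLL_CONTROLLER' for '#ifdef'. A bool config symbol is either defined as 1 or not defined at all, so #ifdef is the test that matches; '#if' on an undefined name quietly evaluates to 0 (and draws -Wundef warnings), and is an outright preprocessor error for a macro defined to nothing. A tiny standalone illustration:

#include <stdio.h>

#define FEATURE_EMPTY          /* defined, but expands to nothing */

int main(void)
{
#ifdef FEATURE_EMPTY
        puts("ifdef: feature present");        /* tests definedness */
#endif
#if defined(FEATURE_EMPTY)
        puts("defined(): equivalent spelling");
#endif
        /* '#if FEATURE_EMPTY' would fail to preprocess here, and
         * '#if FEATURE_MISSING' would silently evaluate to 0.
         */
        return 0;
}
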
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 48c03c11cd9a..a01efa6d5c62 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
72 } 72 }
73 skb_reserve(skb, 4); 73 skb_reserve(skb, 4);
74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0); 74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
75 data = (cisco_packet*)skb->data; 75 data = (cisco_packet*)(skb->data + 4);
76 76
77 data->type = htonl(type); 77 data->type = htonl(type);
78 data->par1 = htonl(par1); 78 data->par1 = htonl(par1);
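
The keepalive fix above points 'data' past the 4-byte Cisco HDLC header written at the head of the buffer, instead of overlaying the keepalive fields on the header itself. A small sketch of the intended layout (the structures are illustrative, not the driver's definitions):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct link_hdr  { uint8_t address, control; uint16_t protocol; };
struct keepalive { uint32_t type, par1, par2; };

void build_keepalive(uint8_t *buf, uint32_t type, uint32_t par1, uint32_t par2)
{
        struct link_hdr *hdr = (struct link_hdr *)buf;
        /* payload starts after the 4-byte header, not on top of it */
        struct keepalive *ka = (struct keepalive *)(buf + sizeof(*hdr));

        memset(hdr, 0, sizeof(*hdr));
        ka->type = htonl(type);
        ka->par1 = htonl(par1);
        ka->par2 = htonl(par2);
}
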
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 2be65d308fbe..06998c2240d9 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -6852,7 +6852,10 @@ static inline char *airo_translate_scan(struct net_device *dev,
6852 /* Add frequency */ 6852 /* Add frequency */
6853 iwe.cmd = SIOCGIWFREQ; 6853 iwe.cmd = SIOCGIWFREQ;
6854 iwe.u.freq.m = le16_to_cpu(bss->dsChannel); 6854 iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
6855 iwe.u.freq.m = frequency_list[iwe.u.freq.m] * 100000; 6855 /* iwe.u.freq.m containt the channel (starting 1), our
6856 * frequency_list array start at index 0...
6857 */
6858 iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
6856 iwe.u.freq.e = 1; 6859 iwe.u.freq.e = 1;
6857 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); 6860 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
6858 6861
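
The comment added in the airo hunk spells out the off-by-one being fixed: the scan result carries a 1-based channel number, while frequency_list[] is a 0-based array, so the lookup must subtract one. A minimal illustration with the standard 2.4 GHz channel table:

#include <stdio.h>

/* Channel centre frequencies in MHz, indexed from 0. */
static const int frequency_list[] = {
        2412, 2417, 2422, 2427, 2432, 2437, 2442,
        2447, 2452, 2457, 2462, 2467, 2472, 2484,
};

/* Channels are numbered from 1, so index with (channel - 1). */
static int channel_to_khz(int channel)
{
        return frequency_list[channel - 1] * 1000;
}

int main(void)
{
        printf("channel 1  -> %d kHz\n", channel_to_khz(1));   /* 2412000 */
        printf("channel 11 -> %d kHz\n", channel_to_khz(11));  /* 2462000 */
        return 0;
}
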
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 8122fe734aa7..b1ba429e0a2d 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -558,7 +558,7 @@ static int configure_device (struct pci_func *func)
558 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE); 558 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE);
559 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY); 559 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY);
560 560
561 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L); 561 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L);
562 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE); 562 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE);
563 563
564 return 0; 564 return 0;
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 0e0947601526..898f6da6f0de 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -2526,7 +2526,6 @@ configure_new_function(struct controller *ctrl, struct pci_func *func,
2526 int cloop; 2526 int cloop;
2527 u8 temp_byte; 2527 u8 temp_byte;
2528 u8 class_code; 2528 u8 class_code;
2529 u16 temp_word;
2530 u32 rc; 2529 u32 rc;
2531 u32 temp_register; 2530 u32 temp_register;
2532 u32 base; 2531 u32 base;
@@ -2682,8 +2681,7 @@ configure_new_function(struct controller *ctrl, struct pci_func *func,
2682 } /* End of base register loop */ 2681 } /* End of base register loop */
2683 2682
2684 /* disable ROM base Address */ 2683 /* disable ROM base Address */
2685 temp_word = 0x00L; 2684 rc = pci_bus_write_config_dword (pci_bus, devfn, PCI_ROM_ADDRESS, 0x00);
2686 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_ROM_ADDRESS, temp_word);
2687 2685
2688 /* Set HP parameters (Cache Line Size, Latency Timer) */ 2686 /* Set HP parameters (Cache Line Size, Latency Timer) */
2689 rc = pciehprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL); 2687 rc = pciehprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 783b5abb0717..91c9903e621f 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -2824,8 +2824,7 @@ static int configure_new_function (struct controller * ctrl, struct pci_func * f
2824 } 2824 }
2825#endif 2825#endif
2826 /* Disable ROM base Address */ 2826 /* Disable ROM base Address */
2827 temp_word = 0x00L; 2827 rc = pci_bus_write_config_dword (pci_bus, devfn, PCI_ROM_ADDRESS, 0x00);
2828 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_ROM_ADDRESS, temp_word);
2829 2828
2830 /* Set HP parameters (Cache Line Size, Latency Timer) */ 2829 /* Set HP parameters (Cache Line Size, Latency Timer) */
2831 rc = shpchprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL); 2830 rc = shpchprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL);
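
The ibmphp, pciehp and shpchp hunks all make the same correction: PCI_ROM_ADDRESS is a full 32-bit BAR (offset 0x30 in a type-0 header), so disabling the expansion ROM needs a dword config write; the old word write only cleared half the register. A kernel-style sketch using the accessor that appears in the patch:

#include <linux/pci.h>

/* Disable a function's expansion ROM by clearing its ROM BAR with a
 * 32-bit config space write.
 */
static int disable_expansion_rom(struct pci_bus *bus, unsigned int devfn)
{
        return pci_bus_write_config_dword(bus, devfn, PCI_ROM_ADDRESS, 0);
}
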
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 992db89adce7..259d247b7551 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -309,17 +309,25 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
309 309
310 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); 310 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
311 311
312 /* If we're in D3, force entire word to 0. 312 /* If we're (effectively) in D3, force entire word to 0.
313 * This doesn't affect PME_Status, disables PME_En, and 313 * This doesn't affect PME_Status, disables PME_En, and
314 * sets PowerState to 0. 314 * sets PowerState to 0.
315 */ 315 */
316 if (dev->current_state >= PCI_D3hot) { 316 switch (dev->current_state) {
317 if (!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 317 case PCI_UNKNOWN: /* Boot-up */
318 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
319 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
318 need_restore = 1; 320 need_restore = 1;
321 /* Fall-through: force to D0 */
322 case PCI_D3hot:
323 case PCI_D3cold:
324 case PCI_POWER_ERROR:
319 pmcsr = 0; 325 pmcsr = 0;
320 } else { 326 break;
327 default:
321 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 328 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
322 pmcsr |= state; 329 pmcsr |= state;
330 break;
323 } 331 }
324 332
325 /* enter specified state */ 333 /* enter specified state */
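
The pci_set_power_state() rework above chooses how to build the new PMCSR value from the device's current state: leaving D3 (or the unknown boot-up state, where need_restore is set if the hardware reports D3hot without No_Soft_Reset) the whole control word is forced to zero, which selects D0, disables PME_En and leaves the write-1-to-clear PME_Status alone; from any other state only the two power-state bits are replaced. A compact sketch of that bit manipulation (mask value per the PCI PM spec):

#include <stdint.h>

#define PM_CTRL_STATE_MASK  0x0003   /* D0..D3hot encoding in PMCSR */

/* Compute the PMCSR value to write for a move to 'new_state' (0..3). */
uint16_t next_pmcsr(uint16_t pmcsr, int leaving_d3, int new_state)
{
        if (leaving_d3)
                return 0;                    /* force D0, PME_En off */

        pmcsr &= ~PM_CTRL_STATE_MASK;        /* keep all other bits  */
        pmcsr |= (uint16_t)new_state;        /* select the new state */
        return pmcsr;
}
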
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index f0997c36c9b7..ba4d78e5b121 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1045,7 +1045,18 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
1045{ 1045{
1046 struct yenta_socket *socket; 1046 struct yenta_socket *socket;
1047 int ret; 1047 int ret;
1048 1048
1049 /*
1050 * If we failed to assign proper bus numbers for this cardbus
1051 * controller during PCI probe, its subordinate pci_bus is NULL.
1052 * Bail out if so.
1053 */
1054 if (!dev->subordinate) {
1055 printk(KERN_ERR "Yenta: no bus associated with %s! "
1056 "(try 'pci=assign-busses')\n", pci_name(dev));
1057 return -ENODEV;
1058 }
1059
1049 socket = kmalloc(sizeof(struct yenta_socket), GFP_KERNEL); 1060 socket = kmalloc(sizeof(struct yenta_socket), GFP_KERNEL);
1050 if (!socket) 1061 if (!socket)
1051 return -ENOMEM; 1062 return -ENOMEM;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index aac83ce6469c..a1c52a682191 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/blacklist.c 2 * drivers/s390/cio/blacklist.c
3 * S/390 common I/O routines -- blacklisting of specific devices 3 * S/390 common I/O routines -- blacklisting of specific devices
4 * $Revision: 1.34 $ 4 * $Revision: 1.35 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -35,7 +35,7 @@
35 */ 35 */
36 36
37/* 65536 bits to indicate if a devno is blacklisted or not */ 37/* 65536 bits to indicate if a devno is blacklisted or not */
38#define __BL_DEV_WORDS (__MAX_SUBCHANNELS + (8*sizeof(long) - 1) / \ 38#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
39 (8*sizeof(long))) 39 (8*sizeof(long)))
40static unsigned long bl_dev[__BL_DEV_WORDS]; 40static unsigned long bl_dev[__BL_DEV_WORDS];
41typedef enum {add, free} range_action; 41typedef enum {add, free} range_action;
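
The blacklist fix is a one-character precedence repair: '/' binds tighter than '+', so the old expansion added __MAX_SUBCHANNELS to (bits_per_long - 1) / bits_per_long, i.e. to zero, leaving one long per devno instead of one bit per devno. The intended idiom is the usual round-up division; a standalone check:

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(long))
#define MAX_ITEMS       65536UL

/* Broken: '/' binds tighter than '+', so this is MAX_ITEMS + 0. */
#define WORDS_BROKEN    (MAX_ITEMS + (BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Fixed: round-up division, ceil(MAX_ITEMS / BITS_PER_LONG). */
#define WORDS_FIXED     ((MAX_ITEMS + (BITS_PER_LONG - 1)) / BITS_PER_LONG)

int main(void)
{
        printf("broken: %lu longs\n", (unsigned long)WORDS_BROKEN); /* 65536 */
        printf("fixed:  %lu longs\n", (unsigned long)WORDS_FIXED);  /* 1024 on 64-bit */
        return 0;
}
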
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 6aeef3bacc33..0cb47eca91f3 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -682,9 +682,6 @@ z90crypt_cleanup_module(void)
682 del_timer(&config_timer); 682 del_timer(&config_timer);
683 del_timer(&cleanup_timer); 683 del_timer(&cleanup_timer);
684 684
685 if (z90_device_work)
686 destroy_workqueue(z90_device_work);
687
688 destroy_z90crypt(); 685 destroy_z90crypt();
689 686
690 PRINTKN("Unloaded.\n"); 687 PRINTKN("Unloaded.\n");
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 96ca863eaff2..0db4f57a6a95 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: ctcmain.c,v 1.74 2005/03/24 09:04:17 mschwide Exp $ 2 * $Id: ctcmain.c,v 1.78 2005/09/07 12:18:02 pavlic Exp $
3 * 3 *
4 * CTC / ESCON network driver 4 * CTC / ESCON network driver
5 * 5 *
@@ -37,10 +37,9 @@
37 * along with this program; if not, write to the Free Software 37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 38 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
39 * 39 *
40 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.74 $ 40 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.78 $
41 * 41 *
42 */ 42 */
43
44#undef DEBUG 43#undef DEBUG
45#include <linux/module.h> 44#include <linux/module.h>
46#include <linux/init.h> 45#include <linux/init.h>
@@ -135,7 +134,7 @@ static const char *dev_event_names[] = {
135 "TX down", 134 "TX down",
136 "Restart", 135 "Restart",
137}; 136};
138 137
139/** 138/**
140 * Events of the channel statemachine 139 * Events of the channel statemachine
141 */ 140 */
@@ -249,7 +248,7 @@ static void
249print_banner(void) 248print_banner(void)
250{ 249{
251 static int printed = 0; 250 static int printed = 0;
252 char vbuf[] = "$Revision: 1.74 $"; 251 char vbuf[] = "$Revision: 1.78 $";
253 char *version = vbuf; 252 char *version = vbuf;
254 253
255 if (printed) 254 if (printed)
@@ -334,7 +333,7 @@ static const char *ch_state_names[] = {
334 "Restarting", 333 "Restarting",
335 "Not operational", 334 "Not operational",
336}; 335};
337 336
338#ifdef DEBUG 337#ifdef DEBUG
339/** 338/**
340 * Dump header and first 16 bytes of an sk_buff for debugging purposes. 339 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
@@ -671,7 +670,7 @@ static void
671fsm_action_nop(fsm_instance * fi, int event, void *arg) 670fsm_action_nop(fsm_instance * fi, int event, void *arg)
672{ 671{
673} 672}
674 673
675/** 674/**
676 * Actions for channel - statemachines. 675 * Actions for channel - statemachines.
677 *****************************************************************************/ 676 *****************************************************************************/
@@ -1514,7 +1513,6 @@ ch_action_reinit(fsm_instance *fi, int event, void *arg)
1514 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev); 1513 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1515} 1514}
1516 1515
1517
1518/** 1516/**
1519 * The statemachine for a channel. 1517 * The statemachine for a channel.
1520 */ 1518 */
@@ -1625,7 +1623,7 @@ static const fsm_node ch_fsm[] = {
1625}; 1623};
1626 1624
1627static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node); 1625static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1628 1626
1629/** 1627/**
1630 * Functions related to setup and device detection. 1628 * Functions related to setup and device detection.
1631 *****************************************************************************/ 1629 *****************************************************************************/
@@ -1976,7 +1974,7 @@ ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1976 fsm_event(ch->fsm, CH_EVENT_IRQ, ch); 1974 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1977 1975
1978} 1976}
1979 1977
1980/** 1978/**
1981 * Actions for interface - statemachine. 1979 * Actions for interface - statemachine.
1982 *****************************************************************************/ 1980 *****************************************************************************/
@@ -2209,13 +2207,18 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2209 int rc = 0; 2207 int rc = 0;
2210 2208
2211 DBF_TEXT(trace, 5, __FUNCTION__); 2209 DBF_TEXT(trace, 5, __FUNCTION__);
2210 /* we need to acquire the lock for testing the state
2211 * otherwise we can have an IRQ changing the state to
2212 * TXIDLE after the test but before acquiring the lock.
2213 */
2214 spin_lock_irqsave(&ch->collect_lock, saveflags);
2212 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) { 2215 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2213 int l = skb->len + LL_HEADER_LENGTH; 2216 int l = skb->len + LL_HEADER_LENGTH;
2214 2217
2215 spin_lock_irqsave(&ch->collect_lock, saveflags); 2218 if (ch->collect_len + l > ch->max_bufsize - 2) {
2216 if (ch->collect_len + l > ch->max_bufsize - 2) 2219 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2217 rc = -EBUSY; 2220 return -EBUSY;
2218 else { 2221 } else {
2219 atomic_inc(&skb->users); 2222 atomic_inc(&skb->users);
2220 header.length = l; 2223 header.length = l;
2221 header.type = skb->protocol; 2224 header.type = skb->protocol;
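
The comment added at the top of this hunk states the rule being enforced: the channel state must be tested with collect_lock already held, otherwise an interrupt can switch the channel to TXIDLE between the test and the lock acquisition and the frame would go down the wrong path. A hedged kernel-style sketch of that check-under-lock pattern (the state field and queueing step are stand-ins):

#include <linux/spinlock.h>
#include <linux/errno.h>

#define CH_STATE_TXIDLE 1

struct tx_channel {
        spinlock_t collect_lock;
        int        state;          /* changed from IRQ context */
        int        collect_len;
        int        max_bufsize;
};

int queue_or_transmit(struct tx_channel *ch, int frame_len)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->collect_lock, flags);
        if (ch->state != CH_STATE_TXIDLE) {
                if (ch->collect_len + frame_len > ch->max_bufsize - 2) {
                        spin_unlock_irqrestore(&ch->collect_lock, flags);
                        return -EBUSY;
                }
                ch->collect_len += frame_len;   /* queued for later */
                spin_unlock_irqrestore(&ch->collect_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&ch->collect_lock, flags);
        /* ... channel is idle: start the transmission directly ... */
        return 0;
}
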
@@ -2231,7 +2234,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2231 int ccw_idx; 2234 int ccw_idx;
2232 struct sk_buff *nskb; 2235 struct sk_buff *nskb;
2233 unsigned long hi; 2236 unsigned long hi;
2234 2237 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2235 /** 2238 /**
2236 * Protect skb against beeing free'd by upper 2239 * Protect skb against beeing free'd by upper
2237 * layers. 2240 * layers.
@@ -2256,6 +2259,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2256 if (!nskb) { 2259 if (!nskb) {
2257 atomic_dec(&skb->users); 2260 atomic_dec(&skb->users);
2258 skb_pull(skb, LL_HEADER_LENGTH + 2); 2261 skb_pull(skb, LL_HEADER_LENGTH + 2);
2262 ctc_clear_busy(ch->netdev);
2259 return -ENOMEM; 2263 return -ENOMEM;
2260 } else { 2264 } else {
2261 memcpy(skb_put(nskb, skb->len), 2265 memcpy(skb_put(nskb, skb->len),
@@ -2281,6 +2285,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2281 */ 2285 */
2282 atomic_dec(&skb->users); 2286 atomic_dec(&skb->users);
2283 skb_pull(skb, LL_HEADER_LENGTH + 2); 2287 skb_pull(skb, LL_HEADER_LENGTH + 2);
2288 ctc_clear_busy(ch->netdev);
2284 return -EBUSY; 2289 return -EBUSY;
2285 } 2290 }
2286 2291
@@ -2327,9 +2332,10 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2327 } 2332 }
2328 } 2333 }
2329 2334
2335 ctc_clear_busy(ch->netdev);
2330 return rc; 2336 return rc;
2331} 2337}
2332 2338
2333/** 2339/**
2334 * Interface API for upper network layers 2340 * Interface API for upper network layers
2335 *****************************************************************************/ 2341 *****************************************************************************/
@@ -2421,7 +2427,6 @@ ctc_tx(struct sk_buff *skb, struct net_device * dev)
2421 dev->trans_start = jiffies; 2427 dev->trans_start = jiffies;
2422 if (transmit_skb(privptr->channel[WRITE], skb) != 0) 2428 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2423 rc = 1; 2429 rc = 1;
2424 ctc_clear_busy(dev);
2425 return rc; 2430 return rc;
2426} 2431}
2427 2432
@@ -2610,7 +2615,6 @@ stats_write(struct device *dev, struct device_attribute *attr, const char *buf,
2610 return count; 2615 return count;
2611} 2616}
2612 2617
2613
2614static void 2618static void
2615ctc_netdev_unregister(struct net_device * dev) 2619ctc_netdev_unregister(struct net_device * dev)
2616{ 2620{
@@ -2685,7 +2689,6 @@ ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *b
2685 return count; 2689 return count;
2686} 2690}
2687 2691
2688
2689static ssize_t 2692static ssize_t
2690ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf) 2693ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2691{ 2694{
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 3a0285669adf..2ad4797ce024 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -24,7 +24,7 @@
24 24
25#include "qeth_mpc.h" 25#include "qeth_mpc.h"
26 26
27#define VERSION_QETH_H "$Revision: 1.139 $" 27#define VERSION_QETH_H "$Revision: 1.142 $"
28 28
29#ifdef CONFIG_QETH_IPV6 29#ifdef CONFIG_QETH_IPV6
30#define QETH_VERSION_IPV6 ":IPv6" 30#define QETH_VERSION_IPV6 ":IPv6"
@@ -1172,7 +1172,7 @@ extern int
1172qeth_realloc_buffer_pool(struct qeth_card *, int); 1172qeth_realloc_buffer_pool(struct qeth_card *, int);
1173 1173
1174extern int 1174extern int
1175qeth_set_large_send(struct qeth_card *); 1175qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
1176 1176
1177extern void 1177extern void
1178qeth_fill_header(struct qeth_card *, struct qeth_hdr *, 1178qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 79c74f3a11f5..86582cf1e19e 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.214 $) 3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.224 $)
4 * 4 *
5 * Linux on zSeries OSA Express and HiperSockets support 5 * Linux on zSeries OSA Express and HiperSockets support
6 * 6 *
@@ -12,7 +12,7 @@
12 * Frank Pavlic (pavlic@de.ibm.com) and 12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com> 13 * Thomas Spatzier <tspat@de.ibm.com>
14 * 14 *
15 * $Revision: 1.214 $ $Date: 2005/05/04 20:19:18 $ 15 * $Revision: 1.224 $ $Date: 2005/05/04 20:19:18 $
16 * 16 *
17 * This program is free software; you can redistribute it and/or modify 17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by 18 * it under the terms of the GNU General Public License as published by
@@ -29,14 +29,6 @@
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */ 30 */
31 31
32/***
33 * eye catcher; just for debugging purposes
34 */
35void volatile
36qeth_eyecatcher(void)
37{
38 return;
39}
40 32
41#include <linux/config.h> 33#include <linux/config.h>
42#include <linux/module.h> 34#include <linux/module.h>
@@ -80,7 +72,7 @@ qeth_eyecatcher(void)
80#include "qeth_eddp.h" 72#include "qeth_eddp.h"
81#include "qeth_tso.h" 73#include "qeth_tso.h"
82 74
83#define VERSION_QETH_C "$Revision: 1.214 $" 75#define VERSION_QETH_C "$Revision: 1.224 $"
84static const char *version = "qeth S/390 OSA-Express driver"; 76static const char *version = "qeth S/390 OSA-Express driver";
85 77
86/** 78/**
@@ -2759,11 +2751,9 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2759 queue->card->perf_stats.outbound_do_qdio_start_time; 2751 queue->card->perf_stats.outbound_do_qdio_start_time;
2760#endif 2752#endif
2761 if (rc){ 2753 if (rc){
2762 QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
2763 "returned error (%i) on device %s.",
2764 rc, CARD_DDEV_ID(queue->card));
2765 QETH_DBF_TEXT(trace, 2, "flushbuf"); 2754 QETH_DBF_TEXT(trace, 2, "flushbuf");
2766 QETH_DBF_TEXT_(trace, 2, " err%d", rc); 2755 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2756 QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
2767 queue->card->stats.tx_errors += count; 2757 queue->card->stats.tx_errors += count;
2768 /* this must not happen under normal circumstances. if it 2758 /* this must not happen under normal circumstances. if it
2769 * happens something is really wrong -> recover */ 2759 * happens something is really wrong -> recover */
@@ -2909,11 +2899,8 @@ qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
2909 QETH_DBF_TEXT(trace, 6, "qdouhdl"); 2899 QETH_DBF_TEXT(trace, 6, "qdouhdl");
2910 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2900 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2911 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){ 2901 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2912 QETH_DBF_SPRINTF(trace, 2, "On device %s: " 2902 QETH_DBF_TEXT(trace, 2, "achkcond");
2913 "received active check " 2903 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
2914 "condition (0x%08x).",
2915 CARD_BUS_ID(card), status);
2916 QETH_DBF_TEXT(trace, 2, "chkcond");
2917 QETH_DBF_TEXT_(trace, 2, "%08x", status); 2904 QETH_DBF_TEXT_(trace, 2, "%08x", status);
2918 netif_stop_queue(card->dev); 2905 netif_stop_queue(card->dev);
2919 qeth_schedule_recovery(card); 2906 qeth_schedule_recovery(card);
@@ -3356,26 +3343,32 @@ qeth_halt_channel(struct qeth_channel *channel)
3356static int 3343static int
3357qeth_halt_channels(struct qeth_card *card) 3344qeth_halt_channels(struct qeth_card *card)
3358{ 3345{
3359 int rc = 0; 3346 int rc1 = 0, rc2=0, rc3 = 0;
3360 3347
3361 QETH_DBF_TEXT(trace,3,"haltchs"); 3348 QETH_DBF_TEXT(trace,3,"haltchs");
3362 if ((rc = qeth_halt_channel(&card->read))) 3349 rc1 = qeth_halt_channel(&card->read);
3363 return rc; 3350 rc2 = qeth_halt_channel(&card->write);
3364 if ((rc = qeth_halt_channel(&card->write))) 3351 rc3 = qeth_halt_channel(&card->data);
3365 return rc; 3352 if (rc1)
3366 return qeth_halt_channel(&card->data); 3353 return rc1;
3354 if (rc2)
3355 return rc2;
3356 return rc3;
3367} 3357}
3368static int 3358static int
3369qeth_clear_channels(struct qeth_card *card) 3359qeth_clear_channels(struct qeth_card *card)
3370{ 3360{
3371 int rc = 0; 3361 int rc1 = 0, rc2=0, rc3 = 0;
3372 3362
3373 QETH_DBF_TEXT(trace,3,"clearchs"); 3363 QETH_DBF_TEXT(trace,3,"clearchs");
3374 if ((rc = qeth_clear_channel(&card->read))) 3364 rc1 = qeth_clear_channel(&card->read);
3375 return rc; 3365 rc2 = qeth_clear_channel(&card->write);
3376 if ((rc = qeth_clear_channel(&card->write))) 3366 rc3 = qeth_clear_channel(&card->data);
3377 return rc; 3367 if (rc1)
3378 return qeth_clear_channel(&card->data); 3368 return rc1;
3369 if (rc2)
3370 return rc2;
3371 return rc3;
3379} 3372}
3380 3373
3381static int 3374static int
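
qeth_halt_channels() and qeth_clear_channels() above now drive all three channels unconditionally and only then report the first failure, rather than returning on the first error and leaving the remaining channels untouched. The general shape of that "try everything, remember the first error" pattern:

#include <stddef.h>

struct ctx;                              /* illustrative context type */
typedef int (*step_fn)(struct ctx *);

/* Run every step even if an earlier one failed; 0 means success. */
int run_all_report_first_error(struct ctx *c, step_fn *steps, size_t n)
{
        int first_rc = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                int rc = steps[i](c);

                if (rc && !first_rc)
                        first_rc = rc;   /* remember, but keep going */
        }
        return first_rc;
}
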
@@ -3445,23 +3438,23 @@ qeth_mpc_initialize(struct qeth_card *card)
3445 } 3438 }
3446 if ((rc = qeth_cm_enable(card))){ 3439 if ((rc = qeth_cm_enable(card))){
3447 QETH_DBF_TEXT_(setup, 2, "2err%d", rc); 3440 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3448 return rc; 3441 goto out_qdio;
3449 } 3442 }
3450 if ((rc = qeth_cm_setup(card))){ 3443 if ((rc = qeth_cm_setup(card))){
3451 QETH_DBF_TEXT_(setup, 2, "3err%d", rc); 3444 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3452 return rc; 3445 goto out_qdio;
3453 } 3446 }
3454 if ((rc = qeth_ulp_enable(card))){ 3447 if ((rc = qeth_ulp_enable(card))){
3455 QETH_DBF_TEXT_(setup, 2, "4err%d", rc); 3448 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3456 return rc; 3449 goto out_qdio;
3457 } 3450 }
3458 if ((rc = qeth_ulp_setup(card))){ 3451 if ((rc = qeth_ulp_setup(card))){
3459 QETH_DBF_TEXT_(setup, 2, "5err%d", rc); 3452 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3460 return rc; 3453 goto out_qdio;
3461 } 3454 }
3462 if ((rc = qeth_alloc_qdio_buffers(card))){ 3455 if ((rc = qeth_alloc_qdio_buffers(card))){
3463 QETH_DBF_TEXT_(setup, 2, "5err%d", rc); 3456 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3464 return rc; 3457 goto out_qdio;
3465 } 3458 }
3466 if ((rc = qeth_qdio_establish(card))){ 3459 if ((rc = qeth_qdio_establish(card))){
3467 QETH_DBF_TEXT_(setup, 2, "6err%d", rc); 3460 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
@@ -3795,12 +3788,16 @@ static inline int
3795qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb, 3788qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3796 struct qeth_hdr **hdr, int ipv) 3789 struct qeth_hdr **hdr, int ipv)
3797{ 3790{
3791 int rc;
3798#ifdef CONFIG_QETH_VLAN 3792#ifdef CONFIG_QETH_VLAN
3799 u16 *tag; 3793 u16 *tag;
3800#endif 3794#endif
3801 3795
3802 QETH_DBF_TEXT(trace, 6, "prepskb"); 3796 QETH_DBF_TEXT(trace, 6, "prepskb");
3803 3797
3798 rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
3799 if (rc)
3800 return rc;
3804#ifdef CONFIG_QETH_VLAN 3801#ifdef CONFIG_QETH_VLAN
3805 if (card->vlangrp && vlan_tx_tag_present(*skb) && 3802 if (card->vlangrp && vlan_tx_tag_present(*skb) &&
3806 ((ipv == 6) || card->options.layer2) ) { 3803 ((ipv == 6) || card->options.layer2) ) {
@@ -4251,7 +4248,8 @@ out:
4251} 4248}
4252 4249
4253static inline int 4250static inline int
4254qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb) 4251qeth_get_elements_no(struct qeth_card *card, void *hdr,
4252 struct sk_buff *skb, int elems)
4255{ 4253{
4256 int elements_needed = 0; 4254 int elements_needed = 0;
4257 4255
@@ -4261,9 +4259,10 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
4261 if (elements_needed == 0 ) 4259 if (elements_needed == 0 )
4262 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) 4260 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
4263 + skb->len) >> PAGE_SHIFT); 4261 + skb->len) >> PAGE_SHIFT);
4264 if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){ 4262 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
4265 PRINT_ERR("qeth_do_send_packet: invalid size of " 4263 PRINT_ERR("qeth_do_send_packet: invalid size of "
4266 "IP packet. Discarded."); 4264 "IP packet (Number=%d / Length=%d). Discarded.\n",
4265 (elements_needed+elems), skb->len);
4267 return 0; 4266 return 0;
4268 } 4267 }
4269 return elements_needed; 4268 return elements_needed;
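
qeth_get_elements_no() now takes the elements already consumed by the packet ('elems') into account, so the limit check covers the whole transfer rather than each piece in isolation, and the error message reports the total alongside the length. A simplified sketch of that accounting (the page math mirrors the driver's estimate; the limit is illustrative):

#include <stdio.h>

#define PAGE_BYTES           4096
#define MAX_BUFFER_ELEMENTS  16      /* illustrative per-buffer limit */

/* Elements needed for 'len' bytes starting at page offset 'off';
 * returns 0 (meaning "discard") if the running total would exceed
 * the per-buffer maximum.
 */
int elements_for_fragment(int off, int len, int already_used)
{
        int needed = 1 + (off + len) / PAGE_BYTES;

        if (already_used + needed > MAX_BUFFER_ELEMENTS) {
                fprintf(stderr, "packet too large (%d elements, %d bytes)\n",
                        already_used + needed, len);
                return 0;
        }
        return needed;
}
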
@@ -4275,7 +4274,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4275 int ipv = 0; 4274 int ipv = 0;
4276 int cast_type; 4275 int cast_type;
4277 struct qeth_qdio_out_q *queue; 4276 struct qeth_qdio_out_q *queue;
4278 struct qeth_hdr *hdr; 4277 struct qeth_hdr *hdr = NULL;
4279 int elements_needed = 0; 4278 int elements_needed = 0;
4280 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 4279 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4281 struct qeth_eddp_context *ctx = NULL; 4280 struct qeth_eddp_context *ctx = NULL;
@@ -4337,9 +4336,11 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4337 return -EINVAL; 4336 return -EINVAL;
4338 } 4337 }
4339 } else { 4338 } else {
4340 elements_needed += qeth_get_elements_no(card,(void*) hdr, skb); 4339 int elems = qeth_get_elements_no(card,(void*) hdr, skb,
4341 if (!elements_needed) 4340 elements_needed);
4341 if (!elems)
4342 return -EINVAL; 4342 return -EINVAL;
4343 elements_needed += elems;
4343 } 4344 }
4344 4345
4345 if (card->info.type != QETH_CARD_TYPE_IQD) 4346 if (card->info.type != QETH_CARD_TYPE_IQD)
@@ -4504,7 +4505,11 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4504 4505
4505 QETH_DBF_TEXT(trace,3,"arpstnoe"); 4506 QETH_DBF_TEXT(trace,3,"arpstnoe");
4506 4507
4507 /* TODO: really not supported by GuestLAN? */ 4508 /*
4509 * currently GuestLAN only supports the ARP assist function
4510 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
4511 * thus we say EOPNOTSUPP for this ARP function
4512 */
4508 if (card->info.guestlan) 4513 if (card->info.guestlan)
4509 return -EOPNOTSUPP; 4514 return -EOPNOTSUPP;
4510 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) { 4515 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
@@ -4681,14 +4686,6 @@ qeth_arp_query(struct qeth_card *card, char *udata)
4681 4686
4682 QETH_DBF_TEXT(trace,3,"arpquery"); 4687 QETH_DBF_TEXT(trace,3,"arpquery");
4683 4688
4684 /*
4685 * currently GuestLAN does only deliver all zeros on query arp,
4686 * even though arp processing is supported (according to IPA supp.
4687 * funcs flags); since all zeros is no valueable information,
4688 * we say EOPNOTSUPP for all ARP functions
4689 */
4690 /*if (card->info.guestlan)
4691 return -EOPNOTSUPP; */
4692 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 4689 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4693 IPA_ARP_PROCESSING)) { 4690 IPA_ARP_PROCESSING)) {
4694 PRINT_WARN("ARP processing not supported " 4691 PRINT_WARN("ARP processing not supported "
@@ -4894,10 +4891,9 @@ qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4894 QETH_DBF_TEXT(trace,3,"arpadent"); 4891 QETH_DBF_TEXT(trace,3,"arpadent");
4895 4892
4896 /* 4893 /*
4897 * currently GuestLAN does only deliver all zeros on query arp, 4894 * currently GuestLAN only supports the ARP assist function
4898 * even though arp processing is supported (according to IPA supp. 4895 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
4899 * funcs flags); since all zeros is no valueable information, 4896 * thus we say EOPNOTSUPP for this ARP function
4900 * we say EOPNOTSUPP for all ARP functions
4901 */ 4897 */
4902 if (card->info.guestlan) 4898 if (card->info.guestlan)
4903 return -EOPNOTSUPP; 4899 return -EOPNOTSUPP;
@@ -4937,10 +4933,9 @@ qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry
4937 QETH_DBF_TEXT(trace,3,"arprment"); 4933 QETH_DBF_TEXT(trace,3,"arprment");
4938 4934
4939 /* 4935 /*
4940 * currently GuestLAN does only deliver all zeros on query arp, 4936 * currently GuestLAN only supports the ARP assist function
4941 * even though arp processing is supported (according to IPA supp. 4937 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
4942 * funcs flags); since all zeros is no valueable information, 4938 * thus we say EOPNOTSUPP for this ARP function
4943 * we say EOPNOTSUPP for all ARP functions
4944 */ 4939 */
4945 if (card->info.guestlan) 4940 if (card->info.guestlan)
4946 return -EOPNOTSUPP; 4941 return -EOPNOTSUPP;
@@ -4978,11 +4973,10 @@ qeth_arp_flush_cache(struct qeth_card *card)
4978 QETH_DBF_TEXT(trace,3,"arpflush"); 4973 QETH_DBF_TEXT(trace,3,"arpflush");
4979 4974
4980 /* 4975 /*
4981 * currently GuestLAN does only deliver all zeros on query arp, 4976 * currently GuestLAN only supports the ARP assist function
4982 * even though arp processing is supported (according to IPA supp. 4977 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
4983 * funcs flags); since all zeros is no valueable information, 4978 * thus we say EOPNOTSUPP for this ARP function
4984 * we say EOPNOTSUPP for all ARP functions 4979 */
4985 */
4986 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) 4980 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
4987 return -EOPNOTSUPP; 4981 return -EOPNOTSUPP;
4988 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) { 4982 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
@@ -7038,14 +7032,16 @@ qeth_setrouting_v6(struct qeth_card *card)
7038} 7032}
7039 7033
7040int 7034int
7041qeth_set_large_send(struct qeth_card *card) 7035qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
7042{ 7036{
7043 int rc = 0; 7037 int rc = 0;
7044 7038
7045 if (card->dev == NULL) 7039 if (card->dev == NULL) {
7040 card->options.large_send = type;
7046 return 0; 7041 return 0;
7047 7042 }
7048 netif_stop_queue(card->dev); 7043 netif_stop_queue(card->dev);
7044 card->options.large_send = type;
7049 switch (card->options.large_send) { 7045 switch (card->options.large_send) {
7050 case QETH_LARGE_SEND_EDDP: 7046 case QETH_LARGE_SEND_EDDP:
7051 card->dev->features |= NETIF_F_TSO | NETIF_F_SG; 7047 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
@@ -7066,7 +7062,6 @@ qeth_set_large_send(struct qeth_card *card)
7066 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); 7062 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7067 break; 7063 break;
7068 } 7064 }
7069
7070 netif_wake_queue(card->dev); 7065 netif_wake_queue(card->dev);
7071 return rc; 7066 return rc;
7072} 7067}
@@ -8257,7 +8252,6 @@ qeth_init(void)
8257{ 8252{
8258 int rc=0; 8253 int rc=0;
8259 8254
8260 qeth_eyecatcher();
8261 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n", 8255 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
8262 version, VERSION_QETH_C, VERSION_QETH_H, 8256 version, VERSION_QETH_C, VERSION_QETH_H,
8263 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C, 8257 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
@@ -8338,7 +8332,6 @@ again:
8338 printk("qeth: removed\n"); 8332 printk("qeth: removed\n");
8339} 8333}
8340 8334
8341EXPORT_SYMBOL(qeth_eyecatcher);
8342module_init(qeth_init); 8335module_init(qeth_init);
8343module_exit(qeth_exit); 8336module_exit(qeth_exit);
8344MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>"); 8337MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 98bedb0cb387..dda105b73063 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.51 $) 3 * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.54 $)
4 * 4 *
5 * Linux on zSeries OSA Express and HiperSockets support 5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to sysfs. 6 * This file contains code related to sysfs.
@@ -20,7 +20,7 @@
20#include "qeth_mpc.h" 20#include "qeth_mpc.h"
21#include "qeth_fs.h" 21#include "qeth_fs.h"
22 22
23const char *VERSION_QETH_SYS_C = "$Revision: 1.51 $"; 23const char *VERSION_QETH_SYS_C = "$Revision: 1.54 $";
24 24
25/*****************************************************************************/ 25/*****************************************************************************/
26/* */ 26/* */
@@ -722,10 +722,13 @@ qeth_dev_layer2_store(struct device *dev, struct device_attribute *attr, const c
722 722
723 if (!card) 723 if (!card)
724 return -EINVAL; 724 return -EINVAL;
725 if (card->info.type == QETH_CARD_TYPE_IQD) {
726 PRINT_WARN("Layer2 on Hipersockets is not supported! \n");
727 return -EPERM;
728 }
725 729
726 if (((card->state != CARD_STATE_DOWN) && 730 if (((card->state != CARD_STATE_DOWN) &&
727 (card->state != CARD_STATE_RECOVER)) || 731 (card->state != CARD_STATE_RECOVER)))
728 (card->info.type != QETH_CARD_TYPE_OSAE))
729 return -EPERM; 732 return -EPERM;
730 733
731 i = simple_strtoul(buf, &tmp, 16); 734 i = simple_strtoul(buf, &tmp, 16);
@@ -771,9 +774,7 @@ qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, con
771 774
772 if (!card) 775 if (!card)
773 return -EINVAL; 776 return -EINVAL;
774
775 tmp = strsep((char **) &buf, "\n"); 777 tmp = strsep((char **) &buf, "\n");
776
777 if (!strcmp(tmp, "no")){ 778 if (!strcmp(tmp, "no")){
778 type = QETH_LARGE_SEND_NO; 779 type = QETH_LARGE_SEND_NO;
779 } else if (!strcmp(tmp, "EDDP")) { 780 } else if (!strcmp(tmp, "EDDP")) {
@@ -786,10 +787,8 @@ qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, con
786 } 787 }
787 if (card->options.large_send == type) 788 if (card->options.large_send == type)
788 return count; 789 return count;
789 card->options.large_send = type; 790 if ((rc = qeth_set_large_send(card, type)))
790 if ((rc = qeth_set_large_send(card)))
791 return rc; 791 return rc;
792
793 return count; 792 return count;
794} 793}
795 794
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index fc145307a7d4..d6a78f1a2f16 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
6 zfcp_fsf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \ 6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o 7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o
8 8
9obj-$(CONFIG_ZFCP) += zfcp.o 9obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bfe3ba73bc0f..0b5087f7cabc 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,95 +122,6 @@ _zfcp_hex_dump(char *addr, int count)
 
 #define ZFCP_LOG_AREA			ZFCP_LOG_AREA_OTHER
 
-static inline int
-zfcp_fsf_req_is_scsi_cmnd(struct zfcp_fsf_req *fsf_req)
-{
-	return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
-		!(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
-}
-
-void
-zfcp_cmd_dbf_event_fsf(const char *text, struct zfcp_fsf_req *fsf_req,
-		       void *add_data, int add_length)
-{
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct scsi_cmnd *scsi_cmnd;
-	int level = 3;
-	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->dbf_lock, flags);
-	if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
-		scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
-		debug_text_event(adapter->cmd_dbf, level, "fsferror");
-		debug_text_event(adapter->cmd_dbf, level, text);
-		debug_event(adapter->cmd_dbf, level, &fsf_req,
-			    sizeof (unsigned long));
-		debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
-			    sizeof (u32));
-		debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
-			    sizeof (unsigned long));
-		debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
-			    min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
-		for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
-			debug_event(adapter->cmd_dbf,
-				    level,
-				    (char *) add_data + i,
-				    min(ZFCP_CMD_DBF_LENGTH, add_length - i));
-	}
-	spin_unlock_irqrestore(&adapter->dbf_lock, flags);
-}
-
-/* XXX additionally log unit if available */
-/* ---> introduce new parameter for unit, see 2.4 code */
-void
-zfcp_cmd_dbf_event_scsi(const char *text, struct scsi_cmnd *scsi_cmnd)
-{
-	struct zfcp_adapter *adapter;
-	union zfcp_req_data *req_data;
-	struct zfcp_fsf_req *fsf_req;
-	int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
-	unsigned long flags;
-
-	adapter = (struct zfcp_adapter *) scsi_cmnd->device->host->hostdata[0];
-	req_data = (union zfcp_req_data *) scsi_cmnd->host_scribble;
-	fsf_req = (req_data ? req_data->send_fcp_command_task.fsf_req : NULL);
-	spin_lock_irqsave(&adapter->dbf_lock, flags);
-	debug_text_event(adapter->cmd_dbf, level, "hostbyte");
-	debug_text_event(adapter->cmd_dbf, level, text);
-	debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof (u32));
-	debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
-		    sizeof (unsigned long));
-	debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
-		    min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
-	if (likely(fsf_req)) {
-		debug_event(adapter->cmd_dbf, level, &fsf_req,
-			    sizeof (unsigned long));
-		debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
-			    sizeof (u32));
-	} else {
-		debug_text_event(adapter->cmd_dbf, level, "");
-		debug_text_event(adapter->cmd_dbf, level, "");
-	}
-	spin_unlock_irqrestore(&adapter->dbf_lock, flags);
-}
-
-void
-zfcp_in_els_dbf_event(struct zfcp_adapter *adapter, const char *text,
-		      struct fsf_status_read_buffer *status_buffer, int length)
-{
-	int level = 1;
-	int i;
-
-	debug_text_event(adapter->in_els_dbf, level, text);
-	debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
-	for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
-		debug_event(adapter->in_els_dbf,
-			    level,
-			    (char *) status_buffer->payload + i,
-			    min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
-}
-
 /**
  * zfcp_device_setup - setup function
  * @str: pointer to parameter string
@@ -1017,81 +928,6 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 	mempool_destroy(adapter->pool.data_gid_pn);
 }
 
-/**
- * zfcp_adapter_debug_register - registers debug feature for an adapter
- * @adapter: pointer to adapter for which debug features should be registered
- * return: -ENOMEM on error, 0 otherwise
- */
-int
-zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
-{
-	char dbf_name[20];
-
-	/* debug feature area which records SCSI command failures (hostbyte) */
-	spin_lock_init(&adapter->dbf_lock);
-
-	sprintf(dbf_name, ZFCP_CMD_DBF_NAME "%s",
-		zfcp_get_busid_by_adapter(adapter));
-	adapter->cmd_dbf = debug_register(dbf_name, ZFCP_CMD_DBF_INDEX,
-					  ZFCP_CMD_DBF_AREAS,
-					  ZFCP_CMD_DBF_LENGTH);
-	debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
-	debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
-
-	/* debug feature area which records SCSI command aborts */
-	sprintf(dbf_name, ZFCP_ABORT_DBF_NAME "%s",
-		zfcp_get_busid_by_adapter(adapter));
-	adapter->abort_dbf = debug_register(dbf_name, ZFCP_ABORT_DBF_INDEX,
-					    ZFCP_ABORT_DBF_AREAS,
-					    ZFCP_ABORT_DBF_LENGTH);
-	debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
-	debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
-
-	/* debug feature area which records incoming ELS commands */
-	sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME "%s",
-		zfcp_get_busid_by_adapter(adapter));
-	adapter->in_els_dbf = debug_register(dbf_name, ZFCP_IN_ELS_DBF_INDEX,
-					     ZFCP_IN_ELS_DBF_AREAS,
-					     ZFCP_IN_ELS_DBF_LENGTH);
-	debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
-	debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
-
-	/* debug feature area which records erp events */
-	sprintf(dbf_name, ZFCP_ERP_DBF_NAME "%s",
-		zfcp_get_busid_by_adapter(adapter));
-	adapter->erp_dbf = debug_register(dbf_name, ZFCP_ERP_DBF_INDEX,
-					  ZFCP_ERP_DBF_AREAS,
-					  ZFCP_ERP_DBF_LENGTH);
-	debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
-	debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
-
-	if (!(adapter->cmd_dbf && adapter->abort_dbf &&
-	      adapter->in_els_dbf && adapter->erp_dbf)) {
-		zfcp_adapter_debug_unregister(adapter);
-		return -ENOMEM;
-	}
-
-	return 0;
-
-}
-
-/**
- * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
- * @adapter: pointer to adapter for which debug features should be unregistered
- */
-void
-zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
-{
-	debug_unregister(adapter->abort_dbf);
-	debug_unregister(adapter->cmd_dbf);
-	debug_unregister(adapter->erp_dbf);
-	debug_unregister(adapter->in_els_dbf);
-	adapter->abort_dbf = NULL;
-	adapter->cmd_dbf = NULL;
-	adapter->erp_dbf = NULL;
-	adapter->in_els_dbf = NULL;
-}
-
 void
 zfcp_dummy_release(struct device *dev)
 {
@@ -1462,10 +1298,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
 	/* see FC-FS */
 	no_entries = (fcp_rscn_head->payload_len / 4);
 
-	zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer,
-			      fcp_rscn_head->payload_len);
-
-	debug_text_event(adapter->erp_dbf, 1, "unsol_els_rscn:");
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
 		fcp_rscn_element++;
@@ -1497,8 +1329,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
 			    (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
 				ZFCP_LOG_INFO("incoming RSCN, trying to open "
 					      "port 0x%016Lx\n", port->wwpn);
-				debug_text_event(adapter->erp_dbf, 1,
-						 "unsol_els_rscnu:");
 				zfcp_erp_port_reopen(port,
 						     ZFCP_STATUS_COMMON_ERP_FAILED);
 				continue;
@@ -1524,8 +1354,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
 			 */
 			ZFCP_LOG_INFO("incoming RSCN, trying to open "
 				      "port 0x%016Lx\n", port->wwpn);
-			debug_text_event(adapter->erp_dbf, 1,
-					 "unsol_els_rscnk:");
 			zfcp_test_link(port);
 		}
 	}
@@ -1541,8 +1369,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
 	struct zfcp_port *port;
 	unsigned long flags;
 
-	zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
-
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	list_for_each_entry(port, &adapter->port_list_head, list) {
 		if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn))
@@ -1556,8 +1382,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
 			       status_buffer->d_id,
 			       zfcp_get_busid_by_adapter(adapter));
 	} else {
-		debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
-		debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
 		zfcp_erp_port_forced_reopen(port, 0);
 	}
 }
@@ -1570,8 +1394,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
 	struct zfcp_port *port;
 	unsigned long flags;
 
-	zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
-
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	list_for_each_entry(port, &adapter->port_list_head, list) {
 		if (port->wwpn == els_logo->nport_wwpn)
@@ -1585,8 +1407,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
 			       status_buffer->d_id,
 			       zfcp_get_busid_by_adapter(adapter));
 	} else {
-		debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
-		debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
 		zfcp_erp_port_forced_reopen(port, 0);
 	}
 }
@@ -1595,7 +1415,6 @@ static void
 zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
 			      struct fsf_status_read_buffer *status_buffer)
 {
-	zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
 	ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
 			"for adapter %s\n", *(u32 *) (status_buffer->payload),
 			zfcp_get_busid_by_adapter(adapter));
@@ -1609,10 +1428,11 @@ zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
 	u32 els_type;
 	struct zfcp_adapter *adapter;
 
-	status_buffer = fsf_req->data.status_read.buffer;
+	status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
 	els_type = *(u32 *) (status_buffer->payload);
 	adapter = fsf_req->adapter;
 
+	zfcp_san_dbf_event_incoming_els(fsf_req);
 	if (els_type == LS_PLOGI)
 		zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
 	else if (els_type == LS_LOGO)
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index b30abab77da3..0fc46381fc22 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -202,19 +202,9 @@ static int
 zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
-	struct zfcp_port *port;
-	struct fc_rport *rport;
 
 	down(&zfcp_data.config_sema);
 	adapter = dev_get_drvdata(&ccw_device->dev);
-	/* might be racy, but we cannot take config_lock due to the fact that
-	   fc_remote_port_delete might sleep */
-	list_for_each_entry(port, &adapter->port_list_head, list)
-		if (port->rport) {
-			rport = port->rport;
-			port->rport = NULL;
-			fc_remote_port_delete(rport);
-		}
 	zfcp_erp_adapter_shutdown(adapter, 0);
 	zfcp_erp_wait(adapter);
 	zfcp_adapter_scsi_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
new file mode 100644
index 000000000000..826fb3b00605
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -0,0 +1,995 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_dbf.c
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * Debugging facilities
8 *
9 * (C) Copyright IBM Corp. 2005
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#define ZFCP_DBF_REVISION "$Revision$"
27
28#include <asm/debug.h>
29#include <linux/ctype.h>
30#include "zfcp_ext.h"
31
32static u32 dbfsize = 4;
33
34module_param(dbfsize, uint, 0400);
35MODULE_PARM_DESC(dbfsize,
36 "number of pages for each debug feature area (default 4)");
37
38#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
39
40static inline int
41zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
42{
43 unsigned long long sec;
44 struct timespec xtime;
45 int len = 0;
46
47 stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
48 sec = stck >> 12;
49 do_div(sec, 1000000);
50 xtime.tv_sec = sec;
51 stck -= (sec * 1000000) << 12;
52 xtime.tv_nsec = ((stck * 1000) >> 12);
53 len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
54 label, xtime.tv_sec, xtime.tv_nsec);
55
56 return len;
57}
58
59static int zfcp_dbf_tag(char *out_buf, const char *label, const char *tag)
60{
61 int len = 0, i;
62
63 len += sprintf(out_buf + len, "%-24s", label);
64 for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
65 len += sprintf(out_buf + len, "%c", tag[i]);
66 len += sprintf(out_buf + len, "\n");
67
68 return len;
69}
70
71static int
72zfcp_dbf_view(char *out_buf, const char *label, const char *format, ...)
73{
74 va_list arg;
75 int len = 0;
76
77 len += sprintf(out_buf + len, "%-24s", label);
78 va_start(arg, format);
79 len += vsprintf(out_buf + len, format, arg);
80 va_end(arg);
81 len += sprintf(out_buf + len, "\n");
82
83 return len;
84}
85
86static int
87zfcp_dbf_view_dump(char *out_buf, const char *label,
88 char *buffer, int buflen, int offset, int total_size)
89{
90 int len = 0;
91
92 if (offset == 0)
93 len += sprintf(out_buf + len, "%-24s ", label);
94
95 while (buflen--) {
96 if (offset > 0) {
97 if ((offset % 32) == 0)
98 len += sprintf(out_buf + len, "\n%-24c ", ' ');
99 else if ((offset % 4) == 0)
100 len += sprintf(out_buf + len, " ");
101 }
102 len += sprintf(out_buf + len, "%02x", *buffer++);
103 if (++offset == total_size) {
104 len += sprintf(out_buf + len, "\n");
105 break;
106 }
107 }
108
109 if (total_size == 0)
110 len += sprintf(out_buf + len, "\n");
111
112 return len;
113}
114
115static inline int
116zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
117 debug_entry_t * entry, char *out_buf)
118{
119 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
120 int len = 0;
121
122 if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
123 len += zfcp_dbf_stck(out_buf + len, "timestamp",
124 entry->id.stck);
125 len += zfcp_dbf_view(out_buf + len, "cpu", "%02i",
126 entry->id.fields.cpuid);
127 } else {
128 len += zfcp_dbf_view_dump(out_buf + len, NULL,
129 dump->data,
130 dump->size,
131 dump->offset, dump->total_size);
132 if ((dump->offset + dump->size) == dump->total_size)
133 len += sprintf(out_buf + len, "\n");
134 }
135
136 return len;
137}
138
139inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
140{
141 struct zfcp_adapter *adapter = fsf_req->adapter;
142 struct fsf_qtcb *qtcb = fsf_req->qtcb;
143 union fsf_prot_status_qual *prot_status_qual =
144 &qtcb->prefix.prot_status_qual;
145 union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
146 struct scsi_cmnd *scsi_cmnd;
147 struct zfcp_port *port;
148 struct zfcp_unit *unit;
149 struct zfcp_send_els *send_els;
150 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
151 struct zfcp_hba_dbf_record_response *response = &rec->type.response;
152 int level;
153 unsigned long flags;
154
155 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
156 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
157 strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
158
159 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
160 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
161 strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
162 level = 1;
163 } else if (qtcb->header.fsf_status != FSF_GOOD) {
164 strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
165 level = 1;
166 } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
167 (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
168 strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
169 level = 4;
170 } else if ((prot_status_qual->doubleword[0] != 0) ||
171 (prot_status_qual->doubleword[1] != 0) ||
172 (fsf_status_qual->doubleword[0] != 0) ||
173 (fsf_status_qual->doubleword[1] != 0)) {
174 strncpy(rec->tag2, "qual", ZFCP_DBF_TAG_SIZE);
175 level = 3;
176 } else {
177 strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
178 level = 6;
179 }
180
181 response->fsf_command = fsf_req->fsf_command;
182 response->fsf_reqid = (unsigned long)fsf_req;
183 response->fsf_seqno = fsf_req->seq_no;
184 response->fsf_issued = fsf_req->issued;
185 response->fsf_prot_status = qtcb->prefix.prot_status;
186 response->fsf_status = qtcb->header.fsf_status;
187 memcpy(response->fsf_prot_status_qual,
188 prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
189 memcpy(response->fsf_status_qual,
190 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
191 response->fsf_req_status = fsf_req->status;
192 response->sbal_first = fsf_req->sbal_first;
193 response->sbal_curr = fsf_req->sbal_curr;
194 response->sbal_last = fsf_req->sbal_last;
195 response->pool = fsf_req->pool != NULL;
196 response->erp_action = (unsigned long)fsf_req->erp_action;
197
198 switch (fsf_req->fsf_command) {
199 case FSF_QTCB_FCP_CMND:
200 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
201 break;
202 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
203 if (scsi_cmnd != NULL) {
204 response->data.send_fcp.scsi_cmnd
205 = (unsigned long)scsi_cmnd;
206 response->data.send_fcp.scsi_serial
207 = scsi_cmnd->serial_number;
208 }
209 break;
210
211 case FSF_QTCB_OPEN_PORT_WITH_DID:
212 case FSF_QTCB_CLOSE_PORT:
213 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
214 port = (struct zfcp_port *)fsf_req->data;
215 response->data.port.wwpn = port->wwpn;
216 response->data.port.d_id = port->d_id;
217 response->data.port.port_handle = qtcb->header.port_handle;
218 break;
219
220 case FSF_QTCB_OPEN_LUN:
221 case FSF_QTCB_CLOSE_LUN:
222 unit = (struct zfcp_unit *)fsf_req->data;
223 port = unit->port;
224 response->data.unit.wwpn = port->wwpn;
225 response->data.unit.fcp_lun = unit->fcp_lun;
226 response->data.unit.port_handle = qtcb->header.port_handle;
227 response->data.unit.lun_handle = qtcb->header.lun_handle;
228 break;
229
230 case FSF_QTCB_SEND_ELS:
231 send_els = (struct zfcp_send_els *)fsf_req->data;
232 response->data.send_els.d_id = qtcb->bottom.support.d_id;
233 response->data.send_els.ls_code = send_els->ls_code >> 24;
234 break;
235
236 case FSF_QTCB_ABORT_FCP_CMND:
237 case FSF_QTCB_SEND_GENERIC:
238 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
239 case FSF_QTCB_EXCHANGE_PORT_DATA:
240 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
241 case FSF_QTCB_UPLOAD_CONTROL_FILE:
242 break;
243 }
244
245 debug_event(adapter->hba_dbf, level,
246 rec, sizeof(struct zfcp_hba_dbf_record));
247 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
248}
249
250inline void
251zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
252 struct fsf_status_read_buffer *status_buffer)
253{
254 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
255 unsigned long flags;
256
257 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
258 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
259 strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
260 strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
261
262 rec->type.status.failed = adapter->status_read_failed;
263 if (status_buffer != NULL) {
264 rec->type.status.status_type = status_buffer->status_type;
265 rec->type.status.status_subtype = status_buffer->status_subtype;
266 memcpy(&rec->type.status.queue_designator,
267 &status_buffer->queue_designator,
268 sizeof(struct fsf_queue_designator));
269
270 switch (status_buffer->status_type) {
271 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
272 rec->type.status.payload_size =
273 ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
274 break;
275
276 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
277 rec->type.status.payload_size =
278 ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
279 break;
280
281 case FSF_STATUS_READ_LINK_DOWN:
282 switch (status_buffer->status_subtype) {
283 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
284 case FSF_STATUS_READ_SUB_FDISC_FAILED:
285 rec->type.status.payload_size =
286 sizeof(struct fsf_link_down_info);
287 }
288 break;
289
290 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
291 rec->type.status.payload_size =
292 ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
293 break;
294 }
295 memcpy(&rec->type.status.payload,
296 &status_buffer->payload, rec->type.status.payload_size);
297 }
298
299 debug_event(adapter->hba_dbf, 2,
300 rec, sizeof(struct zfcp_hba_dbf_record));
301 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
302}
303
304inline void
305zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
306 unsigned int qdio_error, unsigned int siga_error,
307 int sbal_index, int sbal_count)
308{
309 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
310 unsigned long flags;
311
312 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
313 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
314 strncpy(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE);
315 rec->type.qdio.status = status;
316 rec->type.qdio.qdio_error = qdio_error;
317 rec->type.qdio.siga_error = siga_error;
318 rec->type.qdio.sbal_index = sbal_index;
319 rec->type.qdio.sbal_count = sbal_count;
320 debug_event(adapter->hba_dbf, 0,
321 rec, sizeof(struct zfcp_hba_dbf_record));
322 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
323}
324
325static inline int
326zfcp_hba_dbf_view_response(char *out_buf,
327 struct zfcp_hba_dbf_record_response *rec)
328{
329 int len = 0;
330
331 len += zfcp_dbf_view(out_buf + len, "fsf_command", "0x%08x",
332 rec->fsf_command);
333 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
334 rec->fsf_reqid);
335 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
336 rec->fsf_seqno);
337 len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
338 len += zfcp_dbf_view(out_buf + len, "fsf_prot_status", "0x%08x",
339 rec->fsf_prot_status);
340 len += zfcp_dbf_view(out_buf + len, "fsf_status", "0x%08x",
341 rec->fsf_status);
342 len += zfcp_dbf_view_dump(out_buf + len, "fsf_prot_status_qual",
343 rec->fsf_prot_status_qual,
344 FSF_PROT_STATUS_QUAL_SIZE,
345 0, FSF_PROT_STATUS_QUAL_SIZE);
346 len += zfcp_dbf_view_dump(out_buf + len, "fsf_status_qual",
347 rec->fsf_status_qual,
348 FSF_STATUS_QUALIFIER_SIZE,
349 0, FSF_STATUS_QUALIFIER_SIZE);
350 len += zfcp_dbf_view(out_buf + len, "fsf_req_status", "0x%08x",
351 rec->fsf_req_status);
352 len += zfcp_dbf_view(out_buf + len, "sbal_first", "0x%02x",
353 rec->sbal_first);
354 len += zfcp_dbf_view(out_buf + len, "sbal_curr", "0x%02x",
355 rec->sbal_curr);
356 len += zfcp_dbf_view(out_buf + len, "sbal_last", "0x%02x",
357 rec->sbal_last);
358 len += zfcp_dbf_view(out_buf + len, "pool", "0x%02x", rec->pool);
359
360 switch (rec->fsf_command) {
361 case FSF_QTCB_FCP_CMND:
362 if (rec->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
363 break;
364 len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
365 rec->data.send_fcp.scsi_cmnd);
366 len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
367 rec->data.send_fcp.scsi_serial);
368 break;
369
370 case FSF_QTCB_OPEN_PORT_WITH_DID:
371 case FSF_QTCB_CLOSE_PORT:
372 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
373 len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
374 rec->data.port.wwpn);
375 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
376 rec->data.port.d_id);
377 len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
378 rec->data.port.port_handle);
379 break;
380
381 case FSF_QTCB_OPEN_LUN:
382 case FSF_QTCB_CLOSE_LUN:
383 len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
384 rec->data.unit.wwpn);
385 len += zfcp_dbf_view(out_buf + len, "fcp_lun", "0x%016Lx",
386 rec->data.unit.fcp_lun);
387 len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
388 rec->data.unit.port_handle);
389 len += zfcp_dbf_view(out_buf + len, "lun_handle", "0x%08x",
390 rec->data.unit.lun_handle);
391 break;
392
393 case FSF_QTCB_SEND_ELS:
394 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
395 rec->data.send_els.d_id);
396 len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
397 rec->data.send_els.ls_code);
398 break;
399
400 case FSF_QTCB_ABORT_FCP_CMND:
401 case FSF_QTCB_SEND_GENERIC:
402 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
403 case FSF_QTCB_EXCHANGE_PORT_DATA:
404 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
405 case FSF_QTCB_UPLOAD_CONTROL_FILE:
406 break;
407 }
408
409 return len;
410}
411
412static inline int
413zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
414{
415 int len = 0;
416
417 len += zfcp_dbf_view(out_buf + len, "failed", "0x%02x", rec->failed);
418 len += zfcp_dbf_view(out_buf + len, "status_type", "0x%08x",
419 rec->status_type);
420 len += zfcp_dbf_view(out_buf + len, "status_subtype", "0x%08x",
421 rec->status_subtype);
422 len += zfcp_dbf_view_dump(out_buf + len, "queue_designator",
423 (char *)&rec->queue_designator,
424 sizeof(struct fsf_queue_designator),
425 0, sizeof(struct fsf_queue_designator));
426 len += zfcp_dbf_view_dump(out_buf + len, "payload",
427 (char *)&rec->payload,
428 rec->payload_size, 0, rec->payload_size);
429
430 return len;
431}
432
433static inline int
434zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
435{
436 int len = 0;
437
438 len += zfcp_dbf_view(out_buf + len, "status", "0x%08x", rec->status);
439 len += zfcp_dbf_view(out_buf + len, "qdio_error", "0x%08x",
440 rec->qdio_error);
441 len += zfcp_dbf_view(out_buf + len, "siga_error", "0x%08x",
442 rec->siga_error);
443 len += zfcp_dbf_view(out_buf + len, "sbal_index", "0x%02x",
444 rec->sbal_index);
445 len += zfcp_dbf_view(out_buf + len, "sbal_count", "0x%02x",
446 rec->sbal_count);
447
448 return len;
449}
450
451static int
452zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
453 char *out_buf, const char *in_buf)
454{
455 struct zfcp_hba_dbf_record *rec = (struct zfcp_hba_dbf_record *)in_buf;
456 int len = 0;
457
458 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
459 return 0;
460
461 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
462 if (isalpha(rec->tag2[0]))
463 len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
464 if (strncmp(rec->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
465 len += zfcp_hba_dbf_view_response(out_buf + len,
466 &rec->type.response);
467 else if (strncmp(rec->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
468 len += zfcp_hba_dbf_view_status(out_buf + len,
469 &rec->type.status);
470 else if (strncmp(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
471 len += zfcp_hba_dbf_view_qdio(out_buf + len, &rec->type.qdio);
472
473 len += sprintf(out_buf + len, "\n");
474
475 return len;
476}
477
478struct debug_view zfcp_hba_dbf_view = {
479 "structured",
480 NULL,
481 &zfcp_dbf_view_header,
482 &zfcp_hba_dbf_view_format,
483 NULL,
484 NULL
485};
486
487inline void
488_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
489 u32 s_id, u32 d_id, void *buffer, int buflen)
490{
491 struct zfcp_send_ct *send_ct = (struct zfcp_send_ct *)fsf_req->data;
492 struct zfcp_port *port = send_ct->port;
493 struct zfcp_adapter *adapter = port->adapter;
494 struct ct_hdr *header = (struct ct_hdr *)buffer;
495 struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
496 struct zfcp_san_dbf_record_ct *ct = &rec->type.ct;
497 unsigned long flags;
498
499 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
500 memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
501 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
502 rec->fsf_reqid = (unsigned long)fsf_req;
503 rec->fsf_seqno = fsf_req->seq_no;
504 rec->s_id = s_id;
505 rec->d_id = d_id;
506 if (strncmp(tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
507 ct->type.request.cmd_req_code = header->cmd_rsp_code;
508 ct->type.request.revision = header->revision;
509 ct->type.request.gs_type = header->gs_type;
510 ct->type.request.gs_subtype = header->gs_subtype;
511 ct->type.request.options = header->options;
512 ct->type.request.max_res_size = header->max_res_size;
513 } else if (strncmp(tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
514 ct->type.response.cmd_rsp_code = header->cmd_rsp_code;
515 ct->type.response.revision = header->revision;
516 ct->type.response.reason_code = header->reason_code;
517 ct->type.response.reason_code_expl = header->reason_code_expl;
518 ct->type.response.vendor_unique = header->vendor_unique;
519 }
520 ct->payload_size =
521 min(buflen - (int)sizeof(struct ct_hdr), ZFCP_DBF_CT_PAYLOAD);
522 memcpy(ct->payload, buffer + sizeof(struct ct_hdr), ct->payload_size);
523 debug_event(adapter->san_dbf, 3,
524 rec, sizeof(struct zfcp_san_dbf_record));
525 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
526}
527
528inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
529{
530 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
531 struct zfcp_port *port = ct->port;
532 struct zfcp_adapter *adapter = port->adapter;
533
534 _zfcp_san_dbf_event_common_ct("octc", fsf_req,
535 fc_host_port_id(adapter->scsi_host),
536 port->d_id, zfcp_sg_to_address(ct->req),
537 ct->req->length);
538}
539
540inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
541{
542 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
543 struct zfcp_port *port = ct->port;
544 struct zfcp_adapter *adapter = port->adapter;
545
546 _zfcp_san_dbf_event_common_ct("rctc", fsf_req, port->d_id,
547 fc_host_port_id(adapter->scsi_host),
548 zfcp_sg_to_address(ct->resp),
549 ct->resp->length);
550}
551
552static inline void
553_zfcp_san_dbf_event_common_els(const char *tag, int level,
554 struct zfcp_fsf_req *fsf_req, u32 s_id,
555 u32 d_id, u8 ls_code, void *buffer, int buflen)
556{
557 struct zfcp_adapter *adapter = fsf_req->adapter;
558 struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
559 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
560 unsigned long flags;
561 int offset = 0;
562
563 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
564 do {
565 memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
566 if (offset == 0) {
567 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
568 rec->fsf_reqid = (unsigned long)fsf_req;
569 rec->fsf_seqno = fsf_req->seq_no;
570 rec->s_id = s_id;
571 rec->d_id = d_id;
572 rec->type.els.ls_code = ls_code;
573 buflen = min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD);
574 rec->type.els.payload_size = buflen;
575 memcpy(rec->type.els.payload,
576 buffer, min(buflen, ZFCP_DBF_ELS_PAYLOAD));
577 offset += min(buflen, ZFCP_DBF_ELS_PAYLOAD);
578 } else {
579 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
580 dump->total_size = buflen;
581 dump->offset = offset;
582 dump->size = min(buflen - offset,
583 (int)sizeof(struct zfcp_san_dbf_record)
584 - (int)sizeof(struct zfcp_dbf_dump));
585 memcpy(dump->data, buffer + offset, dump->size);
586 offset += dump->size;
587 }
588 debug_event(adapter->san_dbf, level,
589 rec, sizeof(struct zfcp_san_dbf_record));
590 } while (offset < buflen);
591 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
592}
593
594inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
595{
596 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
597
598 _zfcp_san_dbf_event_common_els("oels", 2, fsf_req,
599 fc_host_port_id(els->adapter->scsi_host),
600 els->d_id,
601 *(u8 *) zfcp_sg_to_address(els->req),
602 zfcp_sg_to_address(els->req),
603 els->req->length);
604}
605
606inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
607{
608 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
609
610 _zfcp_san_dbf_event_common_els("rels", 2, fsf_req, els->d_id,
611 fc_host_port_id(els->adapter->scsi_host),
612 *(u8 *) zfcp_sg_to_address(els->req),
613 zfcp_sg_to_address(els->resp),
614 els->resp->length);
615}
616
617inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
618{
619 struct zfcp_adapter *adapter = fsf_req->adapter;
620 struct fsf_status_read_buffer *status_buffer =
621 (struct fsf_status_read_buffer *)fsf_req->data;
622 int length = (int)status_buffer->length -
623 (int)((void *)&status_buffer->payload - (void *)status_buffer);
624
625 _zfcp_san_dbf_event_common_els("iels", 1, fsf_req, status_buffer->d_id,
626 fc_host_port_id(adapter->scsi_host),
627 *(u8 *) status_buffer->payload,
628 (void *)status_buffer->payload, length);
629}
630
631static int
632zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
633 char *out_buf, const char *in_buf)
634{
635 struct zfcp_san_dbf_record *rec = (struct zfcp_san_dbf_record *)in_buf;
636 char *buffer = NULL;
637 int buflen = 0, total = 0;
638 int len = 0;
639
640 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
641 return 0;
642
643 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
644 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
645 rec->fsf_reqid);
646 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
647 rec->fsf_seqno);
648 len += zfcp_dbf_view(out_buf + len, "s_id", "0x%06x", rec->s_id);
649 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", rec->d_id);
650
651 if (strncmp(rec->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
652 len += zfcp_dbf_view(out_buf + len, "cmd_req_code", "0x%04x",
653 rec->type.ct.type.request.cmd_req_code);
654 len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
655 rec->type.ct.type.request.revision);
656 len += zfcp_dbf_view(out_buf + len, "gs_type", "0x%02x",
657 rec->type.ct.type.request.gs_type);
658 len += zfcp_dbf_view(out_buf + len, "gs_subtype", "0x%02x",
659 rec->type.ct.type.request.gs_subtype);
660 len += zfcp_dbf_view(out_buf + len, "options", "0x%02x",
661 rec->type.ct.type.request.options);
662 len += zfcp_dbf_view(out_buf + len, "max_res_size", "0x%04x",
663 rec->type.ct.type.request.max_res_size);
664 total = rec->type.ct.payload_size;
665 buffer = rec->type.ct.payload;
666 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
667 } else if (strncmp(rec->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
668 len += zfcp_dbf_view(out_buf + len, "cmd_rsp_code", "0x%04x",
669 rec->type.ct.type.response.cmd_rsp_code);
670 len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
671 rec->type.ct.type.response.revision);
672 len += zfcp_dbf_view(out_buf + len, "reason_code", "0x%02x",
673 rec->type.ct.type.response.reason_code);
674 len +=
675 zfcp_dbf_view(out_buf + len, "reason_code_expl", "0x%02x",
676 rec->type.ct.type.response.reason_code_expl);
677 len +=
678 zfcp_dbf_view(out_buf + len, "vendor_unique", "0x%02x",
679 rec->type.ct.type.response.vendor_unique);
680 total = rec->type.ct.payload_size;
681 buffer = rec->type.ct.payload;
682 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
683 } else if (strncmp(rec->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
684 strncmp(rec->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
685 strncmp(rec->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
686 len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
687 rec->type.els.ls_code);
688 total = rec->type.els.payload_size;
689 buffer = rec->type.els.payload;
690 buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
691 }
692
693 len += zfcp_dbf_view_dump(out_buf + len, "payload",
694 buffer, buflen, 0, total);
695
696 if (buflen == total)
697 len += sprintf(out_buf + len, "\n");
698
699 return len;
700}
701
702struct debug_view zfcp_san_dbf_view = {
703 "structured",
704 NULL,
705 &zfcp_dbf_view_header,
706 &zfcp_san_dbf_view_format,
707 NULL,
708 NULL
709};
710
711static inline void
712_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
713 struct zfcp_adapter *adapter,
714 struct scsi_cmnd *scsi_cmnd,
715 struct zfcp_fsf_req *new_fsf_req)
716{
717 struct zfcp_fsf_req *fsf_req =
718 (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
719 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
720 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
721 unsigned long flags;
722 struct fcp_rsp_iu *fcp_rsp;
723 char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
724 int offset = 0, buflen = 0;
725
726 spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
727 do {
728 memset(rec, 0, sizeof(struct zfcp_scsi_dbf_record));
729 if (offset == 0) {
730 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
731 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
732 if (scsi_cmnd->device) {
733 rec->scsi_id = scsi_cmnd->device->id;
734 rec->scsi_lun = scsi_cmnd->device->lun;
735 }
736 rec->scsi_result = scsi_cmnd->result;
737 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
738 rec->scsi_serial = scsi_cmnd->serial_number;
739 memcpy(rec->scsi_opcode,
740 &scsi_cmnd->cmnd,
741 min((int)scsi_cmnd->cmd_len,
742 ZFCP_DBF_SCSI_OPCODE));
743 rec->scsi_retries = scsi_cmnd->retries;
744 rec->scsi_allowed = scsi_cmnd->allowed;
745 if (fsf_req != NULL) {
746 fcp_rsp = (struct fcp_rsp_iu *)
747 &(fsf_req->qtcb->bottom.io.fcp_rsp);
748 fcp_rsp_info =
749 zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
750 fcp_sns_info =
751 zfcp_get_fcp_sns_info_ptr(fcp_rsp);
752
753 rec->type.fcp.rsp_validity =
754 fcp_rsp->validity.value;
755 rec->type.fcp.rsp_scsi_status =
756 fcp_rsp->scsi_status;
757 rec->type.fcp.rsp_resid = fcp_rsp->fcp_resid;
758 if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
759 rec->type.fcp.rsp_code =
760 *(fcp_rsp_info + 3);
761 if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
762 buflen = min((int)fcp_rsp->fcp_sns_len,
763 ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
764 rec->type.fcp.sns_info_len = buflen;
765 memcpy(rec->type.fcp.sns_info,
766 fcp_sns_info,
767 min(buflen,
768 ZFCP_DBF_SCSI_FCP_SNS_INFO));
769 offset += min(buflen,
770 ZFCP_DBF_SCSI_FCP_SNS_INFO);
771 }
772
773 rec->fsf_reqid = (unsigned long)fsf_req;
774 rec->fsf_seqno = fsf_req->seq_no;
775 rec->fsf_issued = fsf_req->issued;
776 }
777 if (new_fsf_req != NULL) {
778 rec->type.new_fsf_req.fsf_reqid =
779 (unsigned long)
780 new_fsf_req;
781 rec->type.new_fsf_req.fsf_seqno =
782 new_fsf_req->seq_no;
783 rec->type.new_fsf_req.fsf_issued =
784 new_fsf_req->issued;
785 }
786 } else {
787 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
788 dump->total_size = buflen;
789 dump->offset = offset;
790 dump->size = min(buflen - offset,
791 (int)sizeof(struct
792 zfcp_scsi_dbf_record) -
793 (int)sizeof(struct zfcp_dbf_dump));
794 memcpy(dump->data, fcp_sns_info + offset, dump->size);
795 offset += dump->size;
796 }
797 debug_event(adapter->scsi_dbf, level,
798 rec, sizeof(struct zfcp_scsi_dbf_record));
799 } while (offset < buflen);
800 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
801}
802
803inline void
804zfcp_scsi_dbf_event_result(const char *tag, int level,
805 struct zfcp_adapter *adapter,
806 struct scsi_cmnd *scsi_cmnd)
807{
808 _zfcp_scsi_dbf_event_common("rslt",
809 tag, level, adapter, scsi_cmnd, NULL);
810}
811
812inline void
813zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
814 struct scsi_cmnd *scsi_cmnd,
815 struct zfcp_fsf_req *new_fsf_req)
816{
817 _zfcp_scsi_dbf_event_common("abrt",
818 tag, 1, adapter, scsi_cmnd, new_fsf_req);
819}
820
821inline void
822zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
823 struct scsi_cmnd *scsi_cmnd)
824{
825 struct zfcp_adapter *adapter = unit->port->adapter;
826
827 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
828 tag, 1, adapter, scsi_cmnd, NULL);
829}
830
831static int
832zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
833 char *out_buf, const char *in_buf)
834{
835 struct zfcp_scsi_dbf_record *rec =
836 (struct zfcp_scsi_dbf_record *)in_buf;
837 int len = 0;
838
839 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
840 return 0;
841
842 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
843 len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
844 len += zfcp_dbf_view(out_buf + len, "scsi_id", "0x%08x", rec->scsi_id);
845 len += zfcp_dbf_view(out_buf + len, "scsi_lun", "0x%08x",
846 rec->scsi_lun);
847 len += zfcp_dbf_view(out_buf + len, "scsi_result", "0x%08x",
848 rec->scsi_result);
849 len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
850 rec->scsi_cmnd);
851 len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
852 rec->scsi_serial);
853 len += zfcp_dbf_view_dump(out_buf + len, "scsi_opcode",
854 rec->scsi_opcode,
855 ZFCP_DBF_SCSI_OPCODE,
856 0, ZFCP_DBF_SCSI_OPCODE);
857 len += zfcp_dbf_view(out_buf + len, "scsi_retries", "0x%02x",
858 rec->scsi_retries);
859 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
860 rec->scsi_allowed);
861 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
862 rec->fsf_reqid);
863 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
864 rec->fsf_seqno);
865 len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
866 if (strncmp(rec->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
867 len +=
868 zfcp_dbf_view(out_buf + len, "fcp_rsp_validity", "0x%02x",
869 rec->type.fcp.rsp_validity);
870 len +=
871 zfcp_dbf_view(out_buf + len, "fcp_rsp_scsi_status",
872 "0x%02x", rec->type.fcp.rsp_scsi_status);
873 len +=
874 zfcp_dbf_view(out_buf + len, "fcp_rsp_resid", "0x%08x",
875 rec->type.fcp.rsp_resid);
876 len +=
877 zfcp_dbf_view(out_buf + len, "fcp_rsp_code", "0x%08x",
878 rec->type.fcp.rsp_code);
879 len +=
880 zfcp_dbf_view(out_buf + len, "fcp_sns_info_len", "0x%08x",
881 rec->type.fcp.sns_info_len);
882 len +=
883 zfcp_dbf_view_dump(out_buf + len, "fcp_sns_info",
884 rec->type.fcp.sns_info,
885 min((int)rec->type.fcp.sns_info_len,
886 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
887 rec->type.fcp.sns_info_len);
888 } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
889 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
890 rec->type.new_fsf_req.fsf_reqid);
891 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
892 rec->type.new_fsf_req.fsf_seqno);
893 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
894 rec->type.new_fsf_req.fsf_issued);
895 } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
896 (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
897 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
898 rec->type.new_fsf_req.fsf_reqid);
899 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
900 rec->type.new_fsf_req.fsf_seqno);
901 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
902 rec->type.new_fsf_req.fsf_issued);
903 }
904
905 len += sprintf(out_buf + len, "\n");
906
907 return len;
908}
909
910struct debug_view zfcp_scsi_dbf_view = {
911 "structured",
912 NULL,
913 &zfcp_dbf_view_header,
914 &zfcp_scsi_dbf_view_format,
915 NULL,
916 NULL
917};
918
919/**
920 * zfcp_adapter_debug_register - registers debug feature for an adapter
921 * @adapter: pointer to adapter for which debug features should be registered
922 * return: -ENOMEM on error, 0 otherwise
923 */
924int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
925{
926 char dbf_name[DEBUG_MAX_NAME_LEN];
927
928 /* debug feature area which records recovery activity */
929 spin_lock_init(&adapter->erp_dbf_lock);
930 sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter));
931 adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2,
932 sizeof(struct zfcp_erp_dbf_record));
933 if (!adapter->erp_dbf)
934 goto failed;
935 debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
936 debug_set_level(adapter->erp_dbf, 3);
937
938 /* debug feature area which records HBA (FSF and QDIO) conditions */
939 spin_lock_init(&adapter->hba_dbf_lock);
940 sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
941 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
942 sizeof(struct zfcp_hba_dbf_record));
943 if (!adapter->hba_dbf)
944 goto failed;
945 debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
946 debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
947 debug_set_level(adapter->hba_dbf, 3);
948
949 /* debug feature area which records SAN command failures and recovery */
950 spin_lock_init(&adapter->san_dbf_lock);
951 sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
952 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
953 sizeof(struct zfcp_san_dbf_record));
954 if (!adapter->san_dbf)
955 goto failed;
956 debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
957 debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
958 debug_set_level(adapter->san_dbf, 6);
959
960 /* debug feature area which records SCSI command failures and recovery */
961 spin_lock_init(&adapter->scsi_dbf_lock);
962 sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
963 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
964 sizeof(struct zfcp_scsi_dbf_record));
965 if (!adapter->scsi_dbf)
966 goto failed;
967 debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
968 debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
969 debug_set_level(adapter->scsi_dbf, 3);
970
971 return 0;
972
973 failed:
974 zfcp_adapter_debug_unregister(adapter);
975
976 return -ENOMEM;
977}
978
979/**
980 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
981 * @adapter: pointer to adapter for which debug features should be unregistered
982 */
983void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
984{
985 debug_unregister(adapter->scsi_dbf);
986 debug_unregister(adapter->san_dbf);
987 debug_unregister(adapter->hba_dbf);
988 debug_unregister(adapter->erp_dbf);
989 adapter->scsi_dbf = NULL;
990 adapter->san_dbf = NULL;
991 adapter->hba_dbf = NULL;
992 adapter->erp_dbf = NULL;
993}
994
995#undef ZFCP_LOG_AREA
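The new zfcp_adapter_debug_register() above sets up four debug areas in sequence and, on the first failure, branches to a single cleanup label that calls zfcp_adapter_debug_unregister(); because the unregister routine resets every pointer to NULL, it is safe to run against partially initialized state. A small userspace sketch of that allocate-or-unwind idiom (struct areas and the malloc-backed helpers are illustrative only, not driver code):

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	struct areas { void *erp, *hba, *san, *scsi; };

	/* Mirrors the unregister routine: tolerates partially built state. */
	static void areas_free(struct areas *a)
	{
		free(a->erp);
		free(a->hba);
		free(a->san);
		free(a->scsi);
		memset(a, 0, sizeof(*a));	/* leave every pointer NULL afterwards */
	}

	static int areas_alloc(struct areas *a)
	{
		memset(a, 0, sizeof(*a));
		if (!(a->erp = malloc(16)))
			goto failed;
		if (!(a->hba = malloc(16)))
			goto failed;
		if (!(a->san = malloc(16)))
			goto failed;
		if (!(a->scsi = malloc(16)))
			goto failed;
		return 0;
	 failed:
		areas_free(a);		/* one shared unwind path, as in the driver */
		return -ENOMEM;
	}

	int main(void)
	{
		struct areas a;

		if (areas_alloc(&a))
			return 1;
		areas_free(&a);
		return 0;
	}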
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 455e902533a9..d81b737d68cc 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -66,7 +66,7 @@
66/********************* GENERAL DEFINES *********************************/ 66/********************* GENERAL DEFINES *********************************/
67 67
68/* zfcp version number, it consists of major, minor, and patch-level number */ 68/* zfcp version number, it consists of major, minor, and patch-level number */
69#define ZFCP_VERSION "4.3.0" 69#define ZFCP_VERSION "4.5.0"
70 70
71/** 71/**
72 * zfcp_sg_to_address - determine kernel address from struct scatterlist 72 * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -154,13 +154,17 @@ typedef u32 scsi_lun_t;
154#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 154#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
155#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 155#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
156 156
157/* Retry 5 times every 2 second, then every minute */
158#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
159#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
160#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
161
157/* timeout value for "default timer" for fsf requests */ 162/* timeout value for "default timer" for fsf requests */
158#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 163#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
159 164
160/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 165/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
161 166
162typedef unsigned long long wwn_t; 167typedef unsigned long long wwn_t;
163typedef unsigned int fc_id_t;
164typedef unsigned long long fcp_lun_t; 168typedef unsigned long long fcp_lun_t;
165/* data length field may be at variable position in FCP-2 FCP_CMND IU */ 169/* data length field may be at variable position in FCP-2 FCP_CMND IU */
166typedef unsigned int fcp_dl_t; 170typedef unsigned int fcp_dl_t;
@@ -281,6 +285,171 @@ struct fcp_logo {
281} __attribute__((packed)); 285} __attribute__((packed));
282 286
283/* 287/*
288 * DBF stuff
289 */
290#define ZFCP_DBF_TAG_SIZE 4
291
292struct zfcp_dbf_dump {
293 u8 tag[ZFCP_DBF_TAG_SIZE];
294 u32 total_size; /* size of total dump data */
295 u32 offset; /* how much data has being already dumped */
296 u32 size; /* how much data comes with this record */
297 u8 data[]; /* dump data */
298} __attribute__ ((packed));
299
300/* FIXME: to be inflated when reworking the erp dbf */
301struct zfcp_erp_dbf_record {
302 u8 dummy[16];
303} __attribute__ ((packed));
304
305struct zfcp_hba_dbf_record_response {
306 u32 fsf_command;
307 u64 fsf_reqid;
308 u32 fsf_seqno;
309 u64 fsf_issued;
310 u32 fsf_prot_status;
311 u32 fsf_status;
312 u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
313 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
314 u32 fsf_req_status;
315 u8 sbal_first;
316 u8 sbal_curr;
317 u8 sbal_last;
318 u8 pool;
319 u64 erp_action;
320 union {
321 struct {
322 u64 scsi_cmnd;
323 u64 scsi_serial;
324 } send_fcp;
325 struct {
326 u64 wwpn;
327 u32 d_id;
328 u32 port_handle;
329 } port;
330 struct {
331 u64 wwpn;
332 u64 fcp_lun;
333 u32 port_handle;
334 u32 lun_handle;
335 } unit;
336 struct {
337 u32 d_id;
338 u8 ls_code;
339 } send_els;
340 } data;
341} __attribute__ ((packed));
342
343struct zfcp_hba_dbf_record_status {
344 u8 failed;
345 u32 status_type;
346 u32 status_subtype;
347 struct fsf_queue_designator
348 queue_designator;
349 u32 payload_size;
350#define ZFCP_DBF_UNSOL_PAYLOAD 80
351#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
352#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
353#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32)
354 u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
355} __attribute__ ((packed));
356
357struct zfcp_hba_dbf_record_qdio {
358 u32 status;
359 u32 qdio_error;
360 u32 siga_error;
361 u8 sbal_index;
362 u8 sbal_count;
363} __attribute__ ((packed));
364
365struct zfcp_hba_dbf_record {
366 u8 tag[ZFCP_DBF_TAG_SIZE];
367 u8 tag2[ZFCP_DBF_TAG_SIZE];
368 union {
369 struct zfcp_hba_dbf_record_response response;
370 struct zfcp_hba_dbf_record_status status;
371 struct zfcp_hba_dbf_record_qdio qdio;
372 } type;
373} __attribute__ ((packed));
374
375struct zfcp_san_dbf_record_ct {
376 union {
377 struct {
378 u16 cmd_req_code;
379 u8 revision;
380 u8 gs_type;
381 u8 gs_subtype;
382 u8 options;
383 u16 max_res_size;
384 } request;
385 struct {
386 u16 cmd_rsp_code;
387 u8 revision;
388 u8 reason_code;
389 u8 reason_code_expl;
390 u8 vendor_unique;
391 } response;
392 } type;
393 u32 payload_size;
394#define ZFCP_DBF_CT_PAYLOAD 24
395 u8 payload[ZFCP_DBF_CT_PAYLOAD];
396} __attribute__ ((packed));
397
398struct zfcp_san_dbf_record_els {
399 u8 ls_code;
400 u32 payload_size;
401#define ZFCP_DBF_ELS_PAYLOAD 32
402#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
403 u8 payload[ZFCP_DBF_ELS_PAYLOAD];
404} __attribute__ ((packed));
405
406struct zfcp_san_dbf_record {
407 u8 tag[ZFCP_DBF_TAG_SIZE];
408 u64 fsf_reqid;
409 u32 fsf_seqno;
410 u32 s_id;
411 u32 d_id;
412 union {
413 struct zfcp_san_dbf_record_ct ct;
414 struct zfcp_san_dbf_record_els els;
415 } type;
416} __attribute__ ((packed));
417
418struct zfcp_scsi_dbf_record {
419 u8 tag[ZFCP_DBF_TAG_SIZE];
420 u8 tag2[ZFCP_DBF_TAG_SIZE];
421 u32 scsi_id;
422 u32 scsi_lun;
423 u32 scsi_result;
424 u64 scsi_cmnd;
425 u64 scsi_serial;
426#define ZFCP_DBF_SCSI_OPCODE 16
427 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
428 u8 scsi_retries;
429 u8 scsi_allowed;
430 u64 fsf_reqid;
431 u32 fsf_seqno;
432 u64 fsf_issued;
433 union {
434 struct {
435 u64 fsf_reqid;
436 u32 fsf_seqno;
437 u64 fsf_issued;
438 } new_fsf_req;
439 struct {
440 u8 rsp_validity;
441 u8 rsp_scsi_status;
442 u32 rsp_resid;
443 u8 rsp_code;
444#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16
445#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256
446 u32 sns_info_len;
447 u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
448 } fcp;
449 } type;
450} __attribute__ ((packed));
451
452/*
284 * FC-FS stuff 453 * FC-FS stuff
285 */ 454 */
286#define R_A_TOV 10 /* seconds */ 455#define R_A_TOV 10 /* seconds */
@@ -339,34 +508,6 @@ struct zfcp_rc_entry {
339 */ 508 */
340#define ZFCP_CT_TIMEOUT (3 * R_A_TOV) 509#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
341 510
342
343/***************** S390 DEBUG FEATURE SPECIFIC DEFINES ***********************/
344
345/* debug feature entries per adapter */
346#define ZFCP_ERP_DBF_INDEX 1
347#define ZFCP_ERP_DBF_AREAS 2
348#define ZFCP_ERP_DBF_LENGTH 16
349#define ZFCP_ERP_DBF_LEVEL 3
350#define ZFCP_ERP_DBF_NAME "zfcperp"
351
352#define ZFCP_CMD_DBF_INDEX 2
353#define ZFCP_CMD_DBF_AREAS 1
354#define ZFCP_CMD_DBF_LENGTH 8
355#define ZFCP_CMD_DBF_LEVEL 3
356#define ZFCP_CMD_DBF_NAME "zfcpcmd"
357
358#define ZFCP_ABORT_DBF_INDEX 2
359#define ZFCP_ABORT_DBF_AREAS 1
360#define ZFCP_ABORT_DBF_LENGTH 8
361#define ZFCP_ABORT_DBF_LEVEL 6
362#define ZFCP_ABORT_DBF_NAME "zfcpabt"
363
364#define ZFCP_IN_ELS_DBF_INDEX 2
365#define ZFCP_IN_ELS_DBF_AREAS 1
366#define ZFCP_IN_ELS_DBF_LENGTH 8
367#define ZFCP_IN_ELS_DBF_LEVEL 6
368#define ZFCP_IN_ELS_DBF_NAME "zfcpels"
369
370/******************** LOGGING MACROS AND DEFINES *****************************/ 511/******************** LOGGING MACROS AND DEFINES *****************************/
371 512
372/* 513/*
@@ -501,6 +642,7 @@ do { \
501#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 642#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
502#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 643#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
503#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 644#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
645#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
504 646
505#define ZFCP_STATUS_ADAPTER_SCSI_UP \ 647#define ZFCP_STATUS_ADAPTER_SCSI_UP \
506 (ZFCP_STATUS_COMMON_UNBLOCKED | \ 648 (ZFCP_STATUS_COMMON_UNBLOCKED | \
@@ -635,45 +777,6 @@ struct zfcp_adapter_mempool {
635 mempool_t *data_gid_pn; 777 mempool_t *data_gid_pn;
636}; 778};
637 779
638struct zfcp_exchange_config_data{
639};
640
641struct zfcp_open_port {
642 struct zfcp_port *port;
643};
644
645struct zfcp_close_port {
646 struct zfcp_port *port;
647};
648
649struct zfcp_open_unit {
650 struct zfcp_unit *unit;
651};
652
653struct zfcp_close_unit {
654 struct zfcp_unit *unit;
655};
656
657struct zfcp_close_physical_port {
658 struct zfcp_port *port;
659};
660
661struct zfcp_send_fcp_command_task {
662 struct zfcp_fsf_req *fsf_req;
663 struct zfcp_unit *unit;
664 struct scsi_cmnd *scsi_cmnd;
665 unsigned long start_jiffies;
666};
667
668struct zfcp_send_fcp_command_task_management {
669 struct zfcp_unit *unit;
670};
671
672struct zfcp_abort_fcp_command {
673 struct zfcp_fsf_req *fsf_req;
674 struct zfcp_unit *unit;
675};
676
677/* 780/*
678 * header for CT_IU 781 * header for CT_IU
679 */ 782 */
@@ -702,7 +805,7 @@ struct ct_iu_gid_pn_req {
702/* FS_ACC IU and data unit for GID_PN nameserver request */ 805/* FS_ACC IU and data unit for GID_PN nameserver request */
703struct ct_iu_gid_pn_resp { 806struct ct_iu_gid_pn_resp {
704 struct ct_hdr header; 807 struct ct_hdr header;
705 fc_id_t d_id; 808 u32 d_id;
706} __attribute__ ((packed)); 809} __attribute__ ((packed));
707 810
708typedef void (*zfcp_send_ct_handler_t)(unsigned long); 811typedef void (*zfcp_send_ct_handler_t)(unsigned long);
@@ -768,7 +871,7 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
768struct zfcp_send_els { 871struct zfcp_send_els {
769 struct zfcp_adapter *adapter; 872 struct zfcp_adapter *adapter;
770 struct zfcp_port *port; 873 struct zfcp_port *port;
771 fc_id_t d_id; 874 u32 d_id;
772 struct scatterlist *req; 875 struct scatterlist *req;
773 struct scatterlist *resp; 876 struct scatterlist *resp;
774 unsigned int req_count; 877 unsigned int req_count;
@@ -781,33 +884,6 @@ struct zfcp_send_els {
781 int status; 884 int status;
782}; 885};
783 886
784struct zfcp_status_read {
785 struct fsf_status_read_buffer *buffer;
786};
787
788struct zfcp_fsf_done {
789 struct completion *complete;
790 int status;
791};
792
793/* request specific data */
794union zfcp_req_data {
795 struct zfcp_exchange_config_data exchange_config_data;
796 struct zfcp_open_port open_port;
797 struct zfcp_close_port close_port;
798 struct zfcp_open_unit open_unit;
799 struct zfcp_close_unit close_unit;
800 struct zfcp_close_physical_port close_physical_port;
801 struct zfcp_send_fcp_command_task send_fcp_command_task;
802 struct zfcp_send_fcp_command_task_management
803 send_fcp_command_task_management;
804 struct zfcp_abort_fcp_command abort_fcp_command;
805 struct zfcp_send_ct *send_ct;
806 struct zfcp_send_els *send_els;
807 struct zfcp_status_read status_read;
808 struct fsf_qtcb_bottom_port *port_data;
809};
810
811struct zfcp_qdio_queue { 887struct zfcp_qdio_queue {
812 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ 888 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
813 u8 free_index; /* index of next free bfr 889 u8 free_index; /* index of next free bfr
@@ -838,21 +914,19 @@ struct zfcp_adapter {
838 atomic_t refcount; /* reference count */ 914 atomic_t refcount; /* reference count */
839 wait_queue_head_t remove_wq; /* can be used to wait for 915 wait_queue_head_t remove_wq; /* can be used to wait for
840 refcount drop to zero */ 916 refcount drop to zero */
841 wwn_t wwnn; /* WWNN */
842 wwn_t wwpn; /* WWPN */
843 fc_id_t s_id; /* N_Port ID */
844 wwn_t peer_wwnn; /* P2P peer WWNN */ 917 wwn_t peer_wwnn; /* P2P peer WWNN */
845 wwn_t peer_wwpn; /* P2P peer WWPN */ 918 wwn_t peer_wwpn; /* P2P peer WWPN */
846 fc_id_t peer_d_id; /* P2P peer D_ID */ 919 u32 peer_d_id; /* P2P peer D_ID */
920 wwn_t physical_wwpn; /* WWPN of physical port */
921 u32 physical_s_id; /* local FC port ID */
847 struct ccw_device *ccw_device; /* S/390 ccw device */ 922 struct ccw_device *ccw_device; /* S/390 ccw device */
848 u8 fc_service_class; 923 u8 fc_service_class;
849 u32 fc_topology; /* FC topology */ 924 u32 fc_topology; /* FC topology */
850 u32 fc_link_speed; /* FC interface speed */
851 u32 hydra_version; /* Hydra version */ 925 u32 hydra_version; /* Hydra version */
852 u32 fsf_lic_version; 926 u32 fsf_lic_version;
853 u32 supported_features;/* of FCP channel */ 927 u32 adapter_features; /* FCP channel features */
928 u32 connection_features; /* host connection features */
854 u32 hardware_version; /* of FCP channel */ 929 u32 hardware_version; /* of FCP channel */
855 u8 serial_number[32]; /* of hardware */
856 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 930 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
857 unsigned short scsi_host_no; /* Assigned host number */ 931 unsigned short scsi_host_no; /* Assigned host number */
858 unsigned char name[9]; 932 unsigned char name[9];
@@ -889,11 +963,18 @@ struct zfcp_adapter {
889 u32 erp_low_mem_count; /* nr of erp actions waiting 963 u32 erp_low_mem_count; /* nr of erp actions waiting
890 for memory */ 964 for memory */
891 struct zfcp_port *nameserver_port; /* adapter's nameserver */ 965 struct zfcp_port *nameserver_port; /* adapter's nameserver */
892 debug_info_t *erp_dbf; /* S/390 debug features */ 966 debug_info_t *erp_dbf;
893 debug_info_t *abort_dbf; 967 debug_info_t *hba_dbf;
894 debug_info_t *in_els_dbf; 968 debug_info_t *san_dbf; /* debug feature areas */
895 debug_info_t *cmd_dbf; 969 debug_info_t *scsi_dbf;
896 spinlock_t dbf_lock; 970 spinlock_t erp_dbf_lock;
971 spinlock_t hba_dbf_lock;
972 spinlock_t san_dbf_lock;
973 spinlock_t scsi_dbf_lock;
974 struct zfcp_erp_dbf_record erp_dbf_buf;
975 struct zfcp_hba_dbf_record hba_dbf_buf;
976 struct zfcp_san_dbf_record san_dbf_buf;
977 struct zfcp_scsi_dbf_record scsi_dbf_buf;
897 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 978 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
898 struct qdio_initialize qdio_init_data; /* for qdio_establish */ 979 struct qdio_initialize qdio_init_data; /* for qdio_establish */
899 struct device generic_services; /* directory for WKA ports */ 980 struct device generic_services; /* directory for WKA ports */
@@ -919,7 +1000,7 @@ struct zfcp_port {
919 atomic_t status; /* status of this remote port */ 1000 atomic_t status; /* status of this remote port */
920 wwn_t wwnn; /* WWNN if known */ 1001 wwn_t wwnn; /* WWNN if known */
921 wwn_t wwpn; /* WWPN */ 1002 wwn_t wwpn; /* WWPN */
922 fc_id_t d_id; /* D_ID */ 1003 u32 d_id; /* D_ID */
923 u32 handle; /* handle assigned by FSF */ 1004 u32 handle; /* handle assigned by FSF */
924 struct zfcp_erp_action erp_action; /* pending error recovery */ 1005 struct zfcp_erp_action erp_action; /* pending error recovery */
925 atomic_t erp_counter; 1006 atomic_t erp_counter;
@@ -963,11 +1044,13 @@ struct zfcp_fsf_req {
963 u32 fsf_command; /* FSF Command copy */ 1044 u32 fsf_command; /* FSF Command copy */
964 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 1045 struct fsf_qtcb *qtcb; /* address of associated QTCB */
965 u32 seq_no; /* Sequence number of request */ 1046 u32 seq_no; /* Sequence number of request */
966 union zfcp_req_data data; /* Info fields of request */ 1047 unsigned long data; /* private data of request */
967 struct zfcp_erp_action *erp_action; /* used if this request is 1048 struct zfcp_erp_action *erp_action; /* used if this request is
968 issued on behalf of erp */ 1049 issued on behalf of erp */
 969	mempool_t *pool;		/* used if request was allocated   1050	mempool_t *pool;		/* used if request was allocated
970 from emergency pool */ 1051 from emergency pool */
1052 unsigned long long issued; /* request sent time (STCK) */
1053 struct zfcp_unit *unit;
971}; 1054};
972 1055
973typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*); 1056typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
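
The zfcp_def.h hunk above drops the request-specific union zfcp_req_data from struct zfcp_fsf_req in favor of a plain unsigned long data member: each issuing path stores its per-request object as an integer-sized cookie and the matching handler casts it back. A minimal sketch of the convention, with hypothetical function names (the casts themselves appear verbatim in the zfcp_fsf.c hunks further down):

	/* issuing side: stash the per-request object in the generic field */
	static void example_issue(struct zfcp_fsf_req *fsf_req,
				  struct zfcp_unit *unit)
	{
		fsf_req->data = (unsigned long) unit;
	}

	/* completion side: recover the object with an explicit cast */
	static void example_complete(struct zfcp_fsf_req *fsf_req)
	{
		struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
		(void) unit;	/* handler-specific processing goes here */
	}

This keeps struct zfcp_fsf_req independent of every request type, which is what allows the small per-request structs (zfcp_open_port, zfcp_abort_fcp_command, ...) to be removed in the same hunk.
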
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index cb4f612550ba..023f4e558ae4 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -82,6 +82,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
82static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 82static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
83static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 83static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
84static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *); 84static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
85static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
85static int zfcp_erp_adapter_strategy_open_fsf_statusread( 86static int zfcp_erp_adapter_strategy_open_fsf_statusread(
86 struct zfcp_erp_action *); 87 struct zfcp_erp_action *);
87 88
@@ -345,13 +346,13 @@ zfcp_erp_adisc(struct zfcp_port *port)
345 346
346 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports 347 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
347 without FC-AL-2 capability, so we don't set it */ 348 without FC-AL-2 capability, so we don't set it */
348 adisc->wwpn = adapter->wwpn; 349 adisc->wwpn = fc_host_port_name(adapter->scsi_host);
349 adisc->wwnn = adapter->wwnn; 350 adisc->wwnn = fc_host_node_name(adapter->scsi_host);
350 adisc->nport_id = adapter->s_id; 351 adisc->nport_id = fc_host_port_id(adapter->scsi_host);
351 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x " 352 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
352 "(wwpn=0x%016Lx, wwnn=0x%016Lx, " 353 "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
353 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 354 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
354 adapter->s_id, send_els->d_id, (wwn_t) adisc->wwpn, 355 adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
355 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 356 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
356 adisc->nport_id); 357 adisc->nport_id);
357 358
@@ -404,7 +405,7 @@ zfcp_erp_adisc_handler(unsigned long data)
404 struct zfcp_send_els *send_els; 405 struct zfcp_send_els *send_els;
405 struct zfcp_port *port; 406 struct zfcp_port *port;
406 struct zfcp_adapter *adapter; 407 struct zfcp_adapter *adapter;
407 fc_id_t d_id; 408 u32 d_id;
408 struct zfcp_ls_adisc_acc *adisc; 409 struct zfcp_ls_adisc_acc *adisc;
409 410
410 send_els = (struct zfcp_send_els *) data; 411 send_els = (struct zfcp_send_els *) data;
@@ -435,9 +436,9 @@ zfcp_erp_adisc_handler(unsigned long data)
435 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id " 436 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
436 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, " 437 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
437 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 438 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
438 d_id, adapter->s_id, (wwn_t) adisc->wwpn, 439 d_id, fc_host_port_id(adapter->scsi_host),
439 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 440 (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
440 adisc->nport_id); 441 adisc->hard_nport_id, adisc->nport_id);
441 442
442 /* set wwnn for port */ 443 /* set wwnn for port */
443 if (port->wwnn == 0) 444 if (port->wwnn == 0)
@@ -886,7 +887,7 @@ static int
886zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) 887zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
887{ 888{
888 int retval = 0; 889 int retval = 0;
889 struct zfcp_fsf_req *fsf_req; 890 struct zfcp_fsf_req *fsf_req = NULL;
890 struct zfcp_adapter *adapter = erp_action->adapter; 891 struct zfcp_adapter *adapter = erp_action->adapter;
891 892
892 if (erp_action->fsf_req) { 893 if (erp_action->fsf_req) {
@@ -896,7 +897,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
896 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) 897 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
897 if (fsf_req == erp_action->fsf_req) 898 if (fsf_req == erp_action->fsf_req)
898 break; 899 break;
899 if (fsf_req == erp_action->fsf_req) { 900 if (fsf_req && (fsf_req->erp_action == erp_action)) {
900 /* fsf_req still exists */ 901 /* fsf_req still exists */
901 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 902 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
902 debug_event(adapter->erp_dbf, 3, &fsf_req, 903 debug_event(adapter->erp_dbf, 3, &fsf_req,
@@ -2258,16 +2259,21 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2258static int 2259static int
2259zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2260zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2260{ 2261{
2261 int retval; 2262 int xconfig, xport;
2263
2264 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2265 &erp_action->adapter->status)) {
2266 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2267 atomic_set(&erp_action->adapter->erp_counter, 0);
2268 return ZFCP_ERP_FAILED;
2269 }
2262 2270
2263 /* do 'exchange configuration data' */ 2271 xconfig = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2264 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2272 xport = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2265 if (retval == ZFCP_ERP_FAILED) 2273 if ((xconfig == ZFCP_ERP_FAILED) || (xport == ZFCP_ERP_FAILED))
2266 return retval; 2274 return ZFCP_ERP_FAILED;
2267 2275
2268 /* start the desired number of Status Reads */ 2276 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2269 retval = zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2270 return retval;
2271} 2277}
2272 2278
2273/* 2279/*
@@ -2291,7 +2297,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2291 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 2297 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2292 &adapter->status); 2298 &adapter->status);
2293 ZFCP_LOG_DEBUG("Doing exchange config data\n"); 2299 ZFCP_LOG_DEBUG("Doing exchange config data\n");
2300 write_lock(&adapter->erp_lock);
2294 zfcp_erp_action_to_running(erp_action); 2301 zfcp_erp_action_to_running(erp_action);
2302 write_unlock(&adapter->erp_lock);
2295 zfcp_erp_timeout_init(erp_action); 2303 zfcp_erp_timeout_init(erp_action);
2296 if (zfcp_fsf_exchange_config_data(erp_action)) { 2304 if (zfcp_fsf_exchange_config_data(erp_action)) {
2297 retval = ZFCP_ERP_FAILED; 2305 retval = ZFCP_ERP_FAILED;
@@ -2348,6 +2356,76 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2348 return retval; 2356 return retval;
2349} 2357}
2350 2358
2359static int
2360zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2361{
2362 int retval = ZFCP_ERP_SUCCEEDED;
2363 int retries;
2364 int sleep;
2365 struct zfcp_adapter *adapter = erp_action->adapter;
2366
2367 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2368
2369 for (retries = 0; ; retries++) {
2370 ZFCP_LOG_DEBUG("Doing exchange port data\n");
2371 zfcp_erp_action_to_running(erp_action);
2372 zfcp_erp_timeout_init(erp_action);
2373 if (zfcp_fsf_exchange_port_data(erp_action, adapter, NULL)) {
2374 retval = ZFCP_ERP_FAILED;
2375 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
2376 ZFCP_LOG_INFO("error: initiation of exchange of "
2377 "port data failed for adapter %s\n",
2378 zfcp_get_busid_by_adapter(adapter));
2379 break;
2380 }
2381 debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
2382 ZFCP_LOG_DEBUG("Xchange underway\n");
2383
2384 /*
2385 * Why this works:
2386 * Both the normal completion handler as well as the timeout
2387 * handler will do an 'up' when the 'exchange port data'
2388 * request completes or times out. Thus, the signal to go on
2389 * won't be lost utilizing this semaphore.
2390 * Furthermore, this 'adapter_reopen' action is
2391 * guaranteed to be the only action being there (highest action
2392 * which prevents other actions from being created).
2393 * Resulting from that, the wake signal recognized here
2394 * _must_ be the one belonging to the 'exchange port
2395 * data' request.
2396 */
2397 down(&adapter->erp_ready_sem);
2398 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2399 ZFCP_LOG_INFO("error: exchange of port data "
2400 "for adapter %s timed out\n",
2401 zfcp_get_busid_by_adapter(adapter));
2402 break;
2403 }
2404
2405 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2406 &adapter->status))
2407 break;
2408
2409 ZFCP_LOG_DEBUG("host connection still initialising... "
2410 "waiting and retrying...\n");
2411 /* sleep a little bit before retry */
2412 sleep = retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES ?
2413 ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP :
2414 ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP;
2415 msleep(jiffies_to_msecs(sleep));
2416 }
2417
2418 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2419 &adapter->status)) {
2420 ZFCP_LOG_INFO("error: exchange of port data for "
2421 "adapter %s failed\n",
2422 zfcp_get_busid_by_adapter(adapter));
2423 retval = ZFCP_ERP_FAILED;
2424 }
2425
2426 return retval;
2427}
2428
2351/* 2429/*
2352 * function: 2430 * function:
2353 * 2431 *
@@ -3194,11 +3272,19 @@ zfcp_erp_action_enqueue(int action,
3194 /* fall through !!! */ 3272 /* fall through !!! */
3195 3273
3196 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 3274 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3197 if (atomic_test_mask 3275 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
3198 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status) 3276 &port->status)) {
3199 && port->erp_action.action == 3277 if (port->erp_action.action !=
3200 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) { 3278 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
3201 debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp"); 3279 ZFCP_LOG_INFO("dropped erp action %i (port "
3280 "0x%016Lx, action in use: %i)\n",
3281 action, port->wwpn,
3282 port->erp_action.action);
3283 debug_text_event(adapter->erp_dbf, 4,
3284 "pf_actenq_drp");
3285 } else
3286 debug_text_event(adapter->erp_dbf, 4,
3287 "pf_actenq_drpcp");
3202 debug_event(adapter->erp_dbf, 4, &port->wwpn, 3288 debug_event(adapter->erp_dbf, 4, &port->wwpn,
3203 sizeof (wwn_t)); 3289 sizeof (wwn_t));
3204 goto out; 3290 goto out;
@@ -3589,6 +3675,9 @@ zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
3589 struct zfcp_port *port; 3675 struct zfcp_port *port;
3590 unsigned long flags; 3676 unsigned long flags;
3591 3677
3678 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
3679 return;
3680
3592 debug_text_event(adapter->erp_dbf, 3, "a_access_recover"); 3681 debug_text_event(adapter->erp_dbf, 3, "a_access_recover");
3593 debug_event(adapter->erp_dbf, 3, &adapter->name, 8); 3682 debug_event(adapter->erp_dbf, 3, &adapter->name, 8);
3594 3683
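
In the zfcp_erp.c changes above, the adapter's own identity is no longer read from the removed wwnn/wwpn/s_id members but through the FC transport class accessors on the Scsi_Host. A minimal sketch of the access pattern, assuming the fc_host attributes are populated elsewhere (for example when exchange-config data is evaluated, which is not part of these hunks); the ADISC payload type name is taken as an assumption from the surrounding code:

	#include <scsi/scsi_transport_fc.h>

	static void example_fill_adisc(struct zfcp_adapter *adapter,
				       struct zfcp_ls_adisc *adisc)
	{
		adisc->wwpn = fc_host_port_name(adapter->scsi_host);
		adisc->wwnn = fc_host_node_name(adapter->scsi_host);
		adisc->nport_id = fc_host_port_id(adapter->scsi_host);
	}

The fc_host_* macros come from the FC transport class and read the attribute block the transport keeps per Scsi_Host, so the ERP paths no longer need private copies of the local WWPN, WWNN and N_Port ID.
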
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index cd98a2de9f8f..c3782261cb5c 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,7 +96,8 @@ extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
96extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); 96extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
97 97
98extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); 98extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
99extern int zfcp_fsf_exchange_port_data(struct zfcp_adapter *, 99extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
100 struct zfcp_adapter *,
100 struct fsf_qtcb_bottom_port *); 101 struct fsf_qtcb_bottom_port *);
101extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **, 102extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
102 u32, u32, struct zfcp_sg_list *); 103 u32, u32, struct zfcp_sg_list *);
@@ -109,7 +110,6 @@ extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
109extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, 110extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
110 struct zfcp_erp_action *); 111 struct zfcp_erp_action *);
111extern int zfcp_fsf_send_els(struct zfcp_send_els *); 112extern int zfcp_fsf_send_els(struct zfcp_send_els *);
112extern int zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *, int, u32 *);
113extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, 113extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
114 struct zfcp_unit *, 114 struct zfcp_unit *,
115 struct scsi_cmnd *, 115 struct scsi_cmnd *,
@@ -182,9 +182,25 @@ extern void zfcp_erp_port_access_changed(struct zfcp_port *);
182extern void zfcp_erp_unit_access_changed(struct zfcp_unit *); 182extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
183 183
184/******************************** AUX ****************************************/ 184/******************************** AUX ****************************************/
185extern void zfcp_cmd_dbf_event_fsf(const char *, struct zfcp_fsf_req *, 185extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
186 void *, int); 186extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
187extern void zfcp_cmd_dbf_event_scsi(const char *, struct scsi_cmnd *); 187 struct fsf_status_read_buffer *);
188extern void zfcp_in_els_dbf_event(struct zfcp_adapter *, const char *, 188extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
189 struct fsf_status_read_buffer *, int); 189 unsigned int, unsigned int, unsigned int,
190 int, int);
191
192extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
193extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
194extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
195extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
196extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
197
198extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
199 struct scsi_cmnd *);
200extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
201 struct scsi_cmnd *,
202 struct zfcp_fsf_req *);
203extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
204 struct scsi_cmnd *);
205
190#endif /* ZFCP_EXT_H */ 206#endif /* ZFCP_EXT_H */
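
The zfcp_ext.h hunk above replaces the old zfcp_cmd_dbf_event_* and zfcp_in_els_dbf_event entry points with per-area trace hooks that match the debug features now kept in struct zfcp_adapter (erp, hba, san, scsi). A rough sketch of how the FSF layer uses two of them; the function names here are illustrative, the actual call sites are in the zfcp_fsf.c hunks that follow:

	/* every FSF response is traced into the hba area before evaluation */
	static int example_protstatus_eval(struct zfcp_fsf_req *fsf_req)
	{
		zfcp_hba_dbf_event_fsf_response(fsf_req);
		/* ... evaluate qtcb->prefix.prot_status as before ... */
		return 0;
	}

	/* outgoing CT traffic is traced into the san area when it is built */
	static void example_send_ct(struct zfcp_fsf_req *fsf_req,
				    struct zfcp_send_ct *ct)
	{
		fsf_req->data = (unsigned long) ct;
		zfcp_san_dbf_event_ct_request(fsf_req);
	}
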
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c007b6424e74..3b0fc1163f5f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -59,6 +59,8 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *);
59static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); 59static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
60static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); 60static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
61static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); 61static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
62static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
63 struct fsf_link_down_info *);
62static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); 64static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
63static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); 65static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
64 66
@@ -285,51 +287,51 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
285{ 287{
286 int retval = 0; 288 int retval = 0;
287 struct zfcp_adapter *adapter = fsf_req->adapter; 289 struct zfcp_adapter *adapter = fsf_req->adapter;
290 struct fsf_qtcb *qtcb = fsf_req->qtcb;
291 union fsf_prot_status_qual *prot_status_qual =
292 &qtcb->prefix.prot_status_qual;
288 293
289 ZFCP_LOG_DEBUG("QTCB is at %p\n", fsf_req->qtcb); 294 zfcp_hba_dbf_event_fsf_response(fsf_req);
290 295
291 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 296 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
292 ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n", 297 ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
293 (unsigned long) fsf_req); 298 (unsigned long) fsf_req);
294 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | 299 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
295 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */ 300 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
296 zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
297 goto skip_protstatus; 301 goto skip_protstatus;
298 } 302 }
299 303
300 /* log additional information provided by FSF (if any) */ 304 /* log additional information provided by FSF (if any) */
301 if (unlikely(fsf_req->qtcb->header.log_length)) { 305 if (unlikely(qtcb->header.log_length)) {
302 /* do not trust them ;-) */ 306 /* do not trust them ;-) */
303 if (fsf_req->qtcb->header.log_start > sizeof(struct fsf_qtcb)) { 307 if (qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
304 ZFCP_LOG_NORMAL 308 ZFCP_LOG_NORMAL
305 ("bug: ULP (FSF logging) log data starts " 309 ("bug: ULP (FSF logging) log data starts "
306 "beyond end of packet header. Ignored. " 310 "beyond end of packet header. Ignored. "
307 "(start=%i, size=%li)\n", 311 "(start=%i, size=%li)\n",
308 fsf_req->qtcb->header.log_start, 312 qtcb->header.log_start,
309 sizeof(struct fsf_qtcb)); 313 sizeof(struct fsf_qtcb));
310 goto forget_log; 314 goto forget_log;
311 } 315 }
312 if ((size_t) (fsf_req->qtcb->header.log_start + 316 if ((size_t) (qtcb->header.log_start + qtcb->header.log_length)
313 fsf_req->qtcb->header.log_length)
314 > sizeof(struct fsf_qtcb)) { 317 > sizeof(struct fsf_qtcb)) {
315 ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends " 318 ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
316 "beyond end of packet header. Ignored. " 319 "beyond end of packet header. Ignored. "
317 "(start=%i, length=%i, size=%li)\n", 320 "(start=%i, length=%i, size=%li)\n",
318 fsf_req->qtcb->header.log_start, 321 qtcb->header.log_start,
319 fsf_req->qtcb->header.log_length, 322 qtcb->header.log_length,
320 sizeof(struct fsf_qtcb)); 323 sizeof(struct fsf_qtcb));
321 goto forget_log; 324 goto forget_log;
322 } 325 }
323 ZFCP_LOG_TRACE("ULP log data: \n"); 326 ZFCP_LOG_TRACE("ULP log data: \n");
324 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, 327 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
325 (char *) fsf_req->qtcb + 328 (char *) qtcb + qtcb->header.log_start,
326 fsf_req->qtcb->header.log_start, 329 qtcb->header.log_length);
327 fsf_req->qtcb->header.log_length);
328 } 330 }
329 forget_log: 331 forget_log:
330 332
331 /* evaluate FSF Protocol Status */ 333 /* evaluate FSF Protocol Status */
332 switch (fsf_req->qtcb->prefix.prot_status) { 334 switch (qtcb->prefix.prot_status) {
333 335
334 case FSF_PROT_GOOD: 336 case FSF_PROT_GOOD:
335 case FSF_PROT_FSF_STATUS_PRESENTED: 337 case FSF_PROT_FSF_STATUS_PRESENTED:
@@ -340,14 +342,9 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
340 "microcode of version 0x%x, the device driver " 342 "microcode of version 0x%x, the device driver "
341 "only supports 0x%x. Aborting.\n", 343 "only supports 0x%x. Aborting.\n",
342 zfcp_get_busid_by_adapter(adapter), 344 zfcp_get_busid_by_adapter(adapter),
343 fsf_req->qtcb->prefix.prot_status_qual. 345 prot_status_qual->version_error.fsf_version,
344 version_error.fsf_version, ZFCP_QTCB_VERSION); 346 ZFCP_QTCB_VERSION);
345 /* stop operation for this adapter */
346 debug_text_exception(adapter->erp_dbf, 0, "prot_ver_err");
347 zfcp_erp_adapter_shutdown(adapter, 0); 347 zfcp_erp_adapter_shutdown(adapter, 0);
348 zfcp_cmd_dbf_event_fsf("qverserr", fsf_req,
349 &fsf_req->qtcb->prefix.prot_status_qual,
350 sizeof (union fsf_prot_status_qual));
351 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 348 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
352 break; 349 break;
353 350
@@ -355,16 +352,10 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
355 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between " 352 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
356 "driver (0x%x) and adapter %s (0x%x). " 353 "driver (0x%x) and adapter %s (0x%x). "
357 "Restarting all operations on this adapter.\n", 354 "Restarting all operations on this adapter.\n",
358 fsf_req->qtcb->prefix.req_seq_no, 355 qtcb->prefix.req_seq_no,
359 zfcp_get_busid_by_adapter(adapter), 356 zfcp_get_busid_by_adapter(adapter),
360 fsf_req->qtcb->prefix.prot_status_qual. 357 prot_status_qual->sequence_error.exp_req_seq_no);
361 sequence_error.exp_req_seq_no);
362 debug_text_exception(adapter->erp_dbf, 0, "prot_seq_err");
363 /* restart operation on this adapter */
364 zfcp_erp_adapter_reopen(adapter, 0); 358 zfcp_erp_adapter_reopen(adapter, 0);
365 zfcp_cmd_dbf_event_fsf("seqnoerr", fsf_req,
366 &fsf_req->qtcb->prefix.prot_status_qual,
367 sizeof (union fsf_prot_status_qual));
368 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 359 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
369 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 360 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
370 break; 361 break;
@@ -375,116 +366,35 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
375 "that used on adapter %s. " 366 "that used on adapter %s. "
376 "Stopping all operations on this adapter.\n", 367 "Stopping all operations on this adapter.\n",
377 zfcp_get_busid_by_adapter(adapter)); 368 zfcp_get_busid_by_adapter(adapter));
378 debug_text_exception(adapter->erp_dbf, 0, "prot_unsup_qtcb");
379 zfcp_erp_adapter_shutdown(adapter, 0); 369 zfcp_erp_adapter_shutdown(adapter, 0);
380 zfcp_cmd_dbf_event_fsf("unsqtcbt", fsf_req,
381 &fsf_req->qtcb->prefix.prot_status_qual,
382 sizeof (union fsf_prot_status_qual));
383 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 370 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
384 break; 371 break;
385 372
386 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 373 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
387 zfcp_cmd_dbf_event_fsf("hconinit", fsf_req,
388 &fsf_req->qtcb->prefix.prot_status_qual,
389 sizeof (union fsf_prot_status_qual));
390 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 374 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
391 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 375 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
392 &(adapter->status)); 376 &(adapter->status));
393 debug_text_event(adapter->erp_dbf, 3, "prot_con_init");
394 break; 377 break;
395 378
396 case FSF_PROT_DUPLICATE_REQUEST_ID: 379 case FSF_PROT_DUPLICATE_REQUEST_ID:
397 if (fsf_req->qtcb) {
398 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx " 380 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
399 "to the adapter %s is ambiguous. " 381 "to the adapter %s is ambiguous. "
400 "Stopping all operations on this " 382 "Stopping all operations on this adapter.\n",
401 "adapter.\n", 383 *(unsigned long long*)
402 *(unsigned long long *) 384 (&qtcb->bottom.support.req_handle),
403 (&fsf_req->qtcb->bottom.support.
404 req_handle),
405 zfcp_get_busid_by_adapter(adapter));
406 } else {
407 ZFCP_LOG_NORMAL("bug: The request identifier %p "
408 "to the adapter %s is ambiguous. "
409 "Stopping all operations on this "
410 "adapter. "
411 "(bug: got this for an unsolicited "
412 "status read request)\n",
413 fsf_req,
414 zfcp_get_busid_by_adapter(adapter)); 385 zfcp_get_busid_by_adapter(adapter));
415 }
416 debug_text_exception(adapter->erp_dbf, 0, "prot_dup_id");
417 zfcp_erp_adapter_shutdown(adapter, 0); 386 zfcp_erp_adapter_shutdown(adapter, 0);
418 zfcp_cmd_dbf_event_fsf("dupreqid", fsf_req,
419 &fsf_req->qtcb->prefix.prot_status_qual,
420 sizeof (union fsf_prot_status_qual));
421 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 387 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
422 break; 388 break;
423 389
424 case FSF_PROT_LINK_DOWN: 390 case FSF_PROT_LINK_DOWN:
425 /* 391 zfcp_fsf_link_down_info_eval(adapter,
426 * 'test and set' is not atomic here - 392 &prot_status_qual->link_down_info);
427 * it's ok as long as calls to our response queue handler
428 * (and thus execution of this code here) are serialized
429 * by the qdio module
430 */
431 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
432 &adapter->status)) {
433 switch (fsf_req->qtcb->prefix.prot_status_qual.
434 locallink_error.code) {
435 case FSF_PSQ_LINK_NOLIGHT:
436 ZFCP_LOG_INFO("The local link to adapter %s "
437 "is down (no light detected).\n",
438 zfcp_get_busid_by_adapter(
439 adapter));
440 break;
441 case FSF_PSQ_LINK_WRAPPLUG:
442 ZFCP_LOG_INFO("The local link to adapter %s "
443 "is down (wrap plug detected).\n",
444 zfcp_get_busid_by_adapter(
445 adapter));
446 break;
447 case FSF_PSQ_LINK_NOFCP:
448 ZFCP_LOG_INFO("The local link to adapter %s "
449 "is down (adjacent node on "
450 "link does not support FCP).\n",
451 zfcp_get_busid_by_adapter(
452 adapter));
453 break;
454 default:
455 ZFCP_LOG_INFO("The local link to adapter %s "
456 "is down "
457 "(warning: unknown reason "
458 "code).\n",
459 zfcp_get_busid_by_adapter(
460 adapter));
461 break;
462
463 }
464 /*
465 * Due to the 'erp failed' flag the adapter won't
466 * be recovered but will be just set to 'blocked'
467 * state. All subordinary devices will have state
468 * 'blocked' and 'erp failed', too.
469 * Thus the adapter is still able to provide
470 * 'link up' status without being flooded with
471 * requests.
472 * (note: even 'close port' is not permitted)
473 */
474 ZFCP_LOG_INFO("Stopping all operations for adapter "
475 "%s.\n",
476 zfcp_get_busid_by_adapter(adapter));
477 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
478 ZFCP_STATUS_COMMON_ERP_FAILED,
479 &adapter->status);
480 zfcp_erp_adapter_reopen(adapter, 0);
481 }
482 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 393 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
483 break; 394 break;
484 395
485 case FSF_PROT_REEST_QUEUE: 396 case FSF_PROT_REEST_QUEUE:
486 debug_text_event(adapter->erp_dbf, 1, "prot_reest_queue"); 397 ZFCP_LOG_NORMAL("The local link to adapter with "
487 ZFCP_LOG_INFO("The local link to adapter with "
488 "%s was re-plugged. " 398 "%s was re-plugged. "
489 "Re-starting operations on this adapter.\n", 399 "Re-starting operations on this adapter.\n",
490 zfcp_get_busid_by_adapter(adapter)); 400 zfcp_get_busid_by_adapter(adapter));
@@ -495,9 +405,6 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
495 zfcp_erp_adapter_reopen(adapter, 405 zfcp_erp_adapter_reopen(adapter,
496 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 406 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
497 | ZFCP_STATUS_COMMON_ERP_FAILED); 407 | ZFCP_STATUS_COMMON_ERP_FAILED);
498 zfcp_cmd_dbf_event_fsf("reestque", fsf_req,
499 &fsf_req->qtcb->prefix.prot_status_qual,
500 sizeof (union fsf_prot_status_qual));
501 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 408 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
502 break; 409 break;
503 410
@@ -507,12 +414,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
507 "Restarting all operations on this " 414 "Restarting all operations on this "
508 "adapter.\n", 415 "adapter.\n",
509 zfcp_get_busid_by_adapter(adapter)); 416 zfcp_get_busid_by_adapter(adapter));
510 debug_text_event(adapter->erp_dbf, 0, "prot_err_sta");
511 /* restart operation on this adapter */
512 zfcp_erp_adapter_reopen(adapter, 0); 417 zfcp_erp_adapter_reopen(adapter, 0);
513 zfcp_cmd_dbf_event_fsf("proterrs", fsf_req,
514 &fsf_req->qtcb->prefix.prot_status_qual,
515 sizeof (union fsf_prot_status_qual));
516 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 418 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
517 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 419 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
518 break; 420 break;
@@ -524,11 +426,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
524 "Stopping all operations on this adapter. " 426 "Stopping all operations on this adapter. "
525 "(debug info 0x%x).\n", 427 "(debug info 0x%x).\n",
526 zfcp_get_busid_by_adapter(adapter), 428 zfcp_get_busid_by_adapter(adapter),
527 fsf_req->qtcb->prefix.prot_status); 429 qtcb->prefix.prot_status);
528 debug_text_event(adapter->erp_dbf, 0, "prot_inval:");
529 debug_exception(adapter->erp_dbf, 0,
530 &fsf_req->qtcb->prefix.prot_status,
531 sizeof (u32));
532 zfcp_erp_adapter_shutdown(adapter, 0); 430 zfcp_erp_adapter_shutdown(adapter, 0);
533 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 431 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
534 } 432 }
@@ -568,28 +466,18 @@ zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
568 "(debug info 0x%x).\n", 466 "(debug info 0x%x).\n",
569 zfcp_get_busid_by_adapter(fsf_req->adapter), 467 zfcp_get_busid_by_adapter(fsf_req->adapter),
570 fsf_req->qtcb->header.fsf_command); 468 fsf_req->qtcb->header.fsf_command);
571 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
572 "fsf_s_unknown");
573 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); 469 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
574 zfcp_cmd_dbf_event_fsf("unknownc", fsf_req,
575 &fsf_req->qtcb->header.fsf_status_qual,
576 sizeof (union fsf_status_qual));
577 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 470 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
578 break; 471 break;
579 472
580 case FSF_FCP_RSP_AVAILABLE: 473 case FSF_FCP_RSP_AVAILABLE:
581 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the " 474 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
582 "SCSI stack.\n"); 475 "SCSI stack.\n");
583 debug_text_event(fsf_req->adapter->erp_dbf, 3, "fsf_s_rsp");
584 break; 476 break;
585 477
586 case FSF_ADAPTER_STATUS_AVAILABLE: 478 case FSF_ADAPTER_STATUS_AVAILABLE:
587 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_astatus");
588 zfcp_fsf_fsfstatus_qual_eval(fsf_req); 479 zfcp_fsf_fsfstatus_qual_eval(fsf_req);
589 break; 480 break;
590
591 default:
592 break;
593 } 481 }
594 482
595 skip_fsfstatus: 483 skip_fsfstatus:
@@ -617,44 +505,28 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
617 505
618 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { 506 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
619 case FSF_SQ_FCP_RSP_AVAILABLE: 507 case FSF_SQ_FCP_RSP_AVAILABLE:
620 debug_text_event(fsf_req->adapter->erp_dbf, 4, "fsf_sq_rsp");
621 break; 508 break;
622 case FSF_SQ_RETRY_IF_POSSIBLE: 509 case FSF_SQ_RETRY_IF_POSSIBLE:
623 /* The SCSI-stack may now issue retries or escalate */ 510 /* The SCSI-stack may now issue retries or escalate */
624 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_retry");
625 zfcp_cmd_dbf_event_fsf("sqretry", fsf_req,
626 &fsf_req->qtcb->header.fsf_status_qual,
627 sizeof (union fsf_status_qual));
628 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 511 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
629 break; 512 break;
630 case FSF_SQ_COMMAND_ABORTED: 513 case FSF_SQ_COMMAND_ABORTED:
631 /* Carry the aborted state on to upper layer */ 514 /* Carry the aborted state on to upper layer */
632 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_abort");
633 zfcp_cmd_dbf_event_fsf("sqabort", fsf_req,
634 &fsf_req->qtcb->header.fsf_status_qual,
635 sizeof (union fsf_status_qual));
636 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED; 515 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
637 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 516 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
638 break; 517 break;
639 case FSF_SQ_NO_RECOM: 518 case FSF_SQ_NO_RECOM:
640 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
641 "fsf_sq_no_rec");
642 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a" 519 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a"
643 "problem on the adapter %s " 520 "problem on the adapter %s "
644 "Stopping all operations on this adapter. ", 521 "Stopping all operations on this adapter. ",
645 zfcp_get_busid_by_adapter(fsf_req->adapter)); 522 zfcp_get_busid_by_adapter(fsf_req->adapter));
646 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); 523 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
647 zfcp_cmd_dbf_event_fsf("sqnrecom", fsf_req,
648 &fsf_req->qtcb->header.fsf_status_qual,
649 sizeof (union fsf_status_qual));
650 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 524 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
651 break; 525 break;
652 case FSF_SQ_ULP_PROGRAMMING_ERROR: 526 case FSF_SQ_ULP_PROGRAMMING_ERROR:
653 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer " 527 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
654 "(adapter %s)\n", 528 "(adapter %s)\n",
655 zfcp_get_busid_by_adapter(fsf_req->adapter)); 529 zfcp_get_busid_by_adapter(fsf_req->adapter));
656 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
657 "fsf_sq_ulp_err");
658 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 530 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
659 break; 531 break;
660 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 532 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
@@ -668,13 +540,6 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
668 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 540 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
669 (char *) &fsf_req->qtcb->header.fsf_status_qual, 541 (char *) &fsf_req->qtcb->header.fsf_status_qual,
670 sizeof (union fsf_status_qual)); 542 sizeof (union fsf_status_qual));
671 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval:");
672 debug_exception(fsf_req->adapter->erp_dbf, 0,
673 &fsf_req->qtcb->header.fsf_status_qual.word[0],
674 sizeof (u32));
675 zfcp_cmd_dbf_event_fsf("squndef", fsf_req,
676 &fsf_req->qtcb->header.fsf_status_qual,
677 sizeof (union fsf_status_qual));
678 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 543 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
679 break; 544 break;
680 } 545 }
@@ -682,6 +547,110 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
682 return retval; 547 return retval;
683} 548}
684 549
550/**
551 * zfcp_fsf_link_down_info_eval - evaluate link down information block
552 */
553static void
554zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
555 struct fsf_link_down_info *link_down)
556{
557 switch (link_down->error_code) {
558 case FSF_PSQ_LINK_NO_LIGHT:
559 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
560 "(no light detected)\n",
561 zfcp_get_busid_by_adapter(adapter));
562 break;
563 case FSF_PSQ_LINK_WRAP_PLUG:
564 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
565 "(wrap plug detected)\n",
566 zfcp_get_busid_by_adapter(adapter));
567 break;
568 case FSF_PSQ_LINK_NO_FCP:
569 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
570 "(adjacent node on link does not support FCP)\n",
571 zfcp_get_busid_by_adapter(adapter));
572 break;
573 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
574 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
575 "(firmware update in progress)\n",
576 zfcp_get_busid_by_adapter(adapter));
577 break;
578 case FSF_PSQ_LINK_INVALID_WWPN:
579 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
580 "(duplicate or invalid WWPN detected)\n",
581 zfcp_get_busid_by_adapter(adapter));
582 break;
583 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
584 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
585 "(no support for NPIV by Fabric)\n",
586 zfcp_get_busid_by_adapter(adapter));
587 break;
588 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
589 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
590 "(out of resource in FCP daughtercard)\n",
591 zfcp_get_busid_by_adapter(adapter));
592 break;
593 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
594 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
595 "(out of resource in Fabric)\n",
596 zfcp_get_busid_by_adapter(adapter));
597 break;
598 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
599 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
600 "(unable to Fabric login)\n",
601 zfcp_get_busid_by_adapter(adapter));
602 break;
603 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
604 ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n",
605 zfcp_get_busid_by_adapter(adapter));
606 break;
607 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
608 ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n",
609 zfcp_get_busid_by_adapter(adapter));
610 break;
611 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
612 ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n",
613 zfcp_get_busid_by_adapter(adapter));
614 break;
615 default:
616 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
617 "(warning: unknown reason code %d)\n",
618 zfcp_get_busid_by_adapter(adapter),
619 link_down->error_code);
620 }
621
622 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
623 ZFCP_LOG_DEBUG("Debug information to link down: "
624 "primary_status=0x%02x "
625 "ioerr_code=0x%02x "
626 "action_code=0x%02x "
627 "reason_code=0x%02x "
628 "explanation_code=0x%02x "
629 "vendor_specific_code=0x%02x\n",
630 link_down->primary_status,
631 link_down->ioerr_code,
632 link_down->action_code,
633 link_down->reason_code,
634 link_down->explanation_code,
635 link_down->vendor_specific_code);
636
637 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
638 &adapter->status)) {
639 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
640 &adapter->status);
641 switch (link_down->error_code) {
642 case FSF_PSQ_LINK_NO_LIGHT:
643 case FSF_PSQ_LINK_WRAP_PLUG:
644 case FSF_PSQ_LINK_NO_FCP:
645 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
646 zfcp_erp_adapter_reopen(adapter, 0);
647 break;
648 default:
649 zfcp_erp_adapter_failed(adapter);
650 }
651 }
652}
653
685/* 654/*
686 * function: zfcp_fsf_req_dispatch 655 * function: zfcp_fsf_req_dispatch
687 * 656 *
@@ -696,11 +665,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
696 struct zfcp_adapter *adapter = fsf_req->adapter; 665 struct zfcp_adapter *adapter = fsf_req->adapter;
697 int retval = 0; 666 int retval = 0;
698 667
699 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
700 ZFCP_LOG_TRACE("fsf_req=%p, QTCB=%p\n", fsf_req, fsf_req->qtcb);
701 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
702 (char *) fsf_req->qtcb, sizeof(struct fsf_qtcb));
703 }
704 668
705 switch (fsf_req->fsf_command) { 669 switch (fsf_req->fsf_command) {
706 670
@@ -760,13 +724,13 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
760 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 724 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
761 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is " 725 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
762 "not supported by the adapter %s\n", 726 "not supported by the adapter %s\n",
763 zfcp_get_busid_by_adapter(fsf_req->adapter)); 727 zfcp_get_busid_by_adapter(adapter));
764 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command) 728 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
765 ZFCP_LOG_NORMAL 729 ZFCP_LOG_NORMAL
766 ("bug: Command issued by the device driver differs " 730 ("bug: Command issued by the device driver differs "
767 "from the command returned by the adapter %s " 731 "from the command returned by the adapter %s "
768 "(debug info 0x%x, 0x%x).\n", 732 "(debug info 0x%x, 0x%x).\n",
769 zfcp_get_busid_by_adapter(fsf_req->adapter), 733 zfcp_get_busid_by_adapter(adapter),
770 fsf_req->fsf_command, 734 fsf_req->fsf_command,
771 fsf_req->qtcb->header.fsf_command); 735 fsf_req->qtcb->header.fsf_command);
772 } 736 }
@@ -774,8 +738,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
774 if (!erp_action) 738 if (!erp_action)
775 return retval; 739 return retval;
776 740
777 debug_text_event(adapter->erp_dbf, 3, "a_frh");
778 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
779 zfcp_erp_async_handler(erp_action, 0); 741 zfcp_erp_async_handler(erp_action, 0);
780 742
781 return retval; 743 return retval;
@@ -821,7 +783,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
821 goto failed_buf; 783 goto failed_buf;
822 } 784 }
823 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer)); 785 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
824 fsf_req->data.status_read.buffer = status_buffer; 786 fsf_req->data = (unsigned long) status_buffer;
825 787
826 /* insert pointer to respective buffer */ 788 /* insert pointer to respective buffer */
827 sbale = zfcp_qdio_sbale_curr(fsf_req); 789 sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -846,6 +808,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
846 failed_buf: 808 failed_buf:
847 zfcp_fsf_req_free(fsf_req); 809 zfcp_fsf_req_free(fsf_req);
848 failed_req_create: 810 failed_req_create:
811 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
849 out: 812 out:
850 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 813 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
851 return retval; 814 return retval;
@@ -859,7 +822,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
859 struct zfcp_port *port; 822 struct zfcp_port *port;
860 unsigned long flags; 823 unsigned long flags;
861 824
862 status_buffer = fsf_req->data.status_read.buffer; 825 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
863 adapter = fsf_req->adapter; 826 adapter = fsf_req->adapter;
864 827
865 read_lock_irqsave(&zfcp_data.config_lock, flags); 828 read_lock_irqsave(&zfcp_data.config_lock, flags);
@@ -918,38 +881,33 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
918 int retval = 0; 881 int retval = 0;
919 struct zfcp_adapter *adapter = fsf_req->adapter; 882 struct zfcp_adapter *adapter = fsf_req->adapter;
920 struct fsf_status_read_buffer *status_buffer = 883 struct fsf_status_read_buffer *status_buffer =
921 fsf_req->data.status_read.buffer; 884 (struct fsf_status_read_buffer *) fsf_req->data;
922 885
923 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 886 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
887 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
924 mempool_free(status_buffer, adapter->pool.data_status_read); 888 mempool_free(status_buffer, adapter->pool.data_status_read);
925 zfcp_fsf_req_free(fsf_req); 889 zfcp_fsf_req_free(fsf_req);
926 goto out; 890 goto out;
927 } 891 }
928 892
893 zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer);
894
929 switch (status_buffer->status_type) { 895 switch (status_buffer->status_type) {
930 896
931 case FSF_STATUS_READ_PORT_CLOSED: 897 case FSF_STATUS_READ_PORT_CLOSED:
932 debug_text_event(adapter->erp_dbf, 3, "unsol_pclosed:");
933 debug_event(adapter->erp_dbf, 3,
934 &status_buffer->d_id, sizeof (u32));
935 zfcp_fsf_status_read_port_closed(fsf_req); 898 zfcp_fsf_status_read_port_closed(fsf_req);
936 break; 899 break;
937 900
938 case FSF_STATUS_READ_INCOMING_ELS: 901 case FSF_STATUS_READ_INCOMING_ELS:
939 debug_text_event(adapter->erp_dbf, 3, "unsol_els:");
940 zfcp_fsf_incoming_els(fsf_req); 902 zfcp_fsf_incoming_els(fsf_req);
941 break; 903 break;
942 904
943 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 905 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
944 debug_text_event(adapter->erp_dbf, 3, "unsol_sense:");
945 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n", 906 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
946 zfcp_get_busid_by_adapter(adapter)); 907 zfcp_get_busid_by_adapter(adapter));
947 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *) status_buffer,
948 sizeof(struct fsf_status_read_buffer));
949 break; 908 break;
950 909
951 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 910 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
952 debug_text_event(adapter->erp_dbf, 3, "unsol_bit_err:");
953 ZFCP_LOG_NORMAL("Bit error threshold data received:\n"); 911 ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
954 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 912 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
955 (char *) status_buffer, 913 (char *) status_buffer,
@@ -957,17 +915,32 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
957 break; 915 break;
958 916
959 case FSF_STATUS_READ_LINK_DOWN: 917 case FSF_STATUS_READ_LINK_DOWN:
960 debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:"); 918 switch (status_buffer->status_subtype) {
961 ZFCP_LOG_INFO("Local link to adapter %s is down\n", 919 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
920 ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
921 zfcp_get_busid_by_adapter(adapter));
922 break;
923 case FSF_STATUS_READ_SUB_FDISC_FAILED:
924 ZFCP_LOG_INFO("Local link to adapter %s is down "
925 "due to failed FDISC login\n",
962 zfcp_get_busid_by_adapter(adapter)); 926 zfcp_get_busid_by_adapter(adapter));
963 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 927 break;
964 &adapter->status); 928 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
965 zfcp_erp_adapter_failed(adapter); 929 ZFCP_LOG_INFO("Local link to adapter %s is down "
930 "due to firmware update on adapter\n",
931 zfcp_get_busid_by_adapter(adapter));
932 break;
933 default:
934 ZFCP_LOG_INFO("Local link to adapter %s is down "
935 "due to unknown reason\n",
936 zfcp_get_busid_by_adapter(adapter));
937 };
938 zfcp_fsf_link_down_info_eval(adapter,
939 (struct fsf_link_down_info *) &status_buffer->payload);
966 break; 940 break;
967 941
968 case FSF_STATUS_READ_LINK_UP: 942 case FSF_STATUS_READ_LINK_UP:
969 debug_text_event(adapter->erp_dbf, 2, "unsol_link_up:"); 943 ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
970 ZFCP_LOG_INFO("Local link to adapter %s was replugged. "
971 "Restarting operations on this adapter\n", 944 "Restarting operations on this adapter\n",
972 zfcp_get_busid_by_adapter(adapter)); 945 zfcp_get_busid_by_adapter(adapter));
973 /* All ports should be marked as ready to run again */ 946 /* All ports should be marked as ready to run again */
@@ -980,35 +953,40 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
980 break; 953 break;
981 954
982 case FSF_STATUS_READ_CFDC_UPDATED: 955 case FSF_STATUS_READ_CFDC_UPDATED:
983 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_update:"); 956 ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
984 ZFCP_LOG_INFO("CFDC has been updated on the adapter %s\n",
985 zfcp_get_busid_by_adapter(adapter)); 957 zfcp_get_busid_by_adapter(adapter));
986 zfcp_erp_adapter_access_changed(adapter); 958 zfcp_erp_adapter_access_changed(adapter);
987 break; 959 break;
988 960
989 case FSF_STATUS_READ_CFDC_HARDENED: 961 case FSF_STATUS_READ_CFDC_HARDENED:
990 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
991 switch (status_buffer->status_subtype) { 962 switch (status_buffer->status_subtype) {
992 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE: 963 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
993 ZFCP_LOG_INFO("CFDC of adapter %s saved on SE\n", 964 ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
994 zfcp_get_busid_by_adapter(adapter)); 965 zfcp_get_busid_by_adapter(adapter));
995 break; 966 break;
996 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2: 967 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
997 ZFCP_LOG_INFO("CFDC of adapter %s has been copied " 968 ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
998 "to the secondary SE\n", 969 "to the secondary SE\n",
999 zfcp_get_busid_by_adapter(adapter)); 970 zfcp_get_busid_by_adapter(adapter));
1000 break; 971 break;
1001 default: 972 default:
1002 ZFCP_LOG_INFO("CFDC of adapter %s has been hardened\n", 973 ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
1003 zfcp_get_busid_by_adapter(adapter)); 974 zfcp_get_busid_by_adapter(adapter));
1004 } 975 }
1005 break; 976 break;
1006 977
978 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
979 debug_text_event(adapter->erp_dbf, 2, "unsol_features:");
980 ZFCP_LOG_INFO("List of supported features on adapter %s has "
981 "been changed from 0x%08X to 0x%08X\n",
982 zfcp_get_busid_by_adapter(adapter),
983 *(u32*) (status_buffer->payload + 4),
984 *(u32*) (status_buffer->payload));
985 adapter->adapter_features = *(u32*) status_buffer->payload;
986 break;
987
1007 default: 988 default:
1008 debug_text_event(adapter->erp_dbf, 0, "unsol_unknown:"); 989 ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown "
1009 debug_exception(adapter->erp_dbf, 0,
1010 &status_buffer->status_type, sizeof (u32));
1011 ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
1012 "type was received (debug info 0x%x)\n", 990 "type was received (debug info 0x%x)\n",
1013 status_buffer->status_type); 991 status_buffer->status_type);
1014 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n", 992 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
@@ -1093,7 +1071,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1093 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1071 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1094 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1072 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1095 1073
1096 fsf_req->data.abort_fcp_command.unit = unit; 1074 fsf_req->data = (unsigned long) unit;
1097 1075
1098 /* set handles of unit and its parent port in QTCB */ 1076 /* set handles of unit and its parent port in QTCB */
1099 fsf_req->qtcb->header.lun_handle = unit->handle; 1077 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -1139,7 +1117,7 @@ static int
1139zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) 1117zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1140{ 1118{
1141 int retval = -EINVAL; 1119 int retval = -EINVAL;
1142 struct zfcp_unit *unit = new_fsf_req->data.abort_fcp_command.unit; 1120 struct zfcp_unit *unit;
1143 unsigned char status_qual = 1121 unsigned char status_qual =
1144 new_fsf_req->qtcb->header.fsf_status_qual.word[0]; 1122 new_fsf_req->qtcb->header.fsf_status_qual.word[0];
1145 1123
@@ -1150,6 +1128,8 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1150 goto skip_fsfstatus; 1128 goto skip_fsfstatus;
1151 } 1129 }
1152 1130
1131 unit = (struct zfcp_unit *) new_fsf_req->data;
1132
1153 /* evaluate FSF status in QTCB */ 1133 /* evaluate FSF status in QTCB */
1154 switch (new_fsf_req->qtcb->header.fsf_status) { 1134 switch (new_fsf_req->qtcb->header.fsf_status) {
1155 1135
@@ -1364,7 +1344,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1364 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]); 1344 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
1365 sbale[3].length = ct->resp[0].length; 1345 sbale[3].length = ct->resp[0].length;
1366 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1346 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1367 } else if (adapter->supported_features & 1347 } else if (adapter->adapter_features &
1368 FSF_FEATURE_ELS_CT_CHAINED_SBALS) { 1348 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1369 /* try to use chained SBALs */ 1349 /* try to use chained SBALs */
1370 bytes = zfcp_qdio_sbals_from_sg(fsf_req, 1350 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1414,7 +1394,9 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1414 fsf_req->qtcb->header.port_handle = port->handle; 1394 fsf_req->qtcb->header.port_handle = port->handle;
1415 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class; 1395 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1416 fsf_req->qtcb->bottom.support.timeout = ct->timeout; 1396 fsf_req->qtcb->bottom.support.timeout = ct->timeout;
1417 fsf_req->data.send_ct = ct; 1397 fsf_req->data = (unsigned long) ct;
1398
1399 zfcp_san_dbf_event_ct_request(fsf_req);
1418 1400
1419 /* start QDIO request for this FSF request */ 1401 /* start QDIO request for this FSF request */
1420 ret = zfcp_fsf_req_send(fsf_req, ct->timer); 1402 ret = zfcp_fsf_req_send(fsf_req, ct->timer);
@@ -1445,10 +1427,10 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1445 * zfcp_fsf_send_ct_handler - handler for Generic Service requests 1427 * zfcp_fsf_send_ct_handler - handler for Generic Service requests
1446 * @fsf_req: pointer to struct zfcp_fsf_req 1428 * @fsf_req: pointer to struct zfcp_fsf_req
1447 * 1429 *
1448 * Data specific for the Generic Service request is passed by 1430 * Data specific for the Generic Service request is passed using
1449 * fsf_req->data.send_ct 1431 * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
1450 * Usually a specific handler for the request is called via 1432 * Usually a specific handler for the CT request is called which is
1451 * fsf_req->data.send_ct->handler at end of this function. 1433 * found in this structure.
1452 */ 1434 */
1453static int 1435static int
1454zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) 1436zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
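
The reworked kernel-doc above describes the dispatch convention: the generic handler recovers the struct zfcp_send_ct from fsf_req->data and, once the FSF status has been evaluated, invokes the request-specific handler kept in that structure. The sketch below shows how a submitter would plausibly wire this up; the handler and handler_data member names and the completion field are assumptions based on the surrounding driver, not something this hunk defines.

	/* Illustrative sketch only; member names are assumed, not taken from this hunk. */
	struct ct_ctx {
		struct completion done;		/* submitter waits here */
		struct zfcp_send_ct ct;
	};

	static void example_ct_handler(unsigned long data)
	{
		struct ct_ctx *ctx = (struct ct_ctx *) data;

		complete(&ctx->done);		/* called from zfcp_fsf_send_ct_handler() */
	}

	static void example_ct_setup(struct ct_ctx *ctx, struct zfcp_port *port)
	{
		init_completion(&ctx->done);
		ctx->ct.port = port;
		ctx->ct.handler = example_ct_handler;
		ctx->ct.handler_data = (unsigned long) ctx;
		/* the scatter/gather lists in ctx->ct.req and ctx->ct.resp would be
		 * filled in here, then the request is handed to zfcp_fsf_send_ct() */
	}
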
@@ -1462,7 +1444,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1462 u16 subtable, rule, counter; 1444 u16 subtable, rule, counter;
1463 1445
1464 adapter = fsf_req->adapter; 1446 adapter = fsf_req->adapter;
1465 send_ct = fsf_req->data.send_ct; 1447 send_ct = (struct zfcp_send_ct *) fsf_req->data;
1466 port = send_ct->port; 1448 port = send_ct->port;
1467 header = &fsf_req->qtcb->header; 1449 header = &fsf_req->qtcb->header;
1468 bottom = &fsf_req->qtcb->bottom.support; 1450 bottom = &fsf_req->qtcb->bottom.support;
@@ -1474,6 +1456,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1474 switch (header->fsf_status) { 1456 switch (header->fsf_status) {
1475 1457
1476 case FSF_GOOD: 1458 case FSF_GOOD:
1459 zfcp_san_dbf_event_ct_response(fsf_req);
1477 retval = 0; 1460 retval = 0;
1478 break; 1461 break;
1479 1462
@@ -1634,7 +1617,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1634{ 1617{
1635 volatile struct qdio_buffer_element *sbale; 1618 volatile struct qdio_buffer_element *sbale;
1636 struct zfcp_fsf_req *fsf_req; 1619 struct zfcp_fsf_req *fsf_req;
1637 fc_id_t d_id; 1620 u32 d_id;
1638 struct zfcp_adapter *adapter; 1621 struct zfcp_adapter *adapter;
1639 unsigned long lock_flags; 1622 unsigned long lock_flags;
1640 int bytes; 1623 int bytes;
@@ -1664,7 +1647,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1664 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]); 1647 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
1665 sbale[3].length = els->resp[0].length; 1648 sbale[3].length = els->resp[0].length;
1666 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1649 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1667 } else if (adapter->supported_features & 1650 } else if (adapter->adapter_features &
1668 FSF_FEATURE_ELS_CT_CHAINED_SBALS) { 1651 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1669 /* try to use chained SBALs */ 1652 /* try to use chained SBALs */
1670 bytes = zfcp_qdio_sbals_from_sg(fsf_req, 1653 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1714,10 +1697,12 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1714 fsf_req->qtcb->bottom.support.d_id = d_id; 1697 fsf_req->qtcb->bottom.support.d_id = d_id;
1715 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class; 1698 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1716 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT; 1699 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
1717 fsf_req->data.send_els = els; 1700 fsf_req->data = (unsigned long) els;
1718 1701
1719 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1702 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1720 1703
1704 zfcp_san_dbf_event_els_request(fsf_req);
1705
1721 /* start QDIO request for this FSF request */ 1706 /* start QDIO request for this FSF request */
1722 ret = zfcp_fsf_req_send(fsf_req, els->timer); 1707 ret = zfcp_fsf_req_send(fsf_req, els->timer);
1723 if (ret) { 1708 if (ret) {
@@ -1746,23 +1731,23 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1746 * zfcp_fsf_send_els_handler - handler for ELS commands 1731 * zfcp_fsf_send_els_handler - handler for ELS commands
1747 * @fsf_req: pointer to struct zfcp_fsf_req 1732 * @fsf_req: pointer to struct zfcp_fsf_req
1748 * 1733 *
1749 * Data specific for the ELS command is passed by 1734 * Data specific for the ELS command is passed using
1750 * fsf_req->data.send_els 1735 * fsf_req->data. There we find the pointer to struct zfcp_send_els.
1751 * Usually a specific handler for the command is called via 1736 * Usually a specific handler for the ELS command is called which is
1752 * fsf_req->data.send_els->handler at end of this function. 1737 * found in this structure.
1753 */ 1738 */
1754static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) 1739static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1755{ 1740{
1756 struct zfcp_adapter *adapter; 1741 struct zfcp_adapter *adapter;
1757 struct zfcp_port *port; 1742 struct zfcp_port *port;
1758 fc_id_t d_id; 1743 u32 d_id;
1759 struct fsf_qtcb_header *header; 1744 struct fsf_qtcb_header *header;
1760 struct fsf_qtcb_bottom_support *bottom; 1745 struct fsf_qtcb_bottom_support *bottom;
1761 struct zfcp_send_els *send_els; 1746 struct zfcp_send_els *send_els;
1762 int retval = -EINVAL; 1747 int retval = -EINVAL;
1763 u16 subtable, rule, counter; 1748 u16 subtable, rule, counter;
1764 1749
1765 send_els = fsf_req->data.send_els; 1750 send_els = (struct zfcp_send_els *) fsf_req->data;
1766 adapter = send_els->adapter; 1751 adapter = send_els->adapter;
1767 port = send_els->port; 1752 port = send_els->port;
1768 d_id = send_els->d_id; 1753 d_id = send_els->d_id;
@@ -1775,6 +1760,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1775 switch (header->fsf_status) { 1760 switch (header->fsf_status) {
1776 1761
1777 case FSF_GOOD: 1762 case FSF_GOOD:
1763 zfcp_san_dbf_event_els_response(fsf_req);
1778 retval = 0; 1764 retval = 0;
1779 break; 1765 break;
1780 1766
@@ -1954,7 +1940,9 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1954 1940
1955 erp_action->fsf_req->erp_action = erp_action; 1941 erp_action->fsf_req->erp_action = erp_action;
1956 erp_action->fsf_req->qtcb->bottom.config.feature_selection = 1942 erp_action->fsf_req->qtcb->bottom.config.feature_selection =
1957 (FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING); 1943 FSF_FEATURE_CFDC |
1944 FSF_FEATURE_LUN_SHARING |
1945 FSF_FEATURE_UPDATE_ALERT;
1958 1946
1959 /* start QDIO request for this FSF request */ 1947 /* start QDIO request for this FSF request */
1960 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 1948 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
@@ -1990,29 +1978,36 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
1990{ 1978{
1991 struct fsf_qtcb_bottom_config *bottom; 1979 struct fsf_qtcb_bottom_config *bottom;
1992 struct zfcp_adapter *adapter = fsf_req->adapter; 1980 struct zfcp_adapter *adapter = fsf_req->adapter;
1981 struct Scsi_Host *shost = adapter->scsi_host;
1993 1982
1994 bottom = &fsf_req->qtcb->bottom.config; 1983 bottom = &fsf_req->qtcb->bottom.config;
1995 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n", 1984 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
1996 bottom->low_qtcb_version, bottom->high_qtcb_version); 1985 bottom->low_qtcb_version, bottom->high_qtcb_version);
1997 adapter->fsf_lic_version = bottom->lic_version; 1986 adapter->fsf_lic_version = bottom->lic_version;
1998 adapter->supported_features = bottom->supported_features; 1987 adapter->adapter_features = bottom->adapter_features;
1988 adapter->connection_features = bottom->connection_features;
1999 adapter->peer_wwpn = 0; 1989 adapter->peer_wwpn = 0;
2000 adapter->peer_wwnn = 0; 1990 adapter->peer_wwnn = 0;
2001 adapter->peer_d_id = 0; 1991 adapter->peer_d_id = 0;
2002 1992
2003 if (xchg_ok) { 1993 if (xchg_ok) {
2004 adapter->wwnn = bottom->nport_serv_param.wwnn; 1994 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
2005 adapter->wwpn = bottom->nport_serv_param.wwpn; 1995 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
2006 adapter->s_id = bottom->s_id & ZFCP_DID_MASK; 1996 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
1997 fc_host_speed(shost) = bottom->fc_link_speed;
1998 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
2007 adapter->fc_topology = bottom->fc_topology; 1999 adapter->fc_topology = bottom->fc_topology;
2008 adapter->fc_link_speed = bottom->fc_link_speed;
2009 adapter->hydra_version = bottom->adapter_type; 2000 adapter->hydra_version = bottom->adapter_type;
2001 if (adapter->physical_wwpn == 0)
2002 adapter->physical_wwpn = fc_host_port_name(shost);
2003 if (adapter->physical_s_id == 0)
2004 adapter->physical_s_id = fc_host_port_id(shost);
2010 } else { 2005 } else {
2011 adapter->wwnn = 0; 2006 fc_host_node_name(shost) = 0;
2012 adapter->wwpn = 0; 2007 fc_host_port_name(shost) = 0;
2013 adapter->s_id = 0; 2008 fc_host_port_id(shost) = 0;
2009 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
2014 adapter->fc_topology = 0; 2010 adapter->fc_topology = 0;
2015 adapter->fc_link_speed = 0;
2016 adapter->hydra_version = 0; 2011 adapter->hydra_version = 0;
2017 } 2012 }
2018 2013
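
The block above moves the identity of the local N_Port out of private adapter fields and into the FC transport class attributes of the Scsi_Host. Elsewhere in the driver the same values can then be read back through the fc_host_*() accessors from <scsi/scsi_transport_fc.h>; a small illustrative helper (the function name is invented) might be:

	#include <scsi/scsi_transport_fc.h>

	/* Hypothetical helper: log the port identity now kept in the transport class. */
	static void example_log_fc_host(struct zfcp_adapter *adapter)
	{
		struct Scsi_Host *shost = adapter->scsi_host;

		ZFCP_LOG_DEBUG("WWNN 0x%016Lx, WWPN 0x%016Lx, S_ID 0x%08x, speed %d\n",
			       (unsigned long long) fc_host_node_name(shost),
			       (unsigned long long) fc_host_port_name(shost),
			       fc_host_port_id(shost),
			       fc_host_speed(shost));
	}
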
@@ -2022,26 +2017,28 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2022 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 2017 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
2023 } 2018 }
2024 2019
2025 if(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT){ 2020 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
2026 adapter->hardware_version = bottom->hardware_version; 2021 adapter->hardware_version = bottom->hardware_version;
2027 memcpy(adapter->serial_number, bottom->serial_number, 17); 2022 memcpy(fc_host_serial_number(shost), bottom->serial_number,
2028 EBCASC(adapter->serial_number, sizeof(adapter->serial_number)); 2023 min(FC_SERIAL_NUMBER_SIZE, 17));
2024 EBCASC(fc_host_serial_number(shost),
2025 min(FC_SERIAL_NUMBER_SIZE, 17));
2029 } 2026 }
2030 2027
2031 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n" 2028 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n"
2032 "WWNN 0x%016Lx, " 2029 "WWNN 0x%016Lx, "
2033 "WWPN 0x%016Lx, " 2030 "WWPN 0x%016Lx, "
2034 "S_ID 0x%08x,\n" 2031 "S_ID 0x%08x,\n"
2035 "adapter version 0x%x, " 2032 "adapter version 0x%x, "
2036 "LIC version 0x%x, " 2033 "LIC version 0x%x, "
2037 "FC link speed %d Gb/s\n", 2034 "FC link speed %d Gb/s\n",
2038 zfcp_get_busid_by_adapter(adapter), 2035 zfcp_get_busid_by_adapter(adapter),
2039 adapter->wwnn, 2036 (wwn_t) fc_host_node_name(shost),
2040 adapter->wwpn, 2037 (wwn_t) fc_host_port_name(shost),
2041 (unsigned int) adapter->s_id, 2038 fc_host_port_id(shost),
2042 adapter->hydra_version, 2039 adapter->hydra_version,
2043 adapter->fsf_lic_version, 2040 adapter->fsf_lic_version,
2044 adapter->fc_link_speed); 2041 fc_host_speed(shost));
2045 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) { 2042 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
2046 ZFCP_LOG_NORMAL("error: the adapter %s " 2043 ZFCP_LOG_NORMAL("error: the adapter %s "
2047 "only supports newer control block " 2044 "only supports newer control block "
@@ -2062,7 +2059,6 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2062 zfcp_erp_adapter_shutdown(adapter, 0); 2059 zfcp_erp_adapter_shutdown(adapter, 0);
2063 return -EIO; 2060 return -EIO;
2064 } 2061 }
2065 zfcp_set_fc_host_attrs(adapter);
2066 return 0; 2062 return 0;
2067} 2063}
2068 2064
@@ -2078,11 +2074,12 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2078{ 2074{
2079 struct fsf_qtcb_bottom_config *bottom; 2075 struct fsf_qtcb_bottom_config *bottom;
2080 struct zfcp_adapter *adapter = fsf_req->adapter; 2076 struct zfcp_adapter *adapter = fsf_req->adapter;
2077 struct fsf_qtcb *qtcb = fsf_req->qtcb;
2081 2078
2082 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2079 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2083 return -EIO; 2080 return -EIO;
2084 2081
2085 switch (fsf_req->qtcb->header.fsf_status) { 2082 switch (qtcb->header.fsf_status) {
2086 2083
2087 case FSF_GOOD: 2084 case FSF_GOOD:
2088 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1)) 2085 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
@@ -2112,7 +2109,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2112 zfcp_erp_adapter_shutdown(adapter, 0); 2109 zfcp_erp_adapter_shutdown(adapter, 0);
2113 return -EIO; 2110 return -EIO;
2114 case FSF_TOPO_FABRIC: 2111 case FSF_TOPO_FABRIC:
2115 ZFCP_LOG_INFO("Switched fabric fibrechannel " 2112 ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
2116 "network detected at adapter %s.\n", 2113 "network detected at adapter %s.\n",
2117 zfcp_get_busid_by_adapter(adapter)); 2114 zfcp_get_busid_by_adapter(adapter));
2118 break; 2115 break;
@@ -2130,7 +2127,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2130 zfcp_erp_adapter_shutdown(adapter, 0); 2127 zfcp_erp_adapter_shutdown(adapter, 0);
2131 return -EIO; 2128 return -EIO;
2132 } 2129 }
2133 bottom = &fsf_req->qtcb->bottom.config; 2130 bottom = &qtcb->bottom.config;
2134 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 2131 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
2135 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) " 2132 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
2136 "allowed by the adapter %s " 2133 "allowed by the adapter %s "
@@ -2155,12 +2152,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2155 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0)) 2152 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
2156 return -EIO; 2153 return -EIO;
2157 2154
2158 ZFCP_LOG_INFO("Local link to adapter %s is down\n", 2155 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
2159 zfcp_get_busid_by_adapter(adapter)); 2156
2160 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 2157 zfcp_fsf_link_down_info_eval(adapter,
2161 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2158 &qtcb->header.fsf_status_qual.link_down_info);
2162 &adapter->status);
2163 zfcp_erp_adapter_failed(adapter);
2164 break; 2159 break;
2165 default: 2160 default:
2166 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng"); 2161 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
@@ -2174,11 +2169,13 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2174 2169
2175/** 2170/**
2176 * zfcp_fsf_exchange_port_data - request information about local port 2171 * zfcp_fsf_exchange_port_data - request information about local port
2172 * @erp_action: ERP action for the adapter for which port data is requested
2177 * @adapter: for which port data is requested 2173 * @adapter: for which port data is requested
2178 * @data: response to exchange port data request 2174 * @data: response to exchange port data request
2179 */ 2175 */
2180int 2176int
2181zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter, 2177zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2178 struct zfcp_adapter *adapter,
2182 struct fsf_qtcb_bottom_port *data) 2179 struct fsf_qtcb_bottom_port *data)
2183{ 2180{
2184 volatile struct qdio_buffer_element *sbale; 2181 volatile struct qdio_buffer_element *sbale;
@@ -2187,7 +2184,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2187 struct zfcp_fsf_req *fsf_req; 2184 struct zfcp_fsf_req *fsf_req;
2188 struct timer_list *timer; 2185 struct timer_list *timer;
2189 2186
2190 if(!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)){ 2187 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2191 ZFCP_LOG_INFO("error: exchange port data " 2188 ZFCP_LOG_INFO("error: exchange port data "
2192 "command not supported by adapter %s\n", 2189 "command not supported by adapter %s\n",
2193 zfcp_get_busid_by_adapter(adapter)); 2190 zfcp_get_busid_by_adapter(adapter));
@@ -2211,12 +2208,18 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2211 goto out; 2208 goto out;
2212 } 2209 }
2213 2210
2211 if (erp_action) {
2212 erp_action->fsf_req = fsf_req;
2213 fsf_req->erp_action = erp_action;
2214 }
2215
2216 if (data)
2217 fsf_req->data = (unsigned long) data;
2218
2214 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 2219 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2215 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2220 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2216 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2221 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2217 2222
2218 fsf_req->data.port_data = data;
2219
2220 init_timer(timer); 2223 init_timer(timer);
2221 timer->function = zfcp_fsf_request_timeout_handler; 2224 timer->function = zfcp_fsf_request_timeout_handler;
2222 timer->data = (unsigned long) adapter; 2225 timer->data = (unsigned long) adapter;
@@ -2228,6 +2231,8 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2228 "command on the adapter %s\n", 2231 "command on the adapter %s\n",
2229 zfcp_get_busid_by_adapter(adapter)); 2232 zfcp_get_busid_by_adapter(adapter));
2230 zfcp_fsf_req_free(fsf_req); 2233 zfcp_fsf_req_free(fsf_req);
2234 if (erp_action)
2235 erp_action->fsf_req = NULL;
2231 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2236 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2232 lock_flags); 2237 lock_flags);
2233 goto out; 2238 goto out;
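
With the added erp_action parameter, exchange-port-data requests can now be issued in two ways: from recovery (erp_action set, no separate result buffer, the handler updates adapter and transport fields directly) and on demand (erp_action NULL, the caller supplies a struct fsf_qtcb_bottom_port to be filled). A hedged sketch of the two call styles follows; the caller names are invented for illustration:

	/* Illustrative only: the two wrapper functions are hypothetical. */

	/* 1) From adapter recovery: tie the request to the ERP action. */
	static int example_xport_from_erp(struct zfcp_erp_action *erp_action)
	{
		return zfcp_fsf_exchange_port_data(erp_action, erp_action->adapter, NULL);
	}

	/* 2) On demand, e.g. for an attribute read: supply a result buffer. */
	static int example_xport_on_demand(struct zfcp_adapter *adapter,
					   struct fsf_qtcb_bottom_port *data)
	{
		return zfcp_fsf_exchange_port_data(NULL, adapter, data);
	}
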
@@ -2256,21 +2261,42 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2256static void 2261static void
2257zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2262zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2258{ 2263{
2259 struct fsf_qtcb_bottom_port *bottom; 2264 struct zfcp_adapter *adapter = fsf_req->adapter;
2260 struct fsf_qtcb_bottom_port *data = fsf_req->data.port_data; 2265 struct Scsi_Host *shost = adapter->scsi_host;
2266 struct fsf_qtcb *qtcb = fsf_req->qtcb;
2267 struct fsf_qtcb_bottom_port *bottom, *data;
2261 2268
2262 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2269 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2263 return; 2270 return;
2264 2271
2265 switch (fsf_req->qtcb->header.fsf_status) { 2272 switch (qtcb->header.fsf_status) {
2266 case FSF_GOOD: 2273 case FSF_GOOD:
2267 bottom = &fsf_req->qtcb->bottom.port; 2274 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2268 memcpy(data, bottom, sizeof(*data)); 2275
2276 bottom = &qtcb->bottom.port;
2277 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2278 if (data)
2279 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2280 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
2281 adapter->physical_wwpn = bottom->wwpn;
2282 adapter->physical_s_id = bottom->fc_port_id;
2283 } else {
2284 adapter->physical_wwpn = fc_host_port_name(shost);
2285 adapter->physical_s_id = fc_host_port_id(shost);
2286 }
2287 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2288 break;
2289
2290 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2291 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2292
2293 zfcp_fsf_link_down_info_eval(adapter,
2294 &qtcb->header.fsf_status_qual.link_down_info);
2269 break; 2295 break;
2270 2296
2271 default: 2297 default:
2272 debug_text_event(fsf_req->adapter->erp_dbf, 0, "xchg-port-ng"); 2298 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
2273 debug_event(fsf_req->adapter->erp_dbf, 0, 2299 debug_event(adapter->erp_dbf, 0,
2274 &fsf_req->qtcb->header.fsf_status, sizeof(u32)); 2300 &fsf_req->qtcb->header.fsf_status, sizeof(u32));
2275 } 2301 }
2276} 2302}
@@ -2312,7 +2338,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2312 2338
2313 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id; 2339 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2314 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); 2340 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2315 erp_action->fsf_req->data.open_port.port = erp_action->port; 2341 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2316 erp_action->fsf_req->erp_action = erp_action; 2342 erp_action->fsf_req->erp_action = erp_action;
2317 2343
2318 /* start QDIO request for this FSF request */ 2344 /* start QDIO request for this FSF request */
@@ -2353,7 +2379,7 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
2353 struct fsf_qtcb_header *header; 2379 struct fsf_qtcb_header *header;
2354 u16 subtable, rule, counter; 2380 u16 subtable, rule, counter;
2355 2381
2356 port = fsf_req->data.open_port.port; 2382 port = (struct zfcp_port *) fsf_req->data;
2357 header = &fsf_req->qtcb->header; 2383 header = &fsf_req->qtcb->header;
2358 2384
2359 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2385 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2566,7 +2592,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2566 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2592 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2567 2593
2568 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); 2594 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2569 erp_action->fsf_req->data.close_port.port = erp_action->port; 2595 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2570 erp_action->fsf_req->erp_action = erp_action; 2596 erp_action->fsf_req->erp_action = erp_action;
2571 erp_action->fsf_req->qtcb->header.port_handle = 2597 erp_action->fsf_req->qtcb->header.port_handle =
2572 erp_action->port->handle; 2598 erp_action->port->handle;
@@ -2606,7 +2632,7 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2606 int retval = -EINVAL; 2632 int retval = -EINVAL;
2607 struct zfcp_port *port; 2633 struct zfcp_port *port;
2608 2634
2609 port = fsf_req->data.close_port.port; 2635 port = (struct zfcp_port *) fsf_req->data;
2610 2636
2611 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2637 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2612 /* don't change port status in our bookkeeping */ 2638 /* don't change port status in our bookkeeping */
@@ -2703,8 +2729,8 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2703 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 2729 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2704 &erp_action->port->status); 2730 &erp_action->port->status);
2705 /* save a pointer to this port */ 2731 /* save a pointer to this port */
2706 erp_action->fsf_req->data.close_physical_port.port = erp_action->port; 2732 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2707 /* port to be closeed */ 2733 /* port to be closed */
2708 erp_action->fsf_req->qtcb->header.port_handle = 2734 erp_action->fsf_req->qtcb->header.port_handle =
2709 erp_action->port->handle; 2735 erp_action->port->handle;
2710 erp_action->fsf_req->erp_action = erp_action; 2736 erp_action->fsf_req->erp_action = erp_action;
@@ -2747,7 +2773,7 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
2747 struct fsf_qtcb_header *header; 2773 struct fsf_qtcb_header *header;
2748 u16 subtable, rule, counter; 2774 u16 subtable, rule, counter;
2749 2775
2750 port = fsf_req->data.close_physical_port.port; 2776 port = (struct zfcp_port *) fsf_req->data;
2751 header = &fsf_req->qtcb->header; 2777 header = &fsf_req->qtcb->header;
2752 2778
2753 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2779 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2908,10 +2934,11 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2908 erp_action->port->handle; 2934 erp_action->port->handle;
2909 erp_action->fsf_req->qtcb->bottom.support.fcp_lun = 2935 erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
2910 erp_action->unit->fcp_lun; 2936 erp_action->unit->fcp_lun;
2937 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2911 erp_action->fsf_req->qtcb->bottom.support.option = 2938 erp_action->fsf_req->qtcb->bottom.support.option =
2912 FSF_OPEN_LUN_SUPPRESS_BOXING; 2939 FSF_OPEN_LUN_SUPPRESS_BOXING;
2913 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); 2940 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
2914 erp_action->fsf_req->data.open_unit.unit = erp_action->unit; 2941 erp_action->fsf_req->data = (unsigned long) erp_action->unit;
2915 erp_action->fsf_req->erp_action = erp_action; 2942 erp_action->fsf_req->erp_action = erp_action;
2916 2943
2917 /* start QDIO request for this FSF request */ 2944 /* start QDIO request for this FSF request */
@@ -2955,9 +2982,9 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
2955 struct fsf_qtcb_bottom_support *bottom; 2982 struct fsf_qtcb_bottom_support *bottom;
2956 struct fsf_queue_designator *queue_designator; 2983 struct fsf_queue_designator *queue_designator;
2957 u16 subtable, rule, counter; 2984 u16 subtable, rule, counter;
2958 u32 allowed, exclusive, readwrite; 2985 int exclusive, readwrite;
2959 2986
2960 unit = fsf_req->data.open_unit.unit; 2987 unit = (struct zfcp_unit *) fsf_req->data;
2961 2988
2962 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2989 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2963 /* don't change unit status in our bookkeeping */ 2990 /* don't change unit status in our bookkeeping */
@@ -2969,10 +2996,6 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
2969 bottom = &fsf_req->qtcb->bottom.support; 2996 bottom = &fsf_req->qtcb->bottom.support;
2970 queue_designator = &header->fsf_status_qual.fsf_queue_designator; 2997 queue_designator = &header->fsf_status_qual.fsf_queue_designator;
2971 2998
2972 allowed = bottom->lun_access_info & FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED;
2973 exclusive = bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE;
2974 readwrite = bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;
2975
2976 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 2999 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
2977 ZFCP_STATUS_UNIT_SHARED | 3000 ZFCP_STATUS_UNIT_SHARED |
2978 ZFCP_STATUS_UNIT_READONLY, 3001 ZFCP_STATUS_UNIT_READONLY,
@@ -3146,10 +3169,15 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
3146 unit->handle); 3169 unit->handle);
3147 /* mark unit as open */ 3170 /* mark unit as open */
3148 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 3171 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
3149 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 3172
3150 ZFCP_STATUS_COMMON_ACCESS_BOXED, 3173 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
3151 &unit->status); 3174 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
3152 if (adapter->supported_features & FSF_FEATURE_LUN_SHARING){ 3175 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
3176 exclusive = (bottom->lun_access_info &
3177 FSF_UNIT_ACCESS_EXCLUSIVE);
3178 readwrite = (bottom->lun_access_info &
3179 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
3180
3153 if (!exclusive) 3181 if (!exclusive)
3154 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED, 3182 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
3155 &unit->status); 3183 &unit->status);
@@ -3242,7 +3270,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3242 erp_action->port->handle; 3270 erp_action->port->handle;
3243 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle; 3271 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3244 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); 3272 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3245 erp_action->fsf_req->data.close_unit.unit = erp_action->unit; 3273 erp_action->fsf_req->data = (unsigned long) erp_action->unit;
3246 erp_action->fsf_req->erp_action = erp_action; 3274 erp_action->fsf_req->erp_action = erp_action;
3247 3275
3248 /* start QDIO request for this FSF request */ 3276 /* start QDIO request for this FSF request */
@@ -3281,7 +3309,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3281 int retval = -EINVAL; 3309 int retval = -EINVAL;
3282 struct zfcp_unit *unit; 3310 struct zfcp_unit *unit;
3283 3311
3284 unit = fsf_req->data.close_unit.unit; /* restore unit */ 3312 unit = (struct zfcp_unit *) fsf_req->data;
3285 3313
3286 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 3314 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
3287 /* don't change unit status in our bookkeeping */ 3315 /* don't change unit status in our bookkeeping */
@@ -3305,9 +3333,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3305 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3333 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3306 "fsf_s_phand_nv"); 3334 "fsf_s_phand_nv");
3307 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 3335 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3308 zfcp_cmd_dbf_event_fsf("porthinv", fsf_req,
3309 &fsf_req->qtcb->header.fsf_status_qual,
3310 sizeof (union fsf_status_qual));
3311 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3336 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3312 break; 3337 break;
3313 3338
@@ -3326,9 +3351,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3326 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3351 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3327 "fsf_s_lhand_nv"); 3352 "fsf_s_lhand_nv");
3328 zfcp_erp_port_reopen(unit->port, 0); 3353 zfcp_erp_port_reopen(unit->port, 0);
3329 zfcp_cmd_dbf_event_fsf("lunhinv", fsf_req,
3330 &fsf_req->qtcb->header.fsf_status_qual,
3331 sizeof (union fsf_status_qual));
3332 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3354 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3333 break; 3355 break;
3334 3356
@@ -3436,21 +3458,14 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3436 goto failed_req_create; 3458 goto failed_req_create;
3437 } 3459 }
3438 3460
3439 /* 3461 zfcp_unit_get(unit);
3440 * associate FSF request with SCSI request 3462 fsf_req->unit = unit;
3441 * (need this for look up on abort)
3442 */
3443 fsf_req->data.send_fcp_command_task.fsf_req = fsf_req;
3444 scsi_cmnd->host_scribble = (char *) &(fsf_req->data);
3445 3463
3446 /* 3464 /* associate FSF request with SCSI request (for look up on abort) */
3447 * associate SCSI command with FSF request 3465 scsi_cmnd->host_scribble = (char *) fsf_req;
3448 * (need this for look up on normal command completion) 3466
3449 */ 3467 /* associate SCSI command with FSF request */
3450 fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd; 3468 fsf_req->data = (unsigned long) scsi_cmnd;
3451 fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
3452 fsf_req->data.send_fcp_command_task.unit = unit;
3453 ZFCP_LOG_DEBUG("unit=%p, fcp_lun=0x%016Lx\n", unit, unit->fcp_lun);
3454 3469
3455 /* set handles of unit and its parent port in QTCB */ 3470 /* set handles of unit and its parent port in QTCB */
3456 fsf_req->qtcb->header.lun_handle = unit->handle; 3471 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3584,6 +3599,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3584 send_failed: 3599 send_failed:
3585 no_fit: 3600 no_fit:
3586 failed_scsi_cmnd: 3601 failed_scsi_cmnd:
3602 zfcp_unit_put(unit);
3587 zfcp_fsf_req_free(fsf_req); 3603 zfcp_fsf_req_free(fsf_req);
3588 fsf_req = NULL; 3604 fsf_req = NULL;
3589 scsi_cmnd->host_scribble = NULL; 3605 scsi_cmnd->host_scribble = NULL;
@@ -3640,7 +3656,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3640 * hold a pointer to the unit being target of this 3656 * hold a pointer to the unit being target of this
3641 * task management request 3657 * task management request
3642 */ 3658 */
3643 fsf_req->data.send_fcp_command_task_management.unit = unit; 3659 fsf_req->data = (unsigned long) unit;
3644 3660
3645 /* set FSF related fields in QTCB */ 3661 /* set FSF related fields in QTCB */
3646 fsf_req->qtcb->header.lun_handle = unit->handle; 3662 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3706,9 +3722,9 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3706 header = &fsf_req->qtcb->header; 3722 header = &fsf_req->qtcb->header;
3707 3723
3708 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) 3724 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
3709 unit = fsf_req->data.send_fcp_command_task_management.unit; 3725 unit = (struct zfcp_unit *) fsf_req->data;
3710 else 3726 else
3711 unit = fsf_req->data.send_fcp_command_task.unit; 3727 unit = fsf_req->unit;
3712 3728
3713 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 3729 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
3714 /* go directly to calls of special handlers */ 3730 /* go directly to calls of special handlers */
@@ -3765,10 +3781,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3765 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3781 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3766 "fsf_s_hand_mis"); 3782 "fsf_s_hand_mis");
3767 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 3783 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3768 zfcp_cmd_dbf_event_fsf("handmism",
3769 fsf_req,
3770 &header->fsf_status_qual,
3771 sizeof (union fsf_status_qual));
3772 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3784 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3773 break; 3785 break;
3774 3786
@@ -3789,10 +3801,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3789 debug_text_exception(fsf_req->adapter->erp_dbf, 0, 3801 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
3790 "fsf_s_class_nsup"); 3802 "fsf_s_class_nsup");
3791 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3803 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3792 zfcp_cmd_dbf_event_fsf("unsclass",
3793 fsf_req,
3794 &header->fsf_status_qual,
3795 sizeof (union fsf_status_qual));
3796 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3804 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3797 break; 3805 break;
3798 3806
@@ -3811,10 +3819,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3811 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3819 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3812 "fsf_s_fcp_lun_nv"); 3820 "fsf_s_fcp_lun_nv");
3813 zfcp_erp_port_reopen(unit->port, 0); 3821 zfcp_erp_port_reopen(unit->port, 0);
3814 zfcp_cmd_dbf_event_fsf("fluninv",
3815 fsf_req,
3816 &header->fsf_status_qual,
3817 sizeof (union fsf_status_qual));
3818 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3822 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3819 break; 3823 break;
3820 3824
@@ -3853,10 +3857,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3853 debug_text_event(fsf_req->adapter->erp_dbf, 0, 3857 debug_text_event(fsf_req->adapter->erp_dbf, 0,
3854 "fsf_s_dir_ind_nv"); 3858 "fsf_s_dir_ind_nv");
3855 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3859 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3856 zfcp_cmd_dbf_event_fsf("dirinv",
3857 fsf_req,
3858 &header->fsf_status_qual,
3859 sizeof (union fsf_status_qual));
3860 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3860 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3861 break; 3861 break;
3862 3862
@@ -3872,10 +3872,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3872 debug_text_event(fsf_req->adapter->erp_dbf, 0, 3872 debug_text_event(fsf_req->adapter->erp_dbf, 0,
3873 "fsf_s_cmd_len_nv"); 3873 "fsf_s_cmd_len_nv");
3874 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3874 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3875 zfcp_cmd_dbf_event_fsf("cleninv",
3876 fsf_req,
3877 &header->fsf_status_qual,
3878 sizeof (union fsf_status_qual));
3879 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3875 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3880 break; 3876 break;
3881 3877
@@ -3947,6 +3943,8 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3947 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req); 3943 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
3948 } else { 3944 } else {
3949 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req); 3945 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
3946 fsf_req->unit = NULL;
3947 zfcp_unit_put(unit);
3950 } 3948 }
3951 return retval; 3949 return retval;
3952} 3950}
@@ -3970,10 +3968,10 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
3970 u32 sns_len; 3968 u32 sns_len;
3971 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 3969 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
3972 unsigned long flags; 3970 unsigned long flags;
3973 struct zfcp_unit *unit = fsf_req->data.send_fcp_command_task.unit; 3971 struct zfcp_unit *unit = fsf_req->unit;
3974 3972
3975 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags); 3973 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
3976 scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd; 3974 scpnt = (struct scsi_cmnd *) fsf_req->data;
3977 if (unlikely(!scpnt)) { 3975 if (unlikely(!scpnt)) {
3978 ZFCP_LOG_DEBUG 3976 ZFCP_LOG_DEBUG
3979 ("Command with fsf_req %p is not associated to " 3977 ("Command with fsf_req %p is not associated to "
@@ -4043,7 +4041,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4043 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4041 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4044 (char *) &fsf_req->qtcb-> 4042 (char *) &fsf_req->qtcb->
4045 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4043 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4046 zfcp_cmd_dbf_event_fsf("clenmis", fsf_req, NULL, 0);
4047 set_host_byte(&scpnt->result, DID_ERROR); 4044 set_host_byte(&scpnt->result, DID_ERROR);
4048 goto skip_fsfstatus; 4045 goto skip_fsfstatus;
4049 case RSP_CODE_FIELD_INVALID: 4046 case RSP_CODE_FIELD_INVALID:
@@ -4062,7 +4059,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4062 (char *) &fsf_req->qtcb-> 4059 (char *) &fsf_req->qtcb->
4063 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4060 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4064 set_host_byte(&scpnt->result, DID_ERROR); 4061 set_host_byte(&scpnt->result, DID_ERROR);
4065 zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
4066 goto skip_fsfstatus; 4062 goto skip_fsfstatus;
4067 case RSP_CODE_RO_MISMATCH: 4063 case RSP_CODE_RO_MISMATCH:
4068 /* hardware bug */ 4064 /* hardware bug */
@@ -4079,7 +4075,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4079 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4075 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4080 (char *) &fsf_req->qtcb-> 4076 (char *) &fsf_req->qtcb->
4081 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4077 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4082 zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
4083 set_host_byte(&scpnt->result, DID_ERROR); 4078 set_host_byte(&scpnt->result, DID_ERROR);
4084 goto skip_fsfstatus; 4079 goto skip_fsfstatus;
4085 default: 4080 default:
@@ -4096,7 +4091,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4096 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4091 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4097 (char *) &fsf_req->qtcb-> 4092 (char *) &fsf_req->qtcb->
4098 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4093 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4099 zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
4100 set_host_byte(&scpnt->result, DID_ERROR); 4094 set_host_byte(&scpnt->result, DID_ERROR);
4101 goto skip_fsfstatus; 4095 goto skip_fsfstatus;
4102 } 4096 }
@@ -4158,19 +4152,17 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4158 skip_fsfstatus: 4152 skip_fsfstatus:
4159 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4153 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4160 4154
4161 zfcp_cmd_dbf_event_scsi("response", scpnt); 4155 if (scpnt->result != 0)
4156 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt);
4157 else if (scpnt->retries > 0)
4158 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt);
4159 else
4160 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt);
4162 4161
4163 /* cleanup pointer (need this especially for abort) */ 4162 /* cleanup pointer (need this especially for abort) */
4164 scpnt->host_scribble = NULL; 4163 scpnt->host_scribble = NULL;
4165 4164
4166 /*
4167 * NOTE:
4168 * according to the outcome of a discussion on linux-scsi we
4169 * don't need to grab the io_request_lock here since we use
4170 * the new eh
4171 */
4172 /* always call back */ 4165 /* always call back */
4173
4174 (scpnt->scsi_done) (scpnt); 4166 (scpnt->scsi_done) (scpnt);
4175 4167
4176 /* 4168 /*
@@ -4198,8 +4190,7 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4198 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 4190 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
4199 &(fsf_req->qtcb->bottom.io.fcp_rsp); 4191 &(fsf_req->qtcb->bottom.io.fcp_rsp);
4200 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 4192 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4201 struct zfcp_unit *unit = 4193 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
4202 fsf_req->data.send_fcp_command_task_management.unit;
4203 4194
4204 del_timer(&fsf_req->adapter->scsi_er_timer); 4195 del_timer(&fsf_req->adapter->scsi_er_timer);
4205 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 4196 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -4276,7 +4267,7 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4276 int direction; 4267 int direction;
4277 int retval = 0; 4268 int retval = 0;
4278 4269
4279 if (!(adapter->supported_features & FSF_FEATURE_CFDC)) { 4270 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
4280 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n", 4271 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
4281 zfcp_get_busid_by_adapter(adapter)); 4272 zfcp_get_busid_by_adapter(adapter));
4282 retval = -EOPNOTSUPP; 4273 retval = -EOPNOTSUPP;
@@ -4549,52 +4540,6 @@ skip_fsfstatus:
4549 return retval; 4540 return retval;
4550} 4541}
4551 4542
4552
4553/*
4554 * function: zfcp_fsf_req_wait_and_cleanup
4555 *
4556 * purpose:
4557 *
4558 * FIXME(design): signal seems to be <0 !!!
4559 * returns: 0 - request completed (*status is valid), cleanup succ.
4560 * <0 - request completed (*status is valid), cleanup failed
4561 * >0 - signal which interrupted waiting (*status invalid),
4562 * request not completed, no cleanup
4563 *
4564 * *status is a copy of status of completed fsf_req
4565 */
4566int
4567zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
4568 int interruptible, u32 * status)
4569{
4570 int retval = 0;
4571 int signal = 0;
4572
4573 if (interruptible) {
4574 __wait_event_interruptible(fsf_req->completion_wq,
4575 fsf_req->status &
4576 ZFCP_STATUS_FSFREQ_COMPLETED,
4577 signal);
4578 if (signal) {
4579 ZFCP_LOG_DEBUG("Caught signal %i while waiting for the "
4580 "completion of the request at %p\n",
4581 signal, fsf_req);
4582 retval = signal;
4583 goto out;
4584 }
4585 } else {
4586 __wait_event(fsf_req->completion_wq,
4587 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4588 }
4589
4590 *status = fsf_req->status;
4591
4592 /* cleanup request */
4593 zfcp_fsf_req_free(fsf_req);
4594 out:
4595 return retval;
4596}
4597
4598static inline int 4543static inline int
4599zfcp_fsf_req_sbal_check(unsigned long *flags, 4544zfcp_fsf_req_sbal_check(unsigned long *flags,
4600 struct zfcp_qdio_queue *queue, int needed) 4545 struct zfcp_qdio_queue *queue, int needed)
@@ -4610,15 +4555,16 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4610 * set qtcb pointer in fsf_req and initialize QTCB 4555 * set qtcb pointer in fsf_req and initialize QTCB
4611 */ 4556 */
4612static inline void 4557static inline void
4613zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req, u32 fsf_cmd) 4558zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4614{ 4559{
4615 if (likely(fsf_req->qtcb != NULL)) { 4560 if (likely(fsf_req->qtcb != NULL)) {
4561 fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no;
4616 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; 4562 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
4617 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; 4563 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4618 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd]; 4564 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command];
4619 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; 4565 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4620 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; 4566 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
4621 fsf_req->qtcb->header.fsf_command = fsf_cmd; 4567 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4622 } 4568 }
4623} 4569}
4624 4570
@@ -4686,7 +4632,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4686 goto failed_fsf_req; 4632 goto failed_fsf_req;
4687 } 4633 }
4688 4634
4689 zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd); 4635 fsf_req->adapter = adapter;
4636 fsf_req->fsf_command = fsf_cmd;
4637
4638 zfcp_fsf_req_qtcb_init(fsf_req);
4690 4639
4691 /* initialize waitqueue which may be used to wait on 4640 /* initialize waitqueue which may be used to wait on
4692 this request completion */ 4641 this request completion */
@@ -4708,8 +4657,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4708 goto failed_sbals; 4657 goto failed_sbals;
4709 } 4658 }
4710 4659
4711 fsf_req->adapter = adapter; /* pointer to "parent" adapter */ 4660 if (fsf_req->qtcb) {
4712 fsf_req->fsf_command = fsf_cmd; 4661 fsf_req->seq_no = adapter->fsf_req_seq_no;
4662 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
4663 }
4713 fsf_req->sbal_number = 1; 4664 fsf_req->sbal_number = 1;
4714 fsf_req->sbal_first = req_queue->free_index; 4665 fsf_req->sbal_first = req_queue->free_index;
4715 fsf_req->sbal_curr = req_queue->free_index; 4666 fsf_req->sbal_curr = req_queue->free_index;
@@ -4760,9 +4711,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4760 struct zfcp_adapter *adapter; 4711 struct zfcp_adapter *adapter;
4761 struct zfcp_qdio_queue *req_queue; 4712 struct zfcp_qdio_queue *req_queue;
4762 volatile struct qdio_buffer_element *sbale; 4713 volatile struct qdio_buffer_element *sbale;
4714 int inc_seq_no;
4763 int new_distance_from_int; 4715 int new_distance_from_int;
4764 unsigned long flags; 4716 unsigned long flags;
4765 int inc_seq_no = 1;
4766 int retval = 0; 4717 int retval = 0;
4767 4718
4768 adapter = fsf_req->adapter; 4719 adapter = fsf_req->adapter;
@@ -4776,23 +4727,13 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4776 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 4727 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4777 sbale[1].length); 4728 sbale[1].length);
4778 4729
4779 /* set sequence counter in QTCB */
4780 if (likely(fsf_req->qtcb)) {
4781 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
4782 fsf_req->seq_no = adapter->fsf_req_seq_no;
4783 ZFCP_LOG_TRACE("FSF request %p of adapter %s gets "
4784 "FSF sequence counter value of %i\n",
4785 fsf_req,
4786 zfcp_get_busid_by_adapter(adapter),
4787 fsf_req->qtcb->prefix.req_seq_no);
4788 } else
4789 inc_seq_no = 0;
4790
4791 /* put allocated FSF request at list tail */ 4730 /* put allocated FSF request at list tail */
4792 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4731 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
4793 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 4732 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
4794 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4733 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
4795 4734
4735 inc_seq_no = (fsf_req->qtcb != NULL);
4736
4796 /* figure out expiration time of timeout and start timeout */ 4737 /* figure out expiration time of timeout and start timeout */
4797 if (unlikely(timer)) { 4738 if (unlikely(timer)) {
4798 timer->expires += jiffies; 4739 timer->expires += jiffies;
@@ -4822,6 +4763,8 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4822 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */ 4763 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
4823 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req); 4764 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
4824 4765
4766 fsf_req->issued = get_clock();
4767
4825 retval = do_QDIO(adapter->ccw_device, 4768 retval = do_QDIO(adapter->ccw_device,
4826 QDIO_FLAG_SYNC_OUTPUT, 4769 QDIO_FLAG_SYNC_OUTPUT,
4827 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 4770 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
@@ -4860,15 +4803,11 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4860 * routines resulting in missing sequence counter values 4803 * routines resulting in missing sequence counter values
4861 * otherwise, 4804 * otherwise,
4862 */ 4805 */
4806
4863 /* Don't increase for unsolicited status */ 4807 /* Don't increase for unsolicited status */
4864 if (likely(inc_seq_no)) { 4808 if (inc_seq_no)
4865 adapter->fsf_req_seq_no++; 4809 adapter->fsf_req_seq_no++;
4866 ZFCP_LOG_TRACE 4810
4867 ("FSF sequence counter value of adapter %s "
4868 "increased to %i\n",
4869 zfcp_get_busid_by_adapter(adapter),
4870 adapter->fsf_req_seq_no);
4871 }
4872 /* count FSF requests pending */ 4811 /* count FSF requests pending */
4873 atomic_inc(&adapter->fsf_reqs_active); 4812 atomic_inc(&adapter->fsf_reqs_active);
4874 } 4813 }
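
The preceding hunks move the FSF sequence-number bookkeeping out of zfcp_fsf_req_send(): the counter value is copied into the request and its QTCB while the request queue is still held in zfcp_fsf_req_create(), and zfcp_fsf_req_send() only increments the adapter counter after a successful do_QDIO, skipping the increment for QTCB-less unsolicited status requests. A small self-contained model of that policy, with invented types:

	#include <stdbool.h>
	#include <stdio.h>

	struct example_adapter { unsigned int seq_no; };
	struct example_req     { unsigned int seq_no; bool has_qtcb; };

	/* create: stamp the request with the current counter (done under the queue lock) */
	static void example_req_create(struct example_adapter *a, struct example_req *r,
				       bool has_qtcb)
	{
		r->has_qtcb = has_qtcb;
		if (has_qtcb)
			r->seq_no = a->seq_no;
	}

	/* send: bump the counter only after successful submission of a QTCB request */
	static void example_req_send(struct example_adapter *a, struct example_req *r,
				     bool qdio_ok)
	{
		if (qdio_ok && r->has_qtcb)
			a->seq_no++;
	}

	int main(void)
	{
		struct example_adapter a = { .seq_no = 1 };
		struct example_req solicited, unsolicited;

		example_req_create(&a, &solicited, true);
		example_req_send(&a, &solicited, true);		/* counter advances to 2 */

		example_req_create(&a, &unsolicited, false);
		example_req_send(&a, &unsolicited, true);	/* counter stays at 2 */

		printf("adapter seq_no now %u\n", a.seq_no);
		return 0;
	}
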
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 07140dfda2a7..48719f055952 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -116,6 +116,7 @@
116#define FSF_INVALID_COMMAND_OPTION 0x000000E5 116#define FSF_INVALID_COMMAND_OPTION 0x000000E5
117/* #define FSF_ERROR 0x000000FF */ 117/* #define FSF_ERROR 0x000000FF */
118 118
119#define FSF_PROT_STATUS_QUAL_SIZE 16
119#define FSF_STATUS_QUALIFIER_SIZE 16 120#define FSF_STATUS_QUALIFIER_SIZE 16
120 121
121/* FSF status qualifier, recommendations */ 122/* FSF status qualifier, recommendations */
@@ -139,9 +140,18 @@
139#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004 140#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
140 141
141/* FSF status qualifier (most significant 4 bytes), local link down */ 142/* FSF status qualifier (most significant 4 bytes), local link down */
142#define FSF_PSQ_LINK_NOLIGHT 0x00000004 143#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
143#define FSF_PSQ_LINK_WRAPPLUG 0x00000008 144#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
144#define FSF_PSQ_LINK_NOFCP 0x00000010 145#define FSF_PSQ_LINK_NO_FCP 0x00000010
146#define FSF_PSQ_LINK_FIRMWARE_UPDATE 0x00000020
147#define FSF_PSQ_LINK_INVALID_WWPN 0x00000100
148#define FSF_PSQ_LINK_NO_NPIV_SUPPORT 0x00000200
149#define FSF_PSQ_LINK_NO_FCP_RESOURCES 0x00000400
150#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES 0x00000800
151#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE 0x00001000
152#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED 0x00002000
153#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
154#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
145 155
146/* payload size in status read buffer */ 156/* payload size in status read buffer */
147#define FSF_STATUS_READ_PAYLOAD_SIZE 4032 157#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
@@ -154,15 +164,21 @@
154#define FSF_STATUS_READ_INCOMING_ELS 0x00000002 164#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
155#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003 165#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
156#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004 166#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
157#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */ 167#define FSF_STATUS_READ_LINK_DOWN 0x00000005
158#define FSF_STATUS_READ_LINK_UP 0x00000006 168#define FSF_STATUS_READ_LINK_UP 0x00000006
159#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A 169#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
160#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B 170#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
171#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
161 172
162/* status subtypes in status read buffer */ 173/* status subtypes in status read buffer */
163#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001 174#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
164#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002 175#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
165 176
177/* status subtypes for link down */
178#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
179#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
180#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE 0x00000002
181
166/* status subtypes for CFDC */ 182/* status subtypes for CFDC */
167#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002 183#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
168#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F 184#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
@@ -193,11 +209,15 @@
193#define FSF_QTCB_LOG_SIZE 1024 209#define FSF_QTCB_LOG_SIZE 1024
194 210
195/* channel features */ 211/* channel features */
196#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
197#define FSF_FEATURE_CFDC 0x00000002 212#define FSF_FEATURE_CFDC 0x00000002
198#define FSF_FEATURE_LUN_SHARING 0x00000004 213#define FSF_FEATURE_LUN_SHARING 0x00000004
199#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 214#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
200#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 215#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
216#define FSF_FEATURE_UPDATE_ALERT 0x00000100
217
218/* host connection features */
219#define FSF_FEATURE_NPIV_MODE 0x00000001
220#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
201 221
202/* option */ 222/* option */
203#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001 223#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
@@ -305,15 +325,23 @@ struct fsf_qual_sequence_error {
305 u32 res1[3]; 325 u32 res1[3];
306} __attribute__ ((packed)); 326} __attribute__ ((packed));
307 327
308struct fsf_qual_locallink_error { 328struct fsf_link_down_info {
309 u32 code; 329 u32 error_code;
310 u32 res1[3]; 330 u32 res1;
331 u8 res2[2];
332 u8 primary_status;
333 u8 ioerr_code;
334 u8 action_code;
335 u8 reason_code;
336 u8 explanation_code;
337 u8 vendor_specific_code;
311} __attribute__ ((packed)); 338} __attribute__ ((packed));
312 339
313union fsf_prot_status_qual { 340union fsf_prot_status_qual {
341 u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
314 struct fsf_qual_version_error version_error; 342 struct fsf_qual_version_error version_error;
315 struct fsf_qual_sequence_error sequence_error; 343 struct fsf_qual_sequence_error sequence_error;
316 struct fsf_qual_locallink_error locallink_error; 344 struct fsf_link_down_info link_down_info;
317} __attribute__ ((packed)); 345} __attribute__ ((packed));
318 346
319struct fsf_qtcb_prefix { 347struct fsf_qtcb_prefix {
@@ -331,7 +359,9 @@ union fsf_status_qual {
331 u8 byte[FSF_STATUS_QUALIFIER_SIZE]; 359 u8 byte[FSF_STATUS_QUALIFIER_SIZE];
332 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)]; 360 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
333 u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)]; 361 u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
362 u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
334 struct fsf_queue_designator fsf_queue_designator; 363 struct fsf_queue_designator fsf_queue_designator;
364 struct fsf_link_down_info link_down_info;
335} __attribute__ ((packed)); 365} __attribute__ ((packed));
336 366
337struct fsf_qtcb_header { 367struct fsf_qtcb_header {
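
Adding struct fsf_link_down_info to both status-qualifier unions is what allows the zfcp_fsf_link_down_info_eval() calls introduced in zfcp_fsf.c above to inspect a structured reason for a link outage instead of a bare qualifier word. A sketch of how such an evaluation might map error_code to a message is below; this is not the actual helper, whose body lies outside this diff:

	/* Illustrative only: the real zfcp_fsf_link_down_info_eval() is not shown here. */
	static const char *example_link_down_reason(struct fsf_link_down_info *ldi)
	{
		switch (ldi->error_code) {
		case FSF_PSQ_LINK_NO_LIGHT:
			return "no light detected";
		case FSF_PSQ_LINK_WRAP_PLUG:
			return "wrap plug inserted";
		case FSF_PSQ_LINK_NO_FCP:
			return "peer does not support FCP";
		case FSF_PSQ_LINK_FIRMWARE_UPDATE:
			return "firmware update in progress";
		case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
			return "NPIV not supported by fabric";
		default:
			return "unknown reason";
		}
	}
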
@@ -406,8 +436,8 @@ struct fsf_qtcb_bottom_config {
406 u32 low_qtcb_version; 436 u32 low_qtcb_version;
407 u32 max_qtcb_size; 437 u32 max_qtcb_size;
408 u32 max_data_transfer_size; 438 u32 max_data_transfer_size;
409 u32 supported_features; 439 u32 adapter_features;
410 u8 res1[4]; 440 u32 connection_features;
411 u32 fc_topology; 441 u32 fc_topology;
412 u32 fc_link_speed; 442 u32 fc_link_speed;
413 u32 adapter_type; 443 u32 adapter_type;
@@ -425,7 +455,7 @@ struct fsf_qtcb_bottom_config {
425} __attribute__ ((packed)); 455} __attribute__ ((packed));
426 456
427struct fsf_qtcb_bottom_port { 457struct fsf_qtcb_bottom_port {
428 u8 res1[8]; 458 u64 wwpn;
429 u32 fc_port_id; 459 u32 fc_port_id;
430 u32 port_type; 460 u32 port_type;
431 u32 port_state; 461 u32 port_state;
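
Note (not part of the patch): the new fsf_link_down_info layout replaces the old four-word locallink_error and is meant to overlay the 16-byte status qualifier exactly, which is why the doubleword[] views and the link_down_info member can join the unions without growing them. A minimal standalone check of that layout, assuming FSF_STATUS_QUALIFIER_SIZE is 16 as elsewhere in zfcp_fsf.h and omitting the other union members for brevity:

#include <stdint.h>
#include <stdio.h>

#define FSF_STATUS_QUALIFIER_SIZE 16	/* assumption: matches zfcp_fsf.h */

struct fsf_link_down_info {
	uint32_t error_code;
	uint32_t res1;
	uint8_t  res2[2];
	uint8_t  primary_status;
	uint8_t  ioerr_code;
	uint8_t  action_code;
	uint8_t  reason_code;
	uint8_t  explanation_code;
	uint8_t  vendor_specific_code;
} __attribute__ ((packed));

union fsf_status_qual {
	uint8_t  byte[FSF_STATUS_QUALIFIER_SIZE];
	uint16_t halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof(uint16_t)];
	uint32_t word[FSF_STATUS_QUALIFIER_SIZE / sizeof(uint32_t)];
	uint64_t doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(uint64_t)];
	struct fsf_link_down_info link_down_info;
} __attribute__ ((packed));

int main(void)
{
	/* both views must cover the same 16 bytes for the overlay to work */
	printf("link_down_info: %zu bytes, union: %zu bytes\n",
	       sizeof(struct fsf_link_down_info), sizeof(union fsf_status_qual));
	return 0;
}

Both sizes print 16, so reading doubleword[0]/doubleword[1] and reading the named link-down fields access the same bytes.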
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 24e16ec331d9..d719f66a29a4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -54,8 +54,7 @@ static inline int zfcp_qdio_sbals_from_buffer
54static qdio_handler_t zfcp_qdio_request_handler; 54static qdio_handler_t zfcp_qdio_request_handler;
55static qdio_handler_t zfcp_qdio_response_handler; 55static qdio_handler_t zfcp_qdio_response_handler;
56static int zfcp_qdio_handler_error_check(struct zfcp_adapter *, 56static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
57 unsigned int, 57 unsigned int, unsigned int, unsigned int, int, int);
58 unsigned int, unsigned int);
59 58
60#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO 59#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
61 60
@@ -214,22 +213,12 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
214 * 213 *
215 */ 214 */
216static inline int 215static inline int
217zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, 216zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
218 unsigned int status, 217 unsigned int qdio_error, unsigned int siga_error,
219 unsigned int qdio_error, unsigned int siga_error) 218 int first_element, int elements_processed)
220{ 219{
221 int retval = 0; 220 int retval = 0;
222 221
223 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
224 if (status & QDIO_STATUS_INBOUND_INT) {
225 ZFCP_LOG_TRACE("status is"
226 " QDIO_STATUS_INBOUND_INT \n");
227 }
228 if (status & QDIO_STATUS_OUTBOUND_INT) {
229 ZFCP_LOG_TRACE("status is"
230 " QDIO_STATUS_OUTBOUND_INT \n");
231 }
232 }
233 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 222 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
234 retval = -EIO; 223 retval = -EIO;
235 224
@@ -237,9 +226,10 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
237 "qdio_error=0x%x, siga_error=0x%x)\n", 226 "qdio_error=0x%x, siga_error=0x%x)\n",
238 status, qdio_error, siga_error); 227 status, qdio_error, siga_error);
239 228
240 /* Restarting IO on the failed adapter from scratch */ 229 zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
241 debug_text_event(adapter->erp_dbf, 1, "qdio_err"); 230 first_element, elements_processed);
242 /* 231 /*
232 * Restarting IO on the failed adapter from scratch.
243 * Since we have been using this adapter, it is save to assume 233 * Since we have been using this adapter, it is save to assume
244 * that it is not failed but recoverable. The card seems to 234 * that it is not failed but recoverable. The card seems to
245 * report link-up events by self-initiated queue shutdown. 235 * report link-up events by self-initiated queue shutdown.
@@ -282,7 +272,8 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
282 first_element, elements_processed); 272 first_element, elements_processed);
283 273
284 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, 274 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
285 siga_error))) 275 siga_error, first_element,
276 elements_processed)))
286 goto out; 277 goto out;
287 /* 278 /*
288 * we stored address of struct zfcp_adapter data structure 279 * we stored address of struct zfcp_adapter data structure
@@ -334,7 +325,8 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
334 queue = &adapter->response_queue; 325 queue = &adapter->response_queue;
335 326
336 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, 327 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
337 siga_error))) 328 siga_error, first_element,
329 elements_processed)))
338 goto out; 330 goto out;
339 331
340 /* 332 /*
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 31a76065cf28..3dcd1bfba3b4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -44,7 +44,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
44static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); 44static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
45static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); 45static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
46static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); 46static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
47static int zfcp_task_management_function(struct zfcp_unit *, u8); 47static int zfcp_task_management_function(struct zfcp_unit *, u8,
48 struct scsi_cmnd *);
48 49
49static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t, 50static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t,
50 scsi_lun_t); 51 scsi_lun_t);
@@ -242,7 +243,10 @@ static void
242zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) 243zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
243{ 244{
244 set_host_byte(&scpnt->result, result); 245 set_host_byte(&scpnt->result, result);
245 zfcp_cmd_dbf_event_scsi("failing", scpnt); 246 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
247 zfcp_scsi_dbf_event_result("fail", 4,
248 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
249 scpnt);
246 /* return directly */ 250 /* return directly */
247 scpnt->scsi_done(scpnt); 251 scpnt->scsi_done(scpnt);
248} 252}
@@ -414,67 +418,38 @@ zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
414 return (struct zfcp_port *) NULL; 418 return (struct zfcp_port *) NULL;
415} 419}
416 420
417/* 421/**
418 * function: zfcp_scsi_eh_abort_handler 422 * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
419 * 423 * @scpnt: pointer to scsi_cmnd to be aborted
420 * purpose: tries to abort the specified (timed out) SCSI command 424 * Return: SUCCESS - command has been aborted and cleaned up in internal
421 * 425 * bookkeeping, SCSI stack won't be called for aborted command
422 * note: We do not need to care for a SCSI command which completes 426 * FAILED - otherwise
423 * normally but late during this abort routine runs.
424 * We are allowed to return late commands to the SCSI stack.
425 * It tracks the state of commands and will handle late commands.
426 * (Usually, the normal completion of late commands is ignored with
427 * respect to the running abort operation. Grep for 'done_late'
428 * in the SCSI stacks sources.)
429 * 427 *
430 * returns: SUCCESS - command has been aborted and cleaned up in internal 428 * We do not need to care for a SCSI command which completes normally
431 * bookkeeping, 429 * but late during this abort routine runs. We are allowed to return
432 * SCSI stack won't be called for aborted command 430 * late commands to the SCSI stack. It tracks the state of commands and
433 * FAILED - otherwise 431 * will handle late commands. (Usually, the normal completion of late
432 * commands is ignored with respect to the running abort operation.)
434 */ 433 */
435int 434int
436__zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 435zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
437{ 436{
437 struct Scsi_Host *scsi_host;
438 struct zfcp_adapter *adapter;
439 struct zfcp_unit *unit;
438 int retval = SUCCESS; 440 int retval = SUCCESS;
439 struct zfcp_fsf_req *new_fsf_req, *old_fsf_req; 441 struct zfcp_fsf_req *new_fsf_req = NULL;
440 struct zfcp_adapter *adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 442 struct zfcp_fsf_req *old_fsf_req;
441 struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
442 struct zfcp_port *port = unit->port;
443 struct Scsi_Host *scsi_host = scpnt->device->host;
444 union zfcp_req_data *req_data = NULL;
445 unsigned long flags; 443 unsigned long flags;
446 u32 status = 0; 444
447 445 scsi_host = scpnt->device->host;
448 /* the components of a abort_dbf record (fixed size record) */ 446 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
449 u64 dbf_scsi_cmnd = (unsigned long) scpnt; 447 unit = (struct zfcp_unit *) scpnt->device->hostdata;
450 char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
451 wwn_t dbf_wwn = port->wwpn;
452 fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
453 u64 dbf_retries = scpnt->retries;
454 u64 dbf_allowed = scpnt->allowed;
455 u64 dbf_timeout = 0;
456 u64 dbf_fsf_req = 0;
457 u64 dbf_fsf_status = 0;
458 u64 dbf_fsf_qual[2] = { 0, 0 };
459 char dbf_result[ZFCP_ABORT_DBF_LENGTH] = "##undef";
460
461 memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
462 memcpy(dbf_opcode,
463 scpnt->cmnd,
464 min(scpnt->cmd_len, (unsigned char) ZFCP_ABORT_DBF_LENGTH));
465 448
466 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n", 449 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
467 scpnt, zfcp_get_busid_by_adapter(adapter)); 450 scpnt, zfcp_get_busid_by_adapter(adapter));
468 451
469 spin_unlock_irq(scsi_host->host_lock); 452 /* avoid race condition between late normal completion and abort */
470
471 /*
472 * Race condition between normal (late) completion and abort has
473 * to be avoided.
474 * The entirity of all accesses to scsi_req have to be atomic.
475 * scsi_req is usually part of the fsf_req and thus we block the
476 * release of fsf_req as long as we need to access scsi_req.
477 */
478 write_lock_irqsave(&adapter->abort_lock, flags); 453 write_lock_irqsave(&adapter->abort_lock, flags);
479 454
480 /* 455 /*
@@ -484,144 +459,47 @@ __zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
484 * this routine returns. (scpnt is parameter passed to this routine 459 * this routine returns. (scpnt is parameter passed to this routine
485 * and must not disappear during abort even on late completion.) 460 * and must not disappear during abort even on late completion.)
486 */ 461 */
487 req_data = (union zfcp_req_data *) scpnt->host_scribble; 462 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
488 /* DEBUG */
489 ZFCP_LOG_DEBUG("req_data=%p\n", req_data);
490 if (!req_data) {
491 ZFCP_LOG_DEBUG("late command completion overtook abort\n");
492 /*
493 * That's it.
494 * Do not initiate abort but return SUCCESS.
495 */
496 write_unlock_irqrestore(&adapter->abort_lock, flags);
497 retval = SUCCESS;
498 strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
499 goto out;
500 }
501
502 /* Figure out which fsf_req needs to be aborted. */
503 old_fsf_req = req_data->send_fcp_command_task.fsf_req;
504
505 dbf_fsf_req = (unsigned long) old_fsf_req;
506 dbf_timeout =
507 (jiffies - req_data->send_fcp_command_task.start_jiffies) / HZ;
508
509 ZFCP_LOG_DEBUG("old_fsf_req=%p\n", old_fsf_req);
510 if (!old_fsf_req) { 463 if (!old_fsf_req) {
511 write_unlock_irqrestore(&adapter->abort_lock, flags); 464 write_unlock_irqrestore(&adapter->abort_lock, flags);
512 ZFCP_LOG_NORMAL("bug: no old fsf request found\n"); 465 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req);
513 ZFCP_LOG_NORMAL("req_data:\n"); 466 retval = SUCCESS;
514 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
515 (char *) req_data, sizeof (union zfcp_req_data));
516 ZFCP_LOG_NORMAL("scsi_cmnd:\n");
517 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
518 (char *) scpnt, sizeof (struct scsi_cmnd));
519 retval = FAILED;
520 strncpy(dbf_result, "##bug:r", ZFCP_ABORT_DBF_LENGTH);
521 goto out; 467 goto out;
522 } 468 }
523 old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL; 469 old_fsf_req->data = 0;
524 /* mark old request as being aborted */
525 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 470 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
526 /*
527 * We have to collect all information (e.g. unit) needed by
528 * zfcp_fsf_abort_fcp_command before calling that routine
529 * since that routine is not allowed to access
530 * fsf_req which it is going to abort.
531 * This is because of we need to release fsf_req_list_lock
532 * before calling zfcp_fsf_abort_fcp_command.
533 * Since this lock will not be held, fsf_req may complete
534 * late and may be released meanwhile.
535 */
536 ZFCP_LOG_DEBUG("unit 0x%016Lx (%p)\n", unit->fcp_lun, unit);
537 471
538 /* 472 /* don't access old_fsf_req after releasing the abort_lock */
539 * We block (call schedule)
540 * That's why we must release the lock and enable the
541 * interrupts before.
542 * On the other hand we do not need the lock anymore since
543 * all critical accesses to scsi_req are done.
544 */
545 write_unlock_irqrestore(&adapter->abort_lock, flags); 473 write_unlock_irqrestore(&adapter->abort_lock, flags);
546 /* call FSF routine which does the abort */ 474 /* call FSF routine which does the abort */
547 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req, 475 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req,
548 adapter, unit, 0); 476 adapter, unit, 0);
549 ZFCP_LOG_DEBUG("new_fsf_req=%p\n", new_fsf_req);
550 if (!new_fsf_req) { 477 if (!new_fsf_req) {
478 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
551 retval = FAILED; 479 retval = FAILED;
552 ZFCP_LOG_NORMAL("error: initiation of Abort FCP Cmnd "
553 "failed\n");
554 strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
555 goto out; 480 goto out;
556 } 481 }
557 482
558 /* wait for completion of abort */ 483 /* wait for completion of abort */
559 ZFCP_LOG_DEBUG("waiting for cleanup...\n");
560#if 1
561 /*
562 * FIXME:
563 * copying zfcp_fsf_req_wait_and_cleanup code is not really nice
564 */
565 __wait_event(new_fsf_req->completion_wq, 484 __wait_event(new_fsf_req->completion_wq,
566 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 485 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
567 status = new_fsf_req->status; 486
568 dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
569 /*
570 * Ralphs special debug load provides timestamps in the FSF
571 * status qualifier. This might be specified later if being
572 * useful for debugging aborts.
573 */
574 dbf_fsf_qual[0] =
575 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
576 dbf_fsf_qual[1] =
577 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
578 zfcp_fsf_req_free(new_fsf_req);
579#else
580 retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
581 ZFCP_UNINTERRUPTIBLE, &status);
582#endif
583 ZFCP_LOG_DEBUG("Waiting for cleanup complete, status=0x%x\n", status);
584 /* status should be valid since signals were not permitted */ 487 /* status should be valid since signals were not permitted */
585 if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 488 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
489 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req);
586 retval = SUCCESS; 490 retval = SUCCESS;
587 strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH); 491 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
588 } else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 492 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req);
589 retval = SUCCESS; 493 retval = SUCCESS;
590 strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
591 } else { 494 } else {
495 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req);
592 retval = FAILED; 496 retval = FAILED;
593 strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
594 } 497 }
595 498 zfcp_fsf_req_free(new_fsf_req);
596 out: 499 out:
597 debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof (u64));
598 debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
599 debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof (wwn_t));
600 debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof (fcp_lun_t));
601 debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof (u64));
602 debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof (u64));
603 debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof (u64));
604 debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof (u64));
605 debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof (u64));
606 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof (u64));
607 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof (u64));
608 debug_text_event(adapter->abort_dbf, 1, dbf_result);
609
610 spin_lock_irq(scsi_host->host_lock);
611 return retval; 500 return retval;
612} 501}
613 502
614int
615zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
616{
617 int rc;
618 struct Scsi_Host *scsi_host = scpnt->device->host;
619 spin_lock_irq(scsi_host->host_lock);
620 rc = __zfcp_scsi_eh_abort_handler(scpnt);
621 spin_unlock_irq(scsi_host->host_lock);
622 return rc;
623}
624
625/* 503/*
626 * function: zfcp_scsi_eh_device_reset_handler 504 * function: zfcp_scsi_eh_device_reset_handler
627 * 505 *
@@ -651,8 +529,9 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
651 */ 529 */
652 if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET, 530 if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
653 &unit->status)) { 531 &unit->status)) {
654 retval = 532 retval = zfcp_task_management_function(unit,
655 zfcp_task_management_function(unit, FCP_LOGICAL_UNIT_RESET); 533 FCP_LOGICAL_UNIT_RESET,
534 scpnt);
656 if (retval) { 535 if (retval) {
657 ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit); 536 ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
658 if (retval == -ENOTSUPP) 537 if (retval == -ENOTSUPP)
@@ -668,7 +547,7 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
668 goto out; 547 goto out;
669 } 548 }
670 } 549 }
671 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET); 550 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
672 if (retval) { 551 if (retval) {
673 ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit); 552 ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
674 retval = FAILED; 553 retval = FAILED;
@@ -681,12 +560,12 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
681} 560}
682 561
683static int 562static int
684zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags) 563zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
564 struct scsi_cmnd *scpnt)
685{ 565{
686 struct zfcp_adapter *adapter = unit->port->adapter; 566 struct zfcp_adapter *adapter = unit->port->adapter;
687 int retval;
688 int status;
689 struct zfcp_fsf_req *fsf_req; 567 struct zfcp_fsf_req *fsf_req;
568 int retval = 0;
690 569
691 /* issue task management function */ 570 /* issue task management function */
692 fsf_req = zfcp_fsf_send_fcp_command_task_management 571 fsf_req = zfcp_fsf_send_fcp_command_task_management
@@ -696,70 +575,63 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
696 "failed for unit 0x%016Lx on port 0x%016Lx on " 575 "failed for unit 0x%016Lx on port 0x%016Lx on "
697 "adapter %s\n", unit->fcp_lun, unit->port->wwpn, 576 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
698 zfcp_get_busid_by_adapter(adapter)); 577 zfcp_get_busid_by_adapter(adapter));
578 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
699 retval = -ENOMEM; 579 retval = -ENOMEM;
700 goto out; 580 goto out;
701 } 581 }
702 582
703 retval = zfcp_fsf_req_wait_and_cleanup(fsf_req, 583 __wait_event(fsf_req->completion_wq,
704 ZFCP_UNINTERRUPTIBLE, &status); 584 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
585
705 /* 586 /*
706 * check completion status of task management function 587 * check completion status of task management function
707 * (status should always be valid since no signals permitted)
708 */ 588 */
709 if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) 589 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
590 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
710 retval = -EIO; 591 retval = -EIO;
711 else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) 592 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
593 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
712 retval = -ENOTSUPP; 594 retval = -ENOTSUPP;
713 else 595 } else
714 retval = 0; 596 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
597
598 zfcp_fsf_req_free(fsf_req);
715 out: 599 out:
716 return retval; 600 return retval;
717} 601}
718 602
719/* 603/**
720 * function: zfcp_scsi_eh_bus_reset_handler 604 * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter)
721 *
722 * purpose:
723 *
724 * returns:
725 */ 605 */
726int 606int
727zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) 607zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
728{ 608{
729 int retval = 0; 609 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
730 struct zfcp_unit *unit; 610 struct zfcp_adapter *adapter = unit->port->adapter;
731 611
732 unit = (struct zfcp_unit *) scpnt->device->hostdata;
733 ZFCP_LOG_NORMAL("bus reset because of problems with " 612 ZFCP_LOG_NORMAL("bus reset because of problems with "
734 "unit 0x%016Lx\n", unit->fcp_lun); 613 "unit 0x%016Lx\n", unit->fcp_lun);
735 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 614 zfcp_erp_adapter_reopen(adapter, 0);
736 zfcp_erp_wait(unit->port->adapter); 615 zfcp_erp_wait(adapter);
737 retval = SUCCESS;
738 616
739 return retval; 617 return SUCCESS;
740} 618}
741 619
742/* 620/**
743 * function: zfcp_scsi_eh_host_reset_handler 621 * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter)
744 *
745 * purpose:
746 *
747 * returns:
748 */ 622 */
749int 623int
750zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 624zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
751{ 625{
752 int retval = 0; 626 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
753 struct zfcp_unit *unit; 627 struct zfcp_adapter *adapter = unit->port->adapter;
754 628
755 unit = (struct zfcp_unit *) scpnt->device->hostdata;
756 ZFCP_LOG_NORMAL("host reset because of problems with " 629 ZFCP_LOG_NORMAL("host reset because of problems with "
757 "unit 0x%016Lx\n", unit->fcp_lun); 630 "unit 0x%016Lx\n", unit->fcp_lun);
758 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 631 zfcp_erp_adapter_reopen(adapter, 0);
759 zfcp_erp_wait(unit->port->adapter); 632 zfcp_erp_wait(adapter);
760 retval = SUCCESS;
761 633
762 return retval; 634 return SUCCESS;
763} 635}
764 636
765/* 637/*
@@ -826,10 +698,16 @@ void
826zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) 698zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
827{ 699{
828 struct Scsi_Host *shost; 700 struct Scsi_Host *shost;
701 struct zfcp_port *port;
829 702
830 shost = adapter->scsi_host; 703 shost = adapter->scsi_host;
831 if (!shost) 704 if (!shost)
832 return; 705 return;
706 read_lock_irq(&zfcp_data.config_lock);
707 list_for_each_entry(port, &adapter->port_list_head, list)
708 if (port->rport)
709 port->rport = NULL;
710 read_unlock_irq(&zfcp_data.config_lock);
833 fc_remove_host(shost); 711 fc_remove_host(shost);
834 scsi_remove_host(shost); 712 scsi_remove_host(shost);
835 scsi_host_put(shost); 713 scsi_host_put(shost);
@@ -904,18 +782,6 @@ zfcp_get_node_name(struct scsi_target *starget)
904 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 782 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
905} 783}
906 784
907void
908zfcp_set_fc_host_attrs(struct zfcp_adapter *adapter)
909{
910 struct Scsi_Host *shost = adapter->scsi_host;
911
912 fc_host_node_name(shost) = adapter->wwnn;
913 fc_host_port_name(shost) = adapter->wwpn;
914 strncpy(fc_host_serial_number(shost), adapter->serial_number,
915 min(FC_SERIAL_NUMBER_SIZE, 32));
916 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
917}
918
919struct fc_function_template zfcp_transport_functions = { 785struct fc_function_template zfcp_transport_functions = {
920 .get_starget_port_id = zfcp_get_port_id, 786 .get_starget_port_id = zfcp_get_port_id,
921 .get_starget_port_name = zfcp_get_port_name, 787 .get_starget_port_name = zfcp_get_port_name,
@@ -927,7 +793,10 @@ struct fc_function_template zfcp_transport_functions = {
927 .show_host_node_name = 1, 793 .show_host_node_name = 1,
928 .show_host_port_name = 1, 794 .show_host_port_name = 1,
929 .show_host_supported_classes = 1, 795 .show_host_supported_classes = 1,
796 .show_host_maxframe_size = 1,
930 .show_host_serial_number = 1, 797 .show_host_serial_number = 1,
798 .show_host_speed = 1,
799 .show_host_port_id = 1,
931}; 800};
932 801
933/** 802/**
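
Aside (illustrative only, not zfcp code): the slimmed-down abort handler depends on two things, scpnt->host_scribble now carrying the fsf_req pointer directly, and adapter->abort_lock serializing the abort path against a late normal completion that clears that pointer. The race-avoidance pattern, sketched as a self-contained userspace analogue with a pthread mutex standing in for the rwlock:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct request { int aborting; };

static pthread_mutex_t abort_lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *host_scribble;	/* set when the request is issued */

/* late completion path: detach the request under the lock */
static void complete_request(void)
{
	pthread_mutex_lock(&abort_lock);
	host_scribble = NULL;
	pthread_mutex_unlock(&abort_lock);
}

/* abort path: if completion already detached the request, report success */
static int abort_request(void)
{
	struct request *old;

	pthread_mutex_lock(&abort_lock);
	old = host_scribble;
	if (!old) {
		pthread_mutex_unlock(&abort_lock);
		return 0;	/* the "lte1" case: late completion won the race */
	}
	old->aborting = 1;	/* mark before dropping the lock */
	pthread_mutex_unlock(&abort_lock);
	/* the real abort request is issued only after the lock is released */
	return 1;
}

int main(void)
{
	struct request req = { 0 };

	host_scribble = &req;
	complete_request();
	printf("abort after completion -> %d (expect 0)\n", abort_request());
	return 0;
}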
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index e7345a74800a..0cd435280e7d 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -62,21 +62,18 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct devi
62static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL); 62static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
63 63
64ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); 64ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
65ZFCP_DEFINE_ADAPTER_ATTR(wwnn, "0x%016llx\n", adapter->wwnn);
66ZFCP_DEFINE_ADAPTER_ATTR(wwpn, "0x%016llx\n", adapter->wwpn);
67ZFCP_DEFINE_ADAPTER_ATTR(s_id, "0x%06x\n", adapter->s_id);
68ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 65ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
69ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 66ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
70ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 67ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
68ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
69ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
71ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 70ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
72ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 71ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
73ZFCP_DEFINE_ADAPTER_ATTR(fc_link_speed, "%d Gb/s\n", adapter->fc_link_speed);
74ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class); 72ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class);
75ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n", 73ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n",
76 fc_topologies[adapter->fc_topology]); 74 fc_topologies[adapter->fc_topology]);
77ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", 75ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
78 adapter->hardware_version); 76 adapter->hardware_version);
79ZFCP_DEFINE_ADAPTER_ATTR(serial_number, "%17s\n", adapter->serial_number);
80ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no); 77ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no);
81ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask 78ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
82 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)); 79 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
@@ -255,21 +252,18 @@ static struct attribute *zfcp_adapter_attrs[] = {
255 &dev_attr_in_recovery.attr, 252 &dev_attr_in_recovery.attr,
256 &dev_attr_port_remove.attr, 253 &dev_attr_port_remove.attr,
257 &dev_attr_port_add.attr, 254 &dev_attr_port_add.attr,
258 &dev_attr_wwnn.attr,
259 &dev_attr_wwpn.attr,
260 &dev_attr_s_id.attr,
261 &dev_attr_peer_wwnn.attr, 255 &dev_attr_peer_wwnn.attr,
262 &dev_attr_peer_wwpn.attr, 256 &dev_attr_peer_wwpn.attr,
263 &dev_attr_peer_d_id.attr, 257 &dev_attr_peer_d_id.attr,
258 &dev_attr_physical_wwpn.attr,
259 &dev_attr_physical_s_id.attr,
264 &dev_attr_card_version.attr, 260 &dev_attr_card_version.attr,
265 &dev_attr_lic_version.attr, 261 &dev_attr_lic_version.attr,
266 &dev_attr_fc_link_speed.attr,
267 &dev_attr_fc_service_class.attr, 262 &dev_attr_fc_service_class.attr,
268 &dev_attr_fc_topology.attr, 263 &dev_attr_fc_topology.attr,
269 &dev_attr_scsi_host_no.attr, 264 &dev_attr_scsi_host_no.attr,
270 &dev_attr_status.attr, 265 &dev_attr_status.attr,
271 &dev_attr_hardware_version.attr, 266 &dev_attr_hardware_version.attr,
272 &dev_attr_serial_number.attr,
273 NULL 267 NULL
274}; 268};
275 269
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c932b3b94490..876d1de8480d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1109,15 +1109,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
1109 return (0); 1109 return (0);
1110} 1110}
1111 1111
1112uint64_t
1113ahc_linux_get_memsize(void)
1114{
1115 struct sysinfo si;
1116
1117 si_meminfo(&si);
1118 return ((uint64_t)si.totalram << PAGE_SHIFT);
1119}
1120
1121/* 1112/*
1122 * Place the SCSI bus into a known state by either resetting it, 1113 * Place the SCSI bus into a known state by either resetting it,
1123 * or forcing transfer negotiations on the next command to any 1114 * or forcing transfer negotiations on the next command to any
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index c52996269240..be9edbe26dbe 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -494,8 +494,6 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
494int ahc_linux_register_host(struct ahc_softc *, 494int ahc_linux_register_host(struct ahc_softc *,
495 struct scsi_host_template *); 495 struct scsi_host_template *);
496 496
497uint64_t ahc_linux_get_memsize(void);
498
499/*************************** Pretty Printing **********************************/ 497/*************************** Pretty Printing **********************************/
500struct info_str { 498struct info_str {
501 char *buffer; 499 char *buffer;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0d44a6907dd2..3ce77ddc889e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -180,6 +180,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
180 struct ahc_pci_identity *entry; 180 struct ahc_pci_identity *entry;
181 char *name; 181 char *name;
182 int error; 182 int error;
183 struct device *dev = &pdev->dev;
183 184
184 pci = pdev; 185 pci = pdev;
185 entry = ahc_find_pci_device(pci); 186 entry = ahc_find_pci_device(pci);
@@ -209,11 +210,12 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
209 pci_set_master(pdev); 210 pci_set_master(pdev);
210 211
211 if (sizeof(dma_addr_t) > 4 212 if (sizeof(dma_addr_t) > 4
212 && ahc_linux_get_memsize() > 0x80000000 213 && ahc->features & AHC_LARGE_SCBS
213 && pci_set_dma_mask(pdev, mask_39bit) == 0) { 214 && dma_set_mask(dev, mask_39bit) == 0
215 && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
214 ahc->flags |= AHC_39BIT_ADDRESSING; 216 ahc->flags |= AHC_39BIT_ADDRESSING;
215 } else { 217 } else {
216 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 218 if (dma_set_mask(dev, DMA_32BIT_MASK)) {
217 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); 219 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
218 return (-ENODEV); 220 return (-ENODEV);
219 } 221 }
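
Note (sketch, not the driver code): the 39-bit addressing decision no longer guesses from total RAM; it asks the DMA layer whether anything above 32 bits is actually required. dma_set_mask() and dma_get_required_mask() are kernel calls and are not reproduced here; the mask arithmetic itself is just:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define DMA_39BIT_MASK ((1ULL << 39) - 1)	/* the mask_39bit used by aic7xxx */

int main(void)
{
	/* assumed example value: what dma_get_required_mask() might report
	   on a machine with memory above 4 GiB */
	uint64_t required = (8ULL << 30) - 1;

	printf("mask_39bit = 0x%016" PRIx64 "\n", DMA_39BIT_MASK);
	if (required > DMA_32BIT_MASK)
		printf("required 0x%016" PRIx64 " exceeds 32 bits: enable 39-bit addressing\n",
		       required);
	else
		printf("32-bit DMA mask is sufficient\n");
	return 0;
}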
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e6153fe5842a..a8cfbef304b5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -996,6 +996,7 @@ oktosend:
996#ifdef ED_DBGP 996#ifdef ED_DBGP
997 printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id); 997 printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id);
998#endif 998#endif
999 dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
999 outl(dev->id[c][target_id].prdaddr, tmpcip); 1000 outl(dev->id[c][target_id].prdaddr, tmpcip);
1000 tmpcip = tmpcip - 2; 1001 tmpcip = tmpcip - 2;
1001 outb(0x06, tmpcip); 1002 outb(0x06, tmpcip);
@@ -2572,7 +2573,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
2572 for (k = 0; k < 16; k++) { 2573 for (k = 0; k < 16; k++) {
2573 if (!atp_dev->id[j][k].prd_table) 2574 if (!atp_dev->id[j][k].prd_table)
2574 continue; 2575 continue;
2575 pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prdaddr); 2576 pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
2576 atp_dev->id[j][k].prd_table = NULL; 2577 atp_dev->id[j][k].prd_table = NULL;
2577 } 2578 }
2578 } 2579 }
@@ -2584,12 +2585,13 @@ static int atp870u_init_tables(struct Scsi_Host *host)
2584 int c,k; 2585 int c,k;
2585 for(c=0;c < 2;c++) { 2586 for(c=0;c < 2;c++) {
2586 for(k=0;k<16;k++) { 2587 for(k=0;k<16;k++) {
2587 atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prdaddr)); 2588 atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
2588 if (!atp_dev->id[c][k].prd_table) { 2589 if (!atp_dev->id[c][k].prd_table) {
2589 printk("atp870u_init_tables fail\n"); 2590 printk("atp870u_init_tables fail\n");
2590 atp870u_free_tables(host); 2591 atp870u_free_tables(host);
2591 return -ENOMEM; 2592 return -ENOMEM;
2592 } 2593 }
2594 atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
2593 atp_dev->id[c][k].devsp=0x20; 2595 atp_dev->id[c][k].devsp=0x20;
2594 atp_dev->id[c][k].devtype = 0x7f; 2596 atp_dev->id[c][k].devtype = 0x7f;
2595 atp_dev->id[c][k].curr_req = NULL; 2597 atp_dev->id[c][k].curr_req = NULL;
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 89f43af39cf2..62bae64a01c1 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -54,8 +54,9 @@ struct atp_unit
54 unsigned long tran_len; 54 unsigned long tran_len;
55 unsigned long last_len; 55 unsigned long last_len;
56 unsigned char *prd_pos; 56 unsigned char *prd_pos;
57 unsigned char *prd_table; 57 unsigned char *prd_table; /* Kernel address of PRD table */
58 dma_addr_t prdaddr; 58 dma_addr_t prd_bus; /* Bus address of PRD */
59 dma_addr_t prdaddr; /* Dynamically updated in driver */
59 struct scsi_cmnd *curr_req; 60 struct scsi_cmnd *curr_req;
60 } id[2][16]; 61 } id[2][16];
61 struct Scsi_Host *host; 62 struct Scsi_Host *host;
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index fa652f8aa643..d59d449a9e4d 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1360,3 +1360,5 @@ static Scsi_Host_Template driver_template = {
1360 .use_clustering = DISABLE_CLUSTERING, 1360 .use_clustering = DISABLE_CLUSTERING,
1361}; 1361};
1362#include "scsi_module.c" 1362#include "scsi_module.c"
1363
1364MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 85503fad789a..f2a72d33132c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -98,6 +98,7 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
98 switch (oldstate) { 98 switch (oldstate) {
99 case SHOST_CREATED: 99 case SHOST_CREATED:
100 case SHOST_RUNNING: 100 case SHOST_RUNNING:
101 case SHOST_CANCEL_RECOVERY:
101 break; 102 break;
102 default: 103 default:
103 goto illegal; 104 goto illegal;
@@ -107,12 +108,31 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
107 case SHOST_DEL: 108 case SHOST_DEL:
108 switch (oldstate) { 109 switch (oldstate) {
109 case SHOST_CANCEL: 110 case SHOST_CANCEL:
111 case SHOST_DEL_RECOVERY:
110 break; 112 break;
111 default: 113 default:
112 goto illegal; 114 goto illegal;
113 } 115 }
114 break; 116 break;
115 117
118 case SHOST_CANCEL_RECOVERY:
119 switch (oldstate) {
120 case SHOST_CANCEL:
121 case SHOST_RECOVERY:
122 break;
123 default:
124 goto illegal;
125 }
126 break;
127
128 case SHOST_DEL_RECOVERY:
129 switch (oldstate) {
130 case SHOST_CANCEL_RECOVERY:
131 break;
132 default:
133 goto illegal;
134 }
135 break;
116 } 136 }
117 shost->shost_state = state; 137 shost->shost_state = state;
118 return 0; 138 return 0;
@@ -134,13 +154,24 @@ EXPORT_SYMBOL(scsi_host_set_state);
134 **/ 154 **/
135void scsi_remove_host(struct Scsi_Host *shost) 155void scsi_remove_host(struct Scsi_Host *shost)
136{ 156{
157 unsigned long flags;
137 down(&shost->scan_mutex); 158 down(&shost->scan_mutex);
138 scsi_host_set_state(shost, SHOST_CANCEL); 159 spin_lock_irqsave(shost->host_lock, flags);
160 if (scsi_host_set_state(shost, SHOST_CANCEL))
161 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
162 spin_unlock_irqrestore(shost->host_lock, flags);
163 up(&shost->scan_mutex);
164 return;
165 }
166 spin_unlock_irqrestore(shost->host_lock, flags);
139 up(&shost->scan_mutex); 167 up(&shost->scan_mutex);
140 scsi_forget_host(shost); 168 scsi_forget_host(shost);
141 scsi_proc_host_rm(shost); 169 scsi_proc_host_rm(shost);
142 170
143 scsi_host_set_state(shost, SHOST_DEL); 171 spin_lock_irqsave(shost->host_lock, flags);
172 if (scsi_host_set_state(shost, SHOST_DEL))
173 BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
174 spin_unlock_irqrestore(shost->host_lock, flags);
144 175
145 transport_unregister_device(&shost->shost_gendev); 176 transport_unregister_device(&shost->shost_gendev);
146 class_device_unregister(&shost->shost_classdev); 177 class_device_unregister(&shost->shost_classdev);
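
Note (simplified sketch, not the full kernel transition table): the new recovery-aware states let teardown proceed while the error handler owns the host. scsi_remove_host() first tries SHOST_CANCEL and, if the transition table rejects it because the host sits in SHOST_RECOVERY, falls back to SHOST_CANCEL_RECOVERY; the same fallback idiom appears later for SHOST_DEL / SHOST_DEL_RECOVERY. The idiom in isolation:

#include <stdio.h>

enum shost_state { RUNNING, RECOVERY, CANCEL, CANCEL_RECOVERY, DEL, DEL_RECOVERY };

/* return 0 on an allowed transition, -1 otherwise (mirrors scsi_host_set_state) */
static int set_state(enum shost_state *cur, enum shost_state next)
{
	int ok = 0;

	switch (next) {
	case CANCEL:
		ok = (*cur == RUNNING);
		break;
	case CANCEL_RECOVERY:
		ok = (*cur == RECOVERY || *cur == CANCEL);
		break;
	case DEL:
		ok = (*cur == CANCEL || *cur == DEL_RECOVERY);
		break;
	case DEL_RECOVERY:
		ok = (*cur == CANCEL_RECOVERY);
		break;
	default:
		break;
	}
	if (!ok)
		return -1;
	*cur = next;
	return 0;
}

int main(void)
{
	enum shost_state state = RECOVERY;	/* error handler is running */

	/* the scsi_remove_host() fallback: plain CANCEL fails, so switch to
	   the recovery-aware variant instead of giving up on removal */
	if (set_state(&state, CANCEL))
		if (set_state(&state, CANCEL_RECOVERY))
			return 1;
	printf("state is now %d (CANCEL_RECOVERY = %d)\n", state, CANCEL_RECOVERY);
	return 0;
}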
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 6e54c7d9b33c..19392f651272 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -460,6 +460,8 @@ MODULE_PARM(adisplay, "1i");
460MODULE_PARM(normal, "1i"); 460MODULE_PARM(normal, "1i");
461MODULE_PARM(ansi, "1i"); 461MODULE_PARM(ansi, "1i");
462#endif 462#endif
463
464MODULE_LICENSE("GPL");
463#endif 465#endif
464/*counter of concurrent disk read/writes, to turn on/off disk led */ 466/*counter of concurrent disk read/writes, to turn on/off disk led */
465static int disk_rw_in_progress = 0; 467static int disk_rw_in_progress = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5b14934ba861..ff25210b00ba 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -727,6 +727,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
727 if (hostdata->madapter_info.port_max_txu[0]) 727 if (hostdata->madapter_info.port_max_txu[0])
728 hostdata->host->max_sectors = 728 hostdata->host->max_sectors =
729 hostdata->madapter_info.port_max_txu[0] >> 9; 729 hostdata->madapter_info.port_max_txu[0] >> 9;
730
731 if (hostdata->madapter_info.os_type == 3 &&
732 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
733 printk("ibmvscsi: host (Ver. %s) doesn't support large"
734 "transfers\n",
735 hostdata->madapter_info.srp_version);
736 printk("ibmvscsi: limiting scatterlists to %d\n",
737 MAX_INDIRECT_BUFS);
738 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
739 }
730 } 740 }
731} 741}
732 742
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 5cc53cd9323e..d92273cbe0de 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2465,9 +2465,12 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2465 * 2465 *
2466 * LOCKING: 2466 * LOCKING:
2467 * None. (executing in kernel thread context) 2467 * None. (executing in kernel thread context)
2468 *
2469 * RETURNS:
2470 * Non-zero if qc completed, zero otherwise.
2468 */ 2471 */
2469 2472
2470static void ata_pio_complete (struct ata_port *ap) 2473static int ata_pio_complete (struct ata_port *ap)
2471{ 2474{
2472 struct ata_queued_cmd *qc; 2475 struct ata_queued_cmd *qc;
2473 u8 drv_stat; 2476 u8 drv_stat;
@@ -2486,14 +2489,14 @@ static void ata_pio_complete (struct ata_port *ap)
2486 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2489 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2487 ap->pio_task_state = PIO_ST_LAST_POLL; 2490 ap->pio_task_state = PIO_ST_LAST_POLL;
2488 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2491 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2489 return; 2492 return 0;
2490 } 2493 }
2491 } 2494 }
2492 2495
2493 drv_stat = ata_wait_idle(ap); 2496 drv_stat = ata_wait_idle(ap);
2494 if (!ata_ok(drv_stat)) { 2497 if (!ata_ok(drv_stat)) {
2495 ap->pio_task_state = PIO_ST_ERR; 2498 ap->pio_task_state = PIO_ST_ERR;
2496 return; 2499 return 0;
2497 } 2500 }
2498 2501
2499 qc = ata_qc_from_tag(ap, ap->active_tag); 2502 qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -2502,6 +2505,10 @@ static void ata_pio_complete (struct ata_port *ap)
2502 ap->pio_task_state = PIO_ST_IDLE; 2505 ap->pio_task_state = PIO_ST_IDLE;
2503 2506
2504 ata_poll_qc_complete(qc, drv_stat); 2507 ata_poll_qc_complete(qc, drv_stat);
2508
2509 /* another command may start at this point */
2510
2511 return 1;
2505} 2512}
2506 2513
2507 2514
@@ -2709,7 +2716,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2709 2716
2710next_sg: 2717next_sg:
2711 if (unlikely(qc->cursg >= qc->n_elem)) { 2718 if (unlikely(qc->cursg >= qc->n_elem)) {
2712 /* 2719 /*
2713 * The end of qc->sg is reached and the device expects 2720 * The end of qc->sg is reached and the device expects
2714 * more data to transfer. In order not to overrun qc->sg 2721 * more data to transfer. In order not to overrun qc->sg
2715 * and fulfill length specified in the byte count register, 2722 * and fulfill length specified in the byte count register,
@@ -2721,7 +2728,7 @@ next_sg:
2721 unsigned int i; 2728 unsigned int i;
2722 2729
2723 if (words) /* warning if bytes > 1 */ 2730 if (words) /* warning if bytes > 1 */
2724 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 2731 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
2725 ap->id, bytes); 2732 ap->id, bytes);
2726 2733
2727 for (i = 0; i < words; i++) 2734 for (i = 0; i < words; i++)
@@ -2849,9 +2856,7 @@ static void ata_pio_block(struct ata_port *ap)
2849 if (is_atapi_taskfile(&qc->tf)) { 2856 if (is_atapi_taskfile(&qc->tf)) {
2850 /* no more data to transfer or unsupported ATAPI command */ 2857 /* no more data to transfer or unsupported ATAPI command */
2851 if ((status & ATA_DRQ) == 0) { 2858 if ((status & ATA_DRQ) == 0) {
2852 ap->pio_task_state = PIO_ST_IDLE; 2859 ap->pio_task_state = PIO_ST_LAST;
2853
2854 ata_poll_qc_complete(qc, status);
2855 return; 2860 return;
2856 } 2861 }
2857 2862
@@ -2887,7 +2892,12 @@ static void ata_pio_error(struct ata_port *ap)
2887static void ata_pio_task(void *_data) 2892static void ata_pio_task(void *_data)
2888{ 2893{
2889 struct ata_port *ap = _data; 2894 struct ata_port *ap = _data;
2890 unsigned long timeout = 0; 2895 unsigned long timeout;
2896 int qc_completed;
2897
2898fsm_start:
2899 timeout = 0;
2900 qc_completed = 0;
2891 2901
2892 switch (ap->pio_task_state) { 2902 switch (ap->pio_task_state) {
2893 case PIO_ST_IDLE: 2903 case PIO_ST_IDLE:
@@ -2898,7 +2908,7 @@ static void ata_pio_task(void *_data)
2898 break; 2908 break;
2899 2909
2900 case PIO_ST_LAST: 2910 case PIO_ST_LAST:
2901 ata_pio_complete(ap); 2911 qc_completed = ata_pio_complete(ap);
2902 break; 2912 break;
2903 2913
2904 case PIO_ST_POLL: 2914 case PIO_ST_POLL:
@@ -2913,10 +2923,9 @@ static void ata_pio_task(void *_data)
2913 } 2923 }
2914 2924
2915 if (timeout) 2925 if (timeout)
2916 queue_delayed_work(ata_wq, &ap->pio_task, 2926 queue_delayed_work(ata_wq, &ap->pio_task, timeout);
2917 timeout); 2927 else if (!qc_completed)
2918 else 2928 goto fsm_start;
2919 queue_work(ata_wq, &ap->pio_task);
2920} 2929}
2921 2930
2922static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, 2931static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
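
Aside (control-flow sketch only; the real states and handlers live in libata-core.c): instead of unconditionally requeueing itself on the workqueue, ata_pio_task now loops in place until either a polling delay is needed or the queued command has completed. The resulting shape, with a toy state handler standing in for the PIO FSM:

#include <stdio.h>

static int steps_left = 3;	/* pretend the FSM needs three passes */

/* toy stand-in for one FSM step; returns a delay in jiffies (0 = none) */
static unsigned long run_state(int *qc_completed)
{
	if (--steps_left > 0)
		return 0;	/* more work, no polling delay required */
	*qc_completed = 1;	/* ata_pio_complete() finished the qc */
	return 0;
}

static void pio_task(void)
{
	unsigned long timeout;
	int qc_completed;

fsm_start:
	timeout = 0;
	qc_completed = 0;
	timeout = run_state(&qc_completed);

	if (timeout)
		printf("requeue after %lu jiffies\n", timeout);	/* queue_delayed_work() */
	else if (!qc_completed)
		goto fsm_start;		/* keep driving the FSM inline */
	else
		printf("qc completed, task exits\n");
}

int main(void)
{
	pio_task();
	return 0;
}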
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3e9b64137873..23d095d3817b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -201,6 +201,7 @@ int
201qla2100_pci_config(scsi_qla_host_t *ha) 201qla2100_pci_config(scsi_qla_host_t *ha)
202{ 202{
203 uint16_t w, mwi; 203 uint16_t w, mwi;
204 uint32_t d;
204 unsigned long flags; 205 unsigned long flags;
205 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 206 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
206 207
@@ -215,9 +216,9 @@ qla2100_pci_config(scsi_qla_host_t *ha)
215 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 216 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
216 217
217 /* Reset expansion ROM address decode enable */ 218 /* Reset expansion ROM address decode enable */
218 pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w); 219 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
219 w &= ~PCI_ROM_ADDRESS_ENABLE; 220 d &= ~PCI_ROM_ADDRESS_ENABLE;
220 pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w); 221 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
221 222
222 /* Get PCI bus information. */ 223 /* Get PCI bus information. */
223 spin_lock_irqsave(&ha->hardware_lock, flags); 224 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -237,6 +238,7 @@ int
237qla2300_pci_config(scsi_qla_host_t *ha) 238qla2300_pci_config(scsi_qla_host_t *ha)
238{ 239{
239 uint16_t w, mwi; 240 uint16_t w, mwi;
241 uint32_t d;
240 unsigned long flags = 0; 242 unsigned long flags = 0;
241 uint32_t cnt; 243 uint32_t cnt;
242 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 244 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -302,9 +304,9 @@ qla2300_pci_config(scsi_qla_host_t *ha)
302 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 304 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
303 305
304 /* Reset expansion ROM address decode enable */ 306 /* Reset expansion ROM address decode enable */
305 pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w); 307 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
306 w &= ~PCI_ROM_ADDRESS_ENABLE; 308 d &= ~PCI_ROM_ADDRESS_ENABLE;
307 pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w); 309 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
308 310
309 /* Get PCI bus information. */ 311 /* Get PCI bus information. */
310 spin_lock_irqsave(&ha->hardware_lock, flags); 312 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -324,6 +326,7 @@ int
324qla24xx_pci_config(scsi_qla_host_t *ha) 326qla24xx_pci_config(scsi_qla_host_t *ha)
325{ 327{
326 uint16_t w, mwi; 328 uint16_t w, mwi;
329 uint32_t d;
327 unsigned long flags = 0; 330 unsigned long flags = 0;
328 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 331 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
329 int pcix_cmd_reg, pcie_dctl_reg; 332 int pcix_cmd_reg, pcie_dctl_reg;
@@ -366,9 +369,9 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
366 } 369 }
367 370
368 /* Reset expansion ROM address decode enable */ 371 /* Reset expansion ROM address decode enable */
369 pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w); 372 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
370 w &= ~PCI_ROM_ADDRESS_ENABLE; 373 d &= ~PCI_ROM_ADDRESS_ENABLE;
371 pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w); 374 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
372 375
373 /* Get PCI bus information. */ 376 /* Get PCI bus information. */
374 spin_lock_irqsave(&ha->hardware_lock, flags); 377 spin_lock_irqsave(&ha->hardware_lock, flags);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index a63f93186e41..b227e51d12f4 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -161,7 +161,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
161{ 161{
162 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 162 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
163 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device); 163 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
164 u32 val, val2; 164 u32 val, val2 = 0;
165 u8 pmr; 165 u8 pmr;
166 166
167 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ 167 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@@ -289,7 +289,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
289 if (ent->device != 0x182) { 289 if (ent->device != 0x182) {
290 if ((pmr & SIS_PMR_COMBINED) == 0) { 290 if ((pmr & SIS_PMR_COMBINED) == 0) {
291 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n"); 291 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n");
292 port2_start=0x64; 292 port2_start = 64;
293 } 293 }
294 else { 294 else {
295 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n"); 295 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a780546eda9c..1f0ebabf6d47 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1265,9 +1265,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
1265 list_for_each_safe(lh, lh_sf, &active_list) { 1265 list_for_each_safe(lh, lh_sf, &active_list) {
1266 scmd = list_entry(lh, struct scsi_cmnd, eh_entry); 1266 scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
1267 list_del_init(lh); 1267 list_del_init(lh);
1268 if (recovery) { 1268 if (recovery &&
1269 scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD); 1269 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
1270 } else {
1271 scmd->result = (DID_ABORT << 16); 1270 scmd->result = (DID_ABORT << 16);
1272 scsi_finish_command(scmd); 1271 scsi_finish_command(scmd);
1273 } 1272 }
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 07b554affcf2..64fc9e21f35b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -110,6 +110,7 @@ static struct {
110 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ 110 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
111 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ 111 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
112 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN}, 112 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
113 {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
113 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ 114 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
114 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ 115 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
115 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ 116 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 895c9452be4c..ad5342165079 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,7 +50,7 @@
50void scsi_eh_wakeup(struct Scsi_Host *shost) 50void scsi_eh_wakeup(struct Scsi_Host *shost)
51{ 51{
52 if (shost->host_busy == shost->host_failed) { 52 if (shost->host_busy == shost->host_failed) {
53 up(shost->eh_wait); 53 wake_up_process(shost->ehandler);
54 SCSI_LOG_ERROR_RECOVERY(5, 54 SCSI_LOG_ERROR_RECOVERY(5,
55 printk("Waking error handler thread\n")); 55 printk("Waking error handler thread\n"));
56 } 56 }
@@ -68,19 +68,24 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
68{ 68{
69 struct Scsi_Host *shost = scmd->device->host; 69 struct Scsi_Host *shost = scmd->device->host;
70 unsigned long flags; 70 unsigned long flags;
71 int ret = 0;
71 72
72 if (shost->eh_wait == NULL) 73 if (!shost->ehandler)
73 return 0; 74 return 0;
74 75
75 spin_lock_irqsave(shost->host_lock, flags); 76 spin_lock_irqsave(shost->host_lock, flags);
77 if (scsi_host_set_state(shost, SHOST_RECOVERY))
78 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
79 goto out_unlock;
76 80
81 ret = 1;
77 scmd->eh_eflags |= eh_flag; 82 scmd->eh_eflags |= eh_flag;
78 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); 83 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
79 scsi_host_set_state(shost, SHOST_RECOVERY);
80 shost->host_failed++; 84 shost->host_failed++;
81 scsi_eh_wakeup(shost); 85 scsi_eh_wakeup(shost);
86 out_unlock:
82 spin_unlock_irqrestore(shost->host_lock, flags); 87 spin_unlock_irqrestore(shost->host_lock, flags);
83 return 1; 88 return ret;
84} 89}
85 90
86/** 91/**
@@ -176,8 +181,8 @@ void scsi_times_out(struct scsi_cmnd *scmd)
176 } 181 }
177 182
178 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 183 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
179 panic("Error handler thread not present at %p %p %s %d", 184 scmd->result |= DID_TIME_OUT << 16;
180 scmd, scmd->device->host, __FILE__, __LINE__); 185 __scsi_done(scmd);
181 } 186 }
182} 187}
183 188
@@ -196,8 +201,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
196{ 201{
197 int online; 202 int online;
198 203
199 wait_event(sdev->host->host_wait, (sdev->host->shost_state != 204 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
200 SHOST_RECOVERY));
201 205
202 online = scsi_device_online(sdev); 206 online = scsi_device_online(sdev);
203 207
@@ -1441,6 +1445,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
1441static void scsi_restart_operations(struct Scsi_Host *shost) 1445static void scsi_restart_operations(struct Scsi_Host *shost)
1442{ 1446{
1443 struct scsi_device *sdev; 1447 struct scsi_device *sdev;
1448 unsigned long flags;
1444 1449
1445 /* 1450 /*
1446 * If the door was locked, we need to insert a door lock request 1451 * If the door was locked, we need to insert a door lock request
@@ -1460,7 +1465,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1460 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1465 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1461 __FUNCTION__)); 1466 __FUNCTION__));
1462 1467
1463 scsi_host_set_state(shost, SHOST_RUNNING); 1468 spin_lock_irqsave(shost->host_lock, flags);
1469 if (scsi_host_set_state(shost, SHOST_RUNNING))
1470 if (scsi_host_set_state(shost, SHOST_CANCEL))
1471 BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
1472 spin_unlock_irqrestore(shost->host_lock, flags);
1464 1473
1465 wake_up(&shost->host_wait); 1474 wake_up(&shost->host_wait);
1466 1475
@@ -1582,40 +1591,31 @@ int scsi_error_handler(void *data)
1582{ 1591{
1583 struct Scsi_Host *shost = (struct Scsi_Host *) data; 1592 struct Scsi_Host *shost = (struct Scsi_Host *) data;
1584 int rtn; 1593 int rtn;
1585 DECLARE_MUTEX_LOCKED(sem);
1586 1594
1587 current->flags |= PF_NOFREEZE; 1595 current->flags |= PF_NOFREEZE;
1588 shost->eh_wait = &sem;
1589 1596
1597
1590 /* 1598 /*
1591 * Wake up the thread that created us. 1599 * Note - we always use TASK_INTERRUPTIBLE even if the module
1600 * was loaded as part of the kernel. The reason is that
1601 * UNINTERRUPTIBLE would cause this thread to be counted in
1602 * the load average as a running process, and an interruptible
1603 * wait doesn't.
1592 */ 1604 */
1593 SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of" 1605 set_current_state(TASK_INTERRUPTIBLE);
1594 " scsi_eh_%d\n",shost->host_no)); 1606 while (!kthread_should_stop()) {
1595 1607 if (shost->host_failed == 0 ||
1596 while (1) { 1608 shost->host_failed != shost->host_busy) {
1597 /* 1609 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
1598 * If we get a signal, it means we are supposed to go 1610 " scsi_eh_%d"
1599 * away and die. This typically happens if the user is 1611 " sleeping\n",
1600 * trying to unload a module. 1612 shost->host_no));
1601 */ 1613 schedule();
1602 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler" 1614 set_current_state(TASK_INTERRUPTIBLE);
1603 " scsi_eh_%d" 1615 continue;
1604 " sleeping\n",shost->host_no)); 1616 }
1605
1606 /*
1607 * Note - we always use down_interruptible with the semaphore
1608 * even if the module was loaded as part of the kernel. The
1609 * reason is that down() will cause this thread to be counted
1610 * in the load average as a running process, and down
1611 * interruptible doesn't. Given that we need to allow this
1612 * thread to die if the driver was loaded as a module, using
1613 * semaphores isn't unreasonable.
1614 */
1615 down_interruptible(&sem);
1616 if (kthread_should_stop())
1617 break;
1618 1617
1618 __set_current_state(TASK_RUNNING);
1619 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler" 1619 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
1620 " scsi_eh_%d waking" 1620 " scsi_eh_%d waking"
1621 " up\n",shost->host_no)); 1621 " up\n",shost->host_no));
@@ -1642,7 +1642,7 @@ int scsi_error_handler(void *data)
1642 * which are still online. 1642 * which are still online.
1643 */ 1643 */
1644 scsi_restart_operations(shost); 1644 scsi_restart_operations(shost);
1645 1645 set_current_state(TASK_INTERRUPTIBLE);
1646 } 1646 }
1647 1647
1648 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d" 1648 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
@@ -1651,7 +1651,7 @@ int scsi_error_handler(void *data)
 	/*
 	 * Make sure that nobody tries to wake us up again.
 	 */
-	shost->eh_wait = NULL;
+	shost->ehandler = NULL;
 	return 0;
 }
 
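The error-handler conversion above follows the standard kthread idle loop: mark the task TASK_INTERRUPTIBLE before testing the wake-up condition, schedule() while there is nothing to do, and let kthread_stop() end the loop. A stripped-down sketch of that pattern; everything except the scheduler/kthread primitives is a made-up placeholder:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical per-thread context; stands in for the Scsi_Host here. */
struct example_ctx {
	int pending;
};

static int example_thread(void *data)
{
	struct example_ctx *ctx = data;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (!ctx->pending) {
			schedule();		/* sleep until someone wakes us */
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}
		__set_current_state(TASK_RUNNING);
		ctx->pending = 0;		/* "handle" the work */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
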
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b7fddac81347..de7f98cc38fe 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -458,7 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
 	 * error processing, as long as the device was opened
 	 * non-blocking */
 	if (filp && filp->f_flags & O_NONBLOCK) {
-		if (sdev->host->shost_state == SHOST_RECOVERY)
+		if (scsi_host_in_recovery(sdev->host))
 			return -ENODEV;
 	} else if (!scsi_block_when_processing_errors(sdev))
 		return -ENODEV;
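scsi_host_in_recovery() replaces the open-coded comparisons against SHOST_RECOVERY here, in scsi_lib.c and in sg.c, so that the combined recovery states added by this series are covered as well. The accessor presumably reduces to something like the following sketch (not the verbatim header):

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
	       shost->shost_state == SHOST_CANCEL_RECOVERY ||
	       shost->shost_state == SHOST_DEL_RECOVERY;
}
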
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 863bb6495daa..dc9c772bc874 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -118,7 +118,6 @@ static void scsi_unprep_request(struct request *req)
 	req->flags &= ~REQ_DONTPREP;
 	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
 
-	scsi_release_buffers(cmd);
 	scsi_put_command(cmd);
 }
 
@@ -140,14 +139,12 @@ static void scsi_unprep_request(struct request *req)
  *		commands.
  * Notes:	This could be called either from an interrupt context or a
  *		normal process context.
- * Notes:	Upon return, cmd is a stale pointer.
  */
 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
 	struct request_queue *q = device->request_queue;
-	struct request *req = cmd->request;
 	unsigned long flags;
 
 	SCSI_LOG_MLQUEUE(1,
@@ -188,9 +185,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	 * function.  The SCSI request function detects the blocked condition
 	 * and plugs the queue appropriately.
 	 */
-	scsi_unprep_request(req);
 	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, req);
+	blk_requeue_request(q, cmd->request);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	scsi_run_queue(q);
@@ -451,7 +447,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
-	if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
+	if (unlikely(scsi_host_in_recovery(shost) &&
 		     shost->host_failed))
 		scsi_eh_wakeup(shost);
 	spin_unlock(shost->host_lock);
@@ -1268,6 +1264,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 		}
 	} else {
 		memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
+		cmd->cmd_len = req->cmd_len;
 		if (rq_data_dir(req) == WRITE)
 			cmd->sc_data_direction = DMA_TO_DEVICE;
 		else if (req->data_len)
@@ -1342,7 +1339,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 				   struct Scsi_Host *shost,
 				   struct scsi_device *sdev)
 {
-	if (shost->shost_state == SHOST_RECOVERY)
+	if (scsi_host_in_recovery(shost))
 		return 0;
 	if (shost->host_busy == 0 && shost->host_blocked) {
 		/*
@@ -1514,7 +1511,6 @@ static void scsi_request_fn(struct request_queue *q)
 	 * cases (host limits or settings) should run the queue at some
 	 * later time.
 	 */
-	scsi_unprep_request(req);
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
 	sdev->device_busy--;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b86f170fa8ed..fcf9f6cbb142 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1466,23 +1466,17 @@ EXPORT_SYMBOL(scsi_scan_single_target);
 
 void scsi_forget_host(struct Scsi_Host *shost)
 {
-	struct scsi_target *starget, *tmp;
+	struct scsi_device *sdev;
 	unsigned long flags;
 
-	/*
-	 * Ok, this look a bit strange.  We always look for the first device
-	 * on the list as scsi_remove_device removes them from it - thus we
-	 * also have to release the lock.
-	 * We don't need to get another reference to the device before
-	 * releasing the lock as we already own the reference from
-	 * scsi_register_device that's release in scsi_remove_device.  And
-	 * after that we don't look at sdev anymore.
-	 */
+ restart:
 	spin_lock_irqsave(shost->host_lock, flags);
-	list_for_each_entry_safe(starget, tmp, &shost->__targets, siblings) {
+	list_for_each_entry(sdev, &shost->__devices, siblings) {
+		if (sdev->sdev_state == SDEV_DEL)
+			continue;
 		spin_unlock_irqrestore(shost->host_lock, flags);
-		scsi_remove_target(&starget->dev);
-		spin_lock_irqsave(shost->host_lock, flags);
+		__scsi_remove_device(sdev);
+		goto restart;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
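The rewritten scsi_forget_host() (and __scsi_remove_target() below) use the usual trick for walking a lock-protected list when the per-entry work must run unlocked: skip entries already marked deleted, drop the lock, do the removal, then restart the walk from the head because the list may have changed while the lock was released. The same idea as a generic fragment; the lock, list head and item names are illustrative and the surrounding declarations are assumed:

 restart:
	spin_lock_irqsave(&example_lock, flags);
	list_for_each_entry(item, &example_list, node) {
		if (item->being_removed)		/* illustrative "already gone" flag */
			continue;
		spin_unlock_irqrestore(&example_lock, flags);
		example_remove(item);			/* may sleep; list can change */
		goto restart;				/* iterator is now stale */
	}
	spin_unlock_irqrestore(&example_lock, flags);
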
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b8052d5206cc..72a6550a056c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -57,6 +57,8 @@ static struct {
 	{ SHOST_CANCEL, "cancel" },
 	{ SHOST_DEL, "deleted" },
 	{ SHOST_RECOVERY, "recovery" },
+	{ SHOST_CANCEL_RECOVERY, "cancel/recovery" },
+	{ SHOST_DEL_RECOVERY, "deleted/recovery", },
 };
 const char *scsi_host_state_name(enum scsi_host_state state)
 {
@@ -707,9 +709,11 @@ void __scsi_remove_device(struct scsi_device *sdev)
  **/
 void scsi_remove_device(struct scsi_device *sdev)
 {
-	down(&sdev->host->scan_mutex);
+	struct Scsi_Host *shost = sdev->host;
+
+	down(&shost->scan_mutex);
 	__scsi_remove_device(sdev);
-	up(&sdev->host->scan_mutex);
+	up(&shost->scan_mutex);
 }
 EXPORT_SYMBOL(scsi_remove_device);
 
@@ -717,17 +721,20 @@ void __scsi_remove_target(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	unsigned long flags;
-	struct scsi_device *sdev, *tmp;
+	struct scsi_device *sdev;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	starget->reap_ref++;
-	list_for_each_entry_safe(sdev, tmp, &shost->__devices, siblings) {
+ restart:
+	list_for_each_entry(sdev, &shost->__devices, siblings) {
 		if (sdev->channel != starget->channel ||
-		    sdev->id != starget->id)
+		    sdev->id != starget->id ||
+		    sdev->sdev_state == SDEV_DEL)
 			continue;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		scsi_remove_device(sdev);
 		spin_lock_irqsave(shost->host_lock, flags);
+		goto restart;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 	scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de564b386052..9a1dc0cea03c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -235,6 +235,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
 		return 0;
 
 	memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+	SCpnt->cmd_len = rq->cmd_len;
 	if (rq_data_dir(rq) == WRITE)
 		SCpnt->sc_data_direction = DMA_TO_DEVICE;
 	else if (rq->data_len)
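The one-line additions to sd, sr and st (and to scsi_prep_fn above) all propagate the request's CDB length into the command rather than relying on a default. That matters for pass-through requests whose CDB length cannot be derived from the opcode; an illustrative fragment:

	/* Illustrative: a 16-byte pass-through CDB would be mishandled if the
	 * length were assumed from the opcode group, so copy both the bytes
	 * and the length from the block-layer request. */
	memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
	SCpnt->cmd_len = rq->cmd_len;		/* may be 6, 10, 12 or 16 */
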
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9ea4765d1d12..4d09a6e4dd2e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1027,7 +1027,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 	if (sdp->detached)
 		return -ENODEV;
 	if (filp->f_flags & O_NONBLOCK) {
-		if (sdp->device->host->shost_state == SHOST_RECOVERY)
+		if (scsi_host_in_recovery(sdp->device->host))
 			return -EBUSY;
 	} else if (!scsi_block_when_processing_errors(sdp->device))
 		return -EBUSY;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ce63fc8312dc..561901b1cf11 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -326,6 +326,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
 		return 0;
 
 	memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+	SCpnt->cmd_len = rq->cmd_len;
 	if (!rq->data_len)
 		SCpnt->sc_data_direction = DMA_NONE;
 	else if (rq_data_dir(rq) == WRITE)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a93308ae9736..d001c046551b 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4206,6 +4206,7 @@ static int st_init_command(struct scsi_cmnd *SCpnt)
 		return 0;
 
 	memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
+	SCpnt->cmd_len = rq->cmd_len;
 
 	if (rq_data_dir(rq) == WRITE)
 		SCpnt->sc_data_direction = DMA_TO_DEVICE;
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index aec39fb261ca..b5cf39468d18 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -463,7 +463,7 @@ static int __init serial21285_console_setup(struct console *co, char *options)
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
 
-extern struct uart_driver serial21285_reg;
+static struct uart_driver serial21285_reg;
 
 static struct console serial21285_console =
 {
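All of the serial console patches below make the same fix: the uart_driver the console refers to is defined later in the same file with internal linkage, so the earlier reference must be a static forward declaration, not extern. A tentative sketch of the resulting shape; the driver and console names are placeholders, not taken from any of these drivers:

#include <linux/console.h>
#include <linux/serial_core.h>

/* forward declaration: the real definition appears later in this file */
static struct uart_driver example_reg;

static struct console example_console = {
	.name	= "ttyXX",
	.data	= &example_reg,		/* refers to the forward declaration */
};

static struct uart_driver example_reg = {
	.cons	= &example_console,
};
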
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 978e12437e61..679e678c7e6a 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -689,7 +689,7 @@ static int __init pl010_console_setup(struct console *co, char *options)
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
 
-extern struct uart_driver amba_reg;
+static struct uart_driver amba_reg;
 static struct console amba_console = {
 	.name	= "ttyAM",
 	.write	= pl010_console_write,
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 56071309744c..1ff629c74750 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -701,7 +701,7 @@ static int __init pl011_console_setup(struct console *co, char *options)
 	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
 }
 
-extern struct uart_driver amba_reg;
+static struct uart_driver amba_reg;
 static struct console amba_console = {
 	.name	= "ttyAMA",
 	.write	= pl011_console_write,
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index d822896b488c..78c1f36ad9b7 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -525,7 +525,7 @@ static int __init clps711xuart_console_setup(struct console *co, char *options)
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
 
-extern struct uart_driver clps711x_reg;
+static struct uart_driver clps711x_reg;
 static struct console clps711x_console = {
 	.name	= "ttyCL",
 	.write	= clps711xuart_console_write,
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index a3cd0ee8486d..0585ab27ffde 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -781,7 +781,7 @@ mpc52xx_uart_remove(struct device *dev)
 
 #ifdef CONFIG_PM
 static int
-mpc52xx_uart_suspend(struct device *dev, u32 state, u32 level)
+mpc52xx_uart_suspend(struct device *dev, pm_message_t state, u32 level)
 {
 	struct uart_port *port = (struct uart_port *) dev_get_drvdata(dev);
 
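This signature fix, and the matching one in imxfb further down, track the driver-model change from a raw u32 to pm_message_t for the suspend state argument. A minimal sketch of a legacy-style suspend hook using the new type; the body and the SUSPEND_POWER_DOWN check are illustrative of this era's three-argument callbacks, not copied from either driver:

#include <linux/device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev, pm_message_t state, u32 level)
{
	if (level != SUSPEND_POWER_DOWN)	/* per-level calls still exist here */
		return 0;
	/* quiesce the hardware using dev_get_drvdata(dev) */
	return 0;
}
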
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index eaa0af835290..672b359b07ce 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -589,8 +589,8 @@ serial_pxa_type(struct uart_port *port)
 
 #ifdef CONFIG_SERIAL_PXA_CONSOLE
 
-extern struct uart_pxa_port serial_pxa_ports[];
-extern struct uart_driver serial_pxa_reg;
+static struct uart_pxa_port serial_pxa_ports[];
+static struct uart_driver serial_pxa_reg;
 
 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
 
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 1225b14f6e9d..dd8aed242357 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -799,7 +799,7 @@ sa1100_console_setup(struct console *co, char *options)
 	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
 }
 
-extern struct uart_driver sa1100_reg;
+static struct uart_driver sa1100_reg;
 static struct console sa1100_console = {
 	.name	= "ttySA",
 	.write	= sa1100_console_write,
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index 8302376800c0..d01dbe5da3b9 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -632,7 +632,7 @@ static int __init lh7a40xuart_console_setup (struct console* co, char* options)
 	return uart_set_options (port, co, baud, parity, bits, flow);
 }
 
-extern struct uart_driver lh7a40x_reg;
+static struct uart_driver lh7a40x_reg;
 static struct console lh7a40x_console = {
 	.name	= "ttyAM",
 	.write	= lh7a40xuart_console_write,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 31ee13eef7af..773ae11b4a19 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -650,6 +650,7 @@ config FB_NVIDIA
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_SOFT_CURSOR
 	help
 	  This driver supports graphics boards with the nVidia chips, TNT
 	  and newer. For very old chipsets, such as the RIVA128, then use
diff --git a/drivers/video/aty/xlinit.c b/drivers/video/aty/xlinit.c
index 92643af12581..0bea0d8d7821 100644
--- a/drivers/video/aty/xlinit.c
+++ b/drivers/video/aty/xlinit.c
@@ -174,7 +174,7 @@ int atyfb_xl_init(struct fb_info *info)
 	const struct xl_card_cfg_t * card = &card_cfg[xl_card];
 	struct atyfb_par *par = (struct atyfb_par *) info->par;
 	union aty_pll pll;
-	int i, err;
+	int err;
 	u32 temp;
 
 	aty_st_8(CONFIG_STAT0, 0x85, par);
@@ -252,9 +252,12 @@ int atyfb_xl_init(struct fb_info *info)
 	aty_st_le32(0xEC, 0x00000000, par);
 	aty_st_le32(0xFC, 0x00000000, par);
 
+#if defined (CONFIG_FB_ATY_GENERIC_LCD)
+	int i;
 	for (i=0; i<sizeof(lcd_tbl)/sizeof(lcd_tbl_t); i++) {
 		aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
 	}
+#endif
 
 	aty_st_le16(CONFIG_STAT0, 0x00A4, par);
 	mdelay(10);
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index a32817678552..630f2dfa9699 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -19,17 +19,18 @@
 #include <linux/fb.h>
 #include <linux/backlight.h>
 
-#include <asm/arch-pxa/corgi.h>
-#include <asm/hardware/scoop.h>
+#include <asm/mach-types.h>
+#include <asm/arch/sharpsl.h>
 
-#define CORGI_MAX_INTENSITY		0x3e
 #define CORGI_DEFAULT_INTENSITY		0x1f
 #define CORGI_LIMIT_MASK		0x0b
 
 static int corgibl_powermode = FB_BLANK_UNBLANK;
 static int current_intensity = 0;
 static int corgibl_limit = 0;
+static void (*corgibl_mach_set_intensity)(int intensity);
 static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
+static struct backlight_properties corgibl_data;
 
 static void corgibl_send_intensity(int intensity)
 {
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 0705cd741411..6ef6f7760e47 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1020,7 +1020,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
 static int vgacon_resize(struct vc_data *c, unsigned int width,
 			 unsigned int height)
 {
-	if (width % 2 || width > ORIG_VIDEO_COLS || height > ORIG_VIDEO_LINES)
+	if (width % 2 || width > ORIG_VIDEO_COLS ||
+	    height > (ORIG_VIDEO_LINES * vga_default_font_height)/
+	    c->vc_font.height)
 		return -EINVAL;
 
 	if (CON_IS_VISIBLE(c) && !vga_is_gfx)	/* who knows */
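The new check scales the row limit by the ratio of the default VGA font height to the currently loaded console font height instead of comparing against the boot-time line count directly. A worked example with illustrative numbers: with ORIG_VIDEO_LINES = 25, vga_default_font_height = 16 and an 8-pixel console font, the limit becomes (25 * 16) / 8 = 50 rows, whereas the old test would have refused anything above 25 even though the smaller font makes those rows fit.
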
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index cfa61b512de0..0b6af00d197e 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -272,11 +272,11 @@ static void fb_cvt_convert_to_mode(struct fb_cvt_data *cvt,
 {
 	mode->refresh = cvt->f_refresh;
 	mode->pixclock = KHZ2PICOS(cvt->pixclock/1000);
-	mode->left_margin = cvt->h_front_porch;
-	mode->right_margin = cvt->h_back_porch;
+	mode->left_margin = cvt->h_back_porch;
+	mode->right_margin = cvt->h_front_porch;
 	mode->hsync_len = cvt->hsync;
-	mode->upper_margin = cvt->v_front_porch;
-	mode->lower_margin = cvt->v_back_porch;
+	mode->upper_margin = cvt->v_back_porch;
+	mode->lower_margin = cvt->v_front_porch;
 	mode->vsync_len = cvt->vsync;
 
 	mode->sync &= ~(FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT);
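The fbcvt fix follows from how the fbdev timing fields are named: left_margin/upper_margin correspond to the back porch (time between the end of sync and the start of the active picture) and right_margin/lower_margin to the front porch, so the CVT values were previously assigned to the wrong members. A summary of the intended mapping, paraphrased rather than quoted from the fbdev documentation:

	/* fbdev field        CRT timing (per direction)
	 *   left_margin   = horizontal back porch   (hsync end  -> picture)
	 *   right_margin  = horizontal front porch  (picture    -> hsync start)
	 *   upper_margin  = vertical back porch
	 *   lower_margin  = vertical front porch
	 */
	mode->left_margin  = cvt->h_back_porch;
	mode->right_margin = cvt->h_front_porch;
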
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index cabd53cec991..6c2244cf0e74 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -425,7 +425,7 @@ static void imxfb_setup_gpio(struct imxfb_info *fbi)
  * Power management hooks.  Note that we won't be called from IRQ context,
  * unlike the blank functions above, so we may sleep.
  */
-static int imxfb_suspend(struct device *dev, u32 state, u32 level)
+static int imxfb_suspend(struct device *dev, pm_message_t state, u32 level)
 {
 	struct imxfb_info *fbi = dev_get_drvdata(dev);
 	pr_debug("%s\n",__FUNCTION__);
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index ace484fa61ce..12f2884d3f0b 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -209,10 +209,13 @@ int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid)
 
 	if (!edid && conn == 1) {
 		/* try to get from firmware */
-		edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
-		if (edid)
-			memcpy(edid, fb_firmware_edid(info->device),
-			       EDID_LENGTH);
+		const u8 *e = fb_firmware_edid(info->device);
+
+		if (e != NULL) {
+			edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+			if (edid)
+				memcpy(edid, e, EDID_LENGTH);
+		}
 	}
 
 	if (out_edid)
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 3620de0f252e..a7f020ada630 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -893,7 +893,7 @@ static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 	int i, set = cursor->set;
 	u16 fg, bg;
 
-	if (!hwcur || cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
+	if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
 		return -ENXIO;
 
 	NVShowHideCursor(par, 0);
@@ -1356,6 +1356,9 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
 	info->pixmap.size = 8 * 1024;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
+	if (!hwcur)
+		info->fbops->fb_cursor = soft_cursor;
+
 	info->var.accel_flags = (!noaccel);
 
 	switch (par->Architecture) {
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 959404ad68f4..3c98457783c4 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -274,10 +274,13 @@ int savagefb_probe_i2c_connector(struct fb_info *info, u8 **out_edid)
 
 	if (!edid) {
 		/* try to get from firmware */
-		edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
-		if (edid)
-			memcpy(edid, fb_firmware_edid(info->device),
-			       EDID_LENGTH);
+		const u8 *e = fb_firmware_edid(info->device);
+
+		if (e) {
+			edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+			if (edid)
+				memcpy(edid, e, EDID_LENGTH);
+		}
 	}
 
 	if (out_edid)
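Both EDID probe fixes (nvidia above and savagefb here) replace an unconditional memcpy from fb_firmware_edid() with a guarded copy, since that helper can return NULL when the firmware provides no EDID block. The shared pattern as a fragment sketch, assuming the surrounding info/edid variables from either driver:

	const u8 *e = fb_firmware_edid(info->device);

	if (e) {				/* firmware may provide no EDID */
		edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
		if (edid)
			memcpy(edid, e, EDID_LENGTH);
	}
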
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index d6f94742c9f2..ea17f7e0482c 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -60,8 +60,6 @@
 
 #define S3_SAVAGE_SERIES(chip)	((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
 
-#define S3_MOBILE_TWISTER_SERIES(chip)	((chip==S3_TWISTER) || (chip == S3_PROSAVAGEDDR))
-
 /* Chip tags.  These are used to group the adapters into
  * related families.
  */
@@ -74,8 +72,6 @@ typedef enum {
 	S3_PROSAVAGE,
 	S3_SUPERSAVAGE,
 	S3_SAVAGE2000,
-	S3_PROSAVAGEDDR,
-	S3_TWISTER,
 	S3_LAST
 } savage_chipset;
 
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index b5ca3ef8271f..7c285455c924 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1773,8 +1773,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
 		}
 	}
 
-	if (S3_SAVAGE_MOBILE_SERIES(par->chip) ||
-	    (S3_MOBILE_TWISTER_SERIES(par->chip) && !par->crtonly))
+	if (S3_SAVAGE_MOBILE_SERIES(par->chip) && !par->crtonly)
 		par->display_type = DISP_LCD;
 	else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi))
 		par->display_type = DISP_DFP;
@@ -1783,7 +1782,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
 
 	/* Check LCD panel parrmation */
 
-	if (par->chip == S3_SAVAGE_MX) {
+	if (par->display_type == DISP_LCD) {
 		unsigned char cr6b = VGArCR( 0x6b );
 
 		int panelX = (VGArSEQ (0x61) +
@@ -1922,15 +1921,15 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
 		snprintf (info->fix.id, 16, "ProSavageKM");
 		break;
 	case FB_ACCEL_S3TWISTER_P:
-		par->chip = S3_TWISTER;
+		par->chip = S3_PROSAVAGE;
 		snprintf (info->fix.id, 16, "TwisterP");
 		break;
 	case FB_ACCEL_S3TWISTER_K:
-		par->chip = S3_TWISTER;
+		par->chip = S3_PROSAVAGE;
 		snprintf (info->fix.id, 16, "TwisterK");
 		break;
 	case FB_ACCEL_PROSAVAGE_DDR:
-		par->chip = S3_PROSAVAGEDDR;
+		par->chip = S3_PROSAVAGE;
 		snprintf (info->fix.id, 16, "ProSavageDDR");
 		break;
 	case FB_ACCEL_PROSAVAGE_DDRK: