aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/block/DAC960.c2
-rw-r--r--drivers/block/cciss_scsi.c14
-rw-r--r--drivers/char/agp/agp.h2
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/efficeon-agp.c16
-rw-r--r--drivers/char/agp/frontend.c27
-rw-r--r--drivers/char/agp/generic.c39
-rw-r--r--drivers/char/agp/intel-agp.c173
-rw-r--r--drivers/char/agp/via-agp.c4
-rw-r--r--drivers/char/briq_panel.c19
-rw-r--r--drivers/char/istallion.c2
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c173
-rw-r--r--drivers/cpufreq/cpufreq_stats.c2
-rw-r--r--drivers/infiniband/core/mad_priv.h1
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c1
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c18
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/message/fusion/mptfc.c100
-rw-r--r--drivers/message/fusion/mptsas.c19
-rw-r--r--drivers/mtd/Kconfig4
-rw-r--r--drivers/mtd/ssfdc.c58
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/8139cp.c6
-rw-r--r--drivers/net/acenic.c8
-rw-r--r--drivers/net/arcnet/com20020-pci.c1
-rw-r--r--drivers/net/bnx2.c2
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/chelsio/sge.c10
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/e1000/e1000_main.c8
-rw-r--r--drivers/net/forcedeth.c3
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c2
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/irda/ali-ircc.c8
-rw-r--r--drivers/net/irda/irport.c4
-rw-r--r--drivers/net/irda/via-ircc.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c4
-rw-r--r--drivers/net/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sk98lin/skge.c6
-rw-r--r--drivers/net/skge.c4
-rw-r--r--drivers/net/sky2.c6
-rw-r--r--drivers/net/starfire.c6
-rw-r--r--drivers/net/sungem.c4
-rw-r--r--drivers/net/sunhme.c6
-rw-r--r--drivers/net/tg3.c199
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/via-rhine.c2
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c84
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c13
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c13
-rw-r--r--drivers/s390/scsi/zfcp_def.h24
-rw-r--r--drivers/s390/scsi/zfcp_erp.c231
-rw-r--r--drivers/s390/scsi/zfcp_ext.h18
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c299
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c112
-rw-r--r--drivers/scsi/BusLogic.c61
-rw-r--r--drivers/scsi/Kconfig32
-rw-r--r--drivers/scsi/Makefile5
-rw-r--r--drivers/scsi/a2091.c6
-rw-r--r--drivers/scsi/a2091.h4
-rw-r--r--drivers/scsi/a3000.c8
-rw-r--r--drivers/scsi/a3000.h4
-rw-r--r--drivers/scsi/aacraid/aachba.c60
-rw-r--r--drivers/scsi/aacraid/aacraid.h20
-rw-r--r--drivers/scsi/aacraid/commctrl.c25
-rw-r--r--drivers/scsi/aacraid/comminit.c13
-rw-r--r--drivers/scsi/aacraid/commsup.c279
-rw-r--r--drivers/scsi/aacraid/dpcsup.c10
-rw-r--r--drivers/scsi/aacraid/linit.c35
-rw-r--r--drivers/scsi/aacraid/rkt.c446
-rw-r--r--drivers/scsi/aacraid/rx.c117
-rw-r--r--drivers/scsi/aacraid/sa.c21
-rw-r--r--drivers/scsi/advansys.c90
-rw-r--r--drivers/scsi/aha152x.c53
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c23
-rw-r--r--drivers/scsi/aic7xxx_old.c11
-rw-r--r--drivers/scsi/aic94xx/Kconfig41
-rw-r--r--drivers/scsi/aic94xx/Makefile39
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h114
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c353
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.c959
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.h52
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c1376
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.h397
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c866
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg.c332
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg.h302
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg_def.h2398
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sas.h785
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c758
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c1089
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.c1404
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.h70
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c642
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c636
-rw-r--r--drivers/scsi/arcmsr/Makefile6
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h472
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c381
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1496
-rw-r--r--drivers/scsi/dpt_i2o.c7
-rw-r--r--drivers/scsi/eata_generic.h1
-rw-r--r--drivers/scsi/eata_pio.c127
-rw-r--r--drivers/scsi/fcal.c3
-rw-r--r--drivers/scsi/g_NCR5380.c3
-rw-r--r--drivers/scsi/gvp11.c8
-rw-r--r--drivers/scsi/gvp11.h4
-rw-r--r--drivers/scsi/hosts.c7
-rw-r--r--drivers/scsi/hptiop.c1
-rw-r--r--drivers/scsi/ipr.c34
-rw-r--r--drivers/scsi/ipr.h82
-rw-r--r--drivers/scsi/iscsi_tcp.c811
-rw-r--r--drivers/scsi/iscsi_tcp.h43
-rw-r--r--drivers/scsi/libata-eh.c1
-rw-r--r--drivers/scsi/libiscsi.c144
-rw-r--r--drivers/scsi/libsas/Kconfig39
-rw-r--r--drivers/scsi/libsas/Makefile36
-rw-r--r--drivers/scsi/libsas/sas_discover.c749
-rw-r--r--drivers/scsi/libsas/sas_dump.c76
-rw-r--r--drivers/scsi/libsas/sas_dump.h42
-rw-r--r--drivers/scsi/libsas/sas_event.c75
-rw-r--r--drivers/scsi/libsas/sas_expander.c1855
-rw-r--r--drivers/scsi/libsas/sas_init.c267
-rw-r--r--drivers/scsi/libsas/sas_internal.h146
-rw-r--r--drivers/scsi/libsas/sas_phy.c158
-rw-r--r--drivers/scsi/libsas/sas_port.c279
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c786
-rw-r--r--drivers/scsi/lpfc/lpfc.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c285
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c186
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c36
-rw-r--r--drivers/scsi/mvme147.c6
-rw-r--r--drivers/scsi/mvme147.h4
-rw-r--r--drivers/scsi/scsi.c58
-rw-r--r--drivers/scsi/scsi.h2
-rw-r--r--drivers/scsi/scsi_debug.c230
-rw-r--r--drivers/scsi/scsi_lib.c10
-rw-r--r--drivers/scsi/scsi_netlink.c199
-rw-r--r--drivers/scsi/scsi_priv.h11
-rw-r--r--drivers/scsi/scsi_proc.c4
-rw-r--r--drivers/scsi/scsi_scan.c146
-rw-r--r--drivers/scsi/scsi_transport_fc.c370
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c83
-rw-r--r--drivers/scsi/scsi_transport_spi.c30
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sgiwd93.c8
-rw-r--r--drivers/scsi/stex.c1252
-rw-r--r--drivers/scsi/ultrastor.c23
-rw-r--r--drivers/scsi/ultrastor.h12
-rw-r--r--drivers/usb/input/hid-core.c2
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/riva/fbdev.c4
176 files changed, 24436 insertions, 2515 deletions
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index ffcb9fd31c38..41e052fecd7f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1912,7 +1912,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
1912 skb->tail = skb->data + skb->len; 1912 skb->tail = skb->data + skb->len;
1913#ifdef USE_CHECKSUM_HW 1913#ifdef USE_CHECKSUM_HW
1914 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) { 1914 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1915 skb->ip_summed = CHECKSUM_HW; 1915 skb->ip_summed = CHECKSUM_COMPLETE;
1916 skb->csum = TCP_CKSUM(skb->data, 1916 skb->csum = TCP_CKSUM(skb->data,
1917 he_vcc->pdu_len); 1917 he_vcc->pdu_len);
1918 } 1918 }
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 4cd23c3eab41..a360215dbce7 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7115,7 +7115,7 @@ static struct pci_device_id DAC960_id_table[] = {
7115 { 7115 {
7116 .vendor = PCI_VENDOR_ID_MYLEX, 7116 .vendor = PCI_VENDOR_ID_MYLEX,
7117 .device = PCI_DEVICE_ID_MYLEX_DAC960_GEM, 7117 .device = PCI_DEVICE_ID_MYLEX_DAC960_GEM,
7118 .subvendor = PCI_ANY_ID, 7118 .subvendor = PCI_VENDOR_ID_MYLEX,
7119 .subdevice = PCI_ANY_ID, 7119 .subdevice = PCI_ANY_ID,
7120 .driver_data = (unsigned long) &DAC960_GEM_privdata, 7120 .driver_data = (unsigned long) &DAC960_GEM_privdata,
7121 }, 7121 },
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index afdff32f6724..05f79d7393f7 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -251,10 +251,6 @@ scsi_cmd_stack_free(int ctlr)
251 stk->pool = NULL; 251 stk->pool = NULL;
252} 252}
253 253
254/* scsi_device_types comes from scsi.h */
255#define DEVICETYPE(n) (n<0 || n>MAX_SCSI_DEVICE_CODE) ? \
256 "Unknown" : scsi_device_types[n]
257
258#if 0 254#if 0
259static int xmargin=8; 255static int xmargin=8;
260static int amargin=60; 256static int amargin=60;
@@ -389,7 +385,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
389 time anyway (the scsi layer's inquiries will show that info) */ 385 time anyway (the scsi layer's inquiries will show that info) */
390 if (hostno != -1) 386 if (hostno != -1)
391 printk("cciss%d: %s device c%db%dt%dl%d added.\n", 387 printk("cciss%d: %s device c%db%dt%dl%d added.\n",
392 ctlr, DEVICETYPE(sd->devtype), hostno, 388 ctlr, scsi_device_type(sd->devtype), hostno,
393 sd->bus, sd->target, sd->lun); 389 sd->bus, sd->target, sd->lun);
394 return 0; 390 return 0;
395} 391}
@@ -407,7 +403,7 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
407 ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1]; 403 ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
408 ccissscsi[ctlr].ndevices--; 404 ccissscsi[ctlr].ndevices--;
409 printk("cciss%d: %s device c%db%dt%dl%d removed.\n", 405 printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
410 ctlr, DEVICETYPE(sd.devtype), hostno, 406 ctlr, scsi_device_type(sd.devtype), hostno,
411 sd.bus, sd.target, sd.lun); 407 sd.bus, sd.target, sd.lun);
412} 408}
413 409
@@ -458,7 +454,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
458 if (found == 0) { /* device no longer present. */ 454 if (found == 0) { /* device no longer present. */
459 changes++; 455 changes++;
460 /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n", 456 /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
461 ctlr, DEVICETYPE(csd->devtype), hostno, 457 ctlr, scsi_device_type(csd->devtype), hostno,
462 csd->bus, csd->target, csd->lun); */ 458 csd->bus, csd->target, csd->lun); */
463 cciss_scsi_remove_entry(ctlr, hostno, i); 459 cciss_scsi_remove_entry(ctlr, hostno, i);
464 /* note, i not incremented */ 460 /* note, i not incremented */
@@ -468,7 +464,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
468 printk("cciss%d: device c%db%dt%dl%d type changed " 464 printk("cciss%d: device c%db%dt%dl%d type changed "
469 "(device type now %s).\n", 465 "(device type now %s).\n",
470 ctlr, hostno, csd->bus, csd->target, csd->lun, 466 ctlr, hostno, csd->bus, csd->target, csd->lun,
471 DEVICETYPE(csd->devtype)); 467 scsi_device_type(csd->devtype));
472 csd->devtype = sd[j].devtype; 468 csd->devtype = sd[j].devtype;
473 i++; /* so just move along. */ 469 i++; /* so just move along. */
474 } else /* device is same as it ever was, */ 470 } else /* device is same as it ever was, */
@@ -1098,7 +1094,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1098 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 1094 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
1099 printk(KERN_INFO "cciss%d: %s ignored, " 1095 printk(KERN_INFO "cciss%d: %s ignored, "
1100 "too many devices.\n", cntl_num, 1096 "too many devices.\n", cntl_num,
1101 DEVICETYPE(devtype)); 1097 scsi_device_type(devtype));
1102 break; 1098 break;
1103 } 1099 }
1104 memcpy(&currentsd[ncurrent].scsi3addr[0], 1100 memcpy(&currentsd[ncurrent].scsi3addr[0],
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 3c623b67ea1c..8b3317fd46c9 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -117,7 +117,7 @@ struct agp_bridge_driver {
117}; 117};
118 118
119struct agp_bridge_data { 119struct agp_bridge_data {
120 struct agp_version *version; 120 const struct agp_version *version;
121 struct agp_bridge_driver *driver; 121 struct agp_bridge_driver *driver;
122 struct vm_operations_struct *vm_ops; 122 struct vm_operations_struct *vm_ops;
123 void *previous_size; 123 void *previous_size;
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 509adc403250..d59e037ddd12 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -44,7 +44,7 @@
44 * past 0.99 at all due to some boolean logic error. */ 44 * past 0.99 at all due to some boolean logic error. */
45#define AGPGART_VERSION_MAJOR 0 45#define AGPGART_VERSION_MAJOR 0
46#define AGPGART_VERSION_MINOR 101 46#define AGPGART_VERSION_MINOR 101
47static struct agp_version agp_current_version = 47static const struct agp_version agp_current_version =
48{ 48{
49 .major = AGPGART_VERSION_MAJOR, 49 .major = AGPGART_VERSION_MAJOR,
50 .minor = AGPGART_VERSION_MINOR, 50 .minor = AGPGART_VERSION_MINOR,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index b788b0a3bbf3..30f730ff81c1 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -337,13 +337,6 @@ static struct agp_bridge_driver efficeon_driver = {
337 .agp_destroy_page = agp_generic_destroy_page, 337 .agp_destroy_page = agp_generic_destroy_page,
338}; 338};
339 339
340
341static int agp_efficeon_resume(struct pci_dev *pdev)
342{
343 printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
344 return efficeon_configure();
345}
346
347static int __devinit agp_efficeon_probe(struct pci_dev *pdev, 340static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
348 const struct pci_device_id *ent) 341 const struct pci_device_id *ent)
349{ 342{
@@ -414,11 +407,18 @@ static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
414 agp_put_bridge(bridge); 407 agp_put_bridge(bridge);
415} 408}
416 409
410#ifdef CONFIG_PM
417static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state) 411static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
418{ 412{
419 return 0; 413 return 0;
420} 414}
421 415
416static int agp_efficeon_resume(struct pci_dev *pdev)
417{
418 printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
419 return efficeon_configure();
420}
421#endif
422 422
423static struct pci_device_id agp_efficeon_pci_table[] = { 423static struct pci_device_id agp_efficeon_pci_table[] = {
424 { 424 {
@@ -439,8 +439,10 @@ static struct pci_driver agp_efficeon_pci_driver = {
439 .id_table = agp_efficeon_pci_table, 439 .id_table = agp_efficeon_pci_table,
440 .probe = agp_efficeon_probe, 440 .probe = agp_efficeon_probe,
441 .remove = agp_efficeon_remove, 441 .remove = agp_efficeon_remove,
442#ifdef CONFIG_PM
442 .suspend = agp_efficeon_suspend, 443 .suspend = agp_efficeon_suspend,
443 .resume = agp_efficeon_resume, 444 .resume = agp_efficeon_resume,
445#endif
444}; 446};
445 447
446static int __init agp_efficeon_init(void) 448static int __init agp_efficeon_init(void)
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index d9c5a9142ad1..0f2ed2aa2d81 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -151,35 +151,12 @@ static void agp_add_seg_to_client(struct agp_client *client,
151 client->segments = seg; 151 client->segments = seg;
152} 152}
153 153
154/* Originally taken from linux/mm/mmap.c from the array
155 * protection_map.
156 * The original really should be exported to modules, or
157 * some routine which does the conversion for you
158 */
159
160static const pgprot_t my_protect_map[16] =
161{
162 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
163 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
164};
165
166static pgprot_t agp_convert_mmap_flags(int prot) 154static pgprot_t agp_convert_mmap_flags(int prot)
167{ 155{
168#define _trans(x,bit1,bit2) \
169((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
170
171 unsigned long prot_bits; 156 unsigned long prot_bits;
172 pgprot_t temp;
173
174 prot_bits = _trans(prot, PROT_READ, VM_READ) |
175 _trans(prot, PROT_WRITE, VM_WRITE) |
176 _trans(prot, PROT_EXEC, VM_EXEC);
177
178 prot_bits |= VM_SHARED;
179 157
180 temp = my_protect_map[prot_bits & 0x0000000f]; 158 prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
181 159 return vm_get_page_prot(prot_bits);
182 return temp;
183} 160}
184 161
185static int agp_create_segment(struct agp_client *client, struct agp_region *region) 162static int agp_create_segment(struct agp_client *client, struct agp_region *region)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index cc5ea347a8a7..0dcdb363923f 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -568,25 +568,34 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
568 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); 568 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
569 goto done; 569 goto done;
570 570
571 } else if (*requested_mode & AGPSTAT3_4X) {
572 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
573 *bridge_agpstat |= AGPSTAT3_4X;
574 goto done;
575
571 } else { 576 } else {
572 577
573 /* 578 /*
574 * If we didn't specify AGPx8, we can only do x4. 579 * If we didn't specify an AGP mode, we see if both
575 * If the hardware can't do x4, we're up shit creek, and never 580 * the graphics card, and the bridge can do x8, and use if so.
576 * should have got this far. 581 * If not, we fall back to x4 mode.
577 */ 582 */
578 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); 583 if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
579 if ((*bridge_agpstat & AGPSTAT3_4X) && (*vga_agpstat & AGPSTAT3_4X)) 584 printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode supported by bridge & card (x8).\n");
580 *bridge_agpstat |= AGPSTAT3_4X; 585 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
581 else { 586 *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
582 printk(KERN_INFO PFX "Badness. Don't know which AGP mode to set. " 587 } else {
583 "[bridge_agpstat:%x vga_agpstat:%x fell back to:- bridge_agpstat:%x vga_agpstat:%x]\n", 588 printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
584 origbridge, origvga, *bridge_agpstat, *vga_agpstat); 589 if (!(*bridge_agpstat & AGPSTAT3_8X)) {
585 if (!(*bridge_agpstat & AGPSTAT3_4X)) 590 printk("bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", *bridge_agpstat, origbridge);
586 printk(KERN_INFO PFX "Bridge couldn't do AGP x4.\n"); 591 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
587 if (!(*vga_agpstat & AGPSTAT3_4X)) 592 *bridge_agpstat |= AGPSTAT3_4X;
588 printk(KERN_INFO PFX "Graphic card couldn't do AGP x4.\n"); 593 }
589 return; 594 if (!(*vga_agpstat & AGPSTAT3_8X)) {
595 printk("graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", *vga_agpstat, origvga);
596 *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
597 *vga_agpstat |= AGPSTAT3_4X;
598 }
590 } 599 }
591 } 600 }
592 601
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 61ac3809f997..d1ede7db5a12 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2,14 +2,6 @@
2 * Intel AGPGART routines. 2 * Intel AGPGART routines.
3 */ 3 */
4 4
5/*
6 * Intel(R) 855GM/852GM and 865G support added by David Dawes
7 * <dawes@tungstengraphics.com>.
8 *
9 * Intel(R) 915G/915GM support added by Alan Hourihane
10 * <alanh@tungstengraphics.com>.
11 */
12
13#include <linux/module.h> 5#include <linux/module.h>
14#include <linux/pci.h> 6#include <linux/pci.h>
15#include <linux/init.h> 7#include <linux/init.h>
@@ -17,6 +9,21 @@
17#include <linux/agp_backend.h> 9#include <linux/agp_backend.h>
18#include "agp.h" 10#include "agp.h"
19 11
12#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
13#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
14#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
15#define PCI_DEVICE_ID_INTEL_82965G_1_IG 0x2982
16#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
17#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
18#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
19#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
20
21#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
22 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
23 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
24 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
25
26
20/* Intel 815 register */ 27/* Intel 815 register */
21#define INTEL_815_APCONT 0x51 28#define INTEL_815_APCONT 0x51
22#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF 29#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
@@ -40,6 +47,8 @@
40#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) 47#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
41#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) 48#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
42 49
50/* Intel 965G registers */
51#define I965_MSAC 0x62
43 52
44/* Intel 7505 registers */ 53/* Intel 7505 registers */
45#define INTEL_I7505_APSIZE 0x74 54#define INTEL_I7505_APSIZE 0x74
@@ -354,6 +363,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
354 /* The 64M mode still requires a 128k gatt */ 363 /* The 64M mode still requires a 128k gatt */
355 {64, 16384, 5}, 364 {64, 16384, 5},
356 {256, 65536, 6}, 365 {256, 65536, 6},
366 {512, 131072, 7},
357}; 367};
358 368
359static struct _intel_i830_private { 369static struct _intel_i830_private {
@@ -377,7 +387,11 @@ static void intel_i830_init_gtt_entries(void)
377 /* We obtain the size of the GTT, which is also stored (for some 387 /* We obtain the size of the GTT, which is also stored (for some
378 * reason) at the top of stolen memory. Then we add 4KB to that 388 * reason) at the top of stolen memory. Then we add 4KB to that
379 * for the video BIOS popup, which is also stored in there. */ 389 * for the video BIOS popup, which is also stored in there. */
380 size = agp_bridge->driver->fetch_size() + 4; 390
391 if (IS_I965)
392 size = 512 + 4;
393 else
394 size = agp_bridge->driver->fetch_size() + 4;
381 395
382 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || 396 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
383 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { 397 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
@@ -423,7 +437,7 @@ static void intel_i830_init_gtt_entries(void)
423 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || 437 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
424 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || 438 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
425 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || 439 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
426 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB) 440 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || IS_I965 )
427 gtt_entries = MB(48) - KB(size); 441 gtt_entries = MB(48) - KB(size);
428 else 442 else
429 gtt_entries = 0; 443 gtt_entries = 0;
@@ -433,7 +447,7 @@ static void intel_i830_init_gtt_entries(void)
433 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || 447 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
434 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || 448 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
435 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || 449 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
436 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB) 450 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || IS_I965)
437 gtt_entries = MB(64) - KB(size); 451 gtt_entries = MB(64) - KB(size);
438 else 452 else
439 gtt_entries = 0; 453 gtt_entries = 0;
@@ -791,6 +805,77 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
791 805
792 return 0; 806 return 0;
793} 807}
808static int intel_i965_fetch_size(void)
809{
810 struct aper_size_info_fixed *values;
811 u32 offset = 0;
812 u8 temp;
813
814#define I965_512MB_ADDRESS_MASK (3<<1)
815
816 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
817
818 pci_read_config_byte(intel_i830_private.i830_dev, I965_MSAC, &temp);
819 temp &= I965_512MB_ADDRESS_MASK;
820 switch (temp) {
821 case 0x00:
822 offset = 0; /* 128MB */
823 break;
824 case 0x06:
825 offset = 3; /* 512MB */
826 break;
827 default:
828 case 0x02:
829 offset = 2; /* 256MB */
830 break;
831 }
832
833 agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
834
835 return values[offset].size;
836}
837
838/* The intel i965 automatically initializes the agp aperture during POST.
839+ * Use the memory already set aside for in the GTT.
840+ */
841static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
842{
843 int page_order;
844 struct aper_size_info_fixed *size;
845 int num_entries;
846 u32 temp;
847
848 size = agp_bridge->current_size;
849 page_order = size->page_order;
850 num_entries = size->num_entries;
851 agp_bridge->gatt_table_real = NULL;
852
853 pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
854
855 temp &= 0xfff00000;
856 intel_i830_private.gtt = ioremap((temp + (512 * 1024)) , 512 * 1024);
857
858 if (!intel_i830_private.gtt)
859 return -ENOMEM;
860
861
862 intel_i830_private.registers = ioremap(temp,128 * 4096);
863 if (!intel_i830_private.registers)
864 return -ENOMEM;
865
866 temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
867 global_cache_flush(); /* FIXME: ? */
868
869 /* we have to call this as early as possible after the MMIO base address is known */
870 intel_i830_init_gtt_entries();
871
872 agp_bridge->gatt_table = NULL;
873
874 agp_bridge->gatt_bus_addr = temp;
875
876 return 0;
877}
878
794 879
795static int intel_fetch_size(void) 880static int intel_fetch_size(void)
796{ 881{
@@ -1307,7 +1392,7 @@ static struct agp_bridge_driver intel_830_driver = {
1307 .owner = THIS_MODULE, 1392 .owner = THIS_MODULE,
1308 .aperture_sizes = intel_i830_sizes, 1393 .aperture_sizes = intel_i830_sizes,
1309 .size_type = FIXED_APER_SIZE, 1394 .size_type = FIXED_APER_SIZE,
1310 .num_aperture_sizes = 3, 1395 .num_aperture_sizes = 4,
1311 .needs_scratch_page = TRUE, 1396 .needs_scratch_page = TRUE,
1312 .configure = intel_i830_configure, 1397 .configure = intel_i830_configure,
1313 .fetch_size = intel_i830_fetch_size, 1398 .fetch_size = intel_i830_fetch_size,
@@ -1469,7 +1554,7 @@ static struct agp_bridge_driver intel_915_driver = {
1469 .owner = THIS_MODULE, 1554 .owner = THIS_MODULE,
1470 .aperture_sizes = intel_i830_sizes, 1555 .aperture_sizes = intel_i830_sizes,
1471 .size_type = FIXED_APER_SIZE, 1556 .size_type = FIXED_APER_SIZE,
1472 .num_aperture_sizes = 3, 1557 .num_aperture_sizes = 4,
1473 .needs_scratch_page = TRUE, 1558 .needs_scratch_page = TRUE,
1474 .configure = intel_i915_configure, 1559 .configure = intel_i915_configure,
1475 .fetch_size = intel_i915_fetch_size, 1560 .fetch_size = intel_i915_fetch_size,
@@ -1489,6 +1574,29 @@ static struct agp_bridge_driver intel_915_driver = {
1489 .agp_destroy_page = agp_generic_destroy_page, 1574 .agp_destroy_page = agp_generic_destroy_page,
1490}; 1575};
1491 1576
1577static struct agp_bridge_driver intel_i965_driver = {
1578 .owner = THIS_MODULE,
1579 .aperture_sizes = intel_i830_sizes,
1580 .size_type = FIXED_APER_SIZE,
1581 .num_aperture_sizes = 4,
1582 .needs_scratch_page = TRUE,
1583 .configure = intel_i915_configure,
1584 .fetch_size = intel_i965_fetch_size,
1585 .cleanup = intel_i915_cleanup,
1586 .tlb_flush = intel_i810_tlbflush,
1587 .mask_memory = intel_i810_mask_memory,
1588 .masks = intel_i810_masks,
1589 .agp_enable = intel_i810_agp_enable,
1590 .cache_flush = global_cache_flush,
1591 .create_gatt_table = intel_i965_create_gatt_table,
1592 .free_gatt_table = intel_i830_free_gatt_table,
1593 .insert_memory = intel_i915_insert_entries,
1594 .remove_memory = intel_i915_remove_entries,
1595 .alloc_by_type = intel_i830_alloc_by_type,
1596 .free_by_type = intel_i810_free_by_type,
1597 .agp_alloc_page = agp_generic_alloc_page,
1598 .agp_destroy_page = agp_generic_destroy_page,
1599};
1492 1600
1493static struct agp_bridge_driver intel_7505_driver = { 1601static struct agp_bridge_driver intel_7505_driver = {
1494 .owner = THIS_MODULE, 1602 .owner = THIS_MODULE,
@@ -1684,6 +1792,35 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
1684 bridge->driver = &intel_845_driver; 1792 bridge->driver = &intel_845_driver;
1685 name = "945GM"; 1793 name = "945GM";
1686 break; 1794 break;
1795 case PCI_DEVICE_ID_INTEL_82946GZ_HB:
1796 if (find_i830(PCI_DEVICE_ID_INTEL_82946GZ_IG))
1797 bridge->driver = &intel_i965_driver;
1798 else
1799 bridge->driver = &intel_845_driver;
1800 name = "946GZ";
1801 break;
1802 case PCI_DEVICE_ID_INTEL_82965G_1_HB:
1803 if (find_i830(PCI_DEVICE_ID_INTEL_82965G_1_IG))
1804 bridge->driver = &intel_i965_driver;
1805 else
1806 bridge->driver = &intel_845_driver;
1807 name = "965G";
1808 break;
1809 case PCI_DEVICE_ID_INTEL_82965Q_HB:
1810 if (find_i830(PCI_DEVICE_ID_INTEL_82965Q_IG))
1811 bridge->driver = &intel_i965_driver;
1812 else
1813 bridge->driver = &intel_845_driver;
1814 name = "965Q";
1815 break;
1816 case PCI_DEVICE_ID_INTEL_82965G_HB:
1817 if (find_i830(PCI_DEVICE_ID_INTEL_82965G_IG))
1818 bridge->driver = &intel_i965_driver;
1819 else
1820 bridge->driver = &intel_845_driver;
1821 name = "965G";
1822 break;
1823
1687 case PCI_DEVICE_ID_INTEL_7505_0: 1824 case PCI_DEVICE_ID_INTEL_7505_0:
1688 bridge->driver = &intel_7505_driver; 1825 bridge->driver = &intel_7505_driver;
1689 name = "E7505"; 1826 name = "E7505";
@@ -1766,6 +1903,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
1766 agp_put_bridge(bridge); 1903 agp_put_bridge(bridge);
1767} 1904}
1768 1905
1906#ifdef CONFIG_PM
1769static int agp_intel_resume(struct pci_dev *pdev) 1907static int agp_intel_resume(struct pci_dev *pdev)
1770{ 1908{
1771 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 1909 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
@@ -1786,9 +1924,12 @@ static int agp_intel_resume(struct pci_dev *pdev)
1786 intel_i830_configure(); 1924 intel_i830_configure();
1787 else if (bridge->driver == &intel_810_driver) 1925 else if (bridge->driver == &intel_810_driver)
1788 intel_i810_configure(); 1926 intel_i810_configure();
1927 else if (bridge->driver == &intel_i965_driver)
1928 intel_i915_configure();
1789 1929
1790 return 0; 1930 return 0;
1791} 1931}
1932#endif
1792 1933
1793static struct pci_device_id agp_intel_pci_table[] = { 1934static struct pci_device_id agp_intel_pci_table[] = {
1794#define ID(x) \ 1935#define ID(x) \
@@ -1825,6 +1966,10 @@ static struct pci_device_id agp_intel_pci_table[] = {
1825 ID(PCI_DEVICE_ID_INTEL_82915GM_HB), 1966 ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
1826 ID(PCI_DEVICE_ID_INTEL_82945G_HB), 1967 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
1827 ID(PCI_DEVICE_ID_INTEL_82945GM_HB), 1968 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
1969 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
1970 ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
1971 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
1972 ID(PCI_DEVICE_ID_INTEL_82965G_HB),
1828 { } 1973 { }
1829}; 1974};
1830 1975
@@ -1835,7 +1980,9 @@ static struct pci_driver agp_intel_pci_driver = {
1835 .id_table = agp_intel_pci_table, 1980 .id_table = agp_intel_pci_table,
1836 .probe = agp_intel_probe, 1981 .probe = agp_intel_probe,
1837 .remove = __devexit_p(agp_intel_remove), 1982 .remove = __devexit_p(agp_intel_remove),
1983#ifdef CONFIG_PM
1838 .resume = agp_intel_resume, 1984 .resume = agp_intel_resume,
1985#endif
1839}; 1986};
1840 1987
1841static int __init agp_intel_init(void) 1988static int __init agp_intel_init(void)
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index b8ec25d17478..c149ac9ce9a7 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -9,7 +9,7 @@
9#include <linux/agp_backend.h> 9#include <linux/agp_backend.h>
10#include "agp.h" 10#include "agp.h"
11 11
12static struct pci_device_id agp_via_pci_table[]; 12static const struct pci_device_id agp_via_pci_table[];
13 13
14#define VIA_GARTCTRL 0x80 14#define VIA_GARTCTRL 0x80
15#define VIA_APSIZE 0x84 15#define VIA_APSIZE 0x84
@@ -485,7 +485,7 @@ static int agp_via_resume(struct pci_dev *pdev)
485#endif /* CONFIG_PM */ 485#endif /* CONFIG_PM */
486 486
487/* must be the same order as name table above */ 487/* must be the same order as name table above */
488static struct pci_device_id agp_via_pci_table[] = { 488static const struct pci_device_id agp_via_pci_table[] = {
489#define ID(x) \ 489#define ID(x) \
490 { \ 490 { \
491 .class = (PCI_CLASS_BRIDGE_HOST << 8), \ 491 .class = (PCI_CLASS_BRIDGE_HOST << 8), \
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
index a0e5eac5f33a..b8c22255f6ad 100644
--- a/drivers/char/briq_panel.c
+++ b/drivers/char/briq_panel.c
@@ -87,7 +87,7 @@ static int briq_panel_release(struct inode *ino, struct file *filep)
87 return 0; 87 return 0;
88} 88}
89 89
90static ssize_t briq_panel_read(struct file *file, char *buf, size_t count, 90static ssize_t briq_panel_read(struct file *file, char __user *buf, size_t count,
91 loff_t *ppos) 91 loff_t *ppos)
92{ 92{
93 unsigned short c; 93 unsigned short c;
@@ -135,7 +135,7 @@ static void scroll_vfd( void )
135 vfd_cursor = 20; 135 vfd_cursor = 20;
136} 136}
137 137
138static ssize_t briq_panel_write(struct file *file, const char *buf, size_t len, 138static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_t len,
139 loff_t *ppos) 139 loff_t *ppos)
140{ 140{
141 size_t indx = len; 141 size_t indx = len;
@@ -150,19 +150,22 @@ static ssize_t briq_panel_write(struct file *file, const char *buf, size_t len,
150 return -EBUSY; 150 return -EBUSY;
151 151
152 for (;;) { 152 for (;;) {
153 char c;
153 if (!indx) 154 if (!indx)
154 break; 155 break;
156 if (get_user(c, buf))
157 return -EFAULT;
155 if (esc) { 158 if (esc) {
156 set_led(*buf); 159 set_led(c);
157 esc = 0; 160 esc = 0;
158 } else if (*buf == 27) { 161 } else if (c == 27) {
159 esc = 1; 162 esc = 1;
160 } else if (*buf == 12) { 163 } else if (c == 12) {
161 /* do a form feed */ 164 /* do a form feed */
162 for (i=0; i<40; i++) 165 for (i=0; i<40; i++)
163 vfd[i] = ' '; 166 vfd[i] = ' ';
164 vfd_cursor = 0; 167 vfd_cursor = 0;
165 } else if (*buf == 10) { 168 } else if (c == 10) {
166 if (vfd_cursor < 20) 169 if (vfd_cursor < 20)
167 vfd_cursor = 20; 170 vfd_cursor = 20;
168 else if (vfd_cursor < 40) 171 else if (vfd_cursor < 40)
@@ -175,7 +178,7 @@ static ssize_t briq_panel_write(struct file *file, const char *buf, size_t len,
175 /* just a character */ 178 /* just a character */
176 if (vfd_cursor > 39) 179 if (vfd_cursor > 39)
177 scroll_vfd(); 180 scroll_vfd();
178 vfd[vfd_cursor++] = *buf; 181 vfd[vfd_cursor++] = c;
179 } 182 }
180 indx--; 183 indx--;
181 buf++; 184 buf++;
@@ -202,7 +205,7 @@ static struct miscdevice briq_panel_miscdev = {
202static int __init briq_panel_init(void) 205static int __init briq_panel_init(void)
203{ 206{
204 struct device_node *root = find_path_device("/"); 207 struct device_node *root = find_path_device("/");
205 char *machine; 208 const char *machine;
206 int i; 209 int i;
207 210
208 machine = get_property(root, "model", NULL); 211 machine = get_property(root, "model", NULL);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 84dfc4278139..8c09997cc3d6 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -3488,7 +3488,7 @@ static int stli_initecp(stlibrd_t *brdp)
3488 */ 3488 */
3489 EBRDENABLE(brdp); 3489 EBRDENABLE(brdp);
3490 sigsp = (cdkecpsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR); 3490 sigsp = (cdkecpsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
3491 memcpy(&sig, sigsp, sizeof(cdkecpsig_t)); 3491 memcpy_fromio(&sig, sigsp, sizeof(cdkecpsig_t));
3492 EBRDDISABLE(brdp); 3492 EBRDDISABLE(brdp);
3493 3493
3494 if (sig.magic != cpu_to_le32(ECP_MAGIC)) 3494 if (sig.magic != cpu_to_le32(ECP_MAGIC))
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3df613ae4ec..d35a9f06ab7b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -32,7 +32,7 @@
32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg) 32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
33 33
34/** 34/**
35 * The "cpufreq driver" - the arch- or hardware-dependend low 35 * The "cpufreq driver" - the arch- or hardware-dependent low
36 * level driver of CPUFreq support, and its spinlock. This lock 36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array. 37 * also protects the cpufreq_cpu_data array.
38 */ 38 */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 52cf1f021825..bf8aa45d4f01 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -55,6 +55,10 @@ struct cpu_dbs_info_s {
55 struct cpufreq_policy *cur_policy; 55 struct cpufreq_policy *cur_policy;
56 struct work_struct work; 56 struct work_struct work;
57 unsigned int enable; 57 unsigned int enable;
58 struct cpufreq_frequency_table *freq_table;
59 unsigned int freq_lo;
60 unsigned int freq_lo_jiffies;
61 unsigned int freq_hi_jiffies;
58}; 62};
59static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 63static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
60 64
@@ -72,15 +76,15 @@ static DEFINE_MUTEX(dbs_mutex);
72 76
73static struct workqueue_struct *kondemand_wq; 77static struct workqueue_struct *kondemand_wq;
74 78
75struct dbs_tuners { 79static struct dbs_tuners {
76 unsigned int sampling_rate; 80 unsigned int sampling_rate;
77 unsigned int up_threshold; 81 unsigned int up_threshold;
78 unsigned int ignore_nice; 82 unsigned int ignore_nice;
79}; 83 unsigned int powersave_bias;
80 84} dbs_tuners_ins = {
81static struct dbs_tuners dbs_tuners_ins = {
82 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 85 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
83 .ignore_nice = 0, 86 .ignore_nice = 0,
87 .powersave_bias = 0,
84}; 88};
85 89
86static inline cputime64_t get_cpu_idle_time(unsigned int cpu) 90static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
@@ -96,6 +100,70 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
96 return retval; 100 return retval;
97} 101}
98 102
103/*
104 * Find right freq to be set now with powersave_bias on.
105 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
106 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
107 */
108static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
109 unsigned int freq_next,
110 unsigned int relation)
111{
112 unsigned int freq_req, freq_reduc, freq_avg;
113 unsigned int freq_hi, freq_lo;
114 unsigned int index = 0;
115 unsigned int jiffies_total, jiffies_hi, jiffies_lo;
116 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
117
118 if (!dbs_info->freq_table) {
119 dbs_info->freq_lo = 0;
120 dbs_info->freq_lo_jiffies = 0;
121 return freq_next;
122 }
123
124 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
125 relation, &index);
126 freq_req = dbs_info->freq_table[index].frequency;
127 freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
128 freq_avg = freq_req - freq_reduc;
129
130 /* Find freq bounds for freq_avg in freq_table */
131 index = 0;
132 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
133 CPUFREQ_RELATION_H, &index);
134 freq_lo = dbs_info->freq_table[index].frequency;
135 index = 0;
136 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
137 CPUFREQ_RELATION_L, &index);
138 freq_hi = dbs_info->freq_table[index].frequency;
139
140 /* Find out how long we have to be in hi and lo freqs */
141 if (freq_hi == freq_lo) {
142 dbs_info->freq_lo = 0;
143 dbs_info->freq_lo_jiffies = 0;
144 return freq_lo;
145 }
146 jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
147 jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
148 jiffies_hi += ((freq_hi - freq_lo) / 2);
149 jiffies_hi /= (freq_hi - freq_lo);
150 jiffies_lo = jiffies_total - jiffies_hi;
151 dbs_info->freq_lo = freq_lo;
152 dbs_info->freq_lo_jiffies = jiffies_lo;
153 dbs_info->freq_hi_jiffies = jiffies_hi;
154 return freq_hi;
155}
156
157static void ondemand_powersave_bias_init(void)
158{
159 int i;
160 for_each_online_cpu(i) {
161 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
162 dbs_info->freq_table = cpufreq_frequency_get_table(i);
163 dbs_info->freq_lo = 0;
164 }
165}
166
99/************************** sysfs interface ************************/ 167/************************** sysfs interface ************************/
100static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) 168static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
101{ 169{
@@ -124,6 +192,7 @@ static ssize_t show_##file_name \
124show_one(sampling_rate, sampling_rate); 192show_one(sampling_rate, sampling_rate);
125show_one(up_threshold, up_threshold); 193show_one(up_threshold, up_threshold);
126show_one(ignore_nice_load, ignore_nice); 194show_one(ignore_nice_load, ignore_nice);
195show_one(powersave_bias, powersave_bias);
127 196
128static ssize_t store_sampling_rate(struct cpufreq_policy *unused, 197static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
129 const char *buf, size_t count) 198 const char *buf, size_t count)
@@ -198,6 +267,27 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
198 return count; 267 return count;
199} 268}
200 269
270static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
271 const char *buf, size_t count)
272{
273 unsigned int input;
274 int ret;
275 ret = sscanf(buf, "%u", &input);
276
277 if (ret != 1)
278 return -EINVAL;
279
280 if (input > 1000)
281 input = 1000;
282
283 mutex_lock(&dbs_mutex);
284 dbs_tuners_ins.powersave_bias = input;
285 ondemand_powersave_bias_init();
286 mutex_unlock(&dbs_mutex);
287
288 return count;
289}
290
201#define define_one_rw(_name) \ 291#define define_one_rw(_name) \
202static struct freq_attr _name = \ 292static struct freq_attr _name = \
203__ATTR(_name, 0644, show_##_name, store_##_name) 293__ATTR(_name, 0644, show_##_name, store_##_name)
@@ -205,6 +295,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
205define_one_rw(sampling_rate); 295define_one_rw(sampling_rate);
206define_one_rw(up_threshold); 296define_one_rw(up_threshold);
207define_one_rw(ignore_nice_load); 297define_one_rw(ignore_nice_load);
298define_one_rw(powersave_bias);
208 299
209static struct attribute * dbs_attributes[] = { 300static struct attribute * dbs_attributes[] = {
210 &sampling_rate_max.attr, 301 &sampling_rate_max.attr,
@@ -212,6 +303,7 @@ static struct attribute * dbs_attributes[] = {
212 &sampling_rate.attr, 303 &sampling_rate.attr,
213 &up_threshold.attr, 304 &up_threshold.attr,
214 &ignore_nice_load.attr, 305 &ignore_nice_load.attr,
306 &powersave_bias.attr,
215 NULL 307 NULL
216}; 308};
217 309
@@ -234,6 +326,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
234 if (!this_dbs_info->enable) 326 if (!this_dbs_info->enable)
235 return; 327 return;
236 328
329 this_dbs_info->freq_lo = 0;
237 policy = this_dbs_info->cur_policy; 330 policy = this_dbs_info->cur_policy;
238 cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); 331 cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies, 332 total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
@@ -274,11 +367,18 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
274 /* Check for frequency increase */ 367 /* Check for frequency increase */
275 if (load > dbs_tuners_ins.up_threshold) { 368 if (load > dbs_tuners_ins.up_threshold) {
276 /* if we are already at full speed then break out early */ 369 /* if we are already at full speed then break out early */
277 if (policy->cur == policy->max) 370 if (!dbs_tuners_ins.powersave_bias) {
278 return; 371 if (policy->cur == policy->max)
279 372 return;
280 __cpufreq_driver_target(policy, policy->max, 373
281 CPUFREQ_RELATION_H); 374 __cpufreq_driver_target(policy, policy->max,
375 CPUFREQ_RELATION_H);
376 } else {
377 int freq = powersave_bias_target(policy, policy->max,
378 CPUFREQ_RELATION_H);
379 __cpufreq_driver_target(policy, freq,
380 CPUFREQ_RELATION_L);
381 }
282 return; 382 return;
283 } 383 }
284 384
@@ -293,37 +393,64 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
293 * policy. To be safe, we focus 10 points under the threshold. 393 * policy. To be safe, we focus 10 points under the threshold.
294 */ 394 */
295 if (load < (dbs_tuners_ins.up_threshold - 10)) { 395 if (load < (dbs_tuners_ins.up_threshold - 10)) {
296 unsigned int freq_next; 396 unsigned int freq_next = (policy->cur * load) /
297 freq_next = (policy->cur * load) /
298 (dbs_tuners_ins.up_threshold - 10); 397 (dbs_tuners_ins.up_threshold - 10);
299 398 if (!dbs_tuners_ins.powersave_bias) {
300 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); 399 __cpufreq_driver_target(policy, freq_next,
400 CPUFREQ_RELATION_L);
401 } else {
402 int freq = powersave_bias_target(policy, freq_next,
403 CPUFREQ_RELATION_L);
404 __cpufreq_driver_target(policy, freq,
405 CPUFREQ_RELATION_L);
406 }
301 } 407 }
302} 408}
303 409
410/* Sampling types */
411enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
412
304static void do_dbs_timer(void *data) 413static void do_dbs_timer(void *data)
305{ 414{
306 unsigned int cpu = smp_processor_id(); 415 unsigned int cpu = smp_processor_id();
307 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 416 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
417 /* We want all CPUs to do sampling nearly on same jiffy */
418 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
419 delay -= jiffies % delay;
308 420
309 if (!dbs_info->enable) 421 if (!dbs_info->enable)
310 return; 422 return;
311 423 /* Common NORMAL_SAMPLE setup */
312 lock_cpu_hotplug(); 424 INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
313 dbs_check_cpu(dbs_info); 425 if (!dbs_tuners_ins.powersave_bias ||
314 unlock_cpu_hotplug(); 426 (unsigned long) data == DBS_NORMAL_SAMPLE) {
315 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 427 lock_cpu_hotplug();
316 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 428 dbs_check_cpu(dbs_info);
429 unlock_cpu_hotplug();
430 if (dbs_info->freq_lo) {
431 /* Setup timer for SUB_SAMPLE */
432 INIT_WORK(&dbs_info->work, do_dbs_timer,
433 (void *)DBS_SUB_SAMPLE);
434 delay = dbs_info->freq_hi_jiffies;
435 }
436 } else {
437 __cpufreq_driver_target(dbs_info->cur_policy,
438 dbs_info->freq_lo,
439 CPUFREQ_RELATION_H);
440 }
441 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
317} 442}
318 443
319static inline void dbs_timer_init(unsigned int cpu) 444static inline void dbs_timer_init(unsigned int cpu)
320{ 445{
321 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 446 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
447 /* We want all CPUs to do sampling nearly on same jiffy */
448 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
449 delay -= jiffies % delay;
322 450
323 INIT_WORK(&dbs_info->work, do_dbs_timer, 0); 451 ondemand_powersave_bias_init();
324 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 452 INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
325 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 453 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
326 return;
327} 454}
328 455
329static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 456static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 25eee5394201..c2ecc599dc5f 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,12 +350,10 @@ __init cpufreq_stats_init(void)
350 } 350 }
351 351
352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
353 lock_cpu_hotplug();
354 for_each_online_cpu(cpu) { 353 for_each_online_cpu(cpu) {
355 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE, 354 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
356 (void *)(long)cpu); 355 (void *)(long)cpu);
357 } 356 }
358 unlock_cpu_hotplug();
359 return 0; 357 return 0;
360} 358}
361static void 359static void
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 1da9adbccaec..d06b59083f6e 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -38,6 +38,7 @@
38#define __IB_MAD_PRIV_H__ 38#define __IB_MAD_PRIV_H__
39 39
40#include <linux/completion.h> 40#include <linux/completion.h>
41#include <linux/err.h>
41#include <linux/pci.h> 42#include <linux/pci.h>
42#include <linux/workqueue.h> 43#include <linux/workqueue.h>
43#include <rdma/ib_mad.h> 44#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 8fddc8cccdf3..dd6af551108b 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -49,6 +49,7 @@
49#include <linux/init.h> 49#include <linux/init.h>
50#include <linux/dma-mapping.h> 50#include <linux/dma-mapping.h>
51#include <linux/if_arp.h> 51#include <linux/if_arp.h>
52#include <linux/vmalloc.h>
52 53
53#include <asm/io.h> 54#include <asm/io.h>
54#include <asm/irq.h> 55#include <asm/irq.h>
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 1c3c9d65ecea..f49a32b7a8f6 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -50,6 +50,7 @@
50#include <linux/dma-mapping.h> 50#include <linux/dma-mapping.h>
51#include <linux/mm.h> 51#include <linux/mm.h>
52#include <linux/inet.h> 52#include <linux/inet.h>
53#include <linux/vmalloc.h>
53 54
54#include <linux/route.h> 55#include <linux/route.h>
55 56
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 28b6b46c106a..29958b6e0214 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -43,6 +43,7 @@
43 43
44#include <linux/io.h> 44#include <linux/io.h>
45#include <linux/pci.h> 45#include <linux/pci.h>
46#include <linux/vmalloc.h>
46#include <asm/uaccess.h> 47#include <asm/uaccess.h>
47 48
48#include "ipath_kernel.h" 49#include "ipath_kernel.h"
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index e9cf1a9f1e1c..2a14fe2e3226 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -141,18 +141,11 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
141 141
142 if (sc->sc_data_direction == DMA_TO_DEVICE) { 142 if (sc->sc_data_direction == DMA_TO_DEVICE) {
143 BUG_ON(ctask->total_length == 0); 143 BUG_ON(ctask->total_length == 0);
144 /* bytes to be sent via RDMA operations */
145 iser_ctask->rdma_data_count = ctask->total_length -
146 ctask->imm_count -
147 ctask->unsol_count;
148 144
149 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d " 145 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
150 "rdma_data %d]\n",
151 ctask->itt, ctask->total_length, ctask->imm_count, 146 ctask->itt, ctask->total_length, ctask->imm_count,
152 ctask->unsol_count, iser_ctask->rdma_data_count); 147 ctask->unsol_count);
153 } else 148 }
154 /* bytes to be sent via RDMA operations */
155 iser_ctask->rdma_data_count = ctask->total_length;
156 149
157 iser_ctask_rdma_init(iser_ctask); 150 iser_ctask_rdma_init(iser_ctask);
158} 151}
@@ -196,13 +189,10 @@ iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
196{ 189{
197 struct iscsi_data hdr; 190 struct iscsi_data hdr;
198 int error = 0; 191 int error = 0;
199 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
200 192
201 /* Send data-out PDUs while there's still unsolicited data to send */ 193 /* Send data-out PDUs while there's still unsolicited data to send */
202 while (ctask->unsol_count > 0) { 194 while (ctask->unsol_count > 0) {
203 iscsi_prep_unsolicit_data_pdu(ctask, &hdr, 195 iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
204 iser_ctask->rdma_data_count);
205
206 debug_scsi("Sending data-out: itt 0x%x, data count %d\n", 196 debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
207 hdr.itt, ctask->data_count); 197 hdr.itt, ctask->data_count);
208 198
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 7e1a411db2a3..2cf9ae0def1c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -262,7 +262,6 @@ struct iscsi_iser_conn {
262struct iscsi_iser_cmd_task { 262struct iscsi_iser_cmd_task {
263 struct iser_desc desc; 263 struct iser_desc desc;
264 struct iscsi_iser_conn *iser_conn; 264 struct iscsi_iser_conn *iser_conn;
265 int rdma_data_count;/* RDMA bytes */
266 enum iser_task_status status; 265 enum iser_task_status status;
267 int command_sent; /* set if command sent */ 266 int command_sent; /* set if command sent */
268 int dir[ISER_DIRS_NUM]; /* set if dir use*/ 267 int dir[ISER_DIRS_NUM]; /* set if dir use*/
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index c69d23bb255e..efd51e01c06e 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -45,8 +45,8 @@
45#include <linux/pmu.h> 45#include <linux/pmu.h>
46 46
47#include <asm/machdep.h> 47#include <asm/machdep.h>
48#include <asm/backlight.h>
49#ifdef CONFIG_PPC_PMAC 48#ifdef CONFIG_PPC_PMAC
49#include <asm/backlight.h>
50#include <asm/pmac_feature.h> 50#include <asm/pmac_feature.h>
51#endif 51#endif
52 52
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 85696f34c310..e57bb035a021 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -162,7 +162,13 @@ static struct fc_function_template mptfc_transport_functions = {
162 .show_starget_port_id = 1, 162 .show_starget_port_id = 1,
163 .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo, 163 .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo,
164 .show_rport_dev_loss_tmo = 1, 164 .show_rport_dev_loss_tmo = 1,
165 165 .show_host_supported_speeds = 1,
166 .show_host_maxframe_size = 1,
167 .show_host_speed = 1,
168 .show_host_fabric_name = 1,
169 .show_host_port_type = 1,
170 .show_host_port_state = 1,
171 .show_host_symbolic_name = 1,
166}; 172};
167 173
168static void 174static void
@@ -839,33 +845,95 @@ mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc)
839static void 845static void
840mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum) 846mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
841{ 847{
842 unsigned class = 0, cos = 0; 848 unsigned class = 0;
849 unsigned cos = 0;
850 unsigned speed;
851 unsigned port_type;
852 unsigned port_state;
853 FCPortPage0_t *pp0;
854 struct Scsi_Host *sh;
855 char *sn;
843 856
844 /* don't know what to do as only one scsi (fc) host was allocated */ 857 /* don't know what to do as only one scsi (fc) host was allocated */
845 if (portnum != 0) 858 if (portnum != 0)
846 return; 859 return;
847 860
848 class = ioc->fc_port_page0[portnum].SupportedServiceClass; 861 pp0 = &ioc->fc_port_page0[portnum];
862 sh = ioc->sh;
863
864 sn = fc_host_symbolic_name(sh);
865 snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh",
866 ioc->prod_name,
867 MPT_FW_REV_MAGIC_ID_STRING,
868 ioc->facts.FWVersion.Word);
869
870 fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN;
871
872 fc_host_maxframe_size(sh) = pp0->MaxFrameSize;
873
874 fc_host_node_name(sh) =
875 (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
876
877 fc_host_port_name(sh) =
878 (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low;
879
880 fc_host_port_id(sh) = pp0->PortIdentifier;
881
882 class = pp0->SupportedServiceClass;
849 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1) 883 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1)
850 cos |= FC_COS_CLASS1; 884 cos |= FC_COS_CLASS1;
851 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2) 885 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2)
852 cos |= FC_COS_CLASS2; 886 cos |= FC_COS_CLASS2;
853 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3) 887 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3)
854 cos |= FC_COS_CLASS3; 888 cos |= FC_COS_CLASS3;
889 fc_host_supported_classes(sh) = cos;
890
891 if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT)
892 speed = FC_PORTSPEED_1GBIT;
893 else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT)
894 speed = FC_PORTSPEED_2GBIT;
895 else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT)
896 speed = FC_PORTSPEED_4GBIT;
897 else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT)
898 speed = FC_PORTSPEED_10GBIT;
899 else
900 speed = FC_PORTSPEED_UNKNOWN;
901 fc_host_speed(sh) = speed;
902
903 speed = 0;
904 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED)
905 speed |= FC_PORTSPEED_1GBIT;
906 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED)
907 speed |= FC_PORTSPEED_2GBIT;
908 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED)
909 speed |= FC_PORTSPEED_4GBIT;
910 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED)
911 speed |= FC_PORTSPEED_10GBIT;
912 fc_host_supported_speeds(sh) = speed;
913
914 port_state = FC_PORTSTATE_UNKNOWN;
915 if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE)
916 port_state = FC_PORTSTATE_ONLINE;
917 else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE)
918 port_state = FC_PORTSTATE_LINKDOWN;
919 fc_host_port_state(sh) = port_state;
920
921 port_type = FC_PORTTYPE_UNKNOWN;
922 if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT)
923 port_type = FC_PORTTYPE_PTP;
924 else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP)
925 port_type = FC_PORTTYPE_LPORT;
926 else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP)
927 port_type = FC_PORTTYPE_NLPORT;
928 else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT)
929 port_type = FC_PORTTYPE_NPORT;
930 fc_host_port_type(sh) = port_type;
931
932 fc_host_fabric_name(sh) =
933 (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ?
934 (u64) pp0->FabricWWNN.High << 32 | (u64) pp0->FabricWWPN.Low :
935 (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
855 936
856 fc_host_node_name(ioc->sh) =
857 (u64)ioc->fc_port_page0[portnum].WWNN.High << 32
858 | (u64)ioc->fc_port_page0[portnum].WWNN.Low;
859
860 fc_host_port_name(ioc->sh) =
861 (u64)ioc->fc_port_page0[portnum].WWPN.High << 32
862 | (u64)ioc->fc_port_page0[portnum].WWPN.Low;
863
864 fc_host_port_id(ioc->sh) = ioc->fc_port_page0[portnum].PortIdentifier;
865
866 fc_host_supported_classes(ioc->sh) = cos;
867
868 fc_host_tgtid_bind_type(ioc->sh) = FC_TGTID_BIND_BY_WWPN;
869} 937}
870 938
871static void 939static void
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index f66f2203143a..b752a479f6db 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -852,6 +852,10 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
852 dma_addr_t dma_handle; 852 dma_addr_t dma_handle;
853 int error; 853 int error;
854 854
855 /* FIXME: only have link errors on local phys */
856 if (!scsi_is_sas_phy_local(phy))
857 return -EINVAL;
858
855 hdr.PageVersion = MPI_SASPHY1_PAGEVERSION; 859 hdr.PageVersion = MPI_SASPHY1_PAGEVERSION;
856 hdr.ExtPageLength = 0; 860 hdr.ExtPageLength = 0;
857 hdr.PageNumber = 1 /* page number 1*/; 861 hdr.PageNumber = 1 /* page number 1*/;
@@ -924,6 +928,10 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
924 unsigned long timeleft; 928 unsigned long timeleft;
925 int error = -ERESTARTSYS; 929 int error = -ERESTARTSYS;
926 930
931 /* FIXME: fusion doesn't allow non-local phy reset */
932 if (!scsi_is_sas_phy_local(phy))
933 return -EINVAL;
934
927 /* not implemented for expanders */ 935 /* not implemented for expanders */
928 if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP) 936 if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP)
929 return -ENXIO; 937 return -ENXIO;
@@ -1570,9 +1578,6 @@ static int mptsas_probe_one_phy(struct device *dev,
1570 1578
1571 if (!phy_info->phy) { 1579 if (!phy_info->phy) {
1572 1580
1573 if (local)
1574 phy->local_attached = 1;
1575
1576 error = sas_phy_add(phy); 1581 error = sas_phy_add(phy);
1577 if (error) { 1582 if (error) {
1578 sas_phy_free(phy); 1583 sas_phy_free(phy);
@@ -1642,14 +1647,18 @@ static int mptsas_probe_one_phy(struct device *dev,
1642 1647
1643 for (i = 0; i < port_info->num_phys; i++) 1648 for (i = 0; i < port_info->num_phys; i++)
1644 if (port_info->phy_info[i].identify.sas_address == 1649 if (port_info->phy_info[i].identify.sas_address ==
1645 identify.sas_address) 1650 identify.sas_address) {
1651 sas_port_mark_backlink(port);
1646 goto out; 1652 goto out;
1653 }
1647 1654
1648 } else if (scsi_is_sas_rphy(parent)) { 1655 } else if (scsi_is_sas_rphy(parent)) {
1649 struct sas_rphy *parent_rphy = dev_to_rphy(parent); 1656 struct sas_rphy *parent_rphy = dev_to_rphy(parent);
1650 if (identify.sas_address == 1657 if (identify.sas_address ==
1651 parent_rphy->identify.sas_address) 1658 parent_rphy->identify.sas_address) {
1659 sas_port_mark_backlink(port);
1652 goto out; 1660 goto out;
1661 }
1653 } 1662 }
1654 1663
1655 switch (identify.device_type) { 1664 switch (identify.device_type) {
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 717e90448fc6..a03e862851db 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -101,7 +101,7 @@ config MTD_REDBOOT_PARTS_READONLY
101 101
102config MTD_CMDLINE_PARTS 102config MTD_CMDLINE_PARTS
103 bool "Command line partition table parsing" 103 bool "Command line partition table parsing"
104 depends on MTD_PARTITIONS = "y" 104 depends on MTD_PARTITIONS = "y" && MTD = "y"
105 ---help--- 105 ---help---
106 Allow generic configuration of the MTD partition tables via the kernel 106 Allow generic configuration of the MTD partition tables via the kernel
107 command line. Multiple flash resources are supported for hardware where 107 command line. Multiple flash resources are supported for hardware where
@@ -264,7 +264,7 @@ config RFD_FTL
264 http://www.gensw.com/pages/prod/bios/rfd.htm 264 http://www.gensw.com/pages/prod/bios/rfd.htm
265 265
266config SSFDC 266config SSFDC
267 bool "NAND SSFDC (SmartMedia) read only translation layer" 267 tristate "NAND SSFDC (SmartMedia) read only translation layer"
268 depends on MTD 268 depends on MTD
269 default n 269 default n
270 help 270 help
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index ddbf015f4119..79d3bb659bfe 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -10,7 +10,6 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/config.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
15#include <linux/module.h> 14#include <linux/module.h>
16#include <linux/init.h> 15#include <linux/init.h>
@@ -29,7 +28,7 @@ struct ssfdcr_record {
29 int cis_block; /* block n. containing CIS/IDI */ 28 int cis_block; /* block n. containing CIS/IDI */
30 int erase_size; /* phys_block_size */ 29 int erase_size; /* phys_block_size */
31 unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on 30 unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on
32 the 128MB) */ 31 the 128MiB) */
33 int map_len; /* n. phys_blocks on the card */ 32 int map_len; /* n. phys_blocks on the card */
34}; 33};
35 34
@@ -43,11 +42,11 @@ struct ssfdcr_record {
43#define MAX_LOGIC_BLK_PER_ZONE 1000 42#define MAX_LOGIC_BLK_PER_ZONE 1000
44#define MAX_PHYS_BLK_PER_ZONE 1024 43#define MAX_PHYS_BLK_PER_ZONE 1024
45 44
46#define KB(x) ( (x) * 1024L ) 45#define KiB(x) ( (x) * 1024L )
47#define MB(x) ( KB(x) * 1024L ) 46#define MiB(x) ( KiB(x) * 1024L )
48 47
49/** CHS Table 48/** CHS Table
50 1MB 2MB 4MB 8MB 16MB 32MB 64MB 128MB 49 1MiB 2MiB 4MiB 8MiB 16MiB 32MiB 64MiB 128MiB
51NCylinder 125 125 250 250 500 500 500 500 50NCylinder 125 125 250 250 500 500 500 500
52NHead 4 4 4 4 4 8 8 16 51NHead 4 4 4 4 4 8 8 16
53NSector 4 8 8 16 16 16 32 32 52NSector 4 8 8 16 16 16 32 32
@@ -64,14 +63,14 @@ typedef struct {
64 63
65/* Must be ordered by size */ 64/* Must be ordered by size */
66static const chs_entry_t chs_table[] = { 65static const chs_entry_t chs_table[] = {
67 { MB( 1), 125, 4, 4 }, 66 { MiB( 1), 125, 4, 4 },
68 { MB( 2), 125, 4, 8 }, 67 { MiB( 2), 125, 4, 8 },
69 { MB( 4), 250, 4, 8 }, 68 { MiB( 4), 250, 4, 8 },
70 { MB( 8), 250, 4, 16 }, 69 { MiB( 8), 250, 4, 16 },
71 { MB( 16), 500, 4, 16 }, 70 { MiB( 16), 500, 4, 16 },
72 { MB( 32), 500, 8, 16 }, 71 { MiB( 32), 500, 8, 16 },
73 { MB( 64), 500, 8, 32 }, 72 { MiB( 64), 500, 8, 32 },
74 { MB(128), 500, 16, 32 }, 73 { MiB(128), 500, 16, 32 },
75 { 0 }, 74 { 0 },
76}; 75};
77 76
@@ -109,25 +108,30 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
109 int ret, k, cis_sector; 108 int ret, k, cis_sector;
110 size_t retlen; 109 size_t retlen;
111 loff_t offset; 110 loff_t offset;
112 uint8_t sect_buf[SECTOR_SIZE]; 111 uint8_t *sect_buf;
112
113 cis_sector = -1;
114
115 sect_buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
116 if (!sect_buf)
117 goto out;
113 118
114 /* 119 /*
115 * Look for CIS/IDI sector on the first GOOD block (give up after 4 bad 120 * Look for CIS/IDI sector on the first GOOD block (give up after 4 bad
116 * blocks). If the first good block doesn't contain CIS number the flash 121 * blocks). If the first good block doesn't contain CIS number the flash
117 * is not SSFDC formatted 122 * is not SSFDC formatted
118 */ 123 */
119 cis_sector = -1;
120 for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) { 124 for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
121 if (!mtd->block_isbad(mtd, offset)) { 125 if (!mtd->block_isbad(mtd, offset)) {
122 ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, 126 ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
123 sect_buf); 127 sect_buf);
124 128
125 /* CIS pattern match on the sector buffer */ 129 /* CIS pattern match on the sector buffer */
126 if ( ret < 0 || retlen != SECTOR_SIZE ) { 130 if (ret < 0 || retlen != SECTOR_SIZE) {
127 printk(KERN_WARNING 131 printk(KERN_WARNING
128 "SSFDC_RO:can't read CIS/IDI sector\n"); 132 "SSFDC_RO:can't read CIS/IDI sector\n");
129 } else if ( !memcmp(sect_buf, cis_numbers, 133 } else if (!memcmp(sect_buf, cis_numbers,
130 sizeof(cis_numbers)) ) { 134 sizeof(cis_numbers))) {
131 /* Found */ 135 /* Found */
132 cis_sector = (int)(offset >> SECTOR_SHIFT); 136 cis_sector = (int)(offset >> SECTOR_SHIFT);
133 } else { 137 } else {
@@ -140,6 +144,8 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
140 } 144 }
141 } 145 }
142 146
147 kfree(sect_buf);
148 out:
143 return cis_sector; 149 return cis_sector;
144} 150}
145 151
@@ -227,7 +233,7 @@ static int get_logical_address(uint8_t *oob_buf)
227 } 233 }
228 } 234 }
229 235
230 if ( !ok ) 236 if (!ok)
231 block_address = -2; 237 block_address = -2;
232 238
233 DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n", 239 DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n",
@@ -245,8 +251,8 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
245 struct mtd_info *mtd = ssfdc->mbd.mtd; 251 struct mtd_info *mtd = ssfdc->mbd.mtd;
246 252
247 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n", 253 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
248 ssfdc->map_len, (unsigned long)ssfdc->map_len * 254 ssfdc->map_len,
249 ssfdc->erase_size / 1024 ); 255 (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
250 256
251 /* Scan every physical block, skip CIS block */ 257 /* Scan every physical block, skip CIS block */
252 for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len; 258 for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
@@ -323,21 +329,21 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
323 /* Set geometry */ 329 /* Set geometry */
324 ssfdc->heads = 16; 330 ssfdc->heads = 16;
325 ssfdc->sectors = 32; 331 ssfdc->sectors = 32;
326 get_chs( mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); 332 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
327 ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) / 333 ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) /
328 ((long)ssfdc->sectors * (long)ssfdc->heads)); 334 ((long)ssfdc->sectors * (long)ssfdc->heads));
329 335
330 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", 336 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
331 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, 337 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
332 (long)ssfdc->cylinders * (long)ssfdc->heads * 338 (long)ssfdc->cylinders * (long)ssfdc->heads *
333 (long)ssfdc->sectors ); 339 (long)ssfdc->sectors);
334 340
335 ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders * 341 ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders *
336 (long)ssfdc->sectors; 342 (long)ssfdc->sectors;
337 343
338 /* Allocate logical block map */ 344 /* Allocate logical block map */
339 ssfdc->logic_block_map = kmalloc( sizeof(ssfdc->logic_block_map[0]) * 345 ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
340 ssfdc->map_len, GFP_KERNEL); 346 ssfdc->map_len, GFP_KERNEL);
341 if (!ssfdc->logic_block_map) { 347 if (!ssfdc->logic_block_map) {
342 printk(KERN_WARNING 348 printk(KERN_WARNING
343 "SSFDC_RO: out of memory for data structures\n"); 349 "SSFDC_RO: out of memory for data structures\n");
@@ -408,7 +414,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
408 "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n", 414 "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
409 sect_no); 415 sect_no);
410 416
411 if (read_physical_sector( ssfdc->mbd.mtd, buf, sect_no ) < 0) 417 if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
412 return -EIO; 418 return -EIO;
413 } else { 419 } else {
414 memset(buf, 0xff, SECTOR_SIZE); 420 memset(buf, 0xff, SECTOR_SIZE);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1e53bbd150b..af301f09d674 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2077,7 +2077,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2077 2077
2078 vp->tx_ring[entry].next = 0; 2078 vp->tx_ring[entry].next = 0;
2079#if DO_ZEROCOPY 2079#if DO_ZEROCOPY
2080 if (skb->ip_summed != CHECKSUM_HW) 2080 if (skb->ip_summed != CHECKSUM_PARTIAL)
2081 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); 2081 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2082 else 2082 else
2083 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); 2083 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 5ba11fa08147..5a4990ae3730 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -805,7 +805,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
805 805
806 if (mss) 806 if (mss)
807 flags |= LargeSend | ((mss & MSSMask) << MSSShift); 807 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
808 else if (skb->ip_summed == CHECKSUM_HW) { 808 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
809 const struct iphdr *ip = skb->nh.iph; 809 const struct iphdr *ip = skb->nh.iph;
810 if (ip->protocol == IPPROTO_TCP) 810 if (ip->protocol == IPPROTO_TCP)
811 flags |= IPCS | TCPCS; 811 flags |= IPCS | TCPCS;
@@ -855,7 +855,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
855 if (mss) 855 if (mss)
856 ctrl |= LargeSend | 856 ctrl |= LargeSend |
857 ((mss & MSSMask) << MSSShift); 857 ((mss & MSSMask) << MSSShift);
858 else if (skb->ip_summed == CHECKSUM_HW) { 858 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
859 if (ip->protocol == IPPROTO_TCP) 859 if (ip->protocol == IPPROTO_TCP)
860 ctrl |= IPCS | TCPCS; 860 ctrl |= IPCS | TCPCS;
861 else if (ip->protocol == IPPROTO_UDP) 861 else if (ip->protocol == IPPROTO_UDP)
@@ -884,7 +884,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
884 txd->addr = cpu_to_le64(first_mapping); 884 txd->addr = cpu_to_le64(first_mapping);
885 wmb(); 885 wmb();
886 886
887 if (skb->ip_summed == CHECKSUM_HW) { 887 if (skb->ip_summed == CHECKSUM_PARTIAL) {
888 if (ip->protocol == IPPROTO_TCP) 888 if (ip->protocol == IPPROTO_TCP)
889 txd->opts1 = cpu_to_le32(first_eor | first_len | 889 txd->opts1 = cpu_to_le32(first_eor | first_len |
890 FirstFrag | DescOwn | 890 FirstFrag | DescOwn |
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 0473c6d14b4a..a075246f6f43 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2040,7 +2040,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2040 */ 2040 */
2041 if (bd_flags & BD_FLG_TCP_UDP_SUM) { 2041 if (bd_flags & BD_FLG_TCP_UDP_SUM) {
2042 skb->csum = htons(csum); 2042 skb->csum = htons(csum);
2043 skb->ip_summed = CHECKSUM_HW; 2043 skb->ip_summed = CHECKSUM_COMPLETE;
2044 } else { 2044 } else {
2045 skb->ip_summed = CHECKSUM_NONE; 2045 skb->ip_summed = CHECKSUM_NONE;
2046 } 2046 }
@@ -2511,7 +2511,7 @@ restart:
2511 2511
2512 mapping = ace_map_tx_skb(ap, skb, skb, idx); 2512 mapping = ace_map_tx_skb(ap, skb, skb, idx);
2513 flagsize = (skb->len << 16) | (BD_FLG_END); 2513 flagsize = (skb->len << 16) | (BD_FLG_END);
2514 if (skb->ip_summed == CHECKSUM_HW) 2514 if (skb->ip_summed == CHECKSUM_PARTIAL)
2515 flagsize |= BD_FLG_TCP_UDP_SUM; 2515 flagsize |= BD_FLG_TCP_UDP_SUM;
2516#if ACENIC_DO_VLAN 2516#if ACENIC_DO_VLAN
2517 if (vlan_tx_tag_present(skb)) { 2517 if (vlan_tx_tag_present(skb)) {
@@ -2534,7 +2534,7 @@ restart:
2534 2534
2535 mapping = ace_map_tx_skb(ap, skb, NULL, idx); 2535 mapping = ace_map_tx_skb(ap, skb, NULL, idx);
2536 flagsize = (skb_headlen(skb) << 16); 2536 flagsize = (skb_headlen(skb) << 16);
2537 if (skb->ip_summed == CHECKSUM_HW) 2537 if (skb->ip_summed == CHECKSUM_PARTIAL)
2538 flagsize |= BD_FLG_TCP_UDP_SUM; 2538 flagsize |= BD_FLG_TCP_UDP_SUM;
2539#if ACENIC_DO_VLAN 2539#if ACENIC_DO_VLAN
2540 if (vlan_tx_tag_present(skb)) { 2540 if (vlan_tx_tag_present(skb)) {
@@ -2560,7 +2560,7 @@ restart:
2560 PCI_DMA_TODEVICE); 2560 PCI_DMA_TODEVICE);
2561 2561
2562 flagsize = (frag->size << 16); 2562 flagsize = (frag->size << 16);
2563 if (skb->ip_summed == CHECKSUM_HW) 2563 if (skb->ip_summed == CHECKSUM_PARTIAL)
2564 flagsize |= BD_FLG_TCP_UDP_SUM; 2564 flagsize |= BD_FLG_TCP_UDP_SUM;
2565 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2565 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2566 2566
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index fc256c197cd6..98d326b23c92 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -161,6 +161,7 @@ static struct pci_device_id com20020pci_id_table[] = {
161 { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 161 { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
162 { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 162 { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
163 { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 163 { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
164 { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
164 { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 165 { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
165 {0,} 166 {0,}
166}; 167};
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b158de28d6f9..7fcf015021ec 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4423,7 +4423,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4423 ring_prod = TX_RING_IDX(prod); 4423 ring_prod = TX_RING_IDX(prod);
4424 4424
4425 vlan_tag_flags = 0; 4425 vlan_tag_flags = 0;
4426 if (skb->ip_summed == CHECKSUM_HW) { 4426 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4427 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4427 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4428 } 4428 }
4429 4429
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 275057ca3dbc..7694365092f8 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2167,7 +2167,7 @@ end_copy_pkt:
2167 cas_page_unmap(addr); 2167 cas_page_unmap(addr);
2168 } 2168 }
2169 skb->csum = ntohs(i ^ 0xffff); 2169 skb->csum = ntohs(i ^ 0xffff);
2170 skb->ip_summed = CHECKSUM_HW; 2170 skb->ip_summed = CHECKSUM_COMPLETE;
2171 skb->protocol = eth_type_trans(skb, cp->dev); 2171 skb->protocol = eth_type_trans(skb, cp->dev);
2172 return len; 2172 return len;
2173} 2173}
@@ -2821,7 +2821,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2821 } 2821 }
2822 2822
2823 ctrl = 0; 2823 ctrl = 0;
2824 if (skb->ip_summed == CHECKSUM_HW) { 2824 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2825 u64 csum_start_off, csum_stuff_off; 2825 u64 csum_start_off, csum_stuff_off;
2826 2826
2827 csum_start_off = (u64) (skb->h.raw - skb->data); 2827 csum_start_off = (u64) (skb->h.raw - skb->data);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 61b3754f50ff..ddd0bdb498f4 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1470,9 +1470,9 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1470 } 1470 }
1471 1471
1472 if (!(adapter->flags & UDP_CSUM_CAPABLE) && 1472 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1473 skb->ip_summed == CHECKSUM_HW && 1473 skb->ip_summed == CHECKSUM_PARTIAL &&
1474 skb->nh.iph->protocol == IPPROTO_UDP) 1474 skb->nh.iph->protocol == IPPROTO_UDP)
1475 if (unlikely(skb_checksum_help(skb, 0))) { 1475 if (unlikely(skb_checksum_help(skb))) {
1476 dev_kfree_skb_any(skb); 1476 dev_kfree_skb_any(skb);
1477 return NETDEV_TX_OK; 1477 return NETDEV_TX_OK;
1478 } 1478 }
@@ -1495,11 +1495,11 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1495 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); 1495 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1496 cpl->opcode = CPL_TX_PKT; 1496 cpl->opcode = CPL_TX_PKT;
1497 cpl->ip_csum_dis = 1; /* SW calculates IP csum */ 1497 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1498 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1; 1498 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
1499 /* the length field isn't used so don't bother setting it */ 1499 /* the length field isn't used so don't bother setting it */
1500 1500
1501 st->tx_cso += (skb->ip_summed == CHECKSUM_HW); 1501 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
1502 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW); 1502 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_PARTIAL);
1503 sge->stats.tx_reg_pkts++; 1503 sge->stats.tx_reg_pkts++;
1504 } 1504 }
1505 cpl->iff = dev->if_port; 1505 cpl->iff = dev->if_port;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index c5c80da239de..7e95cf1a4872 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -611,7 +611,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
611 txdesc = &np->tx_ring[entry]; 611 txdesc = &np->tx_ring[entry];
612 612
613#if 0 613#if 0
614 if (skb->ip_summed == CHECKSUM_HW) { 614 if (skb->ip_summed == CHECKSUM_PARTIAL) {
615 txdesc->status |= 615 txdesc->status |=
616 cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | 616 cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
617 IPChecksumEnable); 617 IPChecksumEnable);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 88276a6e656c..3f6a752700a1 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2631,7 +2631,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2631 unsigned int i; 2631 unsigned int i;
2632 uint8_t css; 2632 uint8_t css;
2633 2633
2634 if (likely(skb->ip_summed == CHECKSUM_HW)) { 2634 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2635 css = skb->h.raw - skb->data; 2635 css = skb->h.raw - skb->data;
2636 2636
2637 i = tx_ring->next_to_use; 2637 i = tx_ring->next_to_use;
@@ -2958,11 +2958,11 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2958 } 2958 }
2959 2959
2960 /* reserve a descriptor for the offload context */ 2960 /* reserve a descriptor for the offload context */
2961 if ((mss) || (skb->ip_summed == CHECKSUM_HW)) 2961 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
2962 count++; 2962 count++;
2963 count++; 2963 count++;
2964#else 2964#else
2965 if (skb->ip_summed == CHECKSUM_HW) 2965 if (skb->ip_summed == CHECKSUM_PARTIAL)
2966 count++; 2966 count++;
2967#endif 2967#endif
2968 2968
@@ -3639,7 +3639,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
3639 */ 3639 */
3640 csum = ntohl(csum ^ 0xFFFF); 3640 csum = ntohl(csum ^ 0xFFFF);
3641 skb->csum = csum; 3641 skb->csum = csum;
3642 skb->ip_summed = CHECKSUM_HW; 3642 skb->ip_summed = CHECKSUM_COMPLETE;
3643 } 3643 }
3644 adapter->hw_csum_good++; 3644 adapter->hw_csum_good++;
3645} 3645}
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index b8df5ca4e6aa..97db910fbc8c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1540,7 +1540,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1540 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1540 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1541 else 1541 else
1542#endif 1542#endif
1543 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); 1543 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1544 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1544 1545
1545 /* vlan tag */ 1546 /* vlan tag */
1546 if (np->vlangrp && vlan_tx_tag_present(skb)) { 1547 if (np->vlangrp && vlan_tx_tag_present(skb)) {
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4788a41da1c0..280b114e253f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -947,7 +947,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
947 947
948 /* Set up checksumming */ 948 /* Set up checksumming */
949 if (likely((dev->features & NETIF_F_IP_CSUM) 949 if (likely((dev->features & NETIF_F_IP_CSUM)
950 && (CHECKSUM_HW == skb->ip_summed))) { 950 && (CHECKSUM_PARTIAL == skb->ip_summed))) {
951 fcb = gfar_add_fcb(skb, txbdp); 951 fcb = gfar_add_fcb(skb, txbdp);
952 status |= TXBD_TOE; 952 status |= TXBD_TOE;
953 gfar_tx_checksum(skb, fcb); 953 gfar_tx_checksum(skb, fcb);
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index b59bab9e9792..5c89ae78a519 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1648,7 +1648,7 @@ static int hamachi_rx(struct net_device *dev)
1648 * could do the pseudo myself and return 1648 * could do the pseudo myself and return
1649 * CHECKSUM_UNNECESSARY 1649 * CHECKSUM_UNNECESSARY
1650 */ 1650 */
1651 skb->ip_summed = CHECKSUM_HW; 1651 skb->ip_summed = CHECKSUM_COMPLETE;
1652 } 1652 }
1653 } 1653 }
1654 } 1654 }
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 944eea66e790..d52e3bd01301 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1036,7 +1036,7 @@ static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
1036 struct sk_buff *skb) 1036 struct sk_buff *skb)
1037{ 1037{
1038#if defined(CONFIG_IBM_EMAC_TAH) 1038#if defined(CONFIG_IBM_EMAC_TAH)
1039 if (skb->ip_summed == CHECKSUM_HW) { 1039 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1040 ++dev->stats.tx_packets_csum; 1040 ++dev->stats.tx_packets_csum;
1041 return EMAC_TX_CTRL_TAH_CSUM; 1041 return EMAC_TX_CTRL_TAH_CSUM;
1042 } 1042 }
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 7acba88f9b56..87650237dc5c 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1387,7 +1387,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1387 * MAC header which should not be summed and the TCP/UDP pseudo headers 1387 * MAC header which should not be summed and the TCP/UDP pseudo headers
1388 * manually. 1388 * manually.
1389 */ 1389 */
1390 if (skb->ip_summed == CHECKSUM_HW) { 1390 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1391 int proto = ntohs(skb->nh.iph->protocol); 1391 int proto = ntohs(skb->nh.iph->protocol);
1392 unsigned int csoff; 1392 unsigned int csoff;
1393 struct iphdr *ih = skb->nh.iph; 1393 struct iphdr *ih = skb->nh.iph;
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index e3c8cd5eca67..68d4c418cb98 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -249,7 +249,7 @@ static void __exit ali_ircc_cleanup(void)
249 249
250 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 250 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
251 251
252 for (i=0; i < 4; i++) { 252 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
253 if (dev_self[i]) 253 if (dev_self[i])
254 ali_ircc_close(dev_self[i]); 254 ali_ircc_close(dev_self[i]);
255 } 255 }
@@ -273,6 +273,12 @@ static int ali_ircc_open(int i, chipio_t *info)
273 int err; 273 int err;
274 274
275 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 275 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
276
277 if (i >= ARRAY_SIZE(dev_self)) {
278 IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
279 __FUNCTION__);
280 return -ENOMEM;
281 }
276 282
277 /* Set FIR FIFO and DMA Threshold */ 283 /* Set FIR FIFO and DMA Threshold */
278 if ((ali_ircc_setup(info)) == -1) 284 if ((ali_ircc_setup(info)) == -1)
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 44efd49bf4a9..ba4f3eb988b3 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -1090,7 +1090,7 @@ static int __init irport_init(void)
1090{ 1090{
1091 int i; 1091 int i;
1092 1092
1093 for (i=0; (io[i] < 2000) && (i < 4); i++) { 1093 for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
1094 if (irport_open(i, io[i], irq[i]) != NULL) 1094 if (irport_open(i, io[i], irq[i]) != NULL)
1095 return 0; 1095 return 0;
1096 } 1096 }
@@ -1112,7 +1112,7 @@ static void __exit irport_cleanup(void)
1112 1112
1113 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 1113 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
1114 1114
1115 for (i=0; i < 4; i++) { 1115 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
1116 if (dev_self[i]) 1116 if (dev_self[i])
1117 irport_close(dev_self[i]); 1117 irport_close(dev_self[i]);
1118 } 1118 }
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 8bafb455c102..79b85f327500 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -279,7 +279,7 @@ static void via_ircc_clean(void)
279 279
280 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 280 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
281 281
282 for (i=0; i < 4; i++) { 282 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
283 if (dev_self[i]) 283 if (dev_self[i])
284 via_ircc_close(dev_self[i]); 284 via_ircc_close(dev_self[i]);
285 } 285 }
@@ -327,6 +327,9 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
327 327
328 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 328 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
329 329
330 if (i >= ARRAY_SIZE(dev_self))
331 return -ENOMEM;
332
330 /* Allocate new instance of the driver */ 333 /* Allocate new instance of the driver */
331 dev = alloc_irdadev(sizeof(struct via_ircc_cb)); 334 dev = alloc_irdadev(sizeof(struct via_ircc_cb));
332 if (dev == NULL) 335 if (dev == NULL)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index b69776e00951..7de1afdeec3d 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -116,7 +116,7 @@ static int __init w83977af_init(void)
116 116
117 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 117 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
118 118
119 for (i=0; (io[i] < 2000) && (i < 4); i++) { 119 for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) 120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
121 return 0; 121 return 0;
122 } 122 }
@@ -135,7 +135,7 @@ static void __exit w83977af_cleanup(void)
135 135
136 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 136 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
137 137
138 for (i=0; i < 4; i++) { 138 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
139 if (dev_self[i]) 139 if (dev_self[i])
140 w83977af_close(dev_self[i]); 140 w83977af_close(dev_self[i]);
141 } 141 }
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e36dee1dd333..2e0f4b950a90 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1246,7 +1246,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1246 unsigned int i; 1246 unsigned int i;
1247 uint8_t css, cso; 1247 uint8_t css, cso;
1248 1248
1249 if(likely(skb->ip_summed == CHECKSUM_HW)) { 1249 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1250 struct ixgb_buffer *buffer_info; 1250 struct ixgb_buffer *buffer_info;
1251 css = skb->h.raw - skb->data; 1251 css = skb->h.raw - skb->data;
1252 cso = (skb->h.raw + skb->csum) - skb->data; 1252 cso = (skb->h.raw + skb->csum) - skb->data;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 6a74608eb9a3..7f8e5ad1b704 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1147,7 +1147,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1147 desc->byte_cnt = length; 1147 desc->byte_cnt = length;
1148 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); 1148 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
1149 1149
1150 if (skb->ip_summed == CHECKSUM_HW) { 1150 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1151 BUG_ON(skb->protocol != ETH_P_IP); 1151 BUG_ON(skb->protocol != ETH_P_IP);
1152 1152
1153 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | 1153 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 2773440c84be..4330197994df 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -947,7 +947,7 @@ static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
947 (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) || 947 (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
948 vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) { 948 vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
949 skb->csum = hw_csum; 949 skb->csum = hw_csum;
950 skb->ip_summed = CHECKSUM_HW; 950 skb->ip_summed = CHECKSUM_COMPLETE;
951 } 951 }
952} 952}
953 953
@@ -989,7 +989,7 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
989 if ((skb->protocol == ntohs(ETH_P_IP)) || 989 if ((skb->protocol == ntohs(ETH_P_IP)) ||
990 (skb->protocol == ntohs(ETH_P_IPV6))) { 990 (skb->protocol == ntohs(ETH_P_IPV6))) {
991 skb->csum = ntohs((u16) csum); 991 skb->csum = ntohs((u16) csum);
992 skb->ip_summed = CHECKSUM_HW; 992 skb->ip_summed = CHECKSUM_COMPLETE;
993 } else 993 } else
994 myri10ge_vlan_ip_csum(skb, ntohs((u16) csum)); 994 myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
995 } 995 }
@@ -1953,13 +1953,13 @@ again:
1953 pseudo_hdr_offset = 0; 1953 pseudo_hdr_offset = 0;
1954 odd_flag = 0; 1954 odd_flag = 0;
1955 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); 1955 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
1956 if (likely(skb->ip_summed == CHECKSUM_HW)) { 1956 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1957 cksum_offset = (skb->h.raw - skb->data); 1957 cksum_offset = (skb->h.raw - skb->data);
1958 pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data; 1958 pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
1959 /* If the headers are excessively large, then we must 1959 /* If the headers are excessively large, then we must
1960 * fall back to a software checksum */ 1960 * fall back to a software checksum */
1961 if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) { 1961 if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
1962 if (skb_checksum_help(skb, 0)) 1962 if (skb_checksum_help(skb))
1963 goto drop; 1963 goto drop;
1964 cksum_offset = 0; 1964 cksum_offset = 0;
1965 pseudo_hdr_offset = 0; 1965 pseudo_hdr_offset = 0;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index a05f6cbfdc0f..e10da1aa3d30 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1153,7 +1153,7 @@ again:
1153 if (!nr_frags) 1153 if (!nr_frags)
1154 frag = NULL; 1154 frag = NULL;
1155 extsts = 0; 1155 extsts = 0;
1156 if (skb->ip_summed == CHECKSUM_HW) { 1156 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1157 extsts |= EXTSTS_IPPKT; 1157 extsts |= EXTSTS_IPPKT;
1158 if (IPPROTO_TCP == skb->nh.iph->protocol) 1158 if (IPPROTO_TCP == skb->nh.iph->protocol)
1159 extsts |= EXTSTS_TCPPKT; 1159 extsts |= EXTSTS_TCPPKT;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7ac599410978..4c47c5b10ba0 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2259,7 +2259,7 @@ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
2259 if (mss) 2259 if (mss)
2260 return LargeSend | ((mss & MSSMask) << MSSShift); 2260 return LargeSend | ((mss & MSSMask) << MSSShift);
2261 } 2261 }
2262 if (skb->ip_summed == CHECKSUM_HW) { 2262 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2263 const struct iphdr *ip = skb->nh.iph; 2263 const struct iphdr *ip = skb->nh.iph;
2264 2264
2265 if (ip->protocol == IPPROTO_TCP) 2265 if (ip->protocol == IPPROTO_TCP)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index d579a44aab5c..f5dbeb27b6f0 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3894,7 +3894,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3894 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); 3894 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3895 } 3895 }
3896#endif 3896#endif
3897 if (skb->ip_summed == CHECKSUM_HW) { 3897 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3898 txdp->Control_2 |= 3898 txdp->Control_2 |=
3899 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3899 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3900 TXD_TX_CKO_UDP_EN); 3900 TXD_TX_CKO_UDP_EN);
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 0ecfc14e7990..99e92627642c 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -1559,7 +1559,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
1559 pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); 1559 pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
1560 pTxd->pMBuf = pMessage; 1560 pTxd->pMBuf = pMessage;
1561 1561
1562 if (pMessage->ip_summed == CHECKSUM_HW) { 1562 if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
1563 u16 hdrlen = pMessage->h.raw - pMessage->data; 1563 u16 hdrlen = pMessage->h.raw - pMessage->data;
1564 u16 offset = hdrlen + pMessage->csum; 1564 u16 offset = hdrlen + pMessage->csum;
1565 1565
@@ -1678,7 +1678,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
1678 /* 1678 /*
1679 ** Does the HW need to evaluate checksum for TCP or UDP packets? 1679 ** Does the HW need to evaluate checksum for TCP or UDP packets?
1680 */ 1680 */
1681 if (pMessage->ip_summed == CHECKSUM_HW) { 1681 if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
1682 u16 hdrlen = pMessage->h.raw - pMessage->data; 1682 u16 hdrlen = pMessage->h.raw - pMessage->data;
1683 u16 offset = hdrlen + pMessage->csum; 1683 u16 offset = hdrlen + pMessage->csum;
1684 1684
@@ -2158,7 +2158,7 @@ rx_start:
2158 2158
2159#ifdef USE_SK_RX_CHECKSUM 2159#ifdef USE_SK_RX_CHECKSUM
2160 pMsg->csum = pRxd->TcpSums & 0xffff; 2160 pMsg->csum = pRxd->TcpSums & 0xffff;
2161 pMsg->ip_summed = CHECKSUM_HW; 2161 pMsg->ip_summed = CHECKSUM_COMPLETE;
2162#else 2162#else
2163 pMsg->ip_summed = CHECKSUM_NONE; 2163 pMsg->ip_summed = CHECKSUM_NONE;
2164#endif 2164#endif
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 4b267b85fff2..9142d91355bc 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2328,7 +2328,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2328 td->dma_lo = map; 2328 td->dma_lo = map;
2329 td->dma_hi = map >> 32; 2329 td->dma_hi = map >> 32;
2330 2330
2331 if (skb->ip_summed == CHECKSUM_HW) { 2331 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2332 int offset = skb->h.raw - skb->data; 2332 int offset = skb->h.raw - skb->data;
2333 2333
2334 /* This seems backwards, but it is what the sk98lin 2334 /* This seems backwards, but it is what the sk98lin
@@ -2630,7 +2630,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
2630 skb_put(skb, len); 2630 skb_put(skb, len);
2631 if (skge->rx_csum) { 2631 if (skge->rx_csum) {
2632 skb->csum = csum; 2632 skb->csum = csum;
2633 skb->ip_summed = CHECKSUM_HW; 2633 skb->ip_summed = CHECKSUM_COMPLETE;
2634 } 2634 }
2635 2635
2636 skb->protocol = eth_type_trans(skb, dev); 2636 skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index c4c51f1418f5..7eeefa2d6c89 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1184,7 +1184,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1184 if (skb_is_gso(skb)) 1184 if (skb_is_gso(skb))
1185 ++count; 1185 ++count;
1186 1186
1187 if (skb->ip_summed == CHECKSUM_HW) 1187 if (skb->ip_summed == CHECKSUM_PARTIAL)
1188 ++count; 1188 ++count;
1189 1189
1190 return count; 1190 return count;
@@ -1284,7 +1284,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1284#endif 1284#endif
1285 1285
1286 /* Handle TCP checksum offload */ 1286 /* Handle TCP checksum offload */
1287 if (skb->ip_summed == CHECKSUM_HW) { 1287 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1288 unsigned offset = skb->h.raw - skb->data; 1288 unsigned offset = skb->h.raw - skb->data;
1289 u32 tcpsum; 1289 u32 tcpsum;
1290 1290
@@ -1982,7 +1982,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1982#endif 1982#endif
1983 case OP_RXCHKS: 1983 case OP_RXCHKS:
1984 skb = sky2->rx_ring[sky2->rx_next].skb; 1984 skb = sky2->rx_ring[sky2->rx_next].skb;
1985 skb->ip_summed = CHECKSUM_HW; 1985 skb->ip_summed = CHECKSUM_COMPLETE;
1986 skb->csum = status & 0xffff; 1986 skb->csum = status & 0xffff;
1987 break; 1987 break;
1988 1988
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 337c3b7ac90e..3d617e8f54b5 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1230,7 +1230,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1230 } 1230 }
1231 1231
1232#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 1232#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1233 if (skb->ip_summed == CHECKSUM_HW) { 1233 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1234 if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK)) 1234 if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1235 return NETDEV_TX_OK; 1235 return NETDEV_TX_OK;
1236 } 1236 }
@@ -1252,7 +1252,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1252 status |= TxDescIntr; 1252 status |= TxDescIntr;
1253 np->reap_tx = 0; 1253 np->reap_tx = 0;
1254 } 1254 }
1255 if (skb->ip_summed == CHECKSUM_HW) { 1255 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1256 status |= TxCalTCP; 1256 status |= TxCalTCP;
1257 np->stats.tx_compressed++; 1257 np->stats.tx_compressed++;
1258 } 1258 }
@@ -1499,7 +1499,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1499 * Until then, the printk stays. :-) -Ion 1499 * Until then, the printk stays. :-) -Ion
1500 */ 1500 */
1501 else if (le16_to_cpu(desc->status2) & 0x0040) { 1501 else if (le16_to_cpu(desc->status2) & 0x0040) {
1502 skb->ip_summed = CHECKSUM_HW; 1502 skb->ip_summed = CHECKSUM_COMPLETE;
1503 skb->csum = le16_to_cpu(desc->csum); 1503 skb->csum = le16_to_cpu(desc->csum);
1504 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2)); 1504 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1505 } 1505 }
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index e06c59d4dd62..0975695ae31b 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -855,7 +855,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
855 } 855 }
856 856
857 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff); 857 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
858 skb->ip_summed = CHECKSUM_HW; 858 skb->ip_summed = CHECKSUM_COMPLETE;
859 skb->protocol = eth_type_trans(skb, gp->dev); 859 skb->protocol = eth_type_trans(skb, gp->dev);
860 860
861 netif_receive_skb(skb); 861 netif_receive_skb(skb);
@@ -1026,7 +1026,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
1026 unsigned long flags; 1026 unsigned long flags;
1027 1027
1028 ctrl = 0; 1028 ctrl = 0;
1029 if (skb->ip_summed == CHECKSUM_HW) { 1029 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1030 u64 csum_start_off, csum_stuff_off; 1030 u64 csum_start_off, csum_stuff_off;
1031 1031
1032 csum_start_off = (u64) (skb->h.raw - skb->data); 1032 csum_start_off = (u64) (skb->h.raw - skb->data);
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 2ff0ded24000..f05eea53623b 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1207,7 +1207,7 @@ static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tr
1207 * flags, thus: 1207 * flags, thus:
1208 * 1208 *
1209 * skb->csum = rxd->rx_flags & 0xffff; 1209 * skb->csum = rxd->rx_flags & 0xffff;
1210 * skb->ip_summed = CHECKSUM_HW; 1210 * skb->ip_summed = CHECKSUM_COMPLETE;
1211 * 1211 *
1212 * before sending off the skb to the protocols, and we are good as gold. 1212 * before sending off the skb to the protocols, and we are good as gold.
1213 */ 1213 */
@@ -2074,7 +2074,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2074 2074
2075 /* This card is _fucking_ hot... */ 2075 /* This card is _fucking_ hot... */
2076 skb->csum = ntohs(csum ^ 0xffff); 2076 skb->csum = ntohs(csum ^ 0xffff);
2077 skb->ip_summed = CHECKSUM_HW; 2077 skb->ip_summed = CHECKSUM_COMPLETE;
2078 2078
2079 RXD(("len=%d csum=%4x]", len, csum)); 2079 RXD(("len=%d csum=%4x]", len, csum));
2080 skb->protocol = eth_type_trans(skb, dev); 2080 skb->protocol = eth_type_trans(skb, dev);
@@ -2268,7 +2268,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
2268 u32 tx_flags; 2268 u32 tx_flags;
2269 2269
2270 tx_flags = TXFLAG_OWN; 2270 tx_flags = TXFLAG_OWN;
2271 if (skb->ip_summed == CHECKSUM_HW) { 2271 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2272 u32 csum_start_off, csum_stuff_off; 2272 u32 csum_start_off, csum_stuff_off;
2273 2273
2274 csum_start_off = (u32) (skb->h.raw - skb->data); 2274 csum_start_off = (u32) (skb->h.raw - skb->data);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3b84ac234644..aaf45b907a78 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -149,122 +149,67 @@ module_param(tg3_debug, int, 0);
149MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); 149MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150 150
151static struct pci_device_id tg3_pci_tbl[] = { 151static struct pci_device_id tg3_pci_tbl[] = {
152 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700, 152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
153 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
154 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701, 154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
155 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702, 156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703, 158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704, 160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE, 162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705, 164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2, 166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M, 168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2, 170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X, 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X, 174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S, 176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3, 178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3, 180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782, 182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788, 184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789, 186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901, 188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2, 190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2, 192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F, 194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720, 196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721, 198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750, 200 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 201 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751, 202 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 203 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M, 204 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 205 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M, 206 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 207 {}
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
247 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
249 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
251 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
253 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
257 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262 { 0, }
263}; 208};
264 209
265MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); 210MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
266 211
267static struct { 212static const struct {
268 const char string[ETH_GSTRING_LEN]; 213 const char string[ETH_GSTRING_LEN];
269} ethtool_stats_keys[TG3_NUM_STATS] = { 214} ethtool_stats_keys[TG3_NUM_STATS] = {
270 { "rx_octets" }, 215 { "rx_octets" },
@@ -345,7 +290,7 @@ static struct {
345 { "nic_tx_threshold_hit" } 290 { "nic_tx_threshold_hit" }
346}; 291};
347 292
348static struct { 293static const struct {
349 const char string[ETH_GSTRING_LEN]; 294 const char string[ETH_GSTRING_LEN];
350} ethtool_test_keys[TG3_NUM_TEST] = { 295} ethtool_test_keys[TG3_NUM_TEST] = {
351 { "nvram test (online) " }, 296 { "nvram test (online) " },
@@ -3851,11 +3796,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3851 skb->h.th->check = 0; 3796 skb->h.th->check = 0;
3852 3797
3853 } 3798 }
3854 else if (skb->ip_summed == CHECKSUM_HW) 3799 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3855 base_flags |= TXD_FLAG_TCPUDP_CSUM; 3800 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3856#else 3801#else
3857 mss = 0; 3802 mss = 0;
3858 if (skb->ip_summed == CHECKSUM_HW) 3803 if (skb->ip_summed == CHECKSUM_PARTIAL)
3859 base_flags |= TXD_FLAG_TCPUDP_CSUM; 3804 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3860#endif 3805#endif
3861#if TG3_VLAN_TAG_USED 3806#if TG3_VLAN_TAG_USED
@@ -3981,7 +3926,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3981 3926
3982 entry = tp->tx_prod; 3927 entry = tp->tx_prod;
3983 base_flags = 0; 3928 base_flags = 0;
3984 if (skb->ip_summed == CHECKSUM_HW) 3929 if (skb->ip_summed == CHECKSUM_PARTIAL)
3985 base_flags |= TXD_FLAG_TCPUDP_CSUM; 3930 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3986#if TG3_TSO_SUPPORT != 0 3931#if TG3_TSO_SUPPORT != 0
3987 mss = 0; 3932 mss = 0;
@@ -4969,7 +4914,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
4969#define TG3_FW_BSS_ADDR 0x08000a70 4914#define TG3_FW_BSS_ADDR 0x08000a70
4970#define TG3_FW_BSS_LEN 0x10 4915#define TG3_FW_BSS_LEN 0x10
4971 4916
4972static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { 4917static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4973 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800, 4918 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4974 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000, 4919 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4975 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034, 4920 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
@@ -5063,7 +5008,7 @@ static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5063 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000 5008 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5064}; 5009};
5065 5010
5066static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = { 5011static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5067 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430, 5012 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5068 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74, 5013 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5069 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 5014 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
@@ -5128,13 +5073,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5128struct fw_info { 5073struct fw_info {
5129 unsigned int text_base; 5074 unsigned int text_base;
5130 unsigned int text_len; 5075 unsigned int text_len;
5131 u32 *text_data; 5076 const u32 *text_data;
5132 unsigned int rodata_base; 5077 unsigned int rodata_base;
5133 unsigned int rodata_len; 5078 unsigned int rodata_len;
5134 u32 *rodata_data; 5079 const u32 *rodata_data;
5135 unsigned int data_base; 5080 unsigned int data_base;
5136 unsigned int data_len; 5081 unsigned int data_len;
5137 u32 *data_data; 5082 const u32 *data_data;
5138}; 5083};
5139 5084
5140/* tp->lock is held. */ 5085/* tp->lock is held. */
@@ -5266,7 +5211,7 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5266#define TG3_TSO_FW_BSS_ADDR 0x08001b80 5211#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5267#define TG3_TSO_FW_BSS_LEN 0x894 5212#define TG3_TSO_FW_BSS_LEN 0x894
5268 5213
5269static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { 5214static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5270 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000, 5215 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5271 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800, 5216 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5272 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, 5217 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
@@ -5553,7 +5498,7 @@ static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5553 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, 5498 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5554}; 5499};
5555 5500
5556static u32 tg3TsoFwRodata[] = { 5501static const u32 tg3TsoFwRodata[] = {
5557 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, 5502 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5558 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f, 5503 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5559 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000, 5504 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
@@ -5561,7 +5506,7 @@ static u32 tg3TsoFwRodata[] = {
5561 0x00000000, 5506 0x00000000,
5562}; 5507};
5563 5508
5564static u32 tg3TsoFwData[] = { 5509static const u32 tg3TsoFwData[] = {
5565 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000, 5510 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5566 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 5511 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5567 0x00000000, 5512 0x00000000,
@@ -5583,7 +5528,7 @@ static u32 tg3TsoFwData[] = {
5583#define TG3_TSO5_FW_BSS_ADDR 0x00010f50 5528#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5584#define TG3_TSO5_FW_BSS_LEN 0x88 5529#define TG3_TSO5_FW_BSS_LEN 0x88
5585 5530
5586static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { 5531static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5587 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000, 5532 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5588 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, 5533 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5589 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, 5534 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
@@ -5742,14 +5687,14 @@ static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5742 0x00000000, 0x00000000, 0x00000000, 5687 0x00000000, 0x00000000, 0x00000000,
5743}; 5688};
5744 5689
5745static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = { 5690static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5746 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, 5691 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5747 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 5692 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5748 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 5693 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5749 0x00000000, 0x00000000, 0x00000000, 5694 0x00000000, 0x00000000, 0x00000000,
5750}; 5695};
5751 5696
5752static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = { 5697static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5753 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000, 5698 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5754 0x00000000, 0x00000000, 0x00000000, 5699 0x00000000, 0x00000000, 0x00000000,
5755}; 5700};
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 2fb4f978ed54..8f6f6fd8b87d 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -830,7 +830,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
830 first_txd->addrHi = (u64)((unsigned long) skb) >> 32; 830 first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
831 first_txd->processFlags = 0; 831 first_txd->processFlags = 0;
832 832
833 if(skb->ip_summed == CHECKSUM_HW) { 833 if(skb->ip_summed == CHECKSUM_PARTIAL) {
834 /* The 3XP will figure out if this is UDP/TCP */ 834 /* The 3XP will figure out if this is UDP/TCP */
835 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM; 835 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
836 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM; 836 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index acd0a91a09c3..cbebf1b96e9d 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1230,7 +1230,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1230 rp->tx_skbuff[entry] = skb; 1230 rp->tx_skbuff[entry] = skb;
1231 1231
1232 if ((rp->quirks & rqRhineI) && 1232 if ((rp->quirks & rqRhineI) &&
1233 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) { 1233 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1234 /* Must use alignment buffer. */ 1234 /* Must use alignment buffer. */
1235 if (skb->len > PKT_BUF_SZ) { 1235 if (skb->len > PKT_BUF_SZ) {
1236 /* packet too long, drop it */ 1236 /* packet too long, drop it */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index dd472b64e5a2..7d8808ce541f 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2002,7 +2002,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2002 * Handle hardware checksum 2002 * Handle hardware checksum
2003 */ 2003 */
2004 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) 2004 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
2005 && (skb->ip_summed == CHECKSUM_HW)) { 2005 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2006 struct iphdr *ip = skb->nh.iph; 2006 struct iphdr *ip = skb->nh.iph;
2007 if (ip->protocol == IPPROTO_TCP) 2007 if (ip->protocol == IPPROTO_TCP)
2008 td_ptr->tdesc1.TCR |= TCR0_TCPCK; 2008 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index adc9d8f2c28f..5d39b2df0cc4 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -189,6 +189,10 @@ struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
189 struct zfcp_fsf_req *request, *tmp; 189 struct zfcp_fsf_req *request, *tmp;
190 unsigned int i; 190 unsigned int i;
191 191
192 /* 0 is reserved as an invalid req_id */
193 if (req_id == 0)
194 return NULL;
195
192 i = req_id % REQUEST_LIST_SIZE; 196 i = req_id % REQUEST_LIST_SIZE;
193 197
194 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) 198 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
@@ -299,11 +303,45 @@ zfcp_init_device_configure(void)
299 return; 303 return;
300} 304}
301 305
306static int calc_alignment(int size)
307{
308 int align = 1;
309
310 if (!size)
311 return 0;
312
313 while ((size - align) > 0)
314 align <<= 1;
315
316 return align;
317}
318
302static int __init 319static int __init
303zfcp_module_init(void) 320zfcp_module_init(void)
304{ 321{
322 int retval = -ENOMEM;
323 int size, align;
324
325 size = sizeof(struct zfcp_fsf_req_qtcb);
326 align = calc_alignment(size);
327 zfcp_data.fsf_req_qtcb_cache =
328 kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL);
329 if (!zfcp_data.fsf_req_qtcb_cache)
330 goto out;
305 331
306 int retval = 0; 332 size = sizeof(struct fsf_status_read_buffer);
333 align = calc_alignment(size);
334 zfcp_data.sr_buffer_cache =
335 kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL);
336 if (!zfcp_data.sr_buffer_cache)
337 goto out_sr_cache;
338
339 size = sizeof(struct zfcp_gid_pn_data);
340 align = calc_alignment(size);
341 zfcp_data.gid_pn_cache =
342 kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL);
343 if (!zfcp_data.gid_pn_cache)
344 goto out_gid_cache;
307 345
308 atomic_set(&zfcp_data.loglevel, loglevel); 346 atomic_set(&zfcp_data.loglevel, loglevel);
309 347
@@ -313,15 +351,16 @@ zfcp_module_init(void)
313 /* initialize adapters to be removed list head */ 351 /* initialize adapters to be removed list head */
314 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh); 352 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
315 353
316 zfcp_transport_template = fc_attach_transport(&zfcp_transport_functions); 354 zfcp_data.scsi_transport_template =
317 if (!zfcp_transport_template) 355 fc_attach_transport(&zfcp_transport_functions);
318 return -ENODEV; 356 if (!zfcp_data.scsi_transport_template)
357 goto out_transport;
319 358
320 retval = misc_register(&zfcp_cfdc_misc); 359 retval = misc_register(&zfcp_cfdc_misc);
321 if (retval != 0) { 360 if (retval != 0) {
322 ZFCP_LOG_INFO("registration of misc device " 361 ZFCP_LOG_INFO("registration of misc device "
323 "zfcp_cfdc failed\n"); 362 "zfcp_cfdc failed\n");
324 goto out; 363 goto out_misc;
325 } 364 }
326 365
327 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n", 366 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
@@ -333,9 +372,6 @@ zfcp_module_init(void)
333 /* initialise configuration rw lock */ 372 /* initialise configuration rw lock */
334 rwlock_init(&zfcp_data.config_lock); 373 rwlock_init(&zfcp_data.config_lock);
335 374
336 /* save address of data structure managing the driver module */
337 zfcp_data.scsi_host_template.module = THIS_MODULE;
338
339 /* setup dynamic I/O */ 375 /* setup dynamic I/O */
340 retval = zfcp_ccw_register(); 376 retval = zfcp_ccw_register();
341 if (retval) { 377 if (retval) {
@@ -350,6 +386,14 @@ zfcp_module_init(void)
350 386
351 out_ccw_register: 387 out_ccw_register:
352 misc_deregister(&zfcp_cfdc_misc); 388 misc_deregister(&zfcp_cfdc_misc);
389 out_misc:
390 fc_release_transport(zfcp_data.scsi_transport_template);
391 out_transport:
392 kmem_cache_destroy(zfcp_data.gid_pn_cache);
393 out_gid_cache:
394 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
395 out_sr_cache:
396 kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
353 out: 397 out:
354 return retval; 398 return retval;
355} 399}
@@ -935,20 +979,20 @@ static int
935zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 979zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
936{ 980{
937 adapter->pool.fsf_req_erp = 981 adapter->pool.fsf_req_erp =
938 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR, 982 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
939 sizeof(struct zfcp_fsf_req_pool_element)); 983 zfcp_data.fsf_req_qtcb_cache);
940 if (!adapter->pool.fsf_req_erp) 984 if (!adapter->pool.fsf_req_erp)
941 return -ENOMEM; 985 return -ENOMEM;
942 986
943 adapter->pool.fsf_req_scsi = 987 adapter->pool.fsf_req_scsi =
944 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, 988 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
945 sizeof(struct zfcp_fsf_req_pool_element)); 989 zfcp_data.fsf_req_qtcb_cache);
946 if (!adapter->pool.fsf_req_scsi) 990 if (!adapter->pool.fsf_req_scsi)
947 return -ENOMEM; 991 return -ENOMEM;
948 992
949 adapter->pool.fsf_req_abort = 993 adapter->pool.fsf_req_abort =
950 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, 994 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
951 sizeof(struct zfcp_fsf_req_pool_element)); 995 zfcp_data.fsf_req_qtcb_cache);
952 if (!adapter->pool.fsf_req_abort) 996 if (!adapter->pool.fsf_req_abort)
953 return -ENOMEM; 997 return -ENOMEM;
954 998
@@ -959,14 +1003,14 @@ zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
959 return -ENOMEM; 1003 return -ENOMEM;
960 1004
961 adapter->pool.data_status_read = 1005 adapter->pool.data_status_read =
962 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, 1006 mempool_create_slab_pool(ZFCP_POOL_STATUS_READ_NR,
963 sizeof(struct fsf_status_read_buffer)); 1007 zfcp_data.sr_buffer_cache);
964 if (!adapter->pool.data_status_read) 1008 if (!adapter->pool.data_status_read)
965 return -ENOMEM; 1009 return -ENOMEM;
966 1010
967 adapter->pool.data_gid_pn = 1011 adapter->pool.data_gid_pn =
968 mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR, 1012 mempool_create_slab_pool(ZFCP_POOL_DATA_GID_PN_NR,
969 sizeof(struct zfcp_gid_pn_data)); 1013 zfcp_data.gid_pn_cache);
970 if (!adapter->pool.data_gid_pn) 1014 if (!adapter->pool.data_gid_pn)
971 return -ENOMEM; 1015 return -ENOMEM;
972 1016
@@ -1091,9 +1135,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1091 /* initialize lock of associated request queue */ 1135 /* initialize lock of associated request queue */
1092 rwlock_init(&adapter->request_queue.queue_lock); 1136 rwlock_init(&adapter->request_queue.queue_lock);
1093 1137
1094 /* intitialise SCSI ER timer */
1095 init_timer(&adapter->scsi_er_timer);
1096
1097 /* mark adapter unusable as long as sysfs registration is not complete */ 1138 /* mark adapter unusable as long as sysfs registration is not complete */
1098 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 1139 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1099 1140
@@ -1609,7 +1650,6 @@ zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
1609 gid_pn->ct.handler = zfcp_ns_gid_pn_handler; 1650 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
1610 gid_pn->ct.handler_data = (unsigned long) gid_pn; 1651 gid_pn->ct.handler_data = (unsigned long) gid_pn;
1611 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 1652 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
1612 gid_pn->ct.timer = &erp_action->timer;
1613 gid_pn->port = erp_action->port; 1653 gid_pn->port = erp_action->port;
1614 1654
1615 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, 1655 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index fdabadeaa9ee..81680efa1721 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -275,19 +275,6 @@ zfcp_ccw_register(void)
275} 275}
276 276
277/** 277/**
278 * zfcp_ccw_unregister - ccw unregister function
279 *
280 * Unregisters the driver from common i/o layer. Function will be called at
281 * module unload/system shutdown.
282 */
283void __exit
284zfcp_ccw_unregister(void)
285{
286 zfcp_sysfs_driver_remove_files(&zfcp_ccw_driver.driver);
287 ccw_driver_unregister(&zfcp_ccw_driver);
288}
289
290/**
291 * zfcp_ccw_shutdown - gets called on reboot/shutdown 278 * zfcp_ccw_shutdown - gets called on reboot/shutdown
292 * 279 *
293 * Makes sure that QDIO queues are down when the system gets stopped. 280 * Makes sure that QDIO queues are down when the system gets stopped.
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index c033145d0f19..0aa3b1ac76af 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -707,7 +707,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
709 struct zfcp_fsf_req *fsf_req, 709 struct zfcp_fsf_req *fsf_req,
710 struct zfcp_fsf_req *old_fsf_req) 710 unsigned long old_req_id)
711{ 711{
712 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 712 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
713 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 713 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
@@ -768,8 +768,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
768 rec->fsf_seqno = fsf_req->seq_no; 768 rec->fsf_seqno = fsf_req->seq_no;
769 rec->fsf_issued = fsf_req->issued; 769 rec->fsf_issued = fsf_req->issued;
770 } 770 }
771 rec->type.old_fsf_reqid = 771 rec->type.old_fsf_reqid = old_req_id;
772 (unsigned long) old_fsf_req;
773 } else { 772 } else {
774 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 773 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
775 dump->total_size = buflen; 774 dump->total_size = buflen;
@@ -794,17 +793,17 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
794 struct zfcp_fsf_req *fsf_req) 793 struct zfcp_fsf_req *fsf_req)
795{ 794{
796 _zfcp_scsi_dbf_event_common("rslt", tag, level, 795 _zfcp_scsi_dbf_event_common("rslt", tag, level,
797 adapter, scsi_cmnd, fsf_req, NULL); 796 adapter, scsi_cmnd, fsf_req, 0);
798} 797}
799 798
800inline void 799inline void
801zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
802 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
803 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
804 struct zfcp_fsf_req *old_fsf_req) 803 unsigned long old_req_id)
805{ 804{
806 _zfcp_scsi_dbf_event_common("abrt", tag, 1, 805 _zfcp_scsi_dbf_event_common("abrt", tag, 1,
807 adapter, scsi_cmnd, new_fsf_req, old_fsf_req); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
808} 807}
809 808
810inline void 809inline void
@@ -814,7 +813,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
814 struct zfcp_adapter *adapter = unit->port->adapter; 813 struct zfcp_adapter *adapter = unit->port->adapter;
815 814
816 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", 815 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
817 tag, 1, adapter, scsi_cmnd, NULL, NULL); 816 tag, 1, adapter, scsi_cmnd, NULL, 0);
818} 817}
819 818
820static int 819static int
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7c84b3d4bd94..8f882690994d 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -19,7 +19,6 @@
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22
23#ifndef ZFCP_DEF_H 22#ifndef ZFCP_DEF_H
24#define ZFCP_DEF_H 23#define ZFCP_DEF_H
25 24
@@ -32,6 +31,10 @@
32#include <linux/blkdev.h> 31#include <linux/blkdev.h>
33#include <linux/delay.h> 32#include <linux/delay.h>
34#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/mempool.h>
36#include <linux/syscalls.h>
37#include <linux/ioctl.h>
35#include <scsi/scsi.h> 38#include <scsi/scsi.h>
36#include <scsi/scsi_tcq.h> 39#include <scsi/scsi_tcq.h>
37#include <scsi/scsi_cmnd.h> 40#include <scsi/scsi_cmnd.h>
@@ -39,14 +42,11 @@
39#include <scsi/scsi_host.h> 42#include <scsi/scsi_host.h>
40#include <scsi/scsi_transport.h> 43#include <scsi/scsi_transport.h>
41#include <scsi/scsi_transport_fc.h> 44#include <scsi/scsi_transport_fc.h>
42#include "zfcp_fsf.h"
43#include <asm/ccwdev.h> 45#include <asm/ccwdev.h>
44#include <asm/qdio.h> 46#include <asm/qdio.h>
45#include <asm/debug.h> 47#include <asm/debug.h>
46#include <asm/ebcdic.h> 48#include <asm/ebcdic.h>
47#include <linux/mempool.h> 49#include "zfcp_fsf.h"
48#include <linux/syscalls.h>
49#include <linux/ioctl.h>
50 50
51 51
52/********************* GENERAL DEFINES *********************************/ 52/********************* GENERAL DEFINES *********************************/
@@ -137,7 +137,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
137#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 137#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
138 138
139/* timeout value for "default timer" for fsf requests */ 139/* timeout value for "default timer" for fsf requests */
140#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 140#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
141 141
142/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 142/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
143 143
@@ -779,7 +779,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
779 * @handler_data: data passed to handler function 779 * @handler_data: data passed to handler function
780 * @pool: pointer to memory pool for ct request structure 780 * @pool: pointer to memory pool for ct request structure
781 * @timeout: FSF timeout for this request 781 * @timeout: FSF timeout for this request
782 * @timer: timer (e.g. for request initiated by erp)
783 * @completion: completion for synchronization purposes 782 * @completion: completion for synchronization purposes
784 * @status: used to pass error status to calling function 783 * @status: used to pass error status to calling function
785 */ 784 */
@@ -793,7 +792,6 @@ struct zfcp_send_ct {
793 unsigned long handler_data; 792 unsigned long handler_data;
794 mempool_t *pool; 793 mempool_t *pool;
795 int timeout; 794 int timeout;
796 struct timer_list *timer;
797 struct completion *completion; 795 struct completion *completion;
798 int status; 796 int status;
799}; 797};
@@ -821,7 +819,6 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
821 * @resp_count: number of elements in response scatter-gather list 819 * @resp_count: number of elements in response scatter-gather list
822 * @handler: handler function (called for response to the request) 820 * @handler: handler function (called for response to the request)
823 * @handler_data: data passed to handler function 821 * @handler_data: data passed to handler function
824 * @timer: timer (e.g. for request initiated by erp)
825 * @completion: completion for synchronization purposes 822 * @completion: completion for synchronization purposes
826 * @ls_code: hex code of ELS command 823 * @ls_code: hex code of ELS command
827 * @status: used to pass error status to calling function 824 * @status: used to pass error status to calling function
@@ -836,7 +833,6 @@ struct zfcp_send_els {
836 unsigned int resp_count; 833 unsigned int resp_count;
837 zfcp_send_els_handler_t handler; 834 zfcp_send_els_handler_t handler;
838 unsigned long handler_data; 835 unsigned long handler_data;
839 struct timer_list *timer;
840 struct completion *completion; 836 struct completion *completion;
841 int ls_code; 837 int ls_code;
842 int status; 838 int status;
@@ -886,7 +882,6 @@ struct zfcp_adapter {
886 struct list_head port_remove_lh; /* head of ports to be 882 struct list_head port_remove_lh; /* head of ports to be
887 removed */ 883 removed */
888 u32 ports; /* number of remote ports */ 884 u32 ports; /* number of remote ports */
889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */
890 atomic_t reqs_active; /* # active FSF reqs */ 885 atomic_t reqs_active; /* # active FSF reqs */
891 unsigned long req_no; /* unique FSF req number */ 886 unsigned long req_no; /* unique FSF req number */
892 struct list_head *req_list; /* list of pending reqs */ 887 struct list_head *req_list; /* list of pending reqs */
@@ -1003,6 +998,7 @@ struct zfcp_fsf_req {
1003 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 998 struct fsf_qtcb *qtcb; /* address of associated QTCB */
1004 u32 seq_no; /* Sequence number of request */ 999 u32 seq_no; /* Sequence number of request */
1005 unsigned long data; /* private data of request */ 1000 unsigned long data; /* private data of request */
1001 struct timer_list timer; /* used for erp or scsi er */
1006 struct zfcp_erp_action *erp_action; /* used if this request is 1002 struct zfcp_erp_action *erp_action; /* used if this request is
1007 issued on behalf of erp */ 1003 issued on behalf of erp */
1008 mempool_t *pool; /* used if request was alloacted 1004 mempool_t *pool; /* used if request was alloacted
@@ -1016,6 +1012,7 @@ typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
1016/* driver data */ 1012/* driver data */
1017struct zfcp_data { 1013struct zfcp_data {
1018 struct scsi_host_template scsi_host_template; 1014 struct scsi_host_template scsi_host_template;
1015 struct scsi_transport_template *scsi_transport_template;
1019 atomic_t status; /* Module status flags */ 1016 atomic_t status; /* Module status flags */
1020 struct list_head adapter_list_head; /* head of adapter list */ 1017 struct list_head adapter_list_head; /* head of adapter list */
1021 struct list_head adapter_remove_lh; /* head of adapters to be 1018 struct list_head adapter_remove_lh; /* head of adapters to be
@@ -1031,6 +1028,9 @@ struct zfcp_data {
1031 wwn_t init_wwpn; 1028 wwn_t init_wwpn;
1032 fcp_lun_t init_fcp_lun; 1029 fcp_lun_t init_fcp_lun;
1033 char *driver_version; 1030 char *driver_version;
1031 kmem_cache_t *fsf_req_qtcb_cache;
1032 kmem_cache_t *sr_buffer_cache;
1033 kmem_cache_t *gid_pn_cache;
1034}; 1034};
1035 1035
1036/** 1036/**
@@ -1051,7 +1051,7 @@ struct zfcp_sg_list {
1051#define ZFCP_POOL_DATA_GID_PN_NR 1 1051#define ZFCP_POOL_DATA_GID_PN_NR 1
1052 1052
1053/* struct used by memory pools for fsf_requests */ 1053/* struct used by memory pools for fsf_requests */
1054struct zfcp_fsf_req_pool_element { 1054struct zfcp_fsf_req_qtcb {
1055 struct zfcp_fsf_req fsf_req; 1055 struct zfcp_fsf_req fsf_req;
1056 struct fsf_qtcb qtcb; 1056 struct fsf_qtcb qtcb;
1057}; 1057};
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7f60b6fdf724..862a411a4aa0 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,6 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); 64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); 65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); 66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
67static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
68static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); 67static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 68static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 69static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,6 +91,7 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); 91static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); 92static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
95 93
94static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
96static void zfcp_erp_action_dismiss_port(struct zfcp_port *); 95static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
97static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); 96static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
98static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); 97static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
@@ -111,64 +110,86 @@ static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
111static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *); 110static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *);
112 111
113static void zfcp_erp_memwait_handler(unsigned long); 112static void zfcp_erp_memwait_handler(unsigned long);
114static void zfcp_erp_timeout_handler(unsigned long);
115static inline void zfcp_erp_timeout_init(struct zfcp_erp_action *);
116 113
117/** 114/**
118 * zfcp_fsf_request_timeout_handler - called if a request timed out 115 * zfcp_close_qdio - close qdio queues for an adapter
119 * @data: pointer to adapter for handler function
120 *
121 * This function needs to be called if requests (ELS, Generic Service,
122 * or SCSI commands) exceed a certain time limit. The assumption is
123 * that after the time limit the adapter get stuck. So we trigger a reopen of
124 * the adapter. This should not be used for error recovery, SCSI abort
125 * commands and SCSI requests from SCSI mid-layer.
126 */ 116 */
127void 117static void zfcp_close_qdio(struct zfcp_adapter *adapter)
128zfcp_fsf_request_timeout_handler(unsigned long data)
129{ 118{
130 struct zfcp_adapter *adapter; 119 struct zfcp_qdio_queue *req_queue;
120 int first, count;
131 121
132 adapter = (struct zfcp_adapter *) data; 122 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
123 return;
133 124
134 zfcp_erp_adapter_reopen(adapter, 0); 125 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
126 req_queue = &adapter->request_queue;
127 write_lock_irq(&req_queue->queue_lock);
128 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
129 write_unlock_irq(&req_queue->queue_lock);
130
131 debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
132 while (qdio_shutdown(adapter->ccw_device,
133 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
134 msleep(1000);
135 debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
136
137 /* cleanup used outbound sbals */
138 count = atomic_read(&req_queue->free_count);
139 if (count < QDIO_MAX_BUFFERS_PER_Q) {
140 first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q;
141 count = QDIO_MAX_BUFFERS_PER_Q - count;
142 zfcp_qdio_zero_sbals(req_queue->buffer, first, count);
143 }
144 req_queue->free_index = 0;
145 atomic_set(&req_queue->free_count, 0);
146 req_queue->distance_from_int = 0;
147 adapter->response_queue.free_index = 0;
148 atomic_set(&adapter->response_queue.free_count, 0);
135} 149}
136 150
137/** 151/**
138 * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks 152 * zfcp_close_fsf - stop FSF operations for an adapter
139 * 153 *
140 * This function needs to be called whenever a SCSI error recovery 154 * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of
141 * action (abort/reset) does not return. Re-opening the adapter means 155 * requests waiting for completion; especially this returns SCSI commands
142 * that the abort/reset command can be returned by zfcp. It won't complete 156 * with error state).
143 * via the adapter anymore (because qdio queues are closed). If ERP is
144 * already running on this adapter it will be stopped.
145 */ 157 */
146void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) 158static void zfcp_close_fsf(struct zfcp_adapter *adapter)
147{ 159{
148 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 160 /* close queues to ensure that buffers are not accessed by adapter */
149 unsigned long flags; 161 zfcp_close_qdio(adapter);
150 162 zfcp_fsf_req_dismiss_all(adapter);
151 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " 163 /* reset FSF request sequence number */
152 "Restarting all operations on the adapter %s\n", 164 adapter->fsf_req_seq_no = 0;
153 zfcp_get_busid_by_adapter(adapter)); 165 /* all ports and units are closed */
154 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); 166 zfcp_erp_modify_adapter_status(adapter,
167 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
168}
155 169
156 write_lock_irqsave(&adapter->erp_lock, flags); 170/**
157 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 171 * zfcp_fsf_request_timeout_handler - called if a request timed out
158 &adapter->status)) { 172 * @data: pointer to adapter for handler function
159 zfcp_erp_modify_adapter_status(adapter, 173 *
160 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, 174 * This function needs to be called if requests (ELS, Generic Service,
161 ZFCP_CLEAR); 175 * or SCSI commands) exceed a certain time limit. The assumption is
162 zfcp_erp_action_dismiss_adapter(adapter); 176 * that after the time limit the adapter get stuck. So we trigger a reopen of
163 write_unlock_irqrestore(&adapter->erp_lock, flags); 177 * the adapter.
164 /* dismiss all pending requests including requests for ERP */ 178 */
165 zfcp_fsf_req_dismiss_all(adapter); 179static void zfcp_fsf_request_timeout_handler(unsigned long data)
166 adapter->fsf_req_seq_no = 0; 180{
167 } else 181 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
168 write_unlock_irqrestore(&adapter->erp_lock, flags);
169 zfcp_erp_adapter_reopen(adapter, 0); 182 zfcp_erp_adapter_reopen(adapter, 0);
170} 183}
171 184
185void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
186{
187 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
188 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
189 fsf_req->timer.expires = timeout;
190 add_timer(&fsf_req->timer);
191}
192
172/* 193/*
173 * function: 194 * function:
174 * 195 *
@@ -282,7 +303,6 @@ zfcp_erp_adisc(struct zfcp_port *port)
282 struct zfcp_ls_adisc *adisc; 303 struct zfcp_ls_adisc *adisc;
283 void *address = NULL; 304 void *address = NULL;
284 int retval = 0; 305 int retval = 0;
285 struct timer_list *timer;
286 306
287 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC); 307 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
288 if (send_els == NULL) 308 if (send_els == NULL)
@@ -329,22 +349,11 @@ zfcp_erp_adisc(struct zfcp_port *port)
329 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 349 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
330 adisc->nport_id); 350 adisc->nport_id);
331 351
332 timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC);
333 if (!timer)
334 goto nomem;
335
336 init_timer(timer);
337 timer->function = zfcp_fsf_request_timeout_handler;
338 timer->data = (unsigned long) adapter;
339 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
340 send_els->timer = timer;
341
342 retval = zfcp_fsf_send_els(send_els); 352 retval = zfcp_fsf_send_els(send_els);
343 if (retval != 0) { 353 if (retval != 0) {
344 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port " 354 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
345 "0x%08x on adapter %s\n", send_els->d_id, 355 "0x%08x on adapter %s\n", send_els->d_id,
346 zfcp_get_busid_by_adapter(adapter)); 356 zfcp_get_busid_by_adapter(adapter));
347 del_timer(send_els->timer);
348 goto freemem; 357 goto freemem;
349 } 358 }
350 359
@@ -356,7 +365,6 @@ zfcp_erp_adisc(struct zfcp_port *port)
356 if (address != NULL) 365 if (address != NULL)
357 __free_pages(send_els->req->page, 0); 366 __free_pages(send_els->req->page, 0);
358 if (send_els != NULL) { 367 if (send_els != NULL) {
359 kfree(send_els->timer);
360 kfree(send_els->req); 368 kfree(send_els->req);
361 kfree(send_els->resp); 369 kfree(send_els->resp);
362 kfree(send_els); 370 kfree(send_els);
@@ -382,9 +390,6 @@ zfcp_erp_adisc_handler(unsigned long data)
382 struct zfcp_ls_adisc_acc *adisc; 390 struct zfcp_ls_adisc_acc *adisc;
383 391
384 send_els = (struct zfcp_send_els *) data; 392 send_els = (struct zfcp_send_els *) data;
385
386 del_timer(send_els->timer);
387
388 adapter = send_els->adapter; 393 adapter = send_els->adapter;
389 port = send_els->port; 394 port = send_els->port;
390 d_id = send_els->d_id; 395 d_id = send_els->d_id;
@@ -433,7 +438,6 @@ zfcp_erp_adisc_handler(unsigned long data)
433 out: 438 out:
434 zfcp_port_put(port); 439 zfcp_port_put(port);
435 __free_pages(send_els->req->page, 0); 440 __free_pages(send_els->req->page, 0);
436 kfree(send_els->timer);
437 kfree(send_els->req); 441 kfree(send_els->req);
438 kfree(send_els->resp); 442 kfree(send_els->resp);
439 kfree(send_els); 443 kfree(send_els);
@@ -909,8 +913,6 @@ static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
909 debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex"); 913 debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex");
910 debug_event(adapter->erp_dbf, 2, &erp_action->action, 914 debug_event(adapter->erp_dbf, 2, &erp_action->action,
911 sizeof (int)); 915 sizeof (int));
912 if (!(set_mask & ZFCP_STATUS_ERP_TIMEDOUT))
913 del_timer(&erp_action->timer);
914 erp_action->status |= set_mask; 916 erp_action->status |= set_mask;
915 zfcp_erp_action_ready(erp_action); 917 zfcp_erp_action_ready(erp_action);
916 } else { 918 } else {
@@ -957,8 +959,7 @@ zfcp_erp_memwait_handler(unsigned long data)
957 * action gets an appropriate flag and will be processed 959 * action gets an appropriate flag and will be processed
958 * accordingly 960 * accordingly
959 */ 961 */
960static void 962void zfcp_erp_timeout_handler(unsigned long data)
961zfcp_erp_timeout_handler(unsigned long data)
962{ 963{
963 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; 964 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
964 struct zfcp_adapter *adapter = erp_action->adapter; 965 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1934,8 +1935,7 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
1934 &erp_action->adapter->status); 1935 &erp_action->adapter->status);
1935 1936
1936 failed_openfcp: 1937 failed_openfcp:
1937 zfcp_erp_adapter_strategy_close_qdio(erp_action); 1938 zfcp_close_fsf(erp_action->adapter);
1938 zfcp_erp_adapter_strategy_close_fsf(erp_action);
1939 failed_qdio: 1939 failed_qdio:
1940 out: 1940 out:
1941 return retval; 1941 return retval;
@@ -2040,59 +2040,6 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2040 return retval; 2040 return retval;
2041} 2041}
2042 2042
2043/**
2044 * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
2045 */
2046static void
2047zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2048{
2049 int first_used;
2050 int used_count;
2051 struct zfcp_adapter *adapter = erp_action->adapter;
2052
2053 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
2054 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2055 "queues on adapter %s\n",
2056 zfcp_get_busid_by_adapter(adapter));
2057 return;
2058 }
2059
2060 /*
2061 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2062 * do_QDIO won't be called while qdio_shutdown is in progress.
2063 */
2064 write_lock_irq(&adapter->request_queue.queue_lock);
2065 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2066 write_unlock_irq(&adapter->request_queue.queue_lock);
2067
2068 debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
2069 while (qdio_shutdown(adapter->ccw_device,
2070 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
2071 msleep(1000);
2072 debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
2073
2074 /*
2075 * First we had to stop QDIO operation.
2076 * Now it is safe to take the following actions.
2077 */
2078
2079 /* Cleanup only necessary when there are unacknowledged buffers */
2080 if (atomic_read(&adapter->request_queue.free_count)
2081 < QDIO_MAX_BUFFERS_PER_Q) {
2082 first_used = (adapter->request_queue.free_index +
2083 atomic_read(&adapter->request_queue.free_count))
2084 % QDIO_MAX_BUFFERS_PER_Q;
2085 used_count = QDIO_MAX_BUFFERS_PER_Q -
2086 atomic_read(&adapter->request_queue.free_count);
2087 zfcp_qdio_zero_sbals(adapter->request_queue.buffer,
2088 first_used, used_count);
2089 }
2090 adapter->response_queue.free_index = 0;
2091 atomic_set(&adapter->response_queue.free_count, 0);
2092 adapter->request_queue.free_index = 0;
2093 atomic_set(&adapter->request_queue.free_count, 0);
2094 adapter->request_queue.distance_from_int = 0;
2095}
2096 2043
2097static int 2044static int
2098zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2045zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
@@ -2127,7 +2074,6 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2127 write_lock_irq(&adapter->erp_lock); 2074 write_lock_irq(&adapter->erp_lock);
2128 zfcp_erp_action_to_running(erp_action); 2075 zfcp_erp_action_to_running(erp_action);
2129 write_unlock_irq(&adapter->erp_lock); 2076 write_unlock_irq(&adapter->erp_lock);
2130 zfcp_erp_timeout_init(erp_action);
2131 if (zfcp_fsf_exchange_config_data(erp_action)) { 2077 if (zfcp_fsf_exchange_config_data(erp_action)) {
2132 retval = ZFCP_ERP_FAILED; 2078 retval = ZFCP_ERP_FAILED;
2133 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf"); 2079 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
@@ -2196,7 +2142,6 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2196 zfcp_erp_action_to_running(erp_action); 2142 zfcp_erp_action_to_running(erp_action);
2197 write_unlock_irq(&adapter->erp_lock); 2143 write_unlock_irq(&adapter->erp_lock);
2198 2144
2199 zfcp_erp_timeout_init(erp_action);
2200 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2145 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2201 if (ret == -EOPNOTSUPP) { 2146 if (ret == -EOPNOTSUPP) {
2202 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2147 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
@@ -2248,27 +2193,6 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2248 return retval; 2193 return retval;
2249} 2194}
2250 2195
2251/**
2252 * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
2253 */
2254static void
2255zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2256{
2257 struct zfcp_adapter *adapter = erp_action->adapter;
2258
2259 /*
2260 * wake waiting initiators of requests,
2261 * return SCSI commands (with error status),
2262 * clean up all requests (synchronously)
2263 */
2264 zfcp_fsf_req_dismiss_all(adapter);
2265 /* reset FSF request sequence number */
2266 adapter->fsf_req_seq_no = 0;
2267 /* all ports and units are closed */
2268 zfcp_erp_modify_adapter_status(adapter,
2269 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2270}
2271
2272/* 2196/*
2273 * function: 2197 * function:
2274 * 2198 *
@@ -2605,7 +2529,6 @@ zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
2605 struct zfcp_adapter *adapter = erp_action->adapter; 2529 struct zfcp_adapter *adapter = erp_action->adapter;
2606 struct zfcp_port *port = erp_action->port; 2530 struct zfcp_port *port = erp_action->port;
2607 2531
2608 zfcp_erp_timeout_init(erp_action);
2609 retval = zfcp_fsf_close_physical_port(erp_action); 2532 retval = zfcp_fsf_close_physical_port(erp_action);
2610 if (retval == -ENOMEM) { 2533 if (retval == -ENOMEM) {
2611 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem"); 2534 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem");
@@ -2662,7 +2585,6 @@ zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2662 struct zfcp_adapter *adapter = erp_action->adapter; 2585 struct zfcp_adapter *adapter = erp_action->adapter;
2663 struct zfcp_port *port = erp_action->port; 2586 struct zfcp_port *port = erp_action->port;
2664 2587
2665 zfcp_erp_timeout_init(erp_action);
2666 retval = zfcp_fsf_close_port(erp_action); 2588 retval = zfcp_fsf_close_port(erp_action);
2667 if (retval == -ENOMEM) { 2589 if (retval == -ENOMEM) {
2668 debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem"); 2590 debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem");
@@ -2700,7 +2622,6 @@ zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2700 struct zfcp_adapter *adapter = erp_action->adapter; 2622 struct zfcp_adapter *adapter = erp_action->adapter;
2701 struct zfcp_port *port = erp_action->port; 2623 struct zfcp_port *port = erp_action->port;
2702 2624
2703 zfcp_erp_timeout_init(erp_action);
2704 retval = zfcp_fsf_open_port(erp_action); 2625 retval = zfcp_fsf_open_port(erp_action);
2705 if (retval == -ENOMEM) { 2626 if (retval == -ENOMEM) {
2706 debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem"); 2627 debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem");
@@ -2738,7 +2659,6 @@ zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
2738 struct zfcp_adapter *adapter = erp_action->adapter; 2659 struct zfcp_adapter *adapter = erp_action->adapter;
2739 struct zfcp_port *port = erp_action->port; 2660 struct zfcp_port *port = erp_action->port;
2740 2661
2741 zfcp_erp_timeout_init(erp_action);
2742 retval = zfcp_ns_gid_pn_request(erp_action); 2662 retval = zfcp_ns_gid_pn_request(erp_action);
2743 if (retval == -ENOMEM) { 2663 if (retval == -ENOMEM) {
2744 debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem"); 2664 debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem");
@@ -2864,7 +2784,6 @@ zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
2864 struct zfcp_adapter *adapter = erp_action->adapter; 2784 struct zfcp_adapter *adapter = erp_action->adapter;
2865 struct zfcp_unit *unit = erp_action->unit; 2785 struct zfcp_unit *unit = erp_action->unit;
2866 2786
2867 zfcp_erp_timeout_init(erp_action);
2868 retval = zfcp_fsf_close_unit(erp_action); 2787 retval = zfcp_fsf_close_unit(erp_action);
2869 if (retval == -ENOMEM) { 2788 if (retval == -ENOMEM) {
2870 debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem"); 2789 debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem");
@@ -2905,7 +2824,6 @@ zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2905 struct zfcp_adapter *adapter = erp_action->adapter; 2824 struct zfcp_adapter *adapter = erp_action->adapter;
2906 struct zfcp_unit *unit = erp_action->unit; 2825 struct zfcp_unit *unit = erp_action->unit;
2907 2826
2908 zfcp_erp_timeout_init(erp_action);
2909 retval = zfcp_fsf_open_unit(erp_action); 2827 retval = zfcp_fsf_open_unit(erp_action);
2910 if (retval == -ENOMEM) { 2828 if (retval == -ENOMEM) {
2911 debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem"); 2829 debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem");
@@ -2930,14 +2848,13 @@ zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2930 return retval; 2848 return retval;
2931} 2849}
2932 2850
2933static inline void 2851void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req)
2934zfcp_erp_timeout_init(struct zfcp_erp_action *erp_action)
2935{ 2852{
2936 init_timer(&erp_action->timer); 2853 BUG_ON(!fsf_req->erp_action);
2937 erp_action->timer.function = zfcp_erp_timeout_handler; 2854 fsf_req->timer.function = zfcp_erp_timeout_handler;
2938 erp_action->timer.data = (unsigned long) erp_action; 2855 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
2939 /* jiffies will be added in zfcp_fsf_req_send */ 2856 fsf_req->timer.expires = jiffies + ZFCP_ERP_FSFREQ_TIMEOUT;
2940 erp_action->timer.expires = ZFCP_ERP_FSFREQ_TIMEOUT; 2857 add_timer(&fsf_req->timer);
2941} 2858}
2942 2859
2943/* 2860/*
@@ -3241,7 +3158,7 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3241} 3158}
3242 3159
3243 3160
3244void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 3161static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3245{ 3162{
3246 struct zfcp_port *port; 3163 struct zfcp_port *port;
3247 3164
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 146d7a2b4c4a..b8794d77285d 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -55,7 +55,6 @@ extern void zfcp_unit_dequeue(struct zfcp_unit *);
55 55
56/******************************* S/390 IO ************************************/ 56/******************************* S/390 IO ************************************/
57extern int zfcp_ccw_register(void); 57extern int zfcp_ccw_register(void);
58extern void zfcp_ccw_unregister(void);
59 58
60extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); 59extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
61extern int zfcp_qdio_allocate(struct zfcp_adapter *); 60extern int zfcp_qdio_allocate(struct zfcp_adapter *);
@@ -88,8 +87,8 @@ extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
88 struct fsf_qtcb_bottom_port *); 87 struct fsf_qtcb_bottom_port *);
89extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **, 88extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
90 u32, u32, struct zfcp_sg_list *); 89 u32, u32, struct zfcp_sg_list *);
91extern void zfcp_fsf_request_timeout_handler(unsigned long); 90extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
92extern void zfcp_fsf_scsi_er_timeout_handler(unsigned long); 91extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
93extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); 92extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
94extern int zfcp_fsf_status_read(struct zfcp_adapter *, int); 93extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
95extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *, 94extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
@@ -99,8 +98,7 @@ extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
99extern int zfcp_fsf_send_els(struct zfcp_send_els *); 98extern int zfcp_fsf_send_els(struct zfcp_send_els *);
100extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, 99extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
101 struct zfcp_unit *, 100 struct zfcp_unit *,
102 struct scsi_cmnd *, 101 struct scsi_cmnd *, int, int);
103 struct timer_list*, int);
104extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *); 102extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
105extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *); 103extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
106extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 104extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -124,14 +122,11 @@ extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
124extern void set_host_byte(u32 *, char); 122extern void set_host_byte(u32 *, char);
125extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(u32 *, char);
126extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
127extern void zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *);
128extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
129 126
130extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *, 127extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
131 struct scsi_cmnd *, struct timer_list *); 128 struct scsi_cmnd *, int);
132extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, 129extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int);
133 struct timer_list *);
134extern struct scsi_transport_template *zfcp_transport_template;
135extern struct fc_function_template zfcp_transport_functions; 130extern struct fc_function_template zfcp_transport_functions;
136 131
137/******************************** ERP ****************************************/ 132/******************************** ERP ****************************************/
@@ -139,7 +134,6 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
139extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); 134extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
140extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); 135extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
141extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); 136extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
142extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
143 137
144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); 138extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
145extern int zfcp_erp_port_reopen(struct zfcp_port *, int); 139extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -187,7 +181,7 @@ extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
187 struct zfcp_fsf_req *); 181 struct zfcp_fsf_req *);
188extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 182extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
189 struct scsi_cmnd *, struct zfcp_fsf_req *, 183 struct scsi_cmnd *, struct zfcp_fsf_req *,
190 struct zfcp_fsf_req *); 184 unsigned long);
191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 185extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
192 struct scsi_cmnd *); 186 struct scsi_cmnd *);
193extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); 187extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ff2eacf5ec8c..277826cdd0c8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -42,7 +42,7 @@ static inline int zfcp_fsf_req_sbal_check(
42static inline int zfcp_use_one_sbal( 42static inline int zfcp_use_one_sbal(
43 struct scatterlist *, int, struct scatterlist *, int); 43 struct scatterlist *, int, struct scatterlist *, int);
44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int); 44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int);
45static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *); 45static int zfcp_fsf_req_send(struct zfcp_fsf_req *);
46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); 46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); 47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); 48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
@@ -100,14 +100,19 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
100 if (req_flags & ZFCP_REQ_NO_QTCB) 100 if (req_flags & ZFCP_REQ_NO_QTCB)
101 size = sizeof(struct zfcp_fsf_req); 101 size = sizeof(struct zfcp_fsf_req);
102 else 102 else
103 size = sizeof(struct zfcp_fsf_req_pool_element); 103 size = sizeof(struct zfcp_fsf_req_qtcb);
104 104
105 if (likely(pool != NULL)) 105 if (likely(pool))
106 ptr = mempool_alloc(pool, GFP_ATOMIC); 106 ptr = mempool_alloc(pool, GFP_ATOMIC);
107 else 107 else {
108 ptr = kmalloc(size, GFP_ATOMIC); 108 if (req_flags & ZFCP_REQ_NO_QTCB)
109 ptr = kmalloc(size, GFP_ATOMIC);
110 else
111 ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
112 SLAB_ATOMIC);
113 }
109 114
110 if (unlikely(NULL == ptr)) 115 if (unlikely(!ptr))
111 goto out; 116 goto out;
112 117
113 memset(ptr, 0, size); 118 memset(ptr, 0, size);
@@ -115,9 +120,8 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
115 if (req_flags & ZFCP_REQ_NO_QTCB) { 120 if (req_flags & ZFCP_REQ_NO_QTCB) {
116 fsf_req = (struct zfcp_fsf_req *) ptr; 121 fsf_req = (struct zfcp_fsf_req *) ptr;
117 } else { 122 } else {
118 fsf_req = &((struct zfcp_fsf_req_pool_element *) ptr)->fsf_req; 123 fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req;
119 fsf_req->qtcb = 124 fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb;
120 &((struct zfcp_fsf_req_pool_element *) ptr)->qtcb;
121 } 125 }
122 126
123 fsf_req->pool = pool; 127 fsf_req->pool = pool;
@@ -139,10 +143,17 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
139void 143void
140zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) 144zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
141{ 145{
142 if (likely(fsf_req->pool != NULL)) 146 if (likely(fsf_req->pool)) {
143 mempool_free(fsf_req, fsf_req->pool); 147 mempool_free(fsf_req, fsf_req->pool);
144 else 148 return;
145 kfree(fsf_req); 149 }
150
151 if (fsf_req->qtcb) {
152 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req);
153 return;
154 }
155
156 kfree(fsf_req);
146} 157}
147 158
148/** 159/**
@@ -214,8 +225,10 @@ zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
214 */ 225 */
215 zfcp_fsf_status_read_handler(fsf_req); 226 zfcp_fsf_status_read_handler(fsf_req);
216 goto out; 227 goto out;
217 } else 228 } else {
229 del_timer(&fsf_req->timer);
218 zfcp_fsf_protstatus_eval(fsf_req); 230 zfcp_fsf_protstatus_eval(fsf_req);
231 }
219 232
220 /* 233 /*
221 * fsf_req may be deleted due to waking up functions, so 234 * fsf_req may be deleted due to waking up functions, so
@@ -774,8 +787,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
774 sbale->addr = (void *) status_buffer; 787 sbale->addr = (void *) status_buffer;
775 sbale->length = sizeof(struct fsf_status_read_buffer); 788 sbale->length = sizeof(struct fsf_status_read_buffer);
776 789
777 /* start QDIO request for this FSF request */ 790 retval = zfcp_fsf_req_send(fsf_req);
778 retval = zfcp_fsf_req_send(fsf_req, NULL);
779 if (retval) { 791 if (retval) {
780 ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status " 792 ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status "
781 "environment.\n"); 793 "environment.\n");
@@ -1101,8 +1113,8 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1101 struct zfcp_unit *unit, int req_flags) 1113 struct zfcp_unit *unit, int req_flags)
1102{ 1114{
1103 volatile struct qdio_buffer_element *sbale; 1115 volatile struct qdio_buffer_element *sbale;
1104 unsigned long lock_flags;
1105 struct zfcp_fsf_req *fsf_req = NULL; 1116 struct zfcp_fsf_req *fsf_req = NULL;
1117 unsigned long lock_flags;
1106 int retval = 0; 1118 int retval = 0;
1107 1119
1108 /* setup new FSF request */ 1120 /* setup new FSF request */
@@ -1132,12 +1144,9 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1132 /* set handle of request which should be aborted */ 1144 /* set handle of request which should be aborted */
1133 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id; 1145 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id;
1134 1146
1135 /* start QDIO request for this FSF request */ 1147 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
1136 1148 retval = zfcp_fsf_req_send(fsf_req);
1137 zfcp_fsf_start_scsi_er_timer(adapter);
1138 retval = zfcp_fsf_req_send(fsf_req, NULL);
1139 if (retval) { 1149 if (retval) {
1140 del_timer(&adapter->scsi_er_timer);
1141 ZFCP_LOG_INFO("error: Failed to send abort command request " 1150 ZFCP_LOG_INFO("error: Failed to send abort command request "
1142 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n", 1151 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
1143 zfcp_get_busid_by_adapter(adapter), 1152 zfcp_get_busid_by_adapter(adapter),
@@ -1173,8 +1182,6 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1173 unsigned char status_qual = 1182 unsigned char status_qual =
1174 new_fsf_req->qtcb->header.fsf_status_qual.word[0]; 1183 new_fsf_req->qtcb->header.fsf_status_qual.word[0];
1175 1184
1176 del_timer(&new_fsf_req->adapter->scsi_er_timer);
1177
1178 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1185 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1179 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */ 1186 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
1180 goto skip_fsfstatus; 1187 goto skip_fsfstatus;
@@ -1380,11 +1387,6 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1380 goto failed_req; 1387 goto failed_req;
1381 } 1388 }
1382 1389
1383 if (erp_action != NULL) {
1384 erp_action->fsf_req = fsf_req;
1385 fsf_req->erp_action = erp_action;
1386 }
1387
1388 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1390 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1389 if (zfcp_use_one_sbal(ct->req, ct->req_count, 1391 if (zfcp_use_one_sbal(ct->req, ct->req_count,
1390 ct->resp, ct->resp_count)){ 1392 ct->resp, ct->resp_count)){
@@ -1451,8 +1453,14 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1451 1453
1452 zfcp_san_dbf_event_ct_request(fsf_req); 1454 zfcp_san_dbf_event_ct_request(fsf_req);
1453 1455
1454 /* start QDIO request for this FSF request */ 1456 if (erp_action) {
1455 ret = zfcp_fsf_req_send(fsf_req, ct->timer); 1457 erp_action->fsf_req = fsf_req;
1458 fsf_req->erp_action = erp_action;
1459 zfcp_erp_start_timer(fsf_req);
1460 } else
1461 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1462
1463 ret = zfcp_fsf_req_send(fsf_req);
1456 if (ret) { 1464 if (ret) {
1457 ZFCP_LOG_DEBUG("error: initiation of CT request failed " 1465 ZFCP_LOG_DEBUG("error: initiation of CT request failed "
1458 "(adapter %s, port 0x%016Lx)\n", 1466 "(adapter %s, port 0x%016Lx)\n",
@@ -1749,8 +1757,8 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1749 1757
1750 zfcp_san_dbf_event_els_request(fsf_req); 1758 zfcp_san_dbf_event_els_request(fsf_req);
1751 1759
1752 /* start QDIO request for this FSF request */ 1760 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1753 ret = zfcp_fsf_req_send(fsf_req, els->timer); 1761 ret = zfcp_fsf_req_send(fsf_req);
1754 if (ret) { 1762 if (ret) {
1755 ZFCP_LOG_DEBUG("error: initiation of ELS request failed " 1763 ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
1756 "(adapter %s, port d_id: 0x%08x)\n", 1764 "(adapter %s, port d_id: 0x%08x)\n",
@@ -1947,6 +1955,7 @@ int
1947zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1955zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1948{ 1956{
1949 volatile struct qdio_buffer_element *sbale; 1957 volatile struct qdio_buffer_element *sbale;
1958 struct zfcp_fsf_req *fsf_req;
1950 unsigned long lock_flags; 1959 unsigned long lock_flags;
1951 int retval = 0; 1960 int retval = 0;
1952 1961
@@ -1955,7 +1964,7 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1955 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1964 FSF_QTCB_EXCHANGE_CONFIG_DATA,
1956 ZFCP_REQ_AUTO_CLEANUP, 1965 ZFCP_REQ_AUTO_CLEANUP,
1957 erp_action->adapter->pool.fsf_req_erp, 1966 erp_action->adapter->pool.fsf_req_erp,
1958 &lock_flags, &(erp_action->fsf_req)); 1967 &lock_flags, &fsf_req);
1959 if (retval < 0) { 1968 if (retval < 0) {
1960 ZFCP_LOG_INFO("error: Could not create exchange configuration " 1969 ZFCP_LOG_INFO("error: Could not create exchange configuration "
1961 "data request for adapter %s.\n", 1970 "data request for adapter %s.\n",
@@ -1963,26 +1972,26 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1963 goto out; 1972 goto out;
1964 } 1973 }
1965 1974
1966 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 1975 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1967 erp_action->fsf_req->sbal_curr, 0);
1968 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1976 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1969 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1977 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1970 1978
1971 erp_action->fsf_req->erp_action = erp_action; 1979 fsf_req->qtcb->bottom.config.feature_selection =
1972 erp_action->fsf_req->qtcb->bottom.config.feature_selection =
1973 FSF_FEATURE_CFDC | 1980 FSF_FEATURE_CFDC |
1974 FSF_FEATURE_LUN_SHARING | 1981 FSF_FEATURE_LUN_SHARING |
1975 FSF_FEATURE_NOTIFICATION_LOST | 1982 FSF_FEATURE_NOTIFICATION_LOST |
1976 FSF_FEATURE_UPDATE_ALERT; 1983 FSF_FEATURE_UPDATE_ALERT;
1984 fsf_req->erp_action = erp_action;
1985 erp_action->fsf_req = fsf_req;
1977 1986
1978 /* start QDIO request for this FSF request */ 1987 zfcp_erp_start_timer(fsf_req);
1979 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 1988 retval = zfcp_fsf_req_send(fsf_req);
1980 if (retval) { 1989 if (retval) {
1981 ZFCP_LOG_INFO 1990 ZFCP_LOG_INFO
1982 ("error: Could not send exchange configuration data " 1991 ("error: Could not send exchange configuration data "
1983 "command on the adapter %s\n", 1992 "command on the adapter %s\n",
1984 zfcp_get_busid_by_adapter(erp_action->adapter)); 1993 zfcp_get_busid_by_adapter(erp_action->adapter));
1985 zfcp_fsf_req_free(erp_action->fsf_req); 1994 zfcp_fsf_req_free(fsf_req);
1986 erp_action->fsf_req = NULL; 1995 erp_action->fsf_req = NULL;
1987 goto out; 1996 goto out;
1988 } 1997 }
@@ -2212,10 +2221,9 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2212 struct fsf_qtcb_bottom_port *data) 2221 struct fsf_qtcb_bottom_port *data)
2213{ 2222{
2214 volatile struct qdio_buffer_element *sbale; 2223 volatile struct qdio_buffer_element *sbale;
2215 int retval = 0;
2216 unsigned long lock_flags;
2217 struct zfcp_fsf_req *fsf_req; 2224 struct zfcp_fsf_req *fsf_req;
2218 struct timer_list *timer; 2225 unsigned long lock_flags;
2226 int retval = 0;
2219 2227
2220 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) { 2228 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2221 ZFCP_LOG_INFO("error: exchange port data " 2229 ZFCP_LOG_INFO("error: exchange port data "
@@ -2248,22 +2256,11 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2248 if (erp_action) { 2256 if (erp_action) {
2249 erp_action->fsf_req = fsf_req; 2257 erp_action->fsf_req = fsf_req;
2250 fsf_req->erp_action = erp_action; 2258 fsf_req->erp_action = erp_action;
2251 timer = &erp_action->timer; 2259 zfcp_erp_start_timer(fsf_req);
2252 } else { 2260 } else
2253 timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC); 2261 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
2254 if (!timer) {
2255 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2256 lock_flags);
2257 zfcp_fsf_req_free(fsf_req);
2258 return -ENOMEM;
2259 }
2260 init_timer(timer);
2261 timer->function = zfcp_fsf_request_timeout_handler;
2262 timer->data = (unsigned long) adapter;
2263 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
2264 }
2265 2262
2266 retval = zfcp_fsf_req_send(fsf_req, timer); 2263 retval = zfcp_fsf_req_send(fsf_req);
2267 if (retval) { 2264 if (retval) {
2268 ZFCP_LOG_INFO("error: Could not send an exchange port data " 2265 ZFCP_LOG_INFO("error: Could not send an exchange port data "
2269 "command on the adapter %s\n", 2266 "command on the adapter %s\n",
@@ -2271,8 +2268,6 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2271 zfcp_fsf_req_free(fsf_req); 2268 zfcp_fsf_req_free(fsf_req);
2272 if (erp_action) 2269 if (erp_action)
2273 erp_action->fsf_req = NULL; 2270 erp_action->fsf_req = NULL;
2274 else
2275 kfree(timer);
2276 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2271 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2277 lock_flags); 2272 lock_flags);
2278 return retval; 2273 return retval;
@@ -2283,9 +2278,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2283 if (!erp_action) { 2278 if (!erp_action) {
2284 wait_event(fsf_req->completion_wq, 2279 wait_event(fsf_req->completion_wq,
2285 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 2280 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2286 del_timer_sync(timer);
2287 zfcp_fsf_req_free(fsf_req); 2281 zfcp_fsf_req_free(fsf_req);
2288 kfree(timer);
2289 } 2282 }
2290 return retval; 2283 return retval;
2291} 2284}
@@ -2367,6 +2360,7 @@ int
2367zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 2360zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2368{ 2361{
2369 volatile struct qdio_buffer_element *sbale; 2362 volatile struct qdio_buffer_element *sbale;
2363 struct zfcp_fsf_req *fsf_req;
2370 unsigned long lock_flags; 2364 unsigned long lock_flags;
2371 int retval = 0; 2365 int retval = 0;
2372 2366
@@ -2375,7 +2369,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2375 FSF_QTCB_OPEN_PORT_WITH_DID, 2369 FSF_QTCB_OPEN_PORT_WITH_DID,
2376 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2370 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2377 erp_action->adapter->pool.fsf_req_erp, 2371 erp_action->adapter->pool.fsf_req_erp,
2378 &lock_flags, &(erp_action->fsf_req)); 2372 &lock_flags, &fsf_req);
2379 if (retval < 0) { 2373 if (retval < 0) {
2380 ZFCP_LOG_INFO("error: Could not create open port request " 2374 ZFCP_LOG_INFO("error: Could not create open port request "
2381 "for port 0x%016Lx on adapter %s.\n", 2375 "for port 0x%016Lx on adapter %s.\n",
@@ -2384,24 +2378,24 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2384 goto out; 2378 goto out;
2385 } 2379 }
2386 2380
2387 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2381 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2388 erp_action->fsf_req->sbal_curr, 0);
2389 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2382 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2390 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2383 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2391 2384
2392 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id; 2385 fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2393 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); 2386 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2394 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2387 fsf_req->data = (unsigned long) erp_action->port;
2395 erp_action->fsf_req->erp_action = erp_action; 2388 fsf_req->erp_action = erp_action;
2389 erp_action->fsf_req = fsf_req;
2396 2390
2397 /* start QDIO request for this FSF request */ 2391 zfcp_erp_start_timer(fsf_req);
2398 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2392 retval = zfcp_fsf_req_send(fsf_req);
2399 if (retval) { 2393 if (retval) {
2400 ZFCP_LOG_INFO("error: Could not send open port request for " 2394 ZFCP_LOG_INFO("error: Could not send open port request for "
2401 "port 0x%016Lx on adapter %s.\n", 2395 "port 0x%016Lx on adapter %s.\n",
2402 erp_action->port->wwpn, 2396 erp_action->port->wwpn,
2403 zfcp_get_busid_by_adapter(erp_action->adapter)); 2397 zfcp_get_busid_by_adapter(erp_action->adapter));
2404 zfcp_fsf_req_free(erp_action->fsf_req); 2398 zfcp_fsf_req_free(fsf_req);
2405 erp_action->fsf_req = NULL; 2399 erp_action->fsf_req = NULL;
2406 goto out; 2400 goto out;
2407 } 2401 }
@@ -2623,6 +2617,7 @@ int
2623zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 2617zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2624{ 2618{
2625 volatile struct qdio_buffer_element *sbale; 2619 volatile struct qdio_buffer_element *sbale;
2620 struct zfcp_fsf_req *fsf_req;
2626 unsigned long lock_flags; 2621 unsigned long lock_flags;
2627 int retval = 0; 2622 int retval = 0;
2628 2623
@@ -2631,7 +2626,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2631 FSF_QTCB_CLOSE_PORT, 2626 FSF_QTCB_CLOSE_PORT,
2632 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2627 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2633 erp_action->adapter->pool.fsf_req_erp, 2628 erp_action->adapter->pool.fsf_req_erp,
2634 &lock_flags, &(erp_action->fsf_req)); 2629 &lock_flags, &fsf_req);
2635 if (retval < 0) { 2630 if (retval < 0) {
2636 ZFCP_LOG_INFO("error: Could not create a close port request " 2631 ZFCP_LOG_INFO("error: Could not create a close port request "
2637 "for port 0x%016Lx on adapter %s.\n", 2632 "for port 0x%016Lx on adapter %s.\n",
@@ -2640,25 +2635,25 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2640 goto out; 2635 goto out;
2641 } 2636 }
2642 2637
2643 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2638 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2644 erp_action->fsf_req->sbal_curr, 0);
2645 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2639 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2646 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2640 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2647 2641
2648 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); 2642 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2649 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2643 fsf_req->data = (unsigned long) erp_action->port;
2650 erp_action->fsf_req->erp_action = erp_action; 2644 fsf_req->erp_action = erp_action;
2651 erp_action->fsf_req->qtcb->header.port_handle = 2645 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2652 erp_action->port->handle; 2646 fsf_req->erp_action = erp_action;
2653 2647 erp_action->fsf_req = fsf_req;
2654 /* start QDIO request for this FSF request */ 2648
2655 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2649 zfcp_erp_start_timer(fsf_req);
2650 retval = zfcp_fsf_req_send(fsf_req);
2656 if (retval) { 2651 if (retval) {
2657 ZFCP_LOG_INFO("error: Could not send a close port request for " 2652 ZFCP_LOG_INFO("error: Could not send a close port request for "
2658 "port 0x%016Lx on adapter %s.\n", 2653 "port 0x%016Lx on adapter %s.\n",
2659 erp_action->port->wwpn, 2654 erp_action->port->wwpn,
2660 zfcp_get_busid_by_adapter(erp_action->adapter)); 2655 zfcp_get_busid_by_adapter(erp_action->adapter));
2661 zfcp_fsf_req_free(erp_action->fsf_req); 2656 zfcp_fsf_req_free(fsf_req);
2662 erp_action->fsf_req = NULL; 2657 erp_action->fsf_req = NULL;
2663 goto out; 2658 goto out;
2664 } 2659 }
@@ -2755,16 +2750,17 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2755int 2750int
2756zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 2751zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2757{ 2752{
2758 int retval = 0;
2759 unsigned long lock_flags;
2760 volatile struct qdio_buffer_element *sbale; 2753 volatile struct qdio_buffer_element *sbale;
2754 struct zfcp_fsf_req *fsf_req;
2755 unsigned long lock_flags;
2756 int retval = 0;
2761 2757
2762 /* setup new FSF request */ 2758 /* setup new FSF request */
2763 retval = zfcp_fsf_req_create(erp_action->adapter, 2759 retval = zfcp_fsf_req_create(erp_action->adapter,
2764 FSF_QTCB_CLOSE_PHYSICAL_PORT, 2760 FSF_QTCB_CLOSE_PHYSICAL_PORT,
2765 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2761 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2766 erp_action->adapter->pool.fsf_req_erp, 2762 erp_action->adapter->pool.fsf_req_erp,
2767 &lock_flags, &erp_action->fsf_req); 2763 &lock_flags, &fsf_req);
2768 if (retval < 0) { 2764 if (retval < 0) {
2769 ZFCP_LOG_INFO("error: Could not create close physical port " 2765 ZFCP_LOG_INFO("error: Could not create close physical port "
2770 "request (adapter %s, port 0x%016Lx)\n", 2766 "request (adapter %s, port 0x%016Lx)\n",
@@ -2774,8 +2770,7 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2774 goto out; 2770 goto out;
2775 } 2771 }
2776 2772
2777 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2773 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2778 erp_action->fsf_req->sbal_curr, 0);
2779 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2774 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2780 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2775 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2781 2776
@@ -2783,20 +2778,19 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2783 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 2778 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2784 &erp_action->port->status); 2779 &erp_action->port->status);
2785 /* save a pointer to this port */ 2780 /* save a pointer to this port */
2786 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2781 fsf_req->data = (unsigned long) erp_action->port;
2787 /* port to be closed */ 2782 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2788 erp_action->fsf_req->qtcb->header.port_handle = 2783 fsf_req->erp_action = erp_action;
2789 erp_action->port->handle; 2784 erp_action->fsf_req = fsf_req;
2790 erp_action->fsf_req->erp_action = erp_action; 2785
2791 2786 zfcp_erp_start_timer(fsf_req);
2792 /* start QDIO request for this FSF request */ 2787 retval = zfcp_fsf_req_send(fsf_req);
2793 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
2794 if (retval) { 2788 if (retval) {
2795 ZFCP_LOG_INFO("error: Could not send close physical port " 2789 ZFCP_LOG_INFO("error: Could not send close physical port "
2796 "request (adapter %s, port 0x%016Lx)\n", 2790 "request (adapter %s, port 0x%016Lx)\n",
2797 zfcp_get_busid_by_adapter(erp_action->adapter), 2791 zfcp_get_busid_by_adapter(erp_action->adapter),
2798 erp_action->port->wwpn); 2792 erp_action->port->wwpn);
2799 zfcp_fsf_req_free(erp_action->fsf_req); 2793 zfcp_fsf_req_free(fsf_req);
2800 erp_action->fsf_req = NULL; 2794 erp_action->fsf_req = NULL;
2801 goto out; 2795 goto out;
2802 } 2796 }
@@ -2961,6 +2955,7 @@ int
2961zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 2955zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2962{ 2956{
2963 volatile struct qdio_buffer_element *sbale; 2957 volatile struct qdio_buffer_element *sbale;
2958 struct zfcp_fsf_req *fsf_req;
2964 unsigned long lock_flags; 2959 unsigned long lock_flags;
2965 int retval = 0; 2960 int retval = 0;
2966 2961
@@ -2969,7 +2964,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2969 FSF_QTCB_OPEN_LUN, 2964 FSF_QTCB_OPEN_LUN,
2970 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2965 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2971 erp_action->adapter->pool.fsf_req_erp, 2966 erp_action->adapter->pool.fsf_req_erp,
2972 &lock_flags, &(erp_action->fsf_req)); 2967 &lock_flags, &fsf_req);
2973 if (retval < 0) { 2968 if (retval < 0) {
2974 ZFCP_LOG_INFO("error: Could not create open unit request for " 2969 ZFCP_LOG_INFO("error: Could not create open unit request for "
2975 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 2970 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
@@ -2979,24 +2974,22 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2979 goto out; 2974 goto out;
2980 } 2975 }
2981 2976
2982 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2977 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2983 erp_action->fsf_req->sbal_curr, 0);
2984 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2978 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2985 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2979 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2986 2980
2987 erp_action->fsf_req->qtcb->header.port_handle = 2981 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2988 erp_action->port->handle; 2982 fsf_req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
2989 erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
2990 erp_action->unit->fcp_lun;
2991 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 2983 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2992 erp_action->fsf_req->qtcb->bottom.support.option = 2984 fsf_req->qtcb->bottom.support.option =
2993 FSF_OPEN_LUN_SUPPRESS_BOXING; 2985 FSF_OPEN_LUN_SUPPRESS_BOXING;
2994 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); 2986 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
2995 erp_action->fsf_req->data = (unsigned long) erp_action->unit; 2987 fsf_req->data = (unsigned long) erp_action->unit;
2996 erp_action->fsf_req->erp_action = erp_action; 2988 fsf_req->erp_action = erp_action;
2989 erp_action->fsf_req = fsf_req;
2997 2990
2998 /* start QDIO request for this FSF request */ 2991 zfcp_erp_start_timer(fsf_req);
2999 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2992 retval = zfcp_fsf_req_send(erp_action->fsf_req);
3000 if (retval) { 2993 if (retval) {
3001 ZFCP_LOG_INFO("error: Could not send an open unit request " 2994 ZFCP_LOG_INFO("error: Could not send an open unit request "
3002 "on the adapter %s, port 0x%016Lx for " 2995 "on the adapter %s, port 0x%016Lx for "
@@ -3004,7 +2997,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
3004 zfcp_get_busid_by_adapter(erp_action->adapter), 2997 zfcp_get_busid_by_adapter(erp_action->adapter),
3005 erp_action->port->wwpn, 2998 erp_action->port->wwpn,
3006 erp_action->unit->fcp_lun); 2999 erp_action->unit->fcp_lun);
3007 zfcp_fsf_req_free(erp_action->fsf_req); 3000 zfcp_fsf_req_free(fsf_req);
3008 erp_action->fsf_req = NULL; 3001 erp_action->fsf_req = NULL;
3009 goto out; 3002 goto out;
3010 } 3003 }
@@ -3297,6 +3290,7 @@ int
3297zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 3290zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3298{ 3291{
3299 volatile struct qdio_buffer_element *sbale; 3292 volatile struct qdio_buffer_element *sbale;
3293 struct zfcp_fsf_req *fsf_req;
3300 unsigned long lock_flags; 3294 unsigned long lock_flags;
3301 int retval = 0; 3295 int retval = 0;
3302 3296
@@ -3305,7 +3299,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3305 FSF_QTCB_CLOSE_LUN, 3299 FSF_QTCB_CLOSE_LUN,
3306 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 3300 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
3307 erp_action->adapter->pool.fsf_req_erp, 3301 erp_action->adapter->pool.fsf_req_erp,
3308 &lock_flags, &(erp_action->fsf_req)); 3302 &lock_flags, &fsf_req);
3309 if (retval < 0) { 3303 if (retval < 0) {
3310 ZFCP_LOG_INFO("error: Could not create close unit request for " 3304 ZFCP_LOG_INFO("error: Could not create close unit request for "
3311 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 3305 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
@@ -3315,27 +3309,26 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3315 goto out; 3309 goto out;
3316 } 3310 }
3317 3311
3318 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 3312 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
3319 erp_action->fsf_req->sbal_curr, 0);
3320 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 3313 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
3321 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 3314 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3322 3315
3323 erp_action->fsf_req->qtcb->header.port_handle = 3316 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
3324 erp_action->port->handle; 3317 fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3325 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3326 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); 3318 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3327 erp_action->fsf_req->data = (unsigned long) erp_action->unit; 3319 fsf_req->data = (unsigned long) erp_action->unit;
3328 erp_action->fsf_req->erp_action = erp_action; 3320 fsf_req->erp_action = erp_action;
3321 erp_action->fsf_req = fsf_req;
3329 3322
3330 /* start QDIO request for this FSF request */ 3323 zfcp_erp_start_timer(fsf_req);
3331 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 3324 retval = zfcp_fsf_req_send(erp_action->fsf_req);
3332 if (retval) { 3325 if (retval) {
3333 ZFCP_LOG_INFO("error: Could not send a close unit request for " 3326 ZFCP_LOG_INFO("error: Could not send a close unit request for "
3334 "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n", 3327 "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n",
3335 erp_action->unit->fcp_lun, 3328 erp_action->unit->fcp_lun,
3336 erp_action->port->wwpn, 3329 erp_action->port->wwpn,
3337 zfcp_get_busid_by_adapter(erp_action->adapter)); 3330 zfcp_get_busid_by_adapter(erp_action->adapter));
3338 zfcp_fsf_req_free(erp_action->fsf_req); 3331 zfcp_fsf_req_free(fsf_req);
3339 erp_action->fsf_req = NULL; 3332 erp_action->fsf_req = NULL;
3340 goto out; 3333 goto out;
3341 } 3334 }
@@ -3488,7 +3481,7 @@ int
3488zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, 3481zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3489 struct zfcp_unit *unit, 3482 struct zfcp_unit *unit,
3490 struct scsi_cmnd * scsi_cmnd, 3483 struct scsi_cmnd * scsi_cmnd,
3491 struct timer_list *timer, int req_flags) 3484 int use_timer, int req_flags)
3492{ 3485{
3493 struct zfcp_fsf_req *fsf_req = NULL; 3486 struct zfcp_fsf_req *fsf_req = NULL;
3494 struct fcp_cmnd_iu *fcp_cmnd_iu; 3487 struct fcp_cmnd_iu *fcp_cmnd_iu;
@@ -3516,7 +3509,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3516 fsf_req->unit = unit; 3509 fsf_req->unit = unit;
3517 3510
3518 /* associate FSF request with SCSI request (for look up on abort) */ 3511 /* associate FSF request with SCSI request (for look up on abort) */
3519 scsi_cmnd->host_scribble = (char *) fsf_req; 3512 scsi_cmnd->host_scribble = (unsigned char *) fsf_req->req_id;
3520 3513
3521 /* associate SCSI command with FSF request */ 3514 /* associate SCSI command with FSF request */
3522 fsf_req->data = (unsigned long) scsi_cmnd; 3515 fsf_req->data = (unsigned long) scsi_cmnd;
@@ -3629,11 +3622,10 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3629 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 3622 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3630 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 3623 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3631 3624
3632 /* 3625 if (use_timer)
3633 * start QDIO request for this FSF request 3626 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
3634 * covered by an SBALE) 3627
3635 */ 3628 retval = zfcp_fsf_req_send(fsf_req);
3636 retval = zfcp_fsf_req_send(fsf_req, timer);
3637 if (unlikely(retval < 0)) { 3629 if (unlikely(retval < 0)) {
3638 ZFCP_LOG_INFO("error: Could not send FCP command request " 3630 ZFCP_LOG_INFO("error: Could not send FCP command request "
3639 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n", 3631 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
@@ -3718,11 +3710,9 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3718 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 3710 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
3719 fcp_cmnd_iu->task_management_flags = tm_flags; 3711 fcp_cmnd_iu->task_management_flags = tm_flags;
3720 3712
3721 /* start QDIO request for this FSF request */ 3713 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
3722 zfcp_fsf_start_scsi_er_timer(adapter); 3714 retval = zfcp_fsf_req_send(fsf_req);
3723 retval = zfcp_fsf_req_send(fsf_req, NULL);
3724 if (retval) { 3715 if (retval) {
3725 del_timer(&adapter->scsi_er_timer);
3726 ZFCP_LOG_INFO("error: Could not send an FCP-command (task " 3716 ZFCP_LOG_INFO("error: Could not send an FCP-command (task "
3727 "management) on adapter %s, port 0x%016Lx for " 3717 "management) on adapter %s, port 0x%016Lx for "
3728 "unit LUN 0x%016Lx\n", 3718 "unit LUN 0x%016Lx\n",
@@ -4226,7 +4216,6 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4226 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 4216 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4227 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data; 4217 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
4228 4218
4229 del_timer(&fsf_req->adapter->scsi_er_timer);
4230 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 4219 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
4231 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 4220 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4232 goto skip_fsfstatus; 4221 goto skip_fsfstatus;
@@ -4295,7 +4284,6 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4295 struct zfcp_fsf_req *fsf_req; 4284 struct zfcp_fsf_req *fsf_req;
4296 struct fsf_qtcb_bottom_support *bottom; 4285 struct fsf_qtcb_bottom_support *bottom;
4297 volatile struct qdio_buffer_element *sbale; 4286 volatile struct qdio_buffer_element *sbale;
4298 struct timer_list *timer;
4299 unsigned long lock_flags; 4287 unsigned long lock_flags;
4300 int req_flags = 0; 4288 int req_flags = 0;
4301 int direction; 4289 int direction;
@@ -4327,12 +4315,6 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4327 goto out; 4315 goto out;
4328 } 4316 }
4329 4317
4330 timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
4331 if (!timer) {
4332 retval = -ENOMEM;
4333 goto out;
4334 }
4335
4336 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags, 4318 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
4337 NULL, &lock_flags, &fsf_req); 4319 NULL, &lock_flags, &fsf_req);
4338 if (retval < 0) { 4320 if (retval < 0) {
@@ -4367,12 +4349,8 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4367 } else 4349 } else
4368 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 4350 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
4369 4351
4370 init_timer(timer); 4352 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
4371 timer->function = zfcp_fsf_request_timeout_handler; 4353 retval = zfcp_fsf_req_send(fsf_req);
4372 timer->data = (unsigned long) adapter;
4373 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
4374
4375 retval = zfcp_fsf_req_send(fsf_req, timer);
4376 if (retval < 0) { 4354 if (retval < 0) {
4377 ZFCP_LOG_INFO("initiation of cfdc up/download failed" 4355 ZFCP_LOG_INFO("initiation of cfdc up/download failed"
4378 "(adapter %s)\n", 4356 "(adapter %s)\n",
@@ -4392,15 +4370,12 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4392 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 4370 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4393 4371
4394 *fsf_req_ptr = fsf_req; 4372 *fsf_req_ptr = fsf_req;
4395 del_timer_sync(timer); 4373 goto out;
4396 goto free_timer;
4397 4374
4398 free_fsf_req: 4375 free_fsf_req:
4399 zfcp_fsf_req_free(fsf_req); 4376 zfcp_fsf_req_free(fsf_req);
4400 unlock_queue_lock: 4377 unlock_queue_lock:
4401 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 4378 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
4402 free_timer:
4403 kfree(timer);
4404 out: 4379 out:
4405 return retval; 4380 return retval;
4406} 4381}
@@ -4656,7 +4631,6 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4656{ 4631{
4657 volatile struct qdio_buffer_element *sbale; 4632 volatile struct qdio_buffer_element *sbale;
4658 struct zfcp_fsf_req *fsf_req = NULL; 4633 struct zfcp_fsf_req *fsf_req = NULL;
4659 unsigned long flags;
4660 int ret = 0; 4634 int ret = 0;
4661 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 4635 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4662 4636
@@ -4673,12 +4647,13 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4673 fsf_req->fsf_command = fsf_cmd; 4647 fsf_req->fsf_command = fsf_cmd;
4674 INIT_LIST_HEAD(&fsf_req->list); 4648 INIT_LIST_HEAD(&fsf_req->list);
4675 4649
4676 /* unique request id */ 4650 /* this is serialized (we are holding req_queue-lock of adapter */
4677 spin_lock_irqsave(&adapter->req_list_lock, flags); 4651 if (adapter->req_no == 0)
4652 adapter->req_no++;
4678 fsf_req->req_id = adapter->req_no++; 4653 fsf_req->req_id = adapter->req_no++;
4679 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
4680 4654
4681 zfcp_fsf_req_qtcb_init(fsf_req); 4655 init_timer(&fsf_req->timer);
4656 zfcp_fsf_req_qtcb_init(fsf_req);
4682 4657
4683 /* initialize waitqueue which may be used to wait on 4658 /* initialize waitqueue which may be used to wait on
4684 this request completion */ 4659 this request completion */
@@ -4748,8 +4723,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4748 * returns: 0 - request transfer succesfully started 4723 * returns: 0 - request transfer succesfully started
4749 * !0 - start of request transfer failed 4724 * !0 - start of request transfer failed
4750 */ 4725 */
4751static int 4726static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
4752zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4753{ 4727{
4754 struct zfcp_adapter *adapter; 4728 struct zfcp_adapter *adapter;
4755 struct zfcp_qdio_queue *req_queue; 4729 struct zfcp_qdio_queue *req_queue;
@@ -4777,12 +4751,6 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4777 4751
4778 inc_seq_no = (fsf_req->qtcb != NULL); 4752 inc_seq_no = (fsf_req->qtcb != NULL);
4779 4753
4780 /* figure out expiration time of timeout and start timeout */
4781 if (unlikely(timer)) {
4782 timer->expires += jiffies;
4783 add_timer(timer);
4784 }
4785
4786 ZFCP_LOG_TRACE("request queue of adapter %s: " 4754 ZFCP_LOG_TRACE("request queue of adapter %s: "
4787 "next free SBAL is %i, %i free SBALs\n", 4755 "next free SBAL is %i, %i free SBALs\n",
4788 zfcp_get_busid_by_adapter(adapter), 4756 zfcp_get_busid_by_adapter(adapter),
@@ -4819,12 +4787,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4819 if (unlikely(retval)) { 4787 if (unlikely(retval)) {
4820 /* Queues are down..... */ 4788 /* Queues are down..... */
4821 retval = -EIO; 4789 retval = -EIO;
4822 /* 4790 del_timer(&fsf_req->timer);
4823 * FIXME(potential race):
4824 * timer might be expired (absolutely unlikely)
4825 */
4826 if (timer)
4827 del_timer(timer);
4828 spin_lock(&adapter->req_list_lock); 4791 spin_lock(&adapter->req_list_lock);
4829 zfcp_reqlist_remove(adapter, fsf_req->req_id); 4792 zfcp_reqlist_remove(adapter, fsf_req->req_id);
4830 spin_unlock(&adapter->req_list_lock); 4793 spin_unlock(&adapter->req_list_lock);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 1bb55086db9f..7cafa34e4c7f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -39,11 +39,10 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
39 39
40static struct device_attribute *zfcp_sysfs_sdev_attrs[]; 40static struct device_attribute *zfcp_sysfs_sdev_attrs[];
41 41
42struct scsi_transport_template *zfcp_transport_template;
43
44struct zfcp_data zfcp_data = { 42struct zfcp_data zfcp_data = {
45 .scsi_host_template = { 43 .scsi_host_template = {
46 .name = ZFCP_NAME, 44 .name = ZFCP_NAME,
45 .module = THIS_MODULE,
47 .proc_name = "zfcp", 46 .proc_name = "zfcp",
48 .slave_alloc = zfcp_scsi_slave_alloc, 47 .slave_alloc = zfcp_scsi_slave_alloc,
49 .slave_configure = zfcp_scsi_slave_configure, 48 .slave_configure = zfcp_scsi_slave_configure,
@@ -232,7 +231,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
232 */ 231 */
233int 232int
234zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit, 233zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
235 struct scsi_cmnd *scpnt, struct timer_list *timer) 234 struct scsi_cmnd *scpnt, int use_timer)
236{ 235{
237 int tmp; 236 int tmp;
238 int retval; 237 int retval;
@@ -268,7 +267,7 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
268 goto out; 267 goto out;
269 } 268 }
270 269
271 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, timer, 270 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
272 ZFCP_REQ_AUTO_CLEANUP); 271 ZFCP_REQ_AUTO_CLEANUP);
273 272
274 if (unlikely(tmp < 0)) { 273 if (unlikely(tmp < 0)) {
@@ -292,21 +291,22 @@ zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
292 * zfcp_scsi_command_sync - send a SCSI command and wait for completion 291 * zfcp_scsi_command_sync - send a SCSI command and wait for completion
293 * @unit: unit where command is sent to 292 * @unit: unit where command is sent to
294 * @scpnt: scsi command to be sent 293 * @scpnt: scsi command to be sent
295 * @timer: timer to be started if request is successfully initiated 294 * @use_timer: indicates whether timer should be setup or not
296 * Return: 0 295 * Return: 0
297 * 296 *
298 * Errors are indicated in scpnt->result 297 * Errors are indicated in scpnt->result
299 */ 298 */
300int 299int
301zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, 300zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
302 struct timer_list *timer) 301 int use_timer)
303{ 302{
304 int ret; 303 int ret;
305 DECLARE_COMPLETION(wait); 304 DECLARE_COMPLETION(wait);
306 305
307 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */ 306 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
308 scpnt->scsi_done = zfcp_scsi_command_sync_handler; 307 scpnt->scsi_done = zfcp_scsi_command_sync_handler;
309 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt, timer); 308 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt,
309 use_timer);
310 if (ret == 0) 310 if (ret == 0)
311 wait_for_completion(&wait); 311 wait_for_completion(&wait);
312 312
@@ -342,7 +342,7 @@ zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
342 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 342 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
343 unit = (struct zfcp_unit *) scpnt->device->hostdata; 343 unit = (struct zfcp_unit *) scpnt->device->hostdata;
344 344
345 return zfcp_scsi_command_async(adapter, unit, scpnt, NULL); 345 return zfcp_scsi_command_async(adapter, unit, scpnt, 0);
346} 346}
347 347
348static struct zfcp_unit * 348static struct zfcp_unit *
@@ -379,16 +379,15 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
379 * will handle late commands. (Usually, the normal completion of late 379 * will handle late commands. (Usually, the normal completion of late
380 * commands is ignored with respect to the running abort operation.) 380 * commands is ignored with respect to the running abort operation.)
381 */ 381 */
382int 382int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
383zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 383{
385 struct Scsi_Host *scsi_host; 384 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 385 struct zfcp_adapter *adapter;
387 struct zfcp_unit *unit; 386 struct zfcp_unit *unit;
388 int retval = SUCCESS; 387 struct zfcp_fsf_req *fsf_req;
389 struct zfcp_fsf_req *new_fsf_req = NULL;
390 struct zfcp_fsf_req *old_fsf_req;
391 unsigned long flags; 388 unsigned long flags;
389 unsigned long old_req_id;
390 int retval = SUCCESS;
392 391
393 scsi_host = scpnt->device->host; 392 scsi_host = scpnt->device->host;
394 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; 393 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
@@ -400,55 +399,47 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
400 /* avoid race condition between late normal completion and abort */ 399 /* avoid race condition between late normal completion and abort */
401 write_lock_irqsave(&adapter->abort_lock, flags); 400 write_lock_irqsave(&adapter->abort_lock, flags);
402 401
403 /* 402 /* Check whether corresponding fsf_req is still pending */
404 * Check whether command has just completed and can not be aborted. 403 spin_lock(&adapter->req_list_lock);
405 * Even if the command has just been completed late, we can access 404 fsf_req = zfcp_reqlist_ismember(adapter, (unsigned long)
406 * scpnt since the SCSI stack does not release it at least until 405 scpnt->host_scribble);
407 * this routine returns. (scpnt is parameter passed to this routine 406 spin_unlock(&adapter->req_list_lock);
408 * and must not disappear during abort even on late completion.) 407 if (!fsf_req) {
409 */
410 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
411 if (!old_fsf_req) {
412 write_unlock_irqrestore(&adapter->abort_lock, flags); 408 write_unlock_irqrestore(&adapter->abort_lock, flags);
413 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL); 409 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0);
414 retval = SUCCESS; 410 retval = SUCCESS;
415 goto out; 411 goto out;
416 } 412 }
417 old_fsf_req->data = 0; 413 fsf_req->data = 0;
418 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 414 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
415 old_req_id = fsf_req->req_id;
419 416
420 /* don't access old_fsf_req after releasing the abort_lock */ 417 /* don't access old fsf_req after releasing the abort_lock */
421 write_unlock_irqrestore(&adapter->abort_lock, flags); 418 write_unlock_irqrestore(&adapter->abort_lock, flags);
422 /* call FSF routine which does the abort */ 419
423 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req, 420 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0);
424 adapter, unit, 0); 421 if (!fsf_req) {
425 if (!new_fsf_req) {
426 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 422 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
427 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 423 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
428 old_fsf_req); 424 old_req_id);
429 retval = FAILED; 425 retval = FAILED;
430 goto out; 426 goto out;
431 } 427 }
432 428
433 /* wait for completion of abort */ 429 __wait_event(fsf_req->completion_wq,
434 __wait_event(new_fsf_req->completion_wq, 430 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
435 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
436 431
437 /* status should be valid since signals were not permitted */ 432 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
438 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 433 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0);
439 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req,
440 NULL);
441 retval = SUCCESS; 434 retval = SUCCESS;
442 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 435 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
443 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req, 436 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0);
444 NULL);
445 retval = SUCCESS; 437 retval = SUCCESS;
446 } else { 438 } else {
447 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req, 439 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0);
448 NULL);
449 retval = FAILED; 440 retval = FAILED;
450 } 441 }
451 zfcp_fsf_req_free(new_fsf_req); 442 zfcp_fsf_req_free(fsf_req);
452 out: 443 out:
453 return retval; 444 return retval;
454} 445}
@@ -548,14 +539,11 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
548 539
549/** 540/**
550 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 541 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
551 *
552 * If ERP is already running it will be stopped.
553 */ 542 */
554int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 543int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
555{ 544{
556 struct zfcp_unit *unit; 545 struct zfcp_unit *unit;
557 struct zfcp_adapter *adapter; 546 struct zfcp_adapter *adapter;
558 unsigned long flags;
559 547
560 unit = (struct zfcp_unit*) scpnt->device->hostdata; 548 unit = (struct zfcp_unit*) scpnt->device->hostdata;
561 adapter = unit->port->adapter; 549 adapter = unit->port->adapter;
@@ -563,22 +551,8 @@ int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
563 ZFCP_LOG_NORMAL("host/bus reset because of problems with " 551 ZFCP_LOG_NORMAL("host/bus reset because of problems with "
564 "unit 0x%016Lx\n", unit->fcp_lun); 552 "unit 0x%016Lx\n", unit->fcp_lun);
565 553
566 write_lock_irqsave(&adapter->erp_lock, flags); 554 zfcp_erp_adapter_reopen(adapter, 0);
567 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 555 zfcp_erp_wait(adapter);
568 &adapter->status)) {
569 zfcp_erp_modify_adapter_status(adapter,
570 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
571 ZFCP_CLEAR);
572 zfcp_erp_action_dismiss_adapter(adapter);
573 write_unlock_irqrestore(&adapter->erp_lock, flags);
574 zfcp_fsf_req_dismiss_all(adapter);
575 adapter->fsf_req_seq_no = 0;
576 zfcp_erp_adapter_reopen(adapter, 0);
577 } else {
578 write_unlock_irqrestore(&adapter->erp_lock, flags);
579 zfcp_erp_adapter_reopen(adapter, 0);
580 zfcp_erp_wait(adapter);
581 }
582 556
583 return SUCCESS; 557 return SUCCESS;
584} 558}
@@ -607,7 +581,7 @@ zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
607 adapter->scsi_host->max_channel = 0; 581 adapter->scsi_host->max_channel = 0;
608 adapter->scsi_host->unique_id = unique_id++; /* FIXME */ 582 adapter->scsi_host->unique_id = unique_id++; /* FIXME */
609 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH; 583 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH;
610 adapter->scsi_host->transportt = zfcp_transport_template; 584 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
611 585
612 /* 586 /*
613 * save a pointer to our own adapter data structure within 587 * save a pointer to our own adapter data structure within
@@ -648,16 +622,6 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
648 return; 622 return;
649} 623}
650 624
651
652void
653zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *adapter)
654{
655 adapter->scsi_er_timer.function = zfcp_fsf_scsi_er_timeout_handler;
656 adapter->scsi_er_timer.data = (unsigned long) adapter;
657 adapter->scsi_er_timer.expires = jiffies + ZFCP_SCSI_ER_TIMEOUT;
658 add_timer(&adapter->scsi_er_timer);
659}
660
661/* 625/*
662 * Support functions for FC transport class 626 * Support functions for FC transport class
663 */ 627 */
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 16a12a3b7b2b..4ea49fd7965e 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -662,7 +662,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
662 particular standard ISA I/O Address need not be probed. 662 particular standard ISA I/O Address need not be probed.
663 */ 663 */
664 PrimaryProbeInfo->IO_Address = 0; 664 PrimaryProbeInfo->IO_Address = 0;
665 while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_Device)) != NULL) { 665 while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_Device)) != NULL) {
666 struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter; 666 struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter;
667 struct BusLogic_PCIHostAdapterInformation PCIHostAdapterInformation; 667 struct BusLogic_PCIHostAdapterInformation PCIHostAdapterInformation;
668 enum BusLogic_ISACompatibleIOPort ModifyIOAddressRequest; 668 enum BusLogic_ISACompatibleIOPort ModifyIOAddressRequest;
@@ -762,7 +762,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
762 PrimaryProbeInfo->Bus = Bus; 762 PrimaryProbeInfo->Bus = Bus;
763 PrimaryProbeInfo->Device = Device; 763 PrimaryProbeInfo->Device = Device;
764 PrimaryProbeInfo->IRQ_Channel = IRQ_Channel; 764 PrimaryProbeInfo->IRQ_Channel = IRQ_Channel;
765 PrimaryProbeInfo->PCI_Device = PCI_Device; 765 PrimaryProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
766 PCIMultiMasterCount++; 766 PCIMultiMasterCount++;
767 } else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters) { 767 } else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters) {
768 struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++]; 768 struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
@@ -773,7 +773,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
773 ProbeInfo->Bus = Bus; 773 ProbeInfo->Bus = Bus;
774 ProbeInfo->Device = Device; 774 ProbeInfo->Device = Device;
775 ProbeInfo->IRQ_Channel = IRQ_Channel; 775 ProbeInfo->IRQ_Channel = IRQ_Channel;
776 ProbeInfo->PCI_Device = PCI_Device; 776 ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
777 NonPrimaryPCIMultiMasterCount++; 777 NonPrimaryPCIMultiMasterCount++;
778 PCIMultiMasterCount++; 778 PCIMultiMasterCount++;
779 } else 779 } else
@@ -823,7 +823,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
823 noting the PCI bus location and assigned IRQ Channel. 823 noting the PCI bus location and assigned IRQ Channel.
824 */ 824 */
825 PCI_Device = NULL; 825 PCI_Device = NULL;
826 while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_Device)) != NULL) { 826 while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_Device)) != NULL) {
827 unsigned char Bus; 827 unsigned char Bus;
828 unsigned char Device; 828 unsigned char Device;
829 unsigned int IRQ_Channel; 829 unsigned int IRQ_Channel;
@@ -850,7 +850,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
850 ProbeInfo->Bus = Bus; 850 ProbeInfo->Bus = Bus;
851 ProbeInfo->Device = Device; 851 ProbeInfo->Device = Device;
852 ProbeInfo->IRQ_Channel = IRQ_Channel; 852 ProbeInfo->IRQ_Channel = IRQ_Channel;
853 ProbeInfo->PCI_Device = PCI_Device; 853 ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
854 break; 854 break;
855 } 855 }
856 } 856 }
@@ -874,7 +874,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
874 /* 874 /*
875 Interrogate PCI Configuration Space for any FlashPoint Host Adapters. 875 Interrogate PCI Configuration Space for any FlashPoint Host Adapters.
876 */ 876 */
877 while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_Device)) != NULL) { 877 while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_Device)) != NULL) {
878 unsigned char Bus; 878 unsigned char Bus;
879 unsigned char Device; 879 unsigned char Device;
880 unsigned int IRQ_Channel; 880 unsigned int IRQ_Channel;
@@ -923,7 +923,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
923 ProbeInfo->Bus = Bus; 923 ProbeInfo->Bus = Bus;
924 ProbeInfo->Device = Device; 924 ProbeInfo->Device = Device;
925 ProbeInfo->IRQ_Channel = IRQ_Channel; 925 ProbeInfo->IRQ_Channel = IRQ_Channel;
926 ProbeInfo->PCI_Device = PCI_Device; 926 ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
927 FlashPointCount++; 927 FlashPointCount++;
928 } else 928 } else
929 BusLogic_Warning("BusLogic: Too many Host Adapters " "detected\n", NULL); 929 BusLogic_Warning("BusLogic: Too many Host Adapters " "detected\n", NULL);
@@ -1890,6 +1890,7 @@ static void BusLogic_ReleaseResources(struct BusLogic_HostAdapter *HostAdapter)
1890 */ 1890 */
1891 if (HostAdapter->MailboxSpace) 1891 if (HostAdapter->MailboxSpace)
1892 pci_free_consistent(HostAdapter->PCI_Device, HostAdapter->MailboxSize, HostAdapter->MailboxSpace, HostAdapter->MailboxSpaceHandle); 1892 pci_free_consistent(HostAdapter->PCI_Device, HostAdapter->MailboxSize, HostAdapter->MailboxSpace, HostAdapter->MailboxSpaceHandle);
1893 pci_dev_put(HostAdapter->PCI_Device);
1893 HostAdapter->MailboxSpace = NULL; 1894 HostAdapter->MailboxSpace = NULL;
1894 HostAdapter->MailboxSpaceHandle = 0; 1895 HostAdapter->MailboxSpaceHandle = 0;
1895 HostAdapter->MailboxSize = 0; 1896 HostAdapter->MailboxSize = 0;
@@ -2176,6 +2177,7 @@ static int __init BusLogic_init(void)
2176{ 2177{
2177 int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex; 2178 int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex;
2178 struct BusLogic_HostAdapter *PrototypeHostAdapter; 2179 struct BusLogic_HostAdapter *PrototypeHostAdapter;
2180 int ret = 0;
2179 2181
2180#ifdef MODULE 2182#ifdef MODULE
2181 if (BusLogic) 2183 if (BusLogic)
@@ -2282,25 +2284,49 @@ static int __init BusLogic_init(void)
2282 perform Target Device Inquiry. 2284 perform Target Device Inquiry.
2283 */ 2285 */
2284 if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) && 2286 if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
2285 BusLogic_ReportHostAdapterConfiguration(HostAdapter) && BusLogic_AcquireResources(HostAdapter) && BusLogic_CreateInitialCCBs(HostAdapter) && BusLogic_InitializeHostAdapter(HostAdapter) && BusLogic_TargetDeviceInquiry(HostAdapter)) { 2287 BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
2288 BusLogic_AcquireResources(HostAdapter) &&
2289 BusLogic_CreateInitialCCBs(HostAdapter) &&
2290 BusLogic_InitializeHostAdapter(HostAdapter) &&
2291 BusLogic_TargetDeviceInquiry(HostAdapter)) {
2286 /* 2292 /*
2287 Initialization has been completed successfully. Release and 2293 Initialization has been completed successfully. Release and
2288 re-register usage of the I/O Address range so that the Model 2294 re-register usage of the I/O Address range so that the Model
2289 Name of the Host Adapter will appear, and initialize the SCSI 2295 Name of the Host Adapter will appear, and initialize the SCSI
2290 Host structure. 2296 Host structure.
2291 */ 2297 */
2292 release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); 2298 release_region(HostAdapter->IO_Address,
2293 if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, HostAdapter->FullModelName)) { 2299 HostAdapter->AddressCount);
2294 printk(KERN_WARNING "BusLogic: Release and re-register of " "port 0x%04lx failed \n", (unsigned long) HostAdapter->IO_Address); 2300 if (!request_region(HostAdapter->IO_Address,
2301 HostAdapter->AddressCount,
2302 HostAdapter->FullModelName)) {
2303 printk(KERN_WARNING
2304 "BusLogic: Release and re-register of "
2305 "port 0x%04lx failed \n",
2306 (unsigned long)HostAdapter->IO_Address);
2295 BusLogic_DestroyCCBs(HostAdapter); 2307 BusLogic_DestroyCCBs(HostAdapter);
2296 BusLogic_ReleaseResources(HostAdapter); 2308 BusLogic_ReleaseResources(HostAdapter);
2297 list_del(&HostAdapter->host_list); 2309 list_del(&HostAdapter->host_list);
2298 scsi_host_put(Host); 2310 scsi_host_put(Host);
2311 ret = -ENOMEM;
2299 } else { 2312 } else {
2300 BusLogic_InitializeHostStructure(HostAdapter, Host); 2313 BusLogic_InitializeHostStructure(HostAdapter,
2301 scsi_add_host(Host, HostAdapter->PCI_Device ? &HostAdapter->PCI_Device->dev : NULL); 2314 Host);
2302 scsi_scan_host(Host); 2315 if (scsi_add_host(Host, HostAdapter->PCI_Device
2303 BusLogicHostAdapterCount++; 2316 ? &HostAdapter->PCI_Device->dev
2317 : NULL)) {
2318 printk(KERN_WARNING
2319 "BusLogic: scsi_add_host()"
2320 "failed!\n");
2321 BusLogic_DestroyCCBs(HostAdapter);
2322 BusLogic_ReleaseResources(HostAdapter);
2323 list_del(&HostAdapter->host_list);
2324 scsi_host_put(Host);
2325 ret = -ENODEV;
2326 } else {
2327 scsi_scan_host(Host);
2328 BusLogicHostAdapterCount++;
2329 }
2304 } 2330 }
2305 } else { 2331 } else {
2306 /* 2332 /*
@@ -2315,12 +2341,13 @@ static int __init BusLogic_init(void)
2315 BusLogic_ReleaseResources(HostAdapter); 2341 BusLogic_ReleaseResources(HostAdapter);
2316 list_del(&HostAdapter->host_list); 2342 list_del(&HostAdapter->host_list);
2317 scsi_host_put(Host); 2343 scsi_host_put(Host);
2344 ret = -ENODEV;
2318 } 2345 }
2319 } 2346 }
2320 kfree(PrototypeHostAdapter); 2347 kfree(PrototypeHostAdapter);
2321 kfree(BusLogic_ProbeInfoList); 2348 kfree(BusLogic_ProbeInfoList);
2322 BusLogic_ProbeInfoList = NULL; 2349 BusLogic_ProbeInfoList = NULL;
2323 return 0; 2350 return ret;
2324} 2351}
2325 2352
2326 2353
@@ -2954,6 +2981,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
2954} 2981}
2955 2982
2956 2983
2984#if 0
2957/* 2985/*
2958 BusLogic_AbortCommand aborts Command if possible. 2986 BusLogic_AbortCommand aborts Command if possible.
2959*/ 2987*/
@@ -3024,6 +3052,7 @@ static int BusLogic_AbortCommand(struct scsi_cmnd *Command)
3024 return SUCCESS; 3052 return SUCCESS;
3025} 3053}
3026 3054
3055#endif
3027/* 3056/*
3028 BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all 3057 BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all
3029 currently executing SCSI Commands as having been Reset. 3058 currently executing SCSI Commands as having been Reset.
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 96a81cd17617..a6f920d218a0 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -27,6 +27,11 @@ config SCSI
27 However, do not compile this as a module if your root file system 27 However, do not compile this as a module if your root file system
28 (the one containing the directory /) is located on a SCSI device. 28 (the one containing the directory /) is located on a SCSI device.
29 29
30config SCSI_NETLINK
31 bool
32 default n
33 select NET
34
30config SCSI_PROC_FS 35config SCSI_PROC_FS
31 bool "legacy /proc/scsi/ support" 36 bool "legacy /proc/scsi/ support"
32 depends on SCSI && PROC_FS 37 depends on SCSI && PROC_FS
@@ -209,7 +214,7 @@ config SCSI_LOGGING
209 there should be no noticeable performance impact as long as you have 214 there should be no noticeable performance impact as long as you have
210 logging turned off. 215 logging turned off.
211 216
212menu "SCSI Transport Attributes" 217menu "SCSI Transports"
213 depends on SCSI 218 depends on SCSI
214 219
215config SCSI_SPI_ATTRS 220config SCSI_SPI_ATTRS
@@ -222,6 +227,7 @@ config SCSI_SPI_ATTRS
222config SCSI_FC_ATTRS 227config SCSI_FC_ATTRS
223 tristate "FiberChannel Transport Attributes" 228 tristate "FiberChannel Transport Attributes"
224 depends on SCSI 229 depends on SCSI
230 select SCSI_NETLINK
225 help 231 help
226 If you wish to export transport-specific information about 232 If you wish to export transport-specific information about
227 each attached FiberChannel device to sysfs, say Y. 233 each attached FiberChannel device to sysfs, say Y.
@@ -242,6 +248,8 @@ config SCSI_SAS_ATTRS
242 If you wish to export transport-specific information about 248 If you wish to export transport-specific information about
243 each attached SAS device to sysfs, say Y. 249 each attached SAS device to sysfs, say Y.
244 250
251source "drivers/scsi/libsas/Kconfig"
252
245endmenu 253endmenu
246 254
247menu "SCSI low-level drivers" 255menu "SCSI low-level drivers"
@@ -431,6 +439,7 @@ config SCSI_AIC7XXX_OLD
431 module will be called aic7xxx_old. 439 module will be called aic7xxx_old.
432 440
433source "drivers/scsi/aic7xxx/Kconfig.aic79xx" 441source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
442source "drivers/scsi/aic94xx/Kconfig"
434 443
435# All the I2O code and drivers do not seem to be 64bit safe. 444# All the I2O code and drivers do not seem to be 64bit safe.
436config SCSI_DPT_I2O 445config SCSI_DPT_I2O
@@ -469,6 +478,20 @@ config SCSI_IN2000
469 To compile this driver as a module, choose M here: the 478 To compile this driver as a module, choose M here: the
470 module will be called in2000. 479 module will be called in2000.
471 480
481config SCSI_ARCMSR
482 tristate "ARECA ARC11X0[PCI-X]/ARC12X0[PCI-EXPRESS] SATA-RAID support"
483 depends on PCI && SCSI
484 help
485 This driver supports all of ARECA's SATA RAID controller cards.
486 This is an ARECA-maintained driver by Erich Chen.
487 If you have any problems, please mail to: < erich@areca.com.tw >
488 Areca supports Linux RAID config tools.
489
490 < http://www.areca.com.tw >
491
492 To compile this driver as a module, choose M here: the
493 module will be called arcmsr (modprobe arcmsr).
494
472source "drivers/scsi/megaraid/Kconfig.megaraid" 495source "drivers/scsi/megaraid/Kconfig.megaraid"
473 496
474config SCSI_SATA 497config SCSI_SATA
@@ -1053,6 +1076,13 @@ config 53C700_LE_ON_BE
1053 depends on SCSI_LASI700 1076 depends on SCSI_LASI700
1054 default y 1077 default y
1055 1078
1079config SCSI_STEX
1080 tristate "Promise SuperTrak EX Series support"
1081 depends on PCI && SCSI
1082 ---help---
1083 This driver supports Promise SuperTrak EX8350/8300/16350/16300
1084 Storage controllers.
1085
1056config SCSI_SYM53C8XX_2 1086config SCSI_SYM53C8XX_2
1057 tristate "SYM53C8XX Version 2 SCSI support" 1087 tristate "SYM53C8XX Version 2 SCSI support"
1058 depends on PCI && SCSI 1088 depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ebd0cf00bf3e..8fc2c594b537 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o
32obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o 32obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o
33obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o 33obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
34obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o 34obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
35obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
35 36
36obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o 37obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
37obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o 38obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
59obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o 60obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
60obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o 61obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
61obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o 62obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o
63obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
62obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o 64obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o
63obj-$(CONFIG_SCSI_AHA152X) += aha152x.o 65obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
64obj-$(CONFIG_SCSI_AHA1542) += aha1542.o 66obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
@@ -67,6 +69,7 @@ obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/
67obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ 69obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/
68obj-$(CONFIG_SCSI_AACRAID) += aacraid/ 70obj-$(CONFIG_SCSI_AACRAID) += aacraid/
69obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o 71obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
72obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
70obj-$(CONFIG_SCSI_IPS) += ips.o 73obj-$(CONFIG_SCSI_IPS) += ips.o
71obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o 74obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
72obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o 75obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
@@ -138,6 +141,7 @@ obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o
138obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o 141obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o
139obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o 142obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o
140obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 143obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
144obj-$(CONFIG_SCSI_STEX) += stex.o
141 145
142obj-$(CONFIG_ARM) += arm/ 146obj-$(CONFIG_ARM) += arm/
143 147
@@ -155,6 +159,7 @@ scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
155 scsicam.o scsi_error.o scsi_lib.o \ 159 scsicam.o scsi_error.o scsi_lib.o \
156 scsi_scan.o scsi_sysfs.o \ 160 scsi_scan.o scsi_sysfs.o \
157 scsi_devinfo.o 161 scsi_devinfo.o
162scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
158scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o 163scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
159scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 164scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
160 165
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index fddfa2ebcd70..085406928605 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -40,7 +40,7 @@ static irqreturn_t a2091_intr (int irq, void *_instance, struct pt_regs *fp)
40 return IRQ_HANDLED; 40 return IRQ_HANDLED;
41} 41}
42 42
43static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 43static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
44{ 44{
45 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 45 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
46 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 46 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -115,7 +115,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
115 return 0; 115 return 0;
116} 116}
117 117
118static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 118static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
119 int status) 119 int status)
120{ 120{
121 /* disable SCSI interrupts */ 121 /* disable SCSI interrupts */
@@ -217,7 +217,7 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
217 return num_a2091; 217 return num_a2091;
218} 218}
219 219
220static int a2091_bus_reset(Scsi_Cmnd *cmd) 220static int a2091_bus_reset(struct scsi_cmnd *cmd)
221{ 221{
222 /* FIXME perform bus-specific reset */ 222 /* FIXME perform bus-specific reset */
223 223
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 22d6a13dd8be..fe809bc88d73 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -13,10 +13,6 @@
13 13
14int a2091_detect(struct scsi_host_template *); 14int a2091_detect(struct scsi_host_template *);
15int a2091_release(struct Scsi_Host *); 15int a2091_release(struct Scsi_Host *);
16const char *wd33c93_info(void);
17int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
18int wd33c93_abort(Scsi_Cmnd *);
19int wd33c93_reset(Scsi_Cmnd *, unsigned int);
20 16
21#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
22#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index ae9ab4b136ac..7bf46d40b561 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -44,7 +44,7 @@ static irqreturn_t a3000_intr (int irq, void *dummy, struct pt_regs *fp)
44 return IRQ_NONE; 44 return IRQ_NONE;
45} 45}
46 46
47static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 47static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
48{ 48{
49 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 49 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
50 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 50 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -110,8 +110,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
110 return 0; 110 return 0;
111} 111}
112 112
113static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 113static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
114 int status) 114 int status)
115{ 115{
116 /* disable SCSI interrupts */ 116 /* disable SCSI interrupts */
117 unsigned short cntr = CNTR_PDMD; 117 unsigned short cntr = CNTR_PDMD;
@@ -205,7 +205,7 @@ fail_register:
205 return 0; 205 return 0;
206} 206}
207 207
208static int a3000_bus_reset(Scsi_Cmnd *cmd) 208static int a3000_bus_reset(struct scsi_cmnd *cmd)
209{ 209{
210 /* FIXME perform bus-specific reset */ 210 /* FIXME perform bus-specific reset */
211 211
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 5535a65150a4..44a4ec7b4650 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -13,10 +13,6 @@
13 13
14int a3000_detect(struct scsi_host_template *); 14int a3000_detect(struct scsi_host_template *);
15int a3000_release(struct Scsi_Host *); 15int a3000_release(struct Scsi_Host *);
16const char *wd33c93_info(void);
17int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
18int wd33c93_abort(Scsi_Cmnd *);
19int wd33c93_reset(Scsi_Cmnd *, unsigned int);
20 16
21#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
22#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 83b5c7d085f2..ac108f9e2674 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -169,13 +169,17 @@ MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control block
169int acbsize = -1; 169int acbsize = -1;
170module_param(acbsize, int, S_IRUGO|S_IWUSR); 170module_param(acbsize, int, S_IRUGO|S_IWUSR);
171MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); 171MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
172
173int expose_physicals = 0;
174module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
175MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 0=off, 1=on");
172/** 176/**
173 * aac_get_config_status - check the adapter configuration 177 * aac_get_config_status - check the adapter configuration
174 * @common: adapter to query 178 * @common: adapter to query
175 * 179 *
176 * Query config status, and commit the configuration if needed. 180 * Query config status, and commit the configuration if needed.
177 */ 181 */
178int aac_get_config_status(struct aac_dev *dev) 182int aac_get_config_status(struct aac_dev *dev, int commit_flag)
179{ 183{
180 int status = 0; 184 int status = 0;
181 struct fib * fibptr; 185 struct fib * fibptr;
@@ -219,7 +223,7 @@ int aac_get_config_status(struct aac_dev *dev)
219 aac_fib_complete(fibptr); 223 aac_fib_complete(fibptr);
220 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 224 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
221 if (status >= 0) { 225 if (status >= 0) {
222 if (commit == 1) { 226 if ((commit == 1) || commit_flag) {
223 struct aac_commit_config * dinfo; 227 struct aac_commit_config * dinfo;
224 aac_fib_init(fibptr); 228 aac_fib_init(fibptr);
225 dinfo = (struct aac_commit_config *) fib_data(fibptr); 229 dinfo = (struct aac_commit_config *) fib_data(fibptr);
@@ -489,6 +493,8 @@ int aac_probe_container(struct aac_dev *dev, int cid)
489 unsigned instance; 493 unsigned instance;
490 494
491 fsa_dev_ptr = dev->fsa_dev; 495 fsa_dev_ptr = dev->fsa_dev;
496 if (!fsa_dev_ptr)
497 return -ENOMEM;
492 instance = dev->scsi_host_ptr->unique_id; 498 instance = dev->scsi_host_ptr->unique_id;
493 499
494 if (!(fibptr = aac_fib_alloc(dev))) 500 if (!(fibptr = aac_fib_alloc(dev)))
@@ -782,8 +788,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
782 dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount); 788 dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
783 } 789 }
784 790
785 tmp = le32_to_cpu(dev->adapter_info.kernelrev); 791 if (!dev->in_reset) {
786 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", 792 tmp = le32_to_cpu(dev->adapter_info.kernelrev);
793 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
787 dev->name, 794 dev->name,
788 dev->id, 795 dev->id,
789 tmp>>24, 796 tmp>>24,
@@ -792,20 +799,21 @@ int aac_get_adapter_info(struct aac_dev* dev)
792 le32_to_cpu(dev->adapter_info.kernelbuild), 799 le32_to_cpu(dev->adapter_info.kernelbuild),
793 (int)sizeof(dev->supplement_adapter_info.BuildDate), 800 (int)sizeof(dev->supplement_adapter_info.BuildDate),
794 dev->supplement_adapter_info.BuildDate); 801 dev->supplement_adapter_info.BuildDate);
795 tmp = le32_to_cpu(dev->adapter_info.monitorrev); 802 tmp = le32_to_cpu(dev->adapter_info.monitorrev);
796 printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", 803 printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
797 dev->name, dev->id, 804 dev->name, dev->id,
798 tmp>>24,(tmp>>16)&0xff,tmp&0xff, 805 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
799 le32_to_cpu(dev->adapter_info.monitorbuild)); 806 le32_to_cpu(dev->adapter_info.monitorbuild));
800 tmp = le32_to_cpu(dev->adapter_info.biosrev); 807 tmp = le32_to_cpu(dev->adapter_info.biosrev);
801 printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", 808 printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
802 dev->name, dev->id, 809 dev->name, dev->id,
803 tmp>>24,(tmp>>16)&0xff,tmp&0xff, 810 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
804 le32_to_cpu(dev->adapter_info.biosbuild)); 811 le32_to_cpu(dev->adapter_info.biosbuild));
805 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) 812 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
806 printk(KERN_INFO "%s%d: serial %x\n", 813 printk(KERN_INFO "%s%d: serial %x\n",
807 dev->name, dev->id, 814 dev->name, dev->id,
808 le32_to_cpu(dev->adapter_info.serial[0])); 815 le32_to_cpu(dev->adapter_info.serial[0]));
816 }
809 817
810 dev->nondasd_support = 0; 818 dev->nondasd_support = 0;
811 dev->raid_scsi_mode = 0; 819 dev->raid_scsi_mode = 0;
@@ -1392,6 +1400,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1392 struct scsi_cmnd *cmd; 1400 struct scsi_cmnd *cmd;
1393 struct scsi_device *sdev = scsicmd->device; 1401 struct scsi_device *sdev = scsicmd->device;
1394 int active = 0; 1402 int active = 0;
1403 struct aac_dev *aac;
1395 unsigned long flags; 1404 unsigned long flags;
1396 1405
1397 /* 1406 /*
@@ -1413,11 +1422,14 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1413 if (active) 1422 if (active)
1414 return SCSI_MLQUEUE_DEVICE_BUSY; 1423 return SCSI_MLQUEUE_DEVICE_BUSY;
1415 1424
1425 aac = (struct aac_dev *)scsicmd->device->host->hostdata;
1426 if (aac->in_reset)
1427 return SCSI_MLQUEUE_HOST_BUSY;
1428
1416 /* 1429 /*
1417 * Allocate and initialize a Fib 1430 * Allocate and initialize a Fib
1418 */ 1431 */
1419 if (!(cmd_fibcontext = 1432 if (!(cmd_fibcontext = aac_fib_alloc(aac)))
1420 aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
1421 return SCSI_MLQUEUE_HOST_BUSY; 1433 return SCSI_MLQUEUE_HOST_BUSY;
1422 1434
1423 aac_fib_init(cmd_fibcontext); 1435 aac_fib_init(cmd_fibcontext);
@@ -1470,6 +1482,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1470 struct aac_dev *dev = (struct aac_dev *)host->hostdata; 1482 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
1471 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; 1483 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
1472 1484
1485 if (fsa_dev_ptr == NULL)
1486 return -1;
1473 /* 1487 /*
1474 * If the bus, id or lun is out of range, return fail 1488 * If the bus, id or lun is out of range, return fail
1475 * Test does not apply to ID 16, the pseudo id for the controller 1489 * Test does not apply to ID 16, the pseudo id for the controller
@@ -1499,6 +1513,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1499 case INQUIRY: 1513 case INQUIRY:
1500 case READ_CAPACITY: 1514 case READ_CAPACITY:
1501 case TEST_UNIT_READY: 1515 case TEST_UNIT_READY:
1516 if (dev->in_reset)
1517 return -1;
1502 spin_unlock_irq(host->host_lock); 1518 spin_unlock_irq(host->host_lock);
1503 aac_probe_container(dev, cid); 1519 aac_probe_container(dev, cid);
1504 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1520 if ((fsa_dev_ptr[cid].valid & 1) == 0)
@@ -1523,7 +1539,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1523 return 0; 1539 return 0;
1524 } 1540 }
1525 } else { /* check for physical non-dasd devices */ 1541 } else { /* check for physical non-dasd devices */
1526 if(dev->nondasd_support == 1){ 1542 if ((dev->nondasd_support == 1) || expose_physicals) {
1543 if (dev->in_reset)
1544 return -1;
1527 return aac_send_srb_fib(scsicmd); 1545 return aac_send_srb_fib(scsicmd);
1528 } else { 1546 } else {
1529 scsicmd->result = DID_NO_CONNECT << 16; 1547 scsicmd->result = DID_NO_CONNECT << 16;
@@ -1579,6 +1597,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1579 scsicmd->scsi_done(scsicmd); 1597 scsicmd->scsi_done(scsicmd);
1580 return 0; 1598 return 0;
1581 } 1599 }
1600 if (dev->in_reset)
1601 return -1;
1582 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); 1602 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
1583 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ 1603 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1584 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1604 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
@@ -1734,6 +1754,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1734 case READ_10: 1754 case READ_10:
1735 case READ_12: 1755 case READ_12:
1736 case READ_16: 1756 case READ_16:
1757 if (dev->in_reset)
1758 return -1;
1737 /* 1759 /*
1738 * Hack to keep track of ordinal number of the device that 1760 * Hack to keep track of ordinal number of the device that
1739 * corresponds to a container. Needed to convert 1761 * corresponds to a container. Needed to convert
@@ -1752,6 +1774,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1752 case WRITE_10: 1774 case WRITE_10:
1753 case WRITE_12: 1775 case WRITE_12:
1754 case WRITE_16: 1776 case WRITE_16:
1777 if (dev->in_reset)
1778 return -1;
1755 return aac_write(scsicmd, cid); 1779 return aac_write(scsicmd, cid);
1756 1780
1757 case SYNCHRONIZE_CACHE: 1781 case SYNCHRONIZE_CACHE:
@@ -1782,6 +1806,8 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
1782 struct fsa_dev_info *fsa_dev_ptr; 1806 struct fsa_dev_info *fsa_dev_ptr;
1783 1807
1784 fsa_dev_ptr = dev->fsa_dev; 1808 fsa_dev_ptr = dev->fsa_dev;
1809 if (!fsa_dev_ptr)
1810 return -EBUSY;
1785 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 1811 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1786 return -EFAULT; 1812 return -EFAULT;
1787 if (qd.cnum == -1) 1813 if (qd.cnum == -1)
@@ -1820,6 +1846,8 @@ static int force_delete_disk(struct aac_dev *dev, void __user *arg)
1820 struct fsa_dev_info *fsa_dev_ptr; 1846 struct fsa_dev_info *fsa_dev_ptr;
1821 1847
1822 fsa_dev_ptr = dev->fsa_dev; 1848 fsa_dev_ptr = dev->fsa_dev;
1849 if (!fsa_dev_ptr)
1850 return -EBUSY;
1823 1851
1824 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) 1852 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1825 return -EFAULT; 1853 return -EFAULT;
@@ -1843,6 +1871,8 @@ static int delete_disk(struct aac_dev *dev, void __user *arg)
1843 struct fsa_dev_info *fsa_dev_ptr; 1871 struct fsa_dev_info *fsa_dev_ptr;
1844 1872
1845 fsa_dev_ptr = dev->fsa_dev; 1873 fsa_dev_ptr = dev->fsa_dev;
1874 if (!fsa_dev_ptr)
1875 return -EBUSY;
1846 1876
1847 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) 1877 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1848 return -EFAULT; 1878 return -EFAULT;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d0eecd4bec83..eb3ed91bac79 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -494,6 +494,7 @@ struct adapter_ops
494 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 494 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
495 int (*adapter_check_health)(struct aac_dev *dev); 495 int (*adapter_check_health)(struct aac_dev *dev);
496 int (*adapter_send)(struct fib * fib); 496 int (*adapter_send)(struct fib * fib);
497 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
497}; 498};
498 499
499/* 500/*
@@ -682,14 +683,6 @@ struct rx_inbound {
682 __le32 Mailbox[8]; 683 __le32 Mailbox[8];
683}; 684};
684 685
685#define InboundMailbox0 IndexRegs.Mailbox[0]
686#define InboundMailbox1 IndexRegs.Mailbox[1]
687#define InboundMailbox2 IndexRegs.Mailbox[2]
688#define InboundMailbox3 IndexRegs.Mailbox[3]
689#define InboundMailbox4 IndexRegs.Mailbox[4]
690#define InboundMailbox5 IndexRegs.Mailbox[5]
691#define InboundMailbox6 IndexRegs.Mailbox[6]
692
693#define INBOUNDDOORBELL_0 0x00000001 686#define INBOUNDDOORBELL_0 0x00000001
694#define INBOUNDDOORBELL_1 0x00000002 687#define INBOUNDDOORBELL_1 0x00000002
695#define INBOUNDDOORBELL_2 0x00000004 688#define INBOUNDDOORBELL_2 0x00000004
@@ -1010,6 +1003,8 @@ struct aac_dev
1010 struct rx_registers __iomem *rx; 1003 struct rx_registers __iomem *rx;
1011 struct rkt_registers __iomem *rkt; 1004 struct rkt_registers __iomem *rkt;
1012 } regs; 1005 } regs;
1006 volatile void __iomem *base;
1007 volatile struct rx_inbound __iomem *IndexRegs;
1013 u32 OIMR; /* Mask Register Cache */ 1008 u32 OIMR; /* Mask Register Cache */
1014 /* 1009 /*
1015 * AIF thread states 1010 * AIF thread states
@@ -1029,6 +1024,7 @@ struct aac_dev
1029 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) 1024 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
1030 u8 raw_io_64; 1025 u8 raw_io_64;
1031 u8 printf_enabled; 1026 u8 printf_enabled;
1027 u8 in_reset;
1032}; 1028};
1033 1029
1034#define aac_adapter_interrupt(dev) \ 1030#define aac_adapter_interrupt(dev) \
@@ -1049,6 +1045,9 @@ struct aac_dev
1049#define aac_adapter_send(fib) \ 1045#define aac_adapter_send(fib) \
1050 ((fib)->dev)->a_ops.adapter_send(fib) 1046 ((fib)->dev)->a_ops.adapter_send(fib)
1051 1047
1048#define aac_adapter_ioremap(dev, size) \
1049 (dev)->a_ops.adapter_ioremap(dev, size)
1050
1052#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) 1051#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
1053 1052
1054/* 1053/*
@@ -1524,7 +1523,6 @@ struct aac_get_name {
1524 __le32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */ 1523 __le32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */
1525}; 1524};
1526 1525
1527#define CT_OK 218
1528struct aac_get_name_resp { 1526struct aac_get_name_resp {
1529 __le32 dummy0; 1527 __le32 dummy0;
1530 __le32 dummy1; 1528 __le32 dummy1;
@@ -1670,6 +1668,7 @@ extern struct aac_common aac_config;
1670#define RCV_TEMP_READINGS 0x00000025 1668#define RCV_TEMP_READINGS 0x00000025
1671#define GET_COMM_PREFERRED_SETTINGS 0x00000026 1669#define GET_COMM_PREFERRED_SETTINGS 0x00000026
1672#define IOP_RESET 0x00001000 1670#define IOP_RESET 0x00001000
1671#define IOP_RESET_ALWAYS 0x00001001
1673#define RE_INIT_ADAPTER 0x000000ee 1672#define RE_INIT_ADAPTER 0x000000ee
1674 1673
1675/* 1674/*
@@ -1788,7 +1787,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1788int aac_fib_complete(struct fib * context); 1787int aac_fib_complete(struct fib * context);
1789#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1788#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
1790struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1789struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1791int aac_get_config_status(struct aac_dev *dev); 1790int aac_get_config_status(struct aac_dev *dev, int commit_flag);
1792int aac_get_containers(struct aac_dev *dev); 1791int aac_get_containers(struct aac_dev *dev);
1793int aac_scsi_cmd(struct scsi_cmnd *cmd); 1792int aac_scsi_cmd(struct scsi_cmnd *cmd);
1794int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg); 1793int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
@@ -1799,6 +1798,7 @@ int aac_sa_init(struct aac_dev *dev);
1799unsigned int aac_response_normal(struct aac_queue * q); 1798unsigned int aac_response_normal(struct aac_queue * q);
1800unsigned int aac_command_normal(struct aac_queue * q); 1799unsigned int aac_command_normal(struct aac_queue * q);
1801unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1800unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
1801int aac_check_health(struct aac_dev * dev);
1802int aac_command_thread(void *data); 1802int aac_command_thread(void *data);
1803int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1803int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
1804int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); 1804int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 255421de9d1a..da1d3a9212f8 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -38,7 +38,7 @@
38#include <linux/completion.h> 38#include <linux/completion.h>
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <linux/delay.h> 41#include <linux/delay.h> /* ssleep prototype */
42#include <linux/kthread.h> 42#include <linux/kthread.h>
43#include <asm/semaphore.h> 43#include <asm/semaphore.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
@@ -140,7 +140,8 @@ cleanup:
140 fibptr->hw_fib_pa = hw_fib_pa; 140 fibptr->hw_fib_pa = hw_fib_pa;
141 fibptr->hw_fib = hw_fib; 141 fibptr->hw_fib = hw_fib;
142 } 142 }
143 aac_fib_free(fibptr); 143 if (retval != -EINTR)
144 aac_fib_free(fibptr);
144 return retval; 145 return retval;
145} 146}
146 147
@@ -297,7 +298,7 @@ return_fib:
297 spin_unlock_irqrestore(&dev->fib_lock, flags); 298 spin_unlock_irqrestore(&dev->fib_lock, flags);
298 /* If someone killed the AIF aacraid thread, restart it */ 299 /* If someone killed the AIF aacraid thread, restart it */
299 status = !dev->aif_thread; 300 status = !dev->aif_thread;
300 if (status && dev->queues && dev->fsa_dev) { 301 if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
301 /* Be paranoid, be very paranoid! */ 302 /* Be paranoid, be very paranoid! */
302 kthread_stop(dev->thread); 303 kthread_stop(dev->thread);
303 ssleep(1); 304 ssleep(1);
@@ -621,7 +622,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
621 622
622 actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry)); 623 actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
623 if(actual_fibsize != fibsize){ // User made a mistake - should not continue 624 if(actual_fibsize != fibsize){ // User made a mistake - should not continue
624 dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n")); 625 dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
626 "Raw SRB command calculated fibsize=%d "
627 "user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d "
628 "issued fibsize=%d\n",
629 actual_fibsize, user_srbcmd->sg.count,
630 sizeof(struct aac_srb), sizeof(struct sgentry),
631 fibsize));
625 rcode = -EINVAL; 632 rcode = -EINVAL;
626 goto cleanup; 633 goto cleanup;
627 } 634 }
@@ -663,6 +670,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
663 psg->count = cpu_to_le32(sg_indx+1); 670 psg->count = cpu_to_le32(sg_indx+1);
664 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 671 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
665 } 672 }
673 if (status == -EINTR) {
674 rcode = -EINTR;
675 goto cleanup;
676 }
666 677
667 if (status != 0){ 678 if (status != 0){
668 dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); 679 dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
@@ -696,8 +707,10 @@ cleanup:
696 for(i=0; i <= sg_indx; i++){ 707 for(i=0; i <= sg_indx; i++){
697 kfree(sg_list[i]); 708 kfree(sg_list[i]);
698 } 709 }
699 aac_fib_complete(srbfib); 710 if (rcode != -EINTR) {
700 aac_fib_free(srbfib); 711 aac_fib_complete(srbfib);
712 aac_fib_free(srbfib);
713 }
701 714
702 return rcode; 715 return rcode;
703} 716}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1cd3584ba7ff..d5cf8b91a0e7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -180,7 +180,7 @@ int aac_send_shutdown(struct aac_dev * dev)
180 -2 /* Timeout silently */, 1, 180 -2 /* Timeout silently */, 1,
181 NULL, NULL); 181 NULL, NULL);
182 182
183 if (status == 0) 183 if (status >= 0)
184 aac_fib_complete(fibctx); 184 aac_fib_complete(fibctx);
185 aac_fib_free(fibctx); 185 aac_fib_free(fibctx);
186 return status; 186 return status;
@@ -307,17 +307,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
307 if (status[1] & AAC_OPT_NEW_COMM) 307 if (status[1] & AAC_OPT_NEW_COMM)
308 dev->new_comm_interface = dev->a_ops.adapter_send != 0; 308 dev->new_comm_interface = dev->a_ops.adapter_send != 0;
309 if (dev->new_comm_interface && (status[2] > dev->base_size)) { 309 if (dev->new_comm_interface && (status[2] > dev->base_size)) {
310 iounmap(dev->regs.sa); 310 aac_adapter_ioremap(dev, 0);
311 dev->base_size = status[2]; 311 dev->base_size = status[2];
312 dprintk((KERN_DEBUG "ioremap(%lx,%d)\n", 312 if (aac_adapter_ioremap(dev, status[2])) {
313 host->base, status[2]));
314 dev->regs.sa = ioremap(host->base, status[2]);
315 if (dev->regs.sa == NULL) {
316 /* remap failed, go back ... */ 313 /* remap failed, go back ... */
317 dev->new_comm_interface = 0; 314 dev->new_comm_interface = 0;
318 dev->regs.sa = ioremap(host->base, 315 if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
319 AAC_MIN_FOOTPRINT_SIZE);
320 if (dev->regs.sa == NULL) {
321 printk(KERN_WARNING 316 printk(KERN_WARNING
322 "aacraid: unable to map adapter.\n"); 317 "aacraid: unable to map adapter.\n");
323 return NULL; 318 return NULL;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3f27419c66af..8734a045558e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -40,8 +40,10 @@
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/kthread.h> 42#include <linux/kthread.h>
43#include <scsi/scsi.h>
43#include <scsi/scsi_host.h> 44#include <scsi/scsi_host.h>
44#include <scsi/scsi_device.h> 45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
45#include <asm/semaphore.h> 47#include <asm/semaphore.h>
46 48
47#include "aacraid.h" 49#include "aacraid.h"
@@ -464,6 +466,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
464 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); 466 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
465 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); 467 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
466 468
469 if (!dev->queues)
470 return -EBUSY;
467 q = &dev->queues->queue[AdapNormCmdQueue]; 471 q = &dev->queues->queue[AdapNormCmdQueue];
468 472
469 if(wait) 473 if(wait)
@@ -527,8 +531,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
527 } 531 }
528 udelay(5); 532 udelay(5);
529 } 533 }
530 } else 534 } else if (down_interruptible(&fibptr->event_wait)) {
531 down(&fibptr->event_wait); 535 spin_lock_irqsave(&fibptr->event_lock, flags);
536 if (fibptr->done == 0) {
537 fibptr->done = 2; /* Tell interrupt we aborted */
538 spin_unlock_irqrestore(&fibptr->event_lock, flags);
539 return -EINTR;
540 }
541 spin_unlock_irqrestore(&fibptr->event_lock, flags);
542 }
532 BUG_ON(fibptr->done == 0); 543 BUG_ON(fibptr->done == 0);
533 544
534 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){ 545 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
@@ -795,7 +806,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
795 806
796 /* Sniff for container changes */ 807 /* Sniff for container changes */
797 808
798 if (!dev) 809 if (!dev || !dev->fsa_dev)
799 return; 810 return;
800 container = (u32)-1; 811 container = (u32)-1;
801 812
@@ -1022,13 +1033,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1022 if (device) { 1033 if (device) {
1023 switch (device_config_needed) { 1034 switch (device_config_needed) {
1024 case DELETE: 1035 case DELETE:
1025 scsi_remove_device(device);
1026 break;
1027 case CHANGE: 1036 case CHANGE:
1028 if (!dev->fsa_dev[container].valid) {
1029 scsi_remove_device(device);
1030 break;
1031 }
1032 scsi_rescan_device(&device->sdev_gendev); 1037 scsi_rescan_device(&device->sdev_gendev);
1033 1038
1034 default: 1039 default:
@@ -1045,6 +1050,262 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1045 1050
1046} 1051}
1047 1052
1053static int _aac_reset_adapter(struct aac_dev *aac)
1054{
1055 int index, quirks;
1056 u32 ret;
1057 int retval;
1058 struct Scsi_Host *host;
1059 struct scsi_device *dev;
1060 struct scsi_cmnd *command;
1061 struct scsi_cmnd *command_list;
1062
1063 /*
1064 * Assumptions:
1065 * - host is locked.
1066 * - in_reset is asserted, so no new i/o is getting to the
1067 * card.
1068 * - The card is dead.
1069 */
1070 host = aac->scsi_host_ptr;
1071 scsi_block_requests(host);
1072 aac_adapter_disable_int(aac);
1073 spin_unlock_irq(host->host_lock);
1074 kthread_stop(aac->thread);
1075
1076 /*
1077 * If a positive health, means in a known DEAD PANIC
1078 * state and the adapter could be reset to `try again'.
1079 */
1080 retval = aac_adapter_check_health(aac);
1081 if (retval == 0)
1082 retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
1083 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1084 if (retval)
1085 retval = aac_adapter_sync_cmd(aac, IOP_RESET,
1086 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1087
1088 if (retval)
1089 goto out;
1090 if (ret != 0x00000001) {
1091 retval = -ENODEV;
1092 goto out;
1093 }
1094
1095 index = aac->cardtype;
1096
1097 /*
1098 * Re-initialize the adapter, first free resources, then carefully
1099 * apply the initialization sequence to come back again. Only risk
1100 * is a change in Firmware dropping cache, it is assumed the caller
1101 * will ensure that i/o is queisced and the card is flushed in that
1102 * case.
1103 */
1104 aac_fib_map_free(aac);
1105 aac->hw_fib_va = NULL;
1106 aac->hw_fib_pa = 0;
1107 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1108 aac->comm_addr = NULL;
1109 aac->comm_phys = 0;
1110 kfree(aac->queues);
1111 aac->queues = NULL;
1112 free_irq(aac->pdev->irq, aac);
1113 kfree(aac->fsa_dev);
1114 aac->fsa_dev = NULL;
1115 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
1116 if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
1117 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
1118 goto out;
1119 } else {
1120 if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
1121 ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
1122 goto out;
1123 }
1124 if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1125 goto out;
1126 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
1127 if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1128 goto out;
1129 aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1130 if (IS_ERR(aac->thread)) {
1131 retval = PTR_ERR(aac->thread);
1132 goto out;
1133 }
1134 (void)aac_get_adapter_info(aac);
1135 quirks = aac_get_driver_ident(index)->quirks;
1136 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1137 host->sg_tablesize = 34;
1138 host->max_sectors = (host->sg_tablesize * 8) + 112;
1139 }
1140 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1141 host->sg_tablesize = 17;
1142 host->max_sectors = (host->sg_tablesize * 8) + 112;
1143 }
1144 aac_get_config_status(aac, 1);
1145 aac_get_containers(aac);
1146 /*
1147 * This is where the assumption that the Adapter is quiesced
1148 * is important.
1149 */
1150 command_list = NULL;
1151 __shost_for_each_device(dev, host) {
1152 unsigned long flags;
1153 spin_lock_irqsave(&dev->list_lock, flags);
1154 list_for_each_entry(command, &dev->cmd_list, list)
1155 if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1156 command->SCp.buffer = (struct scatterlist *)command_list;
1157 command_list = command;
1158 }
1159 spin_unlock_irqrestore(&dev->list_lock, flags);
1160 }
1161 while ((command = command_list)) {
1162 command_list = (struct scsi_cmnd *)command->SCp.buffer;
1163 command->SCp.buffer = NULL;
1164 command->result = DID_OK << 16
1165 | COMMAND_COMPLETE << 8
1166 | SAM_STAT_TASK_SET_FULL;
1167 command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1168 command->scsi_done(command);
1169 }
1170 retval = 0;
1171
1172out:
1173 aac->in_reset = 0;
1174 scsi_unblock_requests(host);
1175 spin_lock_irq(host->host_lock);
1176 return retval;
1177}
1178
1179int aac_check_health(struct aac_dev * aac)
1180{
1181 int BlinkLED;
1182 unsigned long time_now, flagv = 0;
1183 struct list_head * entry;
1184 struct Scsi_Host * host;
1185
1186 /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1187 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1188 return 0;
1189
1190 if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1191 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1192 return 0; /* OK */
1193 }
1194
1195 aac->in_reset = 1;
1196
1197 /* Fake up an AIF:
1198 * aac_aifcmd.command = AifCmdEventNotify = 1
1199 * aac_aifcmd.seqnum = 0xFFFFFFFF
1200 * aac_aifcmd.data[0] = AifEnExpEvent = 23
1201 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1202 * aac.aifcmd.data[2] = AifHighPriority = 3
1203 * aac.aifcmd.data[3] = BlinkLED
1204 */
1205
1206 time_now = jiffies/HZ;
1207 entry = aac->fib_list.next;
1208
1209 /*
1210 * For each Context that is on the
1211 * fibctxList, make a copy of the
1212 * fib, and then set the event to wake up the
1213 * thread that is waiting for it.
1214 */
1215 while (entry != &aac->fib_list) {
1216 /*
1217 * Extract the fibctx
1218 */
1219 struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1220 struct hw_fib * hw_fib;
1221 struct fib * fib;
1222 /*
1223 * Check if the queue is getting
1224 * backlogged
1225 */
1226 if (fibctx->count > 20) {
1227 /*
1228 * It's *not* jiffies folks,
1229 * but jiffies / HZ, so do not
1230 * panic ...
1231 */
1232 u32 time_last = fibctx->jiffies;
1233 /*
1234 * Has it been > 2 minutes
1235 * since the last read off
1236 * the queue?
1237 */
1238 if ((time_now - time_last) > aif_timeout) {
1239 entry = entry->next;
1240 aac_close_fib_context(aac, fibctx);
1241 continue;
1242 }
1243 }
1244 /*
1245 * Warning: no sleep allowed while
1246 * holding spinlock
1247 */
1248 hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1249 fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
1250 if (fib && hw_fib) {
1251 struct aac_aifcmd * aif;
1252
1253 memset(hw_fib, 0, sizeof(struct hw_fib));
1254 memset(fib, 0, sizeof(struct fib));
1255 fib->hw_fib = hw_fib;
1256 fib->dev = aac;
1257 aac_fib_init(fib);
1258 fib->type = FSAFS_NTC_FIB_CONTEXT;
1259 fib->size = sizeof (struct fib);
1260 fib->data = hw_fib->data;
1261 aif = (struct aac_aifcmd *)hw_fib->data;
1262 aif->command = cpu_to_le32(AifCmdEventNotify);
1263 aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1264 aif->data[0] = cpu_to_le32(AifEnExpEvent);
1265 aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
1266 aif->data[2] = cpu_to_le32(AifHighPriority);
1267 aif->data[3] = cpu_to_le32(BlinkLED);
1268
1269 /*
1270 * Put the FIB onto the
1271 * fibctx's fibs
1272 */
1273 list_add_tail(&fib->fiblink, &fibctx->fib_list);
1274 fibctx->count++;
1275 /*
1276 * Set the event to wake up the
1277 * thread that will waiting.
1278 */
1279 up(&fibctx->wait_sem);
1280 } else {
1281 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1282 kfree(fib);
1283 kfree(hw_fib);
1284 }
1285 entry = entry->next;
1286 }
1287
1288 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1289
1290 if (BlinkLED < 0) {
1291 printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1292 goto out;
1293 }
1294
1295 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1296
1297 host = aac->scsi_host_ptr;
1298 spin_lock_irqsave(host->host_lock, flagv);
1299 BlinkLED = _aac_reset_adapter(aac);
1300 spin_unlock_irqrestore(host->host_lock, flagv);
1301 return BlinkLED;
1302
1303out:
1304 aac->in_reset = 0;
1305 return BlinkLED;
1306}
1307
1308
1048/** 1309/**
1049 * aac_command_thread - command processing thread 1310 * aac_command_thread - command processing thread
1050 * @dev: Adapter to monitor 1311 * @dev: Adapter to monitor
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index b2a5c7262f36..8335f07b7720 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -124,10 +124,15 @@ unsigned int aac_response_normal(struct aac_queue * q)
124 } else { 124 } else {
125 unsigned long flagv; 125 unsigned long flagv;
126 spin_lock_irqsave(&fib->event_lock, flagv); 126 spin_lock_irqsave(&fib->event_lock, flagv);
127 fib->done = 1; 127 if (!fib->done)
128 fib->done = 1;
128 up(&fib->event_wait); 129 up(&fib->event_wait);
129 spin_unlock_irqrestore(&fib->event_lock, flagv); 130 spin_unlock_irqrestore(&fib->event_lock, flagv);
130 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 131 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
132 if (fib->done == 2) {
133 aac_fib_complete(fib);
134 aac_fib_free(fib);
135 }
131 } 136 }
132 consumed++; 137 consumed++;
133 spin_lock_irqsave(q->lock, flags); 138 spin_lock_irqsave(q->lock, flags);
@@ -316,7 +321,8 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
316 unsigned long flagv; 321 unsigned long flagv;
317 dprintk((KERN_INFO "event_wait up\n")); 322 dprintk((KERN_INFO "event_wait up\n"));
318 spin_lock_irqsave(&fib->event_lock, flagv); 323 spin_lock_irqsave(&fib->event_lock, flagv);
319 fib->done = 1; 324 if (!fib->done)
325 fib->done = 1;
320 up(&fib->event_wait); 326 up(&fib->event_wait);
321 spin_unlock_irqrestore(&fib->event_lock, flagv); 327 spin_unlock_irqrestore(&fib->event_lock, flagv);
322 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 328 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e42a479ce64a..359e7ddfdb47 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -82,6 +82,8 @@ static LIST_HEAD(aac_devices);
82static int aac_cfg_major = -1; 82static int aac_cfg_major = -1;
83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; 83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
84 84
85extern int expose_physicals;
86
85/* 87/*
86 * Because of the way Linux names scsi devices, the order in this table has 88 * Because of the way Linux names scsi devices, the order in this table has
87 * become important. Check for on-board Raid first, add-in cards second. 89 * become important. Check for on-board Raid first, add-in cards second.
@@ -394,6 +396,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
394 sdev->skip_ms_page_3f = 1; 396 sdev->skip_ms_page_3f = 1;
395 } 397 }
396 if ((sdev->type == TYPE_DISK) && 398 if ((sdev->type == TYPE_DISK) &&
399 !expose_physicals &&
397 (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 400 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
398 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 401 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
399 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) 402 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
@@ -454,17 +457,17 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
454 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", 457 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
455 AAC_DRIVERNAME); 458 AAC_DRIVERNAME);
456 aac = (struct aac_dev *)host->hostdata; 459 aac = (struct aac_dev *)host->hostdata;
457 if (aac_adapter_check_health(aac)) { 460
458 printk(KERN_ERR "%s: Host adapter appears dead\n", 461 if ((count = aac_check_health(aac)))
459 AAC_DRIVERNAME); 462 return count;
460 return -ENODEV;
461 }
462 /* 463 /*
463 * Wait for all commands to complete to this specific 464 * Wait for all commands to complete to this specific
464 * target (block maximum 60 seconds). 465 * target (block maximum 60 seconds).
465 */ 466 */
466 for (count = 60; count; --count) { 467 for (count = 60; count; --count) {
467 int active = 0; 468 int active = aac->in_reset;
469
470 if (active == 0)
468 __shost_for_each_device(dev, host) { 471 __shost_for_each_device(dev, host) {
469 spin_lock_irqsave(&dev->list_lock, flags); 472 spin_lock_irqsave(&dev->list_lock, flags);
470 list_for_each_entry(command, &dev->cmd_list, list) { 473 list_for_each_entry(command, &dev->cmd_list, list) {
@@ -864,13 +867,6 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
864 * Map in the registers from the adapter. 867 * Map in the registers from the adapter.
865 */ 868 */
866 aac->base_size = AAC_MIN_FOOTPRINT_SIZE; 869 aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
867 if ((aac->regs.sa = ioremap(
868 (unsigned long)aac->scsi_host_ptr->base, AAC_MIN_FOOTPRINT_SIZE))
869 == NULL) {
870 printk(KERN_WARNING "%s: unable to map adapter.\n",
871 AAC_DRIVERNAME);
872 goto out_free_fibs;
873 }
874 if ((*aac_drivers[index].init)(aac)) 870 if ((*aac_drivers[index].init)(aac))
875 goto out_unmap; 871 goto out_unmap;
876 872
@@ -928,12 +924,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
928 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) 924 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
929 * physical channels are address by their actual physical number+1 925 * physical channels are address by their actual physical number+1
930 */ 926 */
931 if (aac->nondasd_support == 1) 927 if ((aac->nondasd_support == 1) || expose_physicals)
932 shost->max_channel = aac->maximum_num_channels; 928 shost->max_channel = aac->maximum_num_channels;
933 else 929 else
934 shost->max_channel = 0; 930 shost->max_channel = 0;
935 931
936 aac_get_config_status(aac); 932 aac_get_config_status(aac, 0);
937 aac_get_containers(aac); 933 aac_get_containers(aac);
938 list_add(&aac->entry, insert); 934 list_add(&aac->entry, insert);
939 935
@@ -969,8 +965,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
969 aac_fib_map_free(aac); 965 aac_fib_map_free(aac);
970 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 966 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
971 kfree(aac->queues); 967 kfree(aac->queues);
972 iounmap(aac->regs.sa); 968 aac_adapter_ioremap(aac, 0);
973 out_free_fibs:
974 kfree(aac->fibs); 969 kfree(aac->fibs);
975 kfree(aac->fsa_dev); 970 kfree(aac->fsa_dev);
976 out_free_host: 971 out_free_host:
@@ -1005,7 +1000,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
1005 kfree(aac->queues); 1000 kfree(aac->queues);
1006 1001
1007 free_irq(pdev->irq, aac); 1002 free_irq(pdev->irq, aac);
1008 iounmap(aac->regs.sa); 1003 aac_adapter_ioremap(aac, 0);
1009 1004
1010 kfree(aac->fibs); 1005 kfree(aac->fibs);
1011 kfree(aac->fsa_dev); 1006 kfree(aac->fsa_dev);
@@ -1013,6 +1008,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
1013 list_del(&aac->entry); 1008 list_del(&aac->entry);
1014 scsi_host_put(shost); 1009 scsi_host_put(shost);
1015 pci_disable_device(pdev); 1010 pci_disable_device(pdev);
1011 if (list_empty(&aac_devices)) {
1012 unregister_chrdev(aac_cfg_major, "aac");
1013 aac_cfg_major = -1;
1014 }
1016} 1015}
1017 1016
1018static struct pci_driver aac_pci_driver = { 1017static struct pci_driver aac_pci_driver = {
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 458ea897fd72..643f23b5ded8 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -28,370 +28,27 @@
28 * 28 *
29 */ 29 */
30 30
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/types.h>
34#include <linux/sched.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h>
37#include <linux/slab.h>
38#include <linux/blkdev.h> 31#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/completion.h>
41#include <linux/time.h>
42#include <linux/interrupt.h>
43#include <asm/semaphore.h>
44 32
45#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
46 34
47#include "aacraid.h" 35#include "aacraid.h"
48 36
49static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
50{
51 struct aac_dev *dev = dev_id;
52
53 if (dev->new_comm_interface) {
54 u32 Index = rkt_readl(dev, MUnit.OutboundQueue);
55 if (Index == 0xFFFFFFFFL)
56 Index = rkt_readl(dev, MUnit.OutboundQueue);
57 if (Index != 0xFFFFFFFFL) {
58 do {
59 if (aac_intr_normal(dev, Index)) {
60 rkt_writel(dev, MUnit.OutboundQueue, Index);
61 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
62 }
63 Index = rkt_readl(dev, MUnit.OutboundQueue);
64 } while (Index != 0xFFFFFFFFL);
65 return IRQ_HANDLED;
66 }
67 } else {
68 unsigned long bellbits;
69 u8 intstat;
70 intstat = rkt_readb(dev, MUnit.OISR);
71 /*
72 * Read mask and invert because drawbridge is reversed.
73 * This allows us to only service interrupts that have
74 * been enabled.
75 * Check to see if this is our interrupt. If it isn't just return
76 */
77 if (intstat & ~(dev->OIMR))
78 {
79 bellbits = rkt_readl(dev, OutboundDoorbellReg);
80 if (bellbits & DoorBellPrintfReady) {
81 aac_printf(dev, rkt_readl (dev, IndexRegs.Mailbox[5]));
82 rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
83 rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
84 }
85 else if (bellbits & DoorBellAdapterNormCmdReady) {
86 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
87 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
88// rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
89 }
90 else if (bellbits & DoorBellAdapterNormRespReady) {
91 rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
92 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
93 }
94 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
95 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
96 }
97 else if (bellbits & DoorBellAdapterNormRespNotFull) {
98 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
99 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
100 }
101 return IRQ_HANDLED;
102 }
103 }
104 return IRQ_NONE;
105}
106
107/**
108 * aac_rkt_disable_interrupt - Disable interrupts
109 * @dev: Adapter
110 */
111
112static void aac_rkt_disable_interrupt(struct aac_dev *dev)
113{
114 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
115}
116
117/** 37/**
118 * rkt_sync_cmd - send a command and wait 38 * aac_rkt_ioremap
119 * @dev: Adapter 39 * @size: mapping resize request
120 * @command: Command to execute
121 * @p1: first parameter
122 * @ret: adapter status
123 * 40 *
124 * This routine will send a synchronous command to the adapter and wait
125 * for its completion.
126 */ 41 */
127 42static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
128static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
129 u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
130 u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
131{ 43{
132 unsigned long start; 44 if (!size) {
133 int ok; 45 iounmap(dev->regs.rkt);
134 /* 46 return 0;
135 * Write the command into Mailbox 0
136 */
137 rkt_writel(dev, InboundMailbox0, command);
138 /*
139 * Write the parameters into Mailboxes 1 - 6
140 */
141 rkt_writel(dev, InboundMailbox1, p1);
142 rkt_writel(dev, InboundMailbox2, p2);
143 rkt_writel(dev, InboundMailbox3, p3);
144 rkt_writel(dev, InboundMailbox4, p4);
145 /*
146 * Clear the synch command doorbell to start on a clean slate.
147 */
148 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
149 /*
150 * Disable doorbell interrupts
151 */
152 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
153 /*
154 * Force the completion of the mask register write before issuing
155 * the interrupt.
156 */
157 rkt_readb (dev, MUnit.OIMR);
158 /*
159 * Signal that there is a new synch command
160 */
161 rkt_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
162
163 ok = 0;
164 start = jiffies;
165
166 /*
167 * Wait up to 30 seconds
168 */
169 while (time_before(jiffies, start+30*HZ))
170 {
171 udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
172 /*
173 * Mon960 will set doorbell0 bit when it has completed the command.
174 */
175 if (rkt_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
176 /*
177 * Clear the doorbell.
178 */
179 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
180 ok = 1;
181 break;
182 }
183 /*
184 * Yield the processor in case we are slow
185 */
186 msleep(1);
187 } 47 }
188 if (ok != 1) { 48 dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size);
189 /* 49 if (dev->base == NULL)
190 * Restore interrupt mask even though we timed out
191 */
192 if (dev->new_comm_interface)
193 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
194 else
195 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
196 return -ETIMEDOUT;
197 }
198 /*
199 * Pull the synch status from Mailbox 0.
200 */
201 if (status)
202 *status = rkt_readl(dev, IndexRegs.Mailbox[0]);
203 if (r1)
204 *r1 = rkt_readl(dev, IndexRegs.Mailbox[1]);
205 if (r2)
206 *r2 = rkt_readl(dev, IndexRegs.Mailbox[2]);
207 if (r3)
208 *r3 = rkt_readl(dev, IndexRegs.Mailbox[3]);
209 if (r4)
210 *r4 = rkt_readl(dev, IndexRegs.Mailbox[4]);
211 /*
212 * Clear the synch command doorbell.
213 */
214 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
215 /*
216 * Restore interrupt mask
217 */
218 if (dev->new_comm_interface)
219 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
220 else
221 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
222 return 0;
223
224}
225
226/**
227 * aac_rkt_interrupt_adapter - interrupt adapter
228 * @dev: Adapter
229 *
230 * Send an interrupt to the i960 and breakpoint it.
231 */
232
233static void aac_rkt_interrupt_adapter(struct aac_dev *dev)
234{
235 rkt_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
236 NULL, NULL, NULL, NULL, NULL);
237}
238
239/**
240 * aac_rkt_notify_adapter - send an event to the adapter
241 * @dev: Adapter
242 * @event: Event to send
243 *
244 * Notify the i960 that something it probably cares about has
245 * happened.
246 */
247
248static void aac_rkt_notify_adapter(struct aac_dev *dev, u32 event)
249{
250 switch (event) {
251
252 case AdapNormCmdQue:
253 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
254 break;
255 case HostNormRespNotFull:
256 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
257 break;
258 case AdapNormRespQue:
259 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
260 break;
261 case HostNormCmdNotFull:
262 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
263 break;
264 case HostShutdown:
265// rkt_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
266// NULL, NULL, NULL, NULL, NULL);
267 break;
268 case FastIo:
269 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
270 break;
271 case AdapPrintfDone:
272 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
273 break;
274 default:
275 BUG();
276 break;
277 }
278}
279
280/**
281 * aac_rkt_start_adapter - activate adapter
282 * @dev: Adapter
283 *
284 * Start up processing on an i960 based AAC adapter
285 */
286
287static void aac_rkt_start_adapter(struct aac_dev *dev)
288{
289 struct aac_init *init;
290
291 init = dev->init;
292 init->HostElapsedSeconds = cpu_to_le32(get_seconds());
293 // We can only use a 32 bit address here
294 rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
295 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
296}
297
298/**
299 * aac_rkt_check_health
300 * @dev: device to check if healthy
301 *
302 * Will attempt to determine if the specified adapter is alive and
303 * capable of handling requests, returning 0 if alive.
304 */
305static int aac_rkt_check_health(struct aac_dev *dev)
306{
307 u32 status = rkt_readl(dev, MUnit.OMRx[0]);
308
309 /*
310 * Check to see if the board failed any self tests.
311 */
312 if (status & SELF_TEST_FAILED)
313 return -1; 50 return -1;
314 /* 51 dev->IndexRegs = &dev->regs.rkt->IndexRegs;
315 * Check to see if the board panic'd.
316 */
317 if (status & KERNEL_PANIC) {
318 char * buffer;
319 struct POSTSTATUS {
320 __le32 Post_Command;
321 __le32 Post_Address;
322 } * post;
323 dma_addr_t paddr, baddr;
324 int ret;
325
326 if ((status & 0xFF000000L) == 0xBC000000L)
327 return (status >> 16) & 0xFF;
328 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
329 ret = -2;
330 if (buffer == NULL)
331 return ret;
332 post = pci_alloc_consistent(dev->pdev,
333 sizeof(struct POSTSTATUS), &paddr);
334 if (post == NULL) {
335 pci_free_consistent(dev->pdev, 512, buffer, baddr);
336 return ret;
337 }
338 memset(buffer, 0, 512);
339 post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
340 post->Post_Address = cpu_to_le32(baddr);
341 rkt_writel(dev, MUnit.IMRx[0], paddr);
342 rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
343 NULL, NULL, NULL, NULL, NULL);
344 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
345 post, paddr);
346 if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
347 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
348 ret <<= 4;
349 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
350 }
351 pci_free_consistent(dev->pdev, 512, buffer, baddr);
352 return ret;
353 }
354 /*
355 * Wait for the adapter to be up and running.
356 */
357 if (!(status & KERNEL_UP_AND_RUNNING))
358 return -3;
359 /*
360 * Everything is OK
361 */
362 return 0;
363}
364
365/**
366 * aac_rkt_send
367 * @fib: fib to issue
368 *
369 * Will send a fib, returning 0 if successful.
370 */
371static int aac_rkt_send(struct fib * fib)
372{
373 u64 addr = fib->hw_fib_pa;
374 struct aac_dev *dev = fib->dev;
375 volatile void __iomem *device = dev->regs.rkt;
376 u32 Index;
377
378 dprintk((KERN_DEBUG "%p->aac_rkt_send(%p->%llx)\n", dev, fib, addr));
379 Index = rkt_readl(dev, MUnit.InboundQueue);
380 if (Index == 0xFFFFFFFFL)
381 Index = rkt_readl(dev, MUnit.InboundQueue);
382 dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
383 if (Index == 0xFFFFFFFFL)
384 return Index;
385 device += Index;
386 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
387 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
388 writel((u32)(addr & 0xffffffff), device);
389 device += sizeof(u32);
390 writel((u32)(addr >> 32), device);
391 device += sizeof(u32);
392 writel(le16_to_cpu(fib->hw_fib->header.Size), device);
393 rkt_writel(dev, MUnit.InboundQueue, Index);
394 dprintk((KERN_DEBUG "aac_rkt_send - return 0\n"));
395 return 0; 52 return 0;
396} 53}
397 54
@@ -406,78 +63,18 @@ static int aac_rkt_send(struct fib * fib)
406 63
407int aac_rkt_init(struct aac_dev *dev) 64int aac_rkt_init(struct aac_dev *dev)
408{ 65{
409 unsigned long start; 66 int retval;
410 unsigned long status; 67 extern int _aac_rx_init(struct aac_dev *dev);
411 int instance; 68 extern void aac_rx_start_adapter(struct aac_dev *dev);
412 const char * name;
413
414 instance = dev->id;
415 name = dev->name;
416 69
417 /* 70 /*
418 * Check to see if the board panic'd while booting.
419 */
420 /*
421 * Check to see if the board failed any self tests.
422 */
423 if (rkt_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
424 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
425 goto error_iounmap;
426 }
427 /*
428 * Check to see if the monitor panic'd while booting.
429 */
430 if (rkt_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
431 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
432 goto error_iounmap;
433 }
434 /*
435 * Check to see if the board panic'd while booting.
436 */
437 if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
438 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
439 goto error_iounmap;
440 }
441 start = jiffies;
442 /*
443 * Wait for the adapter to be up and running. Wait up to 3 minutes
444 */
445 while (!(rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING))
446 {
447 if(time_after(jiffies, start+startup_timeout*HZ))
448 {
449 status = rkt_readl(dev, MUnit.OMRx[0]);
450 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
451 dev->name, instance, status);
452 goto error_iounmap;
453 }
454 msleep(1);
455 }
456 if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0)
457 {
458 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
459 goto error_iounmap;
460 }
461 /*
462 * Fill in the function dispatch table. 71 * Fill in the function dispatch table.
463 */ 72 */
464 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter; 73 dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
465 dev->a_ops.adapter_disable_int = aac_rkt_disable_interrupt;
466 dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
467 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
468 dev->a_ops.adapter_check_health = aac_rkt_check_health;
469 dev->a_ops.adapter_send = aac_rkt_send;
470
471 /*
472 * First clear out all interrupts. Then enable the one's that we
473 * can handle.
474 */
475 rkt_writeb(dev, MUnit.OIMR, 0xff);
476 rkt_writel(dev, MUnit.ODR, 0xffffffff);
477 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
478 74
479 if (aac_init_adapter(dev) == NULL) 75 retval = _aac_rx_init(dev);
480 goto error_irq; 76 if (retval)
77 return retval;
481 if (dev->new_comm_interface) { 78 if (dev->new_comm_interface) {
482 /* 79 /*
483 * FIB Setup has already been done, but we can minimize the 80 * FIB Setup has already been done, but we can minimize the
@@ -494,20 +91,11 @@ int aac_rkt_init(struct aac_dev *dev)
494 dev->init->MaxIoCommands = cpu_to_le32(246); 91 dev->init->MaxIoCommands = cpu_to_le32(246);
495 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB; 92 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
496 } 93 }
497 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
498 } 94 }
499 /* 95 /*
500 * Tell the adapter that all is configured, and it can start 96 * Tell the adapter that all is configured, and it can start
501 * accepting requests 97 * accepting requests
502 */ 98 */
503 aac_rkt_start_adapter(dev); 99 aac_rx_start_adapter(dev);
504 return 0; 100 return 0;
505
506error_irq:
507 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
508 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
509
510error_iounmap:
511
512 return -1;
513} 101}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 035018db69b1..a1d214d770eb 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -79,7 +79,7 @@ static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
79 { 79 {
80 bellbits = rx_readl(dev, OutboundDoorbellReg); 80 bellbits = rx_readl(dev, OutboundDoorbellReg);
81 if (bellbits & DoorBellPrintfReady) { 81 if (bellbits & DoorBellPrintfReady) {
82 aac_printf(dev, rx_readl (dev, IndexRegs.Mailbox[5])); 82 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); 83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); 84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
85 } 85 }
@@ -134,14 +134,14 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
134 /* 134 /*
135 * Write the command into Mailbox 0 135 * Write the command into Mailbox 0
136 */ 136 */
137 rx_writel(dev, InboundMailbox0, command); 137 writel(command, &dev->IndexRegs->Mailbox[0]);
138 /* 138 /*
139 * Write the parameters into Mailboxes 1 - 6 139 * Write the parameters into Mailboxes 1 - 6
140 */ 140 */
141 rx_writel(dev, InboundMailbox1, p1); 141 writel(p1, &dev->IndexRegs->Mailbox[1]);
142 rx_writel(dev, InboundMailbox2, p2); 142 writel(p2, &dev->IndexRegs->Mailbox[2]);
143 rx_writel(dev, InboundMailbox3, p3); 143 writel(p3, &dev->IndexRegs->Mailbox[3]);
144 rx_writel(dev, InboundMailbox4, p4); 144 writel(p4, &dev->IndexRegs->Mailbox[4]);
145 /* 145 /*
146 * Clear the synch command doorbell to start on a clean slate. 146 * Clear the synch command doorbell to start on a clean slate.
147 */ 147 */
@@ -199,15 +199,15 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
199 * Pull the synch status from Mailbox 0. 199 * Pull the synch status from Mailbox 0.
200 */ 200 */
201 if (status) 201 if (status)
202 *status = rx_readl(dev, IndexRegs.Mailbox[0]); 202 *status = readl(&dev->IndexRegs->Mailbox[0]);
203 if (r1) 203 if (r1)
204 *r1 = rx_readl(dev, IndexRegs.Mailbox[1]); 204 *r1 = readl(&dev->IndexRegs->Mailbox[1]);
205 if (r2) 205 if (r2)
206 *r2 = rx_readl(dev, IndexRegs.Mailbox[2]); 206 *r2 = readl(&dev->IndexRegs->Mailbox[2]);
207 if (r3) 207 if (r3)
208 *r3 = rx_readl(dev, IndexRegs.Mailbox[3]); 208 *r3 = readl(&dev->IndexRegs->Mailbox[3]);
209 if (r4) 209 if (r4)
210 *r4 = rx_readl(dev, IndexRegs.Mailbox[4]); 210 *r4 = readl(&dev->IndexRegs->Mailbox[4]);
211 /* 211 /*
212 * Clear the synch command doorbell. 212 * Clear the synch command doorbell.
213 */ 213 */
@@ -261,8 +261,6 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
261 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3); 261 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
262 break; 262 break;
263 case HostShutdown: 263 case HostShutdown:
264// rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
265// NULL, NULL, NULL, NULL, NULL);
266 break; 264 break;
267 case FastIo: 265 case FastIo:
268 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6); 266 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
@@ -283,7 +281,7 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
283 * Start up processing on an i960 based AAC adapter 281 * Start up processing on an i960 based AAC adapter
284 */ 282 */
285 283
286static void aac_rx_start_adapter(struct aac_dev *dev) 284void aac_rx_start_adapter(struct aac_dev *dev)
287{ 285{
288 struct aac_init *init; 286 struct aac_init *init;
289 287
@@ -381,7 +379,7 @@ static int aac_rx_send(struct fib * fib)
381 dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); 379 dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
382 if (Index == 0xFFFFFFFFL) 380 if (Index == 0xFFFFFFFFL)
383 return Index; 381 return Index;
384 device += Index; 382 device = dev->base + Index;
385 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), 383 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
386 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size))); 384 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
387 writel((u32)(addr & 0xffffffff), device); 385 writel((u32)(addr & 0xffffffff), device);
@@ -395,6 +393,43 @@ static int aac_rx_send(struct fib * fib)
395} 393}
396 394
397/** 395/**
396 * aac_rx_ioremap
397 * @size: mapping resize request
398 *
399 */
400static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
401{
402 if (!size) {
403 iounmap(dev->regs.rx);
404 return 0;
405 }
406 dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
407 if (dev->base == NULL)
408 return -1;
409 dev->IndexRegs = &dev->regs.rx->IndexRegs;
410 return 0;
411}
412
413static int aac_rx_restart_adapter(struct aac_dev *dev)
414{
415 u32 var;
416
417 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n",
418 dev->name, dev->id);
419
420 if (aac_rx_check_health(dev) <= 0)
421 return 1;
422 if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0,
423 &var, NULL, NULL, NULL, NULL))
424 return 1;
425 if (var != 0x00000001)
426 return 1;
427 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
428 return 1;
429 return 0;
430}
431
432/**
398 * aac_rx_init - initialize an i960 based AAC card 433 * aac_rx_init - initialize an i960 based AAC card
399 * @dev: device to configure 434 * @dev: device to configure
400 * 435 *
@@ -403,7 +438,7 @@ static int aac_rx_send(struct fib * fib)
403 * to the comm region. 438 * to the comm region.
404 */ 439 */
405 440
406int aac_rx_init(struct aac_dev *dev) 441int _aac_rx_init(struct aac_dev *dev)
407{ 442{
408 unsigned long start; 443 unsigned long start;
409 unsigned long status; 444 unsigned long status;
@@ -413,27 +448,30 @@ int aac_rx_init(struct aac_dev *dev)
413 instance = dev->id; 448 instance = dev->id;
414 name = dev->name; 449 name = dev->name;
415 450
451 if (aac_adapter_ioremap(dev, dev->base_size)) {
452 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
453 goto error_iounmap;
454 }
455
416 /* 456 /*
417 * Check to see if the board panic'd while booting. 457 * Check to see if the board panic'd while booting.
418 */ 458 */
459 status = rx_readl(dev, MUnit.OMRx[0]);
460 if (status & KERNEL_PANIC)
461 if (aac_rx_restart_adapter(dev))
462 goto error_iounmap;
419 /* 463 /*
420 * Check to see if the board failed any self tests. 464 * Check to see if the board failed any self tests.
421 */ 465 */
422 if (rx_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) { 466 status = rx_readl(dev, MUnit.OMRx[0]);
467 if (status & SELF_TEST_FAILED) {
423 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); 468 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
424 goto error_iounmap; 469 goto error_iounmap;
425 } 470 }
426 /* 471 /*
427 * Check to see if the board panic'd while booting.
428 */
429 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
430 printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance);
431 goto error_iounmap;
432 }
433 /*
434 * Check to see if the monitor panic'd while booting. 472 * Check to see if the monitor panic'd while booting.
435 */ 473 */
436 if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) { 474 if (status & MONITOR_PANIC) {
437 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); 475 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
438 goto error_iounmap; 476 goto error_iounmap;
439 } 477 }
@@ -441,12 +479,10 @@ int aac_rx_init(struct aac_dev *dev)
441 /* 479 /*
442 * Wait for the adapter to be up and running. Wait up to 3 minutes 480 * Wait for the adapter to be up and running. Wait up to 3 minutes
443 */ 481 */
444 while ((!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 482 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
445 || (!(rx_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING)))
446 { 483 {
447 if(time_after(jiffies, start+startup_timeout*HZ)) 484 if(time_after(jiffies, start+startup_timeout*HZ))
448 { 485 {
449 status = rx_readl(dev, IndexRegs.Mailbox[7]);
450 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 486 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
451 dev->name, instance, status); 487 dev->name, instance, status);
452 goto error_iounmap; 488 goto error_iounmap;
@@ -481,11 +517,6 @@ int aac_rx_init(struct aac_dev *dev)
481 if (dev->new_comm_interface) 517 if (dev->new_comm_interface)
482 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); 518 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
483 519
484 /*
485 * Tell the adapter that all is configured, and it can start
486 * accepting requests
487 */
488 aac_rx_start_adapter(dev);
489 return 0; 520 return 0;
490 521
491error_irq: 522error_irq:
@@ -496,3 +527,23 @@ error_iounmap:
496 527
497 return -1; 528 return -1;
498} 529}
530
531int aac_rx_init(struct aac_dev *dev)
532{
533 int retval;
534
535 /*
536 * Fill in the function dispatch table.
537 */
538 dev->a_ops.adapter_ioremap = aac_rx_ioremap;
539
540 retval = _aac_rx_init(dev);
541 if (!retval) {
542 /*
543 * Tell the adapter that all is configured, and it can
544 * start accepting requests
545 */
546 aac_rx_start_adapter(dev);
547 }
548 return retval;
549}
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index cd586cc8f9be..f906ead239dd 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -281,6 +281,21 @@ static int aac_sa_check_health(struct aac_dev *dev)
281} 281}
282 282
283/** 283/**
284 * aac_sa_ioremap
285 * @size: mapping resize request
286 *
287 */
288static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
289{
290 if (!size) {
291 iounmap(dev->regs.sa);
292 return 0;
293 }
294 dev->base = dev->regs.sa = ioremap(dev->scsi_host_ptr->base, size);
295 return (dev->base == NULL) ? -1 : 0;
296}
297
298/**
284 * aac_sa_init - initialize an ARM based AAC card 299 * aac_sa_init - initialize an ARM based AAC card
285 * @dev: device to configure 300 * @dev: device to configure
286 * 301 *
@@ -299,6 +314,11 @@ int aac_sa_init(struct aac_dev *dev)
299 instance = dev->id; 314 instance = dev->id;
300 name = dev->name; 315 name = dev->name;
301 316
317 if (aac_sa_ioremap(dev, dev->base_size)) {
318 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
319 goto error_iounmap;
320 }
321
302 /* 322 /*
303 * Check to see if the board failed any self tests. 323 * Check to see if the board failed any self tests.
304 */ 324 */
@@ -341,6 +361,7 @@ int aac_sa_init(struct aac_dev *dev)
341 dev->a_ops.adapter_notify = aac_sa_notify_adapter; 361 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
342 dev->a_ops.adapter_sync_cmd = sa_sync_cmd; 362 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
343 dev->a_ops.adapter_check_health = aac_sa_check_health; 363 dev->a_ops.adapter_check_health = aac_sa_check_health;
364 dev->a_ops.adapter_ioremap = aac_sa_ioremap;
344 365
345 /* 366 /*
346 * First clear out all interrupts. Then enable the one's that 367 * First clear out all interrupts. Then enable the one's that
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index e32b4ab2f8fb..773f02e3b10b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -888,10 +888,6 @@ typedef unsigned char uchar;
888#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F) 888#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F)
889#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7) 889#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7)
890#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF)) 890#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF))
891#define ASC_PCI_VENDORID 0x10CD
892#define ASC_PCI_DEVICEID_1200A 0x1100
893#define ASC_PCI_DEVICEID_1200B 0x1200
894#define ASC_PCI_DEVICEID_ULTRA 0x1300
895#define ASC_PCI_REVISION_3150 0x02 891#define ASC_PCI_REVISION_3150 0x02
896#define ASC_PCI_REVISION_3050 0x03 892#define ASC_PCI_REVISION_3050 0x03
897 893
@@ -899,6 +895,14 @@ typedef unsigned char uchar;
899#define ASC_DVCLIB_CALL_FAILED (0) 895#define ASC_DVCLIB_CALL_FAILED (0)
900#define ASC_DVCLIB_CALL_ERROR (-1) 896#define ASC_DVCLIB_CALL_ERROR (-1)
901 897
898#define PCI_VENDOR_ID_ASP 0x10cd
899#define PCI_DEVICE_ID_ASP_1200A 0x1100
900#define PCI_DEVICE_ID_ASP_ABP940 0x1200
901#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
902#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
903#define PCI_DEVICE_ID_38C0800_REV1 0x2500
904#define PCI_DEVICE_ID_38C1600_REV1 0x2700
905
902/* 906/*
903 * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. 907 * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists.
904 * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() 908 * The SRB structure will have to be changed and the ASC_SRB2SCSIQ()
@@ -1492,8 +1496,6 @@ typedef struct asc_dvc_cfg {
1492#define ASC_INIT_STATE_END_INQUIRY 0x0080 1496#define ASC_INIT_STATE_END_INQUIRY 0x0080
1493#define ASC_INIT_RESET_SCSI_DONE 0x0100 1497#define ASC_INIT_RESET_SCSI_DONE 0x0100
1494#define ASC_INIT_STATE_WITHOUT_EEP 0x8000 1498#define ASC_INIT_STATE_WITHOUT_EEP 0x8000
1495#define ASC_PCI_DEVICE_ID_REV_A 0x1100
1496#define ASC_PCI_DEVICE_ID_REV_B 0x1200
1497#define ASC_BUG_FIX_IF_NOT_DWB 0x0001 1499#define ASC_BUG_FIX_IF_NOT_DWB 0x0001
1498#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 1500#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002
1499#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 1501#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
@@ -2100,12 +2102,6 @@ STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
2100#define ADV_NUM_PAGE_CROSSING \ 2102#define ADV_NUM_PAGE_CROSSING \
2101 ((ADV_SG_TOTAL_MEM_SIZE + (ADV_PAGE_SIZE - 1))/ADV_PAGE_SIZE) 2103 ((ADV_SG_TOTAL_MEM_SIZE + (ADV_PAGE_SIZE - 1))/ADV_PAGE_SIZE)
2102 2104
2103/* a_condor.h */
2104#define ADV_PCI_VENDOR_ID 0x10CD
2105#define ADV_PCI_DEVICE_ID_REV_A 0x2300
2106#define ADV_PCI_DEVID_38C0800_REV1 0x2500
2107#define ADV_PCI_DEVID_38C1600_REV1 0x2700
2108
2109#define ADV_EEP_DVC_CFG_BEGIN (0x00) 2105#define ADV_EEP_DVC_CFG_BEGIN (0x00)
2110#define ADV_EEP_DVC_CFG_END (0x15) 2106#define ADV_EEP_DVC_CFG_END (0x15)
2111#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ 2107#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */
@@ -3569,14 +3565,7 @@ typedef struct scsi_cmnd REQ, *REQP;
3569#define PCI_MAX_SLOT 0x1F 3565#define PCI_MAX_SLOT 0x1F
3570#define PCI_MAX_BUS 0xFF 3566#define PCI_MAX_BUS 0xFF
3571#define PCI_IOADDRESS_MASK 0xFFFE 3567#define PCI_IOADDRESS_MASK 0xFFFE
3572#define ASC_PCI_VENDORID 0x10CD
3573#define ASC_PCI_DEVICE_ID_CNT 6 /* PCI Device ID count. */ 3568#define ASC_PCI_DEVICE_ID_CNT 6 /* PCI Device ID count. */
3574#define ASC_PCI_DEVICE_ID_1100 0x1100
3575#define ASC_PCI_DEVICE_ID_1200 0x1200
3576#define ASC_PCI_DEVICE_ID_1300 0x1300
3577#define ASC_PCI_DEVICE_ID_2300 0x2300 /* ASC-3550 */
3578#define ASC_PCI_DEVICE_ID_2500 0x2500 /* ASC-38C0800 */
3579#define ASC_PCI_DEVICE_ID_2700 0x2700 /* ASC-38C1600 */
3580 3569
3581#ifndef ADVANSYS_STATS 3570#ifndef ADVANSYS_STATS
3582#define ASC_STATS(shp, counter) 3571#define ASC_STATS(shp, counter)
@@ -4330,12 +4319,12 @@ advansys_detect(struct scsi_host_template *tpnt)
4330 struct pci_dev *pci_devp = NULL; 4319 struct pci_dev *pci_devp = NULL;
4331 int pci_device_id_cnt = 0; 4320 int pci_device_id_cnt = 0;
4332 unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = { 4321 unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = {
4333 ASC_PCI_DEVICE_ID_1100, 4322 PCI_DEVICE_ID_ASP_1200A,
4334 ASC_PCI_DEVICE_ID_1200, 4323 PCI_DEVICE_ID_ASP_ABP940,
4335 ASC_PCI_DEVICE_ID_1300, 4324 PCI_DEVICE_ID_ASP_ABP940U,
4336 ASC_PCI_DEVICE_ID_2300, 4325 PCI_DEVICE_ID_ASP_ABP940UW,
4337 ASC_PCI_DEVICE_ID_2500, 4326 PCI_DEVICE_ID_38C0800_REV1,
4338 ASC_PCI_DEVICE_ID_2700 4327 PCI_DEVICE_ID_38C1600_REV1
4339 }; 4328 };
4340 ADV_PADDR pci_memory_address; 4329 ADV_PADDR pci_memory_address;
4341#endif /* CONFIG_PCI */ 4330#endif /* CONFIG_PCI */
@@ -4471,7 +4460,7 @@ advansys_detect(struct scsi_host_template *tpnt)
4471 4460
4472 /* Find all PCI cards. */ 4461 /* Find all PCI cards. */
4473 while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) { 4462 while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) {
4474 if ((pci_devp = pci_find_device(ASC_PCI_VENDORID, 4463 if ((pci_devp = pci_find_device(PCI_VENDOR_ID_ASP,
4475 pci_device_id[pci_device_id_cnt], pci_devp)) == 4464 pci_device_id[pci_device_id_cnt], pci_devp)) ==
4476 NULL) { 4465 NULL) {
4477 pci_device_id_cnt++; 4466 pci_device_id_cnt++;
@@ -4575,9 +4564,9 @@ advansys_detect(struct scsi_host_template *tpnt)
4575 */ 4564 */
4576#ifdef CONFIG_PCI 4565#ifdef CONFIG_PCI
4577 if (asc_bus[bus] == ASC_IS_PCI && 4566 if (asc_bus[bus] == ASC_IS_PCI &&
4578 (pci_devp->device == ASC_PCI_DEVICE_ID_2300 || 4567 (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW ||
4579 pci_devp->device == ASC_PCI_DEVICE_ID_2500 || 4568 pci_devp->device == PCI_DEVICE_ID_38C0800_REV1 ||
4580 pci_devp->device == ASC_PCI_DEVICE_ID_2700)) 4569 pci_devp->device == PCI_DEVICE_ID_38C1600_REV1))
4581 { 4570 {
4582 boardp->flags |= ASC_IS_WIDE_BOARD; 4571 boardp->flags |= ASC_IS_WIDE_BOARD;
4583 } 4572 }
@@ -4600,11 +4589,11 @@ advansys_detect(struct scsi_host_template *tpnt)
4600 adv_dvc_varp->isr_callback = adv_isr_callback; 4589 adv_dvc_varp->isr_callback = adv_isr_callback;
4601 adv_dvc_varp->async_callback = adv_async_callback; 4590 adv_dvc_varp->async_callback = adv_async_callback;
4602#ifdef CONFIG_PCI 4591#ifdef CONFIG_PCI
4603 if (pci_devp->device == ASC_PCI_DEVICE_ID_2300) 4592 if (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW)
4604 { 4593 {
4605 ASC_DBG(1, "advansys_detect: ASC-3550\n"); 4594 ASC_DBG(1, "advansys_detect: ASC-3550\n");
4606 adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; 4595 adv_dvc_varp->chip_type = ADV_CHIP_ASC3550;
4607 } else if (pci_devp->device == ASC_PCI_DEVICE_ID_2500) 4596 } else if (pci_devp->device == PCI_DEVICE_ID_38C0800_REV1)
4608 { 4597 {
4609 ASC_DBG(1, "advansys_detect: ASC-38C0800\n"); 4598 ASC_DBG(1, "advansys_detect: ASC-38C0800\n");
4610 adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; 4599 adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800;
@@ -11922,7 +11911,7 @@ AscInitGetConfig(
11922 PCIRevisionID = DvcReadPCIConfigByte(asc_dvc, 11911 PCIRevisionID = DvcReadPCIConfigByte(asc_dvc,
11923 AscPCIConfigRevisionIDRegister); 11912 AscPCIConfigRevisionIDRegister);
11924 11913
11925 if (PCIVendorID != ASC_PCI_VENDORID) { 11914 if (PCIVendorID != PCI_VENDOR_ID_ASP) {
11926 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11915 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11927 } 11916 }
11928 prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc, 11917 prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc,
@@ -11942,15 +11931,15 @@ AscInitGetConfig(
11942 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11931 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11943 } 11932 }
11944 } 11933 }
11945 if ((PCIDeviceID == ASC_PCI_DEVICEID_1200A) || 11934 if ((PCIDeviceID == PCI_DEVICE_ID_ASP_1200A) ||
11946 (PCIDeviceID == ASC_PCI_DEVICEID_1200B)) { 11935 (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940)) {
11947 DvcWritePCIConfigByte(asc_dvc, 11936 DvcWritePCIConfigByte(asc_dvc,
11948 AscPCIConfigLatencyTimer, 0x00); 11937 AscPCIConfigLatencyTimer, 0x00);
11949 if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) 11938 if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer)
11950 != 0x00) { 11939 != 0x00) {
11951 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11940 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11952 } 11941 }
11953 } else if (PCIDeviceID == ASC_PCI_DEVICEID_ULTRA) { 11942 } else if (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940U) {
11954 if (DvcReadPCIConfigByte(asc_dvc, 11943 if (DvcReadPCIConfigByte(asc_dvc,
11955 AscPCIConfigLatencyTimer) < 0x20) { 11944 AscPCIConfigLatencyTimer) < 0x20) {
11956 DvcWritePCIConfigByte(asc_dvc, 11945 DvcWritePCIConfigByte(asc_dvc,
@@ -12037,8 +12026,8 @@ AscInitFromAscDvcVar(
12037 AscSetChipCfgMsw(iop_base, cfg_msw); 12026 AscSetChipCfgMsw(iop_base, cfg_msw);
12038 if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { 12027 if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) {
12039 } else { 12028 } else {
12040 if ((pci_device_id == ASC_PCI_DEVICE_ID_REV_A) || 12029 if ((pci_device_id == PCI_DEVICE_ID_ASP_1200A) ||
12041 (pci_device_id == ASC_PCI_DEVICE_ID_REV_B)) { 12030 (pci_device_id == PCI_DEVICE_ID_ASP_ABP940)) {
12042 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; 12031 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB;
12043 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; 12032 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
12044 } 12033 }
@@ -14275,8 +14264,8 @@ Default_38C0800_EEPROM_Config __initdata = {
14275 0, /* 55 reserved */ 14264 0, /* 55 reserved */
14276 0, /* 56 cisptr_lsw */ 14265 0, /* 56 cisptr_lsw */
14277 0, /* 57 cisprt_msw */ 14266 0, /* 57 cisprt_msw */
14278 ADV_PCI_VENDOR_ID, /* 58 subsysvid */ 14267 PCI_VENDOR_ID_ASP, /* 58 subsysvid */
14279 ADV_PCI_DEVID_38C0800_REV1, /* 59 subsysid */ 14268 PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */
14280 0, /* 60 reserved */ 14269 0, /* 60 reserved */
14281 0, /* 61 reserved */ 14270 0, /* 61 reserved */
14282 0, /* 62 reserved */ 14271 0, /* 62 reserved */
@@ -14405,8 +14394,8 @@ Default_38C1600_EEPROM_Config __initdata = {
14405 0, /* 55 reserved */ 14394 0, /* 55 reserved */
14406 0, /* 56 cisptr_lsw */ 14395 0, /* 56 cisptr_lsw */
14407 0, /* 57 cisprt_msw */ 14396 0, /* 57 cisprt_msw */
14408 ADV_PCI_VENDOR_ID, /* 58 subsysvid */ 14397 PCI_VENDOR_ID_ASP, /* 58 subsysvid */
14409 ADV_PCI_DEVID_38C1600_REV1, /* 59 subsysid */ 14398 PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */
14410 0, /* 60 reserved */ 14399 0, /* 60 reserved */
14411 0, /* 61 reserved */ 14400 0, /* 61 reserved */
14412 0, /* 62 reserved */ 14401 0, /* 62 reserved */
@@ -18225,3 +18214,22 @@ AdvInquiryHandling(
18225 } 18214 }
18226} 18215}
18227MODULE_LICENSE("Dual BSD/GPL"); 18216MODULE_LICENSE("Dual BSD/GPL");
18217
18218/* PCI Devices supported by this driver */
18219static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
18220 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
18221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18222 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
18223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18224 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U,
18225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18226 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW,
18227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18228 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1,
18229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18230 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1,
18231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18232 { }
18233};
18234MODULE_DEVICE_TABLE(pci, advansys_pci_tbl);
18235
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index f974869ea323..fb6a476eb873 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -253,6 +253,7 @@
253#include <linux/isapnp.h> 253#include <linux/isapnp.h>
254#include <linux/spinlock.h> 254#include <linux/spinlock.h>
255#include <linux/workqueue.h> 255#include <linux/workqueue.h>
256#include <linux/list.h>
256#include <asm/semaphore.h> 257#include <asm/semaphore.h>
257#include <scsi/scsicam.h> 258#include <scsi/scsicam.h>
258 259
@@ -262,6 +263,8 @@
262#include <scsi/scsi_transport_spi.h> 263#include <scsi/scsi_transport_spi.h>
263#include "aha152x.h" 264#include "aha152x.h"
264 265
266static LIST_HEAD(aha152x_host_list);
267
265 268
266/* DEFINES */ 269/* DEFINES */
267 270
@@ -423,8 +426,6 @@ MODULE_DEVICE_TABLE(isapnp, id_table);
423 426
424#endif /* !PCMCIA */ 427#endif /* !PCMCIA */
425 428
426static int registered_count=0;
427static struct Scsi_Host *aha152x_host[2];
428static struct scsi_host_template aha152x_driver_template; 429static struct scsi_host_template aha152x_driver_template;
429 430
430/* 431/*
@@ -541,6 +542,7 @@ struct aha152x_hostdata {
541#ifdef __ISAPNP__ 542#ifdef __ISAPNP__
542 struct pnp_dev *pnpdev; 543 struct pnp_dev *pnpdev;
543#endif 544#endif
545 struct list_head host_list;
544}; 546};
545 547
546 548
@@ -755,20 +757,9 @@ static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
755 return ptr; 757 return ptr;
756} 758}
757 759
758static inline struct Scsi_Host *lookup_irq(int irqno)
759{
760 int i;
761
762 for(i=0; i<ARRAY_SIZE(aha152x_host); i++)
763 if(aha152x_host[i] && aha152x_host[i]->irq==irqno)
764 return aha152x_host[i];
765
766 return NULL;
767}
768
769static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs) 760static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs)
770{ 761{
771 struct Scsi_Host *shpnt = lookup_irq(irqno); 762 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
772 763
773 if (!shpnt) { 764 if (!shpnt) {
774 printk(KERN_ERR "aha152x: catched software interrupt %d for unknown controller.\n", irqno); 765 printk(KERN_ERR "aha152x: catched software interrupt %d for unknown controller.\n", irqno);
@@ -791,10 +782,11 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
791 return NULL; 782 return NULL;
792 } 783 }
793 784
794 /* need to have host registered before triggering any interrupt */
795 aha152x_host[registered_count] = shpnt;
796
797 memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt)); 785 memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt));
786 INIT_LIST_HEAD(&HOSTDATA(shpnt)->host_list);
787
788 /* need to have host registered before triggering any interrupt */
789 list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);
798 790
799 shpnt->io_port = setup->io_port; 791 shpnt->io_port = setup->io_port;
800 shpnt->n_io_port = IO_RANGE; 792 shpnt->n_io_port = IO_RANGE;
@@ -907,12 +899,10 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
907 899
908 scsi_scan_host(shpnt); 900 scsi_scan_host(shpnt);
909 901
910 registered_count++;
911
912 return shpnt; 902 return shpnt;
913 903
914out_host_put: 904out_host_put:
915 aha152x_host[registered_count]=NULL; 905 list_del(&HOSTDATA(shpnt)->host_list);
916 scsi_host_put(shpnt); 906 scsi_host_put(shpnt);
917 907
918 return NULL; 908 return NULL;
@@ -937,6 +927,7 @@ void aha152x_release(struct Scsi_Host *shpnt)
937#endif 927#endif
938 928
939 scsi_remove_host(shpnt); 929 scsi_remove_host(shpnt);
930 list_del(&HOSTDATA(shpnt)->host_list);
940 scsi_host_put(shpnt); 931 scsi_host_put(shpnt);
941} 932}
942 933
@@ -1459,9 +1450,12 @@ static struct work_struct aha152x_tq;
1459 */ 1450 */
1460static void run(void) 1451static void run(void)
1461{ 1452{
1462 int i; 1453 struct aha152x_hostdata *hd;
1463 for (i = 0; i<ARRAY_SIZE(aha152x_host); i++) { 1454
1464 is_complete(aha152x_host[i]); 1455 list_for_each_entry(hd, &aha152x_host_list, host_list) {
1456 struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
1457
1458 is_complete(shost);
1465 } 1459 }
1466} 1460}
1467 1461
@@ -1471,7 +1465,7 @@ static void run(void)
1471 */ 1465 */
1472static irqreturn_t intr(int irqno, void *dev_id, struct pt_regs *regs) 1466static irqreturn_t intr(int irqno, void *dev_id, struct pt_regs *regs)
1473{ 1467{
1474 struct Scsi_Host *shpnt = lookup_irq(irqno); 1468 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
1475 unsigned long flags; 1469 unsigned long flags;
1476 unsigned char rev, dmacntrl0; 1470 unsigned char rev, dmacntrl0;
1477 1471
@@ -3953,16 +3947,17 @@ static int __init aha152x_init(void)
3953#endif 3947#endif
3954 } 3948 }
3955 3949
3956 return registered_count>0; 3950 return 1;
3957} 3951}
3958 3952
3959static void __exit aha152x_exit(void) 3953static void __exit aha152x_exit(void)
3960{ 3954{
3961 int i; 3955 struct aha152x_hostdata *hd;
3956
3957 list_for_each_entry(hd, &aha152x_host_list, host_list) {
3958 struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
3962 3959
3963 for(i=0; i<ARRAY_SIZE(setup); i++) { 3960 aha152x_release(shost);
3964 aha152x_release(aha152x_host[i]);
3965 aha152x_host[i]=NULL;
3966 } 3961 }
3967} 3962}
3968 3963
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 998999c0a972..c7eeaced324a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -321,7 +321,7 @@ MODULE_LICENSE("Dual BSD/GPL");
321MODULE_VERSION(AIC79XX_DRIVER_VERSION); 321MODULE_VERSION(AIC79XX_DRIVER_VERSION);
322module_param(aic79xx, charp, 0444); 322module_param(aic79xx, charp, 0444);
323MODULE_PARM_DESC(aic79xx, 323MODULE_PARM_DESC(aic79xx,
324"period delimited, options string.\n" 324"period-delimited options string:\n"
325" verbose Enable verbose/diagnostic logging\n" 325" verbose Enable verbose/diagnostic logging\n"
326" allow_memio Allow device registers to be memory mapped\n" 326" allow_memio Allow device registers to be memory mapped\n"
327" debug Bitmask of debug values to enable\n" 327" debug Bitmask of debug values to enable\n"
@@ -346,7 +346,7 @@ MODULE_PARM_DESC(aic79xx,
346" Shorten the selection timeout to 128ms\n" 346" Shorten the selection timeout to 128ms\n"
347"\n" 347"\n"
348" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" 348" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
349"\n"); 349);
350 350
351static void ahd_linux_handle_scsi_status(struct ahd_softc *, 351static void ahd_linux_handle_scsi_status(struct ahd_softc *,
352 struct scsi_device *, 352 struct scsi_device *,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index aa4be8a31415..64c8b88a429f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -341,7 +341,7 @@ MODULE_LICENSE("Dual BSD/GPL");
341MODULE_VERSION(AIC7XXX_DRIVER_VERSION); 341MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
342module_param(aic7xxx, charp, 0444); 342module_param(aic7xxx, charp, 0444);
343MODULE_PARM_DESC(aic7xxx, 343MODULE_PARM_DESC(aic7xxx,
344"period delimited, options string.\n" 344"period-delimited options string:\n"
345" verbose Enable verbose/diagnostic logging\n" 345" verbose Enable verbose/diagnostic logging\n"
346" allow_memio Allow device registers to be memory mapped\n" 346" allow_memio Allow device registers to be memory mapped\n"
347" debug Bitmask of debug values to enable\n" 347" debug Bitmask of debug values to enable\n"
@@ -2539,15 +2539,28 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
2539static void ahc_linux_get_signalling(struct Scsi_Host *shost) 2539static void ahc_linux_get_signalling(struct Scsi_Host *shost)
2540{ 2540{
2541 struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; 2541 struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
2542 u8 mode = ahc_inb(ahc, SBLKCTL); 2542 unsigned long flags;
2543 u8 mode;
2543 2544
2544 if (mode & ENAB40) 2545 if (!(ahc->features & AHC_ULTRA2)) {
2545 spi_signalling(shost) = SPI_SIGNAL_LVD; 2546 /* non-LVD chipset, may not have SBLKCTL reg */
2546 else if (mode & ENAB20)
2547 spi_signalling(shost) = 2547 spi_signalling(shost) =
2548 ahc->features & AHC_HVD ? 2548 ahc->features & AHC_HVD ?
2549 SPI_SIGNAL_HVD : 2549 SPI_SIGNAL_HVD :
2550 SPI_SIGNAL_SE; 2550 SPI_SIGNAL_SE;
2551 return;
2552 }
2553
2554 ahc_lock(ahc, &flags);
2555 ahc_pause(ahc);
2556 mode = ahc_inb(ahc, SBLKCTL);
2557 ahc_unpause(ahc);
2558 ahc_unlock(ahc, &flags);
2559
2560 if (mode & ENAB40)
2561 spi_signalling(shost) = SPI_SIGNAL_LVD;
2562 else if (mode & ENAB20)
2563 spi_signalling(shost) = SPI_SIGNAL_SE;
2551 else 2564 else
2552 spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; 2565 spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
2553} 2566}
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 3f85b5e978f1..5dcef48d414f 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -249,8 +249,6 @@
249#include <linux/stat.h> 249#include <linux/stat.h>
250#include <linux/slab.h> /* for kmalloc() */ 250#include <linux/slab.h> /* for kmalloc() */
251 251
252#include <linux/config.h> /* for CONFIG_PCI */
253
254#define AIC7XXX_C_VERSION "5.2.6" 252#define AIC7XXX_C_VERSION "5.2.6"
255 253
256#define ALL_TARGETS -1 254#define ALL_TARGETS -1
@@ -9196,7 +9194,7 @@ aic7xxx_detect(struct scsi_host_template *template)
9196 for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++) 9194 for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++)
9197 { 9195 {
9198 pdev = NULL; 9196 pdev = NULL;
9199 while ((pdev = pci_find_device(aic_pdevs[i].vendor_id, 9197 while ((pdev = pci_get_device(aic_pdevs[i].vendor_id,
9200 aic_pdevs[i].device_id, 9198 aic_pdevs[i].device_id,
9201 pdev))) { 9199 pdev))) {
9202 if (pci_enable_device(pdev)) 9200 if (pci_enable_device(pdev))
@@ -9653,6 +9651,9 @@ aic7xxx_detect(struct scsi_host_template *template)
9653 */ 9651 */
9654 aic7xxx_configure_bugs(temp_p); 9652 aic7xxx_configure_bugs(temp_p);
9655 9653
9654 /* Hold a pci device reference */
9655 pci_dev_get(temp_p->pdev);
9656
9656 if ( list_p == NULL ) 9657 if ( list_p == NULL )
9657 { 9658 {
9658 list_p = current_p = temp_p; 9659 list_p = current_p = temp_p;
@@ -10989,8 +10990,10 @@ aic7xxx_release(struct Scsi_Host *host)
10989 if(!p->pdev) 10990 if(!p->pdev)
10990 release_region(p->base, MAXREG - MINREG); 10991 release_region(p->base, MAXREG - MINREG);
10991#ifdef CONFIG_PCI 10992#ifdef CONFIG_PCI
10992 else 10993 else {
10993 pci_release_regions(p->pdev); 10994 pci_release_regions(p->pdev);
10995 pci_dev_put(p->pdev);
10996 }
10994#endif 10997#endif
10995 prev = NULL; 10998 prev = NULL;
10996 next = first_aic7xxx; 10999 next = first_aic7xxx;
diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig
new file mode 100644
index 000000000000..0ed391d8ee84
--- /dev/null
+++ b/drivers/scsi/aic94xx/Kconfig
@@ -0,0 +1,41 @@
1#
2# Kernel configuration file for aic94xx SAS/SATA driver.
3#
4# Copyright (c) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (c) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the aic94xx driver.
10#
11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as
13# published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The aic94xx driver is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with Aic94xx Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_AIC94XX
28 tristate "Adaptec AIC94xx SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 help
32 This driver supports Adaptec's SAS/SATA 3Gb/s 64 bit PCI-X
33 AIC94xx chip based host adapters.
34
35config AIC94XX_DEBUG
36 bool "Compile in debug mode"
37 default y
38 depends on SCSI_AIC94XX
39 help
40 Compiles the aic94xx driver in debug mode. In debug mode,
41 the driver prints some messages to the console.
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
new file mode 100644
index 000000000000..e6b70123940c
--- /dev/null
+++ b/drivers/scsi/aic94xx/Makefile
@@ -0,0 +1,39 @@
1#
2# Makefile for Adaptec aic94xx SAS/SATA driver.
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the the aic94xx driver.
10#
11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as
13# published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The aic94xx driver is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with the aic94xx driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24
25ifeq ($(CONFIG_AIC94XX_DEBUG),y)
26 EXTRA_CFLAGS += -DASD_DEBUG -DASD_ENTER_EXIT
27endif
28
29obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o
30aic94xx-y += aic94xx_init.o \
31 aic94xx_hwi.o \
32 aic94xx_reg.o \
33 aic94xx_sds.o \
34 aic94xx_seq.o \
35 aic94xx_dump.o \
36 aic94xx_scb.o \
37 aic94xx_dev.o \
38 aic94xx_tmf.o \
39 aic94xx_task.o
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
new file mode 100644
index 000000000000..1bd5b4ecf3d5
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -0,0 +1,114 @@
1/*
2 * Aic94xx SAS/SATA driver header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * $Id: //depot/aic94xx/aic94xx.h#31 $
26 */
27
28#ifndef _AIC94XX_H_
29#define _AIC94XX_H_
30
31#include <linux/slab.h>
32#include <linux/ctype.h>
33#include <scsi/libsas.h>
34
35#define ASD_DRIVER_NAME "aic94xx"
36#define ASD_DRIVER_DESCRIPTION "Adaptec aic94xx SAS/SATA driver"
37
38#define asd_printk(fmt, ...) printk(KERN_NOTICE ASD_DRIVER_NAME ": " fmt, ## __VA_ARGS__)
39
40#ifdef ASD_ENTER_EXIT
41#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
42 __FUNCTION__)
43#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
44 __FUNCTION__)
45#else
46#define ENTER
47#define EXIT
48#endif
49
50#ifdef ASD_DEBUG
51#define ASD_DPRINTK asd_printk
52#else
53#define ASD_DPRINTK(fmt, ...)
54#endif
55
56/* 2*ITNL timeout + 1 second */
57#define AIC94XX_SCB_TIMEOUT (5*HZ)
58
59extern kmem_cache_t *asd_dma_token_cache;
60extern kmem_cache_t *asd_ascb_cache;
61extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
62
63static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
64{
65 int i;
66 for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
67 snprintf(p, 3, "%02X", sas_addr[i]);
68 *p = '\0';
69}
70
71static inline void asd_destringify_sas_addr(u8 *sas_addr, const char *p)
72{
73 int i;
74 for (i = 0; i < SAS_ADDR_SIZE; i++) {
75 u8 h, l;
76 if (!*p)
77 break;
78 h = isdigit(*p) ? *p-'0' : *p-'A'+10;
79 p++;
80 l = isdigit(*p) ? *p-'0' : *p-'A'+10;
81 p++;
82 sas_addr[i] = (h<<4) | l;
83 }
84}
85
86struct asd_ha_struct;
87struct asd_ascb;
88
89int asd_read_ocm(struct asd_ha_struct *asd_ha);
90int asd_read_flash(struct asd_ha_struct *asd_ha);
91
92int asd_dev_found(struct domain_device *dev);
93void asd_dev_gone(struct domain_device *dev);
94
95void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
96
97int asd_execute_task(struct sas_task *, int num, unsigned long gfp_flags);
98
99/* ---------- TMFs ---------- */
100int asd_abort_task(struct sas_task *);
101int asd_abort_task_set(struct domain_device *, u8 *lun);
102int asd_clear_aca(struct domain_device *, u8 *lun);
103int asd_clear_task_set(struct domain_device *, u8 *lun);
104int asd_lu_reset(struct domain_device *, u8 *lun);
105int asd_query_task(struct sas_task *);
106
107/* ---------- Adapter and Port management ---------- */
108int asd_clear_nexus_port(struct asd_sas_port *port);
109int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha);
110
111/* ---------- Phy Management ---------- */
112int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg);
113
114#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
new file mode 100644
index 000000000000..6f8901b748f7
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -0,0 +1,353 @@
1/*
2 * Aic94xx SAS/SATA DDB management
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * $Id: //depot/aic94xx/aic94xx_dev.c#21 $
26 */
27
28#include "aic94xx.h"
29#include "aic94xx_hwi.h"
30#include "aic94xx_reg.h"
31#include "aic94xx_sas.h"
32
33#define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \
34 (_ha)->hw_prof.max_ddbs)
35#define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
36#define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
37
38static inline int asd_get_ddb(struct asd_ha_struct *asd_ha)
39{
40 unsigned long flags;
41 int ddb, i;
42
43 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
44 ddb = FIND_FREE_DDB(asd_ha);
45 if (ddb >= asd_ha->hw_prof.max_ddbs) {
46 ddb = -ENOMEM;
47 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
48 goto out;
49 }
50 SET_DDB(ddb, asd_ha);
51 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
52
53 for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
54 asd_ddbsite_write_dword(asd_ha, ddb, i, 0);
55out:
56 return ddb;
57}
58
59#define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag)
60#define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr)
61#define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head)
62#define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type)
63#define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, conn_mask)
64#define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags)
65#define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2)
66#define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail)
67#define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail)
68#define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb)
69#define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn)
70#define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts)
71#define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr)
72#define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask)
73#define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags)
74#define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status)
75#define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr)
76#define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout)
77
78static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
79{
80 unsigned long flags;
81
82 if (!ddb || ddb >= 0xFFFF)
83 return;
84 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED);
85 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
86 CLEAR_DDB(ddb, asd_ha);
87 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
88}
89
90static inline void asd_set_ddb_type(struct domain_device *dev)
91{
92 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
93 int ddb = (int) (unsigned long) dev->lldd_dev;
94
95 if (dev->dev_type == SATA_PM_PORT)
96 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
97 else if (dev->tproto)
98 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
99 else
100 asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR);
101}
102
103static int asd_init_sata_tag_ddb(struct domain_device *dev)
104{
105 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
106 int ddb, i;
107
108 ddb = asd_get_ddb(asd_ha);
109 if (ddb < 0)
110 return ddb;
111
112 for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2)
113 asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
114
115 asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
116 SISTER_DDB, ddb);
117 return 0;
118}
119
120static inline int asd_init_sata(struct domain_device *dev)
121{
122 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
123 int ddb = (int) (unsigned long) dev->lldd_dev;
124 u32 qdepth = 0;
125 int res = 0;
126
127 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
128 if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
129 dev->sata_dev.identify_device &&
130 dev->sata_dev.identify_device[10] != 0) {
131 u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
132 u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
133
134 if (w76 & 0x100) /* NCQ? */
135 qdepth = (w75 & 0x1F) + 1;
136 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
137 (1<<qdepth)-1);
138 asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
139 }
140 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
141 dev->dev_type == SATA_PM_PORT) {
142 struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
143 dev->frame_rcvd;
144 asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
145 }
146 asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
147 if (qdepth > 0)
148 res = asd_init_sata_tag_ddb(dev);
149 return res;
150}
151
152static int asd_init_target_ddb(struct domain_device *dev)
153{
154 int ddb, i;
155 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
156 u8 flags = 0;
157
158 ddb = asd_get_ddb(asd_ha);
159 if (ddb < 0)
160 return ddb;
161
162 dev->lldd_dev = (void *) (unsigned long) ddb;
163
164 asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE);
165 asd_ddbsite_write_byte(asd_ha, ddb, 1, 0);
166 asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF);
167 for (i = 0; i < SAS_ADDR_SIZE; i++)
168 asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i,
169 dev->sas_addr[i]);
170 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF);
171 asd_set_ddb_type(dev);
172 asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
173 if (dev->port->oob_mode != SATA_OOB_MODE) {
174 flags |= OPEN_REQUIRED;
175 if ((dev->dev_type == SATA_DEV) ||
176 (dev->tproto & SAS_PROTO_STP)) {
177 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
178 if (rps_resp->frame_type == SMP_RESPONSE &&
179 rps_resp->function == SMP_REPORT_PHY_SATA &&
180 rps_resp->result == SMP_RESP_FUNC_ACC) {
181 if (rps_resp->rps.affil_valid)
182 flags |= STP_AFFIL_POL;
183 if (rps_resp->rps.affil_supp)
184 flags |= SUPPORTS_AFFIL;
185 }
186 } else {
187 flags |= CONCURRENT_CONN_SUPP;
188 if (!dev->parent &&
189 (dev->dev_type == EDGE_DEV ||
190 dev->dev_type == FANOUT_DEV))
191 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
192 4);
193 else
194 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
195 dev->pathways);
196 asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
197 }
198 }
199 if (dev->dev_type == SATA_PM)
200 flags |= SATA_MULTIPORT;
201 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
202
203 flags = 0;
204 if (dev->tproto & SAS_PROTO_STP)
205 flags |= STP_CL_POL_NO_TX;
206 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
207
208 asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF);
209 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
210 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
211
212 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) {
213 i = asd_init_sata(dev);
214 if (i < 0) {
215 asd_free_ddb(asd_ha, ddb);
216 return i;
217 }
218 }
219
220 if (dev->dev_type == SAS_END_DEV) {
221 struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
222 if (rdev->I_T_nexus_loss_timeout > 0)
223 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
224 min(rdev->I_T_nexus_loss_timeout,
225 (u16)ITNL_TIMEOUT_CONST));
226 else
227 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
228 (u16)ITNL_TIMEOUT_CONST);
229 }
230 return 0;
231}
232
233static int asd_init_sata_pm_table_ddb(struct domain_device *dev)
234{
235 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
236 int ddb, i;
237
238 ddb = asd_get_ddb(asd_ha);
239 if (ddb < 0)
240 return ddb;
241
242 for (i = 0; i < 32; i += 2)
243 asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
244
245 asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
246 SISTER_DDB, ddb);
247
248 return 0;
249}
250
251#define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags)
252#define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb)
253
254/**
255 * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port
256 * dev: pointer to domain device
257 *
258 * For SATA Port Multiplier Ports we need to allocate one SATA Port
259 * Multiplier Port DDB and depending on whether the target on it
260 * supports SATA II NCQ, one SATA Tag DDB.
261 */
262static int asd_init_sata_pm_port_ddb(struct domain_device *dev)
263{
264 int ddb, i, parent_ddb, pmtable_ddb;
265 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
266 u8 flags;
267
268 ddb = asd_get_ddb(asd_ha);
269 if (ddb < 0)
270 return ddb;
271
272 asd_set_ddb_type(dev);
273 flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET;
274 asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags);
275 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
276 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
277 asd_init_sata(dev);
278
279 parent_ddb = (int) (unsigned long) dev->parent->lldd_dev;
280 asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb);
281 pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB);
282 asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb);
283
284 if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) {
285 i = asd_init_sata_tag_ddb(dev);
286 if (i < 0) {
287 asd_free_ddb(asd_ha, ddb);
288 return i;
289 }
290 }
291 return 0;
292}
293
294static int asd_init_initiator_ddb(struct domain_device *dev)
295{
296 return -ENODEV;
297}
298
299/**
300 * asd_init_sata_pm_ddb -- SATA Port Multiplier
301 * dev: pointer to domain device
302 *
303 * For STP and direct-attached SATA Port Multipliers we need
304 * one target port DDB entry and one SATA PM table DDB entry.
305 */
306static int asd_init_sata_pm_ddb(struct domain_device *dev)
307{
308 int res = 0;
309
310 res = asd_init_target_ddb(dev);
311 if (res)
312 goto out;
313 res = asd_init_sata_pm_table_ddb(dev);
314 if (res)
315 asd_free_ddb(dev->port->ha->lldd_ha,
316 (int) (unsigned long) dev->lldd_dev);
317out:
318 return res;
319}
320
321int asd_dev_found(struct domain_device *dev)
322{
323 int res = 0;
324
325 switch (dev->dev_type) {
326 case SATA_PM:
327 res = asd_init_sata_pm_ddb(dev);
328 break;
329 case SATA_PM_PORT:
330 res = asd_init_sata_pm_port_ddb(dev);
331 break;
332 default:
333 if (dev->tproto)
334 res = asd_init_target_ddb(dev);
335 else
336 res = asd_init_initiator_ddb(dev);
337 }
338 return res;
339}
340
341void asd_dev_gone(struct domain_device *dev)
342{
343 int ddb, sister_ddb;
344 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
345
346 ddb = (int) (unsigned long) dev->lldd_dev;
347 sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB);
348
349 if (sister_ddb != 0xFFFF)
350 asd_free_ddb(asd_ha, sister_ddb);
351 asd_free_ddb(asd_ha, ddb);
352 dev->lldd_dev = NULL;
353}
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
new file mode 100644
index 000000000000..e6ade5996d95
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -0,0 +1,959 @@
1/*
2 * Aic94xx SAS/SATA driver dump interface.
3 *
4 * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * This file is licensed under GPLv2.
9 *
10 * This file is part of the aic94xx driver.
11 *
12 * The aic94xx driver is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; version 2 of the
15 * License.
16 *
17 * The aic94xx driver is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with the aic94xx driver; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 *
26 * 2005/07/14/LT Complete overhaul of this file. Update pages, register
27 * locations, names, etc. Make use of macros. Print more information.
28 * Print all cseq and lseq mip and mdp.
29 *
30 */
31
32#include "linux/pci.h"
33#include "aic94xx.h"
34#include "aic94xx_reg.h"
35#include "aic94xx_reg_def.h"
36#include "aic94xx_sas.h"
37
38#include "aic94xx_dump.h"
39
40#ifdef ASD_DEBUG
41
42#define MD(x) (1 << (x))
43#define MODE_COMMON (1 << 31)
44#define MODE_0_7 (0xFF)
45
/*
 * Description table of the LSEQ CIO register space: register name,
 * byte offset within the per-mode CIO window, register width in bits,
 * and a bitmap of the sequencer modes in which the register exists
 * (MD(n) per mode, MODE_COMMON for mode-independent registers,
 * MODE_0_7 for registers present in all of modes 0-7).
 * The table is terminated by an entry with a NULL name.
 */
static const struct lseq_cio_regs {
	char *name;	/* register name as printed in the dump */
	u32 offs;	/* byte offset within the CIO window */
	u8 width;	/* register width in bits: 8, 16 or 32 */
	u32 mode;	/* bitmap of modes the register is valid in */
} LSEQmCIOREGS[] = {
	{"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
	{"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
	{"LmREQMBX", 0x30, 32, MODE_COMMON },
	{"LmRSPMBX", 0x34, 32, MODE_COMMON },
	{"LmMnINT", 0x38, 32, MODE_0_7 },
	{"LmMnINTEN", 0x3C, 32, MODE_0_7 },
	{"LmXMTPRIMD", 0x40, 32, MODE_COMMON },
	{"LmXMTPRIMCS", 0x44, 8, MODE_COMMON },
	{"LmCONSTAT", 0x45, 8, MODE_COMMON },
	{"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) },
	{"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) },
	{"LmMnEXPHDRP", 0x48, 8, MD(0) },
	{"LmMnSASAALIGN", 0x48, 8, MD(1) },
	{"LmMnMSKHDRP", 0x49, 8, MD(0) },
	{"LmMnSTPALIGN", 0x49, 8, MD(1) },
	{"LmMnRCVHDRP", 0x4A, 8, MD(0) },
	{"LmMnXMTHDRP", 0x4A, 8, MD(1) },
	{"LmALIGNMODE", 0x4B, 8, MD(1) },
	{"LmMnEXPRCVCNT", 0x4C, 32, MD(0) },
	{"LmMnXMTCNT", 0x4C, 32, MD(1) },
	{"LmMnCURRTAG", 0x54, 16, MD(0) },
	{"LmMnPREVTAG", 0x56, 16, MD(0) },
	{"LmMnACKOFS", 0x58, 8, MD(1) },
	{"LmMnXFRLVL", 0x59, 8, MD(0)|MD(1) },
	{"LmMnSGDMACTL", 0x5A, 8, MD(0)|MD(1) },
	{"LmMnSGDMASTAT", 0x5B, 8, MD(0)|MD(1) },
	{"LmMnDDMACTL", 0x5C, 8, MD(0)|MD(1) },
	{"LmMnDDMASTAT", 0x5D, 8, MD(0)|MD(1) },
	{"LmMnDDMAMODE", 0x5E, 16, MD(0)|MD(1) },
	{"LmMnPIPECTL", 0x61, 8, MD(0)|MD(1) },
	{"LmMnACTSCB", 0x62, 16, MD(0)|MD(1) },
	{"LmMnSGBHADR", 0x64, 8, MD(0)|MD(1) },
	{"LmMnSGBADR", 0x65, 8, MD(0)|MD(1) },
	{"LmMnSGDCNT", 0x66, 8, MD(0)|MD(1) },
	{"LmMnSGDMADR", 0x68, 32, MD(0)|MD(1) },
	{"LmMnSGDMADR", 0x6C, 32, MD(0)|MD(1) },
	{"LmMnXFRCNT", 0x70, 32, MD(0)|MD(1) },
	{"LmMnXMTCRC", 0x74, 32, MD(1) },
	{"LmCURRTAG", 0x74, 16, MD(0) },
	{"LmPREVTAG", 0x76, 16, MD(0) },
	{"LmMnDPSEL", 0x7B, 8, MD(0)|MD(1) },
	{"LmDPTHSTAT", 0x7C, 8, MODE_COMMON },
	{"LmMnHOLDLVL", 0x7D, 8, MD(0) },
	{"LmMnSATAFS", 0x7E, 8, MD(1) },
	{"LmMnCMPLTSTAT", 0x7F, 8, MD(0)|MD(1) },
	{"LmPRMSTAT0", 0x80, 32, MODE_COMMON },
	{"LmPRMSTAT1", 0x84, 32, MODE_COMMON },
	{"LmGPRMINT", 0x88, 8, MODE_COMMON },
	{"LmMnCURRSCB", 0x8A, 16, MD(0) },
	{"LmPRMICODE", 0x8C, 32, MODE_COMMON },
	{"LmMnRCVCNT", 0x90, 16, MD(0) },
	{"LmMnBUFSTAT", 0x92, 16, MD(0) },
	{"LmMnXMTHDRSIZE",0x92, 8, MD(1) },
	{"LmMnXMTSIZE", 0x93, 8, MD(1) },
	{"LmMnTGTXFRCNT", 0x94, 32, MD(0) },
	{"LmMnEXPROFS", 0x98, 32, MD(0) },
	{"LmMnXMTROFS", 0x98, 32, MD(1) },
	{"LmMnRCVROFS", 0x9C, 32, MD(0) },
	{"LmCONCTL", 0xA0, 16, MODE_COMMON },
	{"LmBITLTIMER", 0xA2, 16, MODE_COMMON },
	{"LmWWNLOW", 0xA8, 32, MODE_COMMON },
	{"LmWWNHIGH", 0xAC, 32, MODE_COMMON },
	{"LmMnFRMERR", 0xB0, 32, MD(0) },
	{"LmMnFRMERREN", 0xB4, 32, MD(0) },
	{"LmAWTIMER", 0xB8, 16, MODE_COMMON },
	{"LmAWTCTL", 0xBA, 8, MODE_COMMON },
	{"LmMnHDRCMPS", 0xC0, 32, MD(0) },
	{"LmMnXMTSTAT", 0xC4, 8, MD(1) },
	{"LmHWTSTATEN", 0xC5, 8, MODE_COMMON },
	{"LmMnRRDYRC", 0xC6, 8, MD(0) },
	{"LmMnRRDYTC", 0xC6, 8, MD(1) },
	{"LmHWTSTAT", 0xC7, 8, MODE_COMMON },
	{"LmMnDATABUFADR",0xC8, 16, MD(0)|MD(1) },
	{"LmDWSSTATUS", 0xCB, 8, MODE_COMMON },
	{"LmMnACTSTAT", 0xCE, 16, MD(0)|MD(1) },
	{"LmMnREQSCB", 0xD2, 16, MD(0)|MD(1) },
	{"LmXXXPRIM", 0xD4, 32, MODE_COMMON },
	{"LmRCVASTAT", 0xD9, 8, MODE_COMMON },
	{"LmINTDIS1", 0xDA, 8, MODE_COMMON },
	{"LmPSTORESEL", 0xDB, 8, MODE_COMMON },
	{"LmPSTORE", 0xDC, 32, MODE_COMMON },
	{"LmPRIMSTAT0EN", 0xE0, 32, MODE_COMMON },
	{"LmPRIMSTAT1EN", 0xE4, 32, MODE_COMMON },
	{"LmDONETCTL", 0xF2, 16, MODE_COMMON },
	{NULL, 0, 0, 0 }	/* sentinel: end of table */
};
138/*
139static struct lseq_cio_regs LSEQmOOBREGS[] = {
140 {"OOB_BFLTR" ,0x100, 8, MD(5)},
141 {"OOB_INIT_MIN" ,0x102,16, MD(5)},
142 {"OOB_INIT_MAX" ,0x104,16, MD(5)},
143 {"OOB_INIT_NEG" ,0x106,16, MD(5)},
144 {"OOB_SAS_MIN" ,0x108,16, MD(5)},
145 {"OOB_SAS_MAX" ,0x10A,16, MD(5)},
146 {"OOB_SAS_NEG" ,0x10C,16, MD(5)},
147 {"OOB_WAKE_MIN" ,0x10E,16, MD(5)},
148 {"OOB_WAKE_MAX" ,0x110,16, MD(5)},
149 {"OOB_WAKE_NEG" ,0x112,16, MD(5)},
150 {"OOB_IDLE_MAX" ,0x114,16, MD(5)},
151 {"OOB_BURST_MAX" ,0x116,16, MD(5)},
152 {"OOB_XMIT_BURST" ,0x118, 8, MD(5)},
153 {"OOB_SEND_PAIRS" ,0x119, 8, MD(5)},
154 {"OOB_INIT_IDLE" ,0x11A, 8, MD(5)},
155 {"OOB_INIT_NEGO" ,0x11C, 8, MD(5)},
156 {"OOB_SAS_IDLE" ,0x11E, 8, MD(5)},
157 {"OOB_SAS_NEGO" ,0x120, 8, MD(5)},
158 {"OOB_WAKE_IDLE" ,0x122, 8, MD(5)},
159 {"OOB_WAKE_NEGO" ,0x124, 8, MD(5)},
160 {"OOB_DATA_KBITS" ,0x126, 8, MD(5)},
161 {"OOB_BURST_DATA" ,0x128,32, MD(5)},
162 {"OOB_ALIGN_0_DATA" ,0x12C,32, MD(5)},
163 {"OOB_ALIGN_1_DATA" ,0x130,32, MD(5)},
164 {"OOB_SYNC_DATA" ,0x134,32, MD(5)},
165 {"OOB_D10_2_DATA" ,0x138,32, MD(5)},
166 {"OOB_PHY_RST_CNT" ,0x13C,32, MD(5)},
167 {"OOB_SIG_GEN" ,0x140, 8, MD(5)},
168 {"OOB_XMIT" ,0x141, 8, MD(5)},
169 {"FUNCTION_MASK" ,0x142, 8, MD(5)},
170 {"OOB_MODE" ,0x143, 8, MD(5)},
171 {"CURRENT_STATUS" ,0x144, 8, MD(5)},
172 {"SPEED_MASK" ,0x145, 8, MD(5)},
173 {"PRIM_COUNT" ,0x146, 8, MD(5)},
174 {"OOB_SIGNALS" ,0x148, 8, MD(5)},
175 {"OOB_DATA_DET" ,0x149, 8, MD(5)},
176 {"OOB_TIME_OUT" ,0x14C, 8, MD(5)},
177 {"OOB_TIMER_ENABLE" ,0x14D, 8, MD(5)},
178 {"OOB_STATUS" ,0x14E, 8, MD(5)},
179 {"HOT_PLUG_DELAY" ,0x150, 8, MD(5)},
180 {"RCD_DELAY" ,0x151, 8, MD(5)},
181 {"COMSAS_TIMER" ,0x152, 8, MD(5)},
182 {"SNTT_DELAY" ,0x153, 8, MD(5)},
183 {"SPD_CHNG_DELAY" ,0x154, 8, MD(5)},
184 {"SNLT_DELAY" ,0x155, 8, MD(5)},
185 {"SNWT_DELAY" ,0x156, 8, MD(5)},
186 {"ALIGN_DELAY" ,0x157, 8, MD(5)},
187 {"INT_ENABLE_0" ,0x158, 8, MD(5)},
188 {"INT_ENABLE_1" ,0x159, 8, MD(5)},
189 {"INT_ENABLE_2" ,0x15A, 8, MD(5)},
190 {"INT_ENABLE_3" ,0x15B, 8, MD(5)},
191 {"OOB_TEST_REG" ,0x15C, 8, MD(5)},
192 {"PHY_CONTROL_0" ,0x160, 8, MD(5)},
193 {"PHY_CONTROL_1" ,0x161, 8, MD(5)},
194 {"PHY_CONTROL_2" ,0x162, 8, MD(5)},
195 {"PHY_CONTROL_3" ,0x163, 8, MD(5)},
196 {"PHY_OOB_CAL_TX" ,0x164, 8, MD(5)},
197 {"PHY_OOB_CAL_RX" ,0x165, 8, MD(5)},
198 {"OOB_PHY_CAL_TX" ,0x166, 8, MD(5)},
199 {"OOB_PHY_CAL_RX" ,0x167, 8, MD(5)},
200 {"PHY_CONTROL_4" ,0x168, 8, MD(5)},
201 {"PHY_TEST" ,0x169, 8, MD(5)},
202 {"PHY_PWR_CTL" ,0x16A, 8, MD(5)},
203 {"PHY_PWR_DELAY" ,0x16B, 8, MD(5)},
204 {"OOB_SM_CON" ,0x16C, 8, MD(5)},
205 {"ADDR_TRAP_1" ,0x16D, 8, MD(5)},
206 {"ADDR_NEXT_1" ,0x16E, 8, MD(5)},
207 {"NEXT_ST_1" ,0x16F, 8, MD(5)},
208 {"OOB_SM_STATE" ,0x170, 8, MD(5)},
209 {"ADDR_TRAP_2" ,0x171, 8, MD(5)},
210 {"ADDR_NEXT_2" ,0x172, 8, MD(5)},
211 {"NEXT_ST_2" ,0x173, 8, MD(5)},
212 {NULL, 0, 0, 0 }
213};
214*/
/* printk format strings for 8/16/32/64-bit register dumps:
 * "name[offset]:value".
 */
#define STR_8BIT " %30s[0x%04x]:0x%02x\n"
#define STR_16BIT " %30s[0x%04x]:0x%04x\n"
#define STR_32BIT " %30s[0x%04x]:0x%08x\n"
#define STR_64BIT " %30s[0x%04x]:0x%llx\n"

/* Dump a register at explicit address _r, labelled _n. */
#define PRINT_REG_8bit(_ha, _n, _r) asd_printk(STR_8BIT, #_n, _n, \
					       asd_read_reg_byte(_ha, _r))
#define PRINT_REG_16bit(_ha, _n, _r) asd_printk(STR_16BIT, #_n, _n, \
						asd_read_reg_word(_ha, _r))
#define PRINT_REG_32bit(_ha, _n, _r) asd_printk(STR_32BIT, #_n, _n, \
						asd_read_reg_dword(_ha, _r))

/* Dump a CSEQ register whose address constant is C<_n> (token-pasted). */
#define PRINT_CREG_8bit(_ha, _n) asd_printk(STR_8BIT, #_n, _n, \
					    asd_read_reg_byte(_ha, C##_n))
#define PRINT_CREG_16bit(_ha, _n) asd_printk(STR_16BIT, #_n, _n, \
					     asd_read_reg_word(_ha, C##_n))
#define PRINT_CREG_32bit(_ha, _n) asd_printk(STR_32BIT, #_n, _n, \
					     asd_read_reg_dword(_ha, C##_n))

/* Mode-tagged format strings/dumpers: prefix each line with the
 * sequencer mode number _m.
 */
#define MSTR_8BIT " Mode:%02d %30s[0x%04x]:0x%02x\n"
#define MSTR_16BIT " Mode:%02d %30s[0x%04x]:0x%04x\n"
#define MSTR_32BIT " Mode:%02d %30s[0x%04x]:0x%08x\n"

#define PRINT_MREG_8bit(_ha, _m, _n, _r) asd_printk(MSTR_8BIT, _m, #_n, _n, \
						    asd_read_reg_byte(_ha, _r))
#define PRINT_MREG_16bit(_ha, _m, _n, _r) asd_printk(MSTR_16BIT, _m, #_n, _n, \
						     asd_read_reg_word(_ha, _r))
#define PRINT_MREG_32bit(_ha, _m, _n, _r) asd_printk(MSTR_32BIT, _m, #_n, _n, \
						     asd_read_reg_dword(_ha, _r))

/* Dump a CSEQ scratch (mode-independent scratch, "MIS") location; the
 * printed offset is relative to the mapped scratch base CMAPPEDSCR.
 * can also be used for MD when the register is mode aware already
 */
#define PRINT_MIS_byte(_ha, _n) asd_printk(STR_8BIT, #_n,CSEQ_##_n-CMAPPEDSCR,\
					   asd_read_reg_byte(_ha, CSEQ_##_n))
#define PRINT_MIS_word(_ha, _n) asd_printk(STR_16BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
					   asd_read_reg_word(_ha, CSEQ_##_n))
#define PRINT_MIS_dword(_ha, _n)                      \
	asd_printk(STR_32BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
		   asd_read_reg_dword(_ha, CSEQ_##_n))
/* 64-bit scratch values are read as two dwords, low dword first. */
#define PRINT_MIS_qword(_ha, _n)                                        \
	asd_printk(STR_64BIT, #_n,CSEQ_##_n-CMAPPEDSCR,                 \
		   (unsigned long long)(((u64)asd_read_reg_dword(_ha, CSEQ_##_n)) \
		 | (((u64)asd_read_reg_dword(_ha, (CSEQ_##_n)+4))<<32)))

/* Address of the mode _m copy of CSEQ mode-dependent scratch word _n;
 * each mode's scratch copy is CSEQ_PAGE_SIZE*2 bytes apart.
 */
#define CMDP_REG(_n, _m) (_m*(CSEQ_PAGE_SIZE*2)+CSEQ_##_n)
/* Dump one mode-dependent scratch word/byte across all 8 modes on one line. */
#define PRINT_CMDP_word(_ha, _n) \
asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
	   #_n, \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 0)), \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 1)), \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 2)), \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 3)), \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 4)), \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 5)), \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 6)), \
	   asd_read_reg_word(_ha, CMDP_REG(_n, 7)))

#define PRINT_CMDP_byte(_ha, _n) \
asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
	   #_n, \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 0)), \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 1)), \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 2)), \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 3)), \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 4)), \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 5)), \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 6)), \
	   asd_read_reg_byte(_ha, CMDP_REG(_n, 7)))
282
/**
 * asd_dump_cseq_state -- dump the central sequencer (CSEQ) state
 * @asd_ha: pointer to host adapter structure
 *
 * Prints, via asd_printk, the CSEQ ARP2 processor registers, IOP
 * registers, CIO registers (per mode where applicable) and the CSEQ
 * scratch RAM: mode-independent pages (MIP 4-7) and mode-dependent
 * pages (MDP), including the mode-8 pages.
 */
static void asd_dump_cseq_state(struct asd_ha_struct *asd_ha)
{
	int mode;

	asd_printk("CSEQ STATE\n");

	asd_printk("ARP2 REGISTERS\n");

	PRINT_CREG_32bit(asd_ha, ARP2CTL);
	PRINT_CREG_32bit(asd_ha, ARP2INT);
	PRINT_CREG_32bit(asd_ha, ARP2INTEN);
	PRINT_CREG_8bit(asd_ha, MODEPTR);
	PRINT_CREG_8bit(asd_ha, ALTMODE);
	PRINT_CREG_8bit(asd_ha, FLAG);
	PRINT_CREG_8bit(asd_ha, ARP2INTCTL);
	PRINT_CREG_16bit(asd_ha, STACK);
	PRINT_CREG_16bit(asd_ha, PRGMCNT);
	PRINT_CREG_16bit(asd_ha, ACCUM);
	PRINT_CREG_16bit(asd_ha, SINDEX);
	PRINT_CREG_16bit(asd_ha, DINDEX);
	PRINT_CREG_8bit(asd_ha, SINDIR);
	PRINT_CREG_8bit(asd_ha, DINDIR);
	PRINT_CREG_8bit(asd_ha, JUMLDIR);
	PRINT_CREG_8bit(asd_ha, ARP2HALTCODE);
	PRINT_CREG_16bit(asd_ha, CURRADDR);
	PRINT_CREG_16bit(asd_ha, LASTADDR);
	PRINT_CREG_16bit(asd_ha, NXTLADDR);

	asd_printk("IOP REGISTERS\n");

	PRINT_REG_32bit(asd_ha, BISTCTL1, CBISTCTL);
	PRINT_CREG_32bit(asd_ha, MAPPEDSCR);

	asd_printk("CIO REGISTERS\n");

	/* SCB/DDB pointers exist for modes 0-8 and additionally mode 15,
	 * which is dumped separately after the loop.
	 */
	for (mode = 0; mode < 9; mode++)
		PRINT_MREG_16bit(asd_ha, mode, MnSCBPTR, CMnSCBPTR(mode));
	PRINT_MREG_16bit(asd_ha, 15, MnSCBPTR, CMnSCBPTR(15));

	for (mode = 0; mode < 9; mode++)
		PRINT_MREG_16bit(asd_ha, mode, MnDDBPTR, CMnDDBPTR(mode));
	PRINT_MREG_16bit(asd_ha, 15, MnDDBPTR, CMnDDBPTR(15));

	for (mode = 0; mode < 8; mode++)
		PRINT_MREG_32bit(asd_ha, mode, MnREQMBX, CMnREQMBX(mode));
	for (mode = 0; mode < 8; mode++)
		PRINT_MREG_32bit(asd_ha, mode, MnRSPMBX, CMnRSPMBX(mode));
	for (mode = 0; mode < 8; mode++)
		PRINT_MREG_32bit(asd_ha, mode, MnINT, CMnINT(mode));
	for (mode = 0; mode < 8; mode++)
		PRINT_MREG_32bit(asd_ha, mode, MnINTEN, CMnINTEN(mode));

	PRINT_CREG_8bit(asd_ha, SCRATCHPAGE);
	for (mode = 0; mode < 8; mode++)
		PRINT_MREG_8bit(asd_ha, mode, MnSCRATCHPAGE,
				CMnSCRATCHPAGE(mode));

	PRINT_REG_32bit(asd_ha, CLINKCON, CLINKCON);
	PRINT_REG_8bit(asd_ha, CCONMSK, CCONMSK);
	PRINT_REG_8bit(asd_ha, CCONEXIST, CCONEXIST);
	PRINT_REG_16bit(asd_ha, CCONMODE, CCONMODE);
	PRINT_REG_32bit(asd_ha, CTIMERCALC, CTIMERCALC);
	PRINT_REG_8bit(asd_ha, CINTDIS, CINTDIS);

	asd_printk("SCRATCH MEMORY\n");

	/* Mode-independent scratch pages 4-7. */
	asd_printk("MIP 4 >>>>>\n");
	PRINT_MIS_word(asd_ha, Q_EXE_HEAD);
	PRINT_MIS_word(asd_ha, Q_EXE_TAIL);
	PRINT_MIS_word(asd_ha, Q_DONE_HEAD);
	PRINT_MIS_word(asd_ha, Q_DONE_TAIL);
	PRINT_MIS_word(asd_ha, Q_SEND_HEAD);
	PRINT_MIS_word(asd_ha, Q_SEND_TAIL);
	PRINT_MIS_word(asd_ha, Q_DMA2CHIM_HEAD);
	PRINT_MIS_word(asd_ha, Q_DMA2CHIM_TAIL);
	PRINT_MIS_word(asd_ha, Q_COPY_HEAD);
	PRINT_MIS_word(asd_ha, Q_COPY_TAIL);
	PRINT_MIS_word(asd_ha, REG0);
	PRINT_MIS_word(asd_ha, REG1);
	PRINT_MIS_dword(asd_ha, REG2);
	PRINT_MIS_byte(asd_ha, LINK_CTL_Q_MAP);
	PRINT_MIS_byte(asd_ha, MAX_CSEQ_MODE);
	PRINT_MIS_byte(asd_ha, FREE_LIST_HACK_COUNT);

	asd_printk("MIP 5 >>>>\n");
	PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_QUEUE);
	PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_COUNT);
	PRINT_MIS_word(asd_ha, Q_EST_NEXUS_HEAD);
	PRINT_MIS_word(asd_ha, Q_EST_NEXUS_TAIL);
	PRINT_MIS_word(asd_ha, NEED_EST_NEXUS_SCB);
	PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_HEAD);
	PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_TAIL);
	PRINT_MIS_byte(asd_ha, EST_NEXUS_SCB_OFFSET);

	asd_printk("MIP 6 >>>>\n");
	PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR0);
	PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR1);
	PRINT_MIS_word(asd_ha, INT_ROUT_SCBPTR);
	PRINT_MIS_byte(asd_ha, INT_ROUT_MODE);
	PRINT_MIS_byte(asd_ha, ISR_SCRATCH_FLAGS);
	PRINT_MIS_word(asd_ha, ISR_SAVE_SINDEX);
	PRINT_MIS_word(asd_ha, ISR_SAVE_DINDEX);
	PRINT_MIS_word(asd_ha, Q_MONIRTT_HEAD);
	PRINT_MIS_word(asd_ha, Q_MONIRTT_TAIL);
	PRINT_MIS_byte(asd_ha, FREE_SCB_MASK);
	PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_HEAD);
	PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_TAIL);
	PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_HEAD);
	PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_TAIL);

	asd_printk("MIP 7 >>>>\n");
	PRINT_MIS_qword(asd_ha, EMPTY_REQ_QUEUE);
	PRINT_MIS_qword(asd_ha, EMPTY_REQ_COUNT);
	PRINT_MIS_word(asd_ha, Q_EMPTY_HEAD);
	PRINT_MIS_word(asd_ha, Q_EMPTY_TAIL);
	PRINT_MIS_word(asd_ha, NEED_EMPTY_SCB);
	PRINT_MIS_byte(asd_ha, EMPTY_REQ_HEAD);
	PRINT_MIS_byte(asd_ha, EMPTY_REQ_TAIL);
	PRINT_MIS_byte(asd_ha, EMPTY_SCB_OFFSET);
	PRINT_MIS_word(asd_ha, PRIMITIVE_DATA);
	PRINT_MIS_dword(asd_ha, TIMEOUT_CONST);

	/* Mode-dependent scratch page 0, all 8 modes side by side. */
	asd_printk("MDP 0 >>>>\n");
	asd_printk("%-20s %6s %6s %6s %6s %6s %6s %6s %6s\n",
		   "Mode: ", "0", "1", "2", "3", "4", "5", "6", "7");
	PRINT_CMDP_word(asd_ha, LRM_SAVE_SINDEX);
	PRINT_CMDP_word(asd_ha, LRM_SAVE_SCBPTR);
	PRINT_CMDP_word(asd_ha, Q_LINK_HEAD);
	PRINT_CMDP_word(asd_ha, Q_LINK_TAIL);
	PRINT_CMDP_byte(asd_ha, LRM_SAVE_SCRPAGE);

	asd_printk("MDP 0 Mode 8 >>>>\n");
	PRINT_MIS_word(asd_ha, RET_ADDR);
	PRINT_MIS_word(asd_ha, RET_SCBPTR);
	PRINT_MIS_word(asd_ha, SAVE_SCBPTR);
	PRINT_MIS_word(asd_ha, EMPTY_TRANS_CTX);
	PRINT_MIS_word(asd_ha, RESP_LEN);
	PRINT_MIS_word(asd_ha, TMF_SCBPTR);
	PRINT_MIS_word(asd_ha, GLOBAL_PREV_SCB);
	PRINT_MIS_word(asd_ha, GLOBAL_HEAD);
	PRINT_MIS_word(asd_ha, CLEAR_LU_HEAD);
	PRINT_MIS_byte(asd_ha, TMF_OPCODE);
	PRINT_MIS_byte(asd_ha, SCRATCH_FLAGS);
	PRINT_MIS_word(asd_ha, HSB_SITE);
	PRINT_MIS_word(asd_ha, FIRST_INV_SCB_SITE);
	PRINT_MIS_word(asd_ha, FIRST_INV_DDB_SITE);

	asd_printk("MDP 1 Mode 8 >>>>\n");
	PRINT_MIS_qword(asd_ha, LUN_TO_CLEAR);
	PRINT_MIS_qword(asd_ha, LUN_TO_CHECK);

	asd_printk("MDP 2 Mode 8 >>>>\n");
	PRINT_MIS_qword(asd_ha, HQ_NEW_POINTER);
	PRINT_MIS_qword(asd_ha, HQ_DONE_BASE);
	PRINT_MIS_dword(asd_ha, HQ_DONE_POINTER);
	PRINT_MIS_byte(asd_ha, HQ_DONE_PASS);
}
440
/* Dump an LSEQ register whose per-link address macro is Lm<_n>(_lseq). */
#define PRINT_LREG_8bit(_h, _lseq, _n) \
	asd_printk(STR_8BIT, #_n, _n, asd_read_reg_byte(_h, Lm##_n(_lseq)))
#define PRINT_LREG_16bit(_h, _lseq, _n) \
	asd_printk(STR_16BIT, #_n, _n, asd_read_reg_word(_h, Lm##_n(_lseq)))
#define PRINT_LREG_32bit(_h, _lseq, _n) \
	asd_printk(STR_32BIT, #_n, _n, asd_read_reg_dword(_h, Lm##_n(_lseq)))

/* Dump an LSEQ scratch location LmSEQ_<_n>(_lseq); the printed offset
 * is relative to that link's scratch base LmSCRATCH(_lseq).
 */
#define PRINT_LMIP_byte(_h, _lseq, _n) \
	asd_printk(STR_8BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
		   asd_read_reg_byte(_h, LmSEQ_##_n(_lseq)))
#define PRINT_LMIP_word(_h, _lseq, _n) \
	asd_printk(STR_16BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
		   asd_read_reg_word(_h, LmSEQ_##_n(_lseq)))
#define PRINT_LMIP_dword(_h, _lseq, _n) \
	asd_printk(STR_32BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
		   asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)))
/* 64-bit scratch values are read as two dwords, low dword first. */
#define PRINT_LMIP_qword(_h, _lseq, _n) \
	asd_printk(STR_64BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
		   (unsigned long long)(((unsigned long long) \
			asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) \
		 | (((unsigned long long) \
			asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)+4))<<32)))
463
464static void asd_print_lseq_cio_reg(struct asd_ha_struct *asd_ha,
465 u32 lseq_cio_addr, int i)
466{
467 switch (LSEQmCIOREGS[i].width) {
468 case 8:
469 asd_printk("%20s[0x%x]: 0x%02x\n", LSEQmCIOREGS[i].name,
470 LSEQmCIOREGS[i].offs,
471 asd_read_reg_byte(asd_ha, lseq_cio_addr +
472 LSEQmCIOREGS[i].offs));
473
474 break;
475 case 16:
476 asd_printk("%20s[0x%x]: 0x%04x\n", LSEQmCIOREGS[i].name,
477 LSEQmCIOREGS[i].offs,
478 asd_read_reg_word(asd_ha, lseq_cio_addr +
479 LSEQmCIOREGS[i].offs));
480
481 break;
482 case 32:
483 asd_printk("%20s[0x%x]: 0x%08x\n", LSEQmCIOREGS[i].name,
484 LSEQmCIOREGS[i].offs,
485 asd_read_reg_dword(asd_ha, lseq_cio_addr +
486 LSEQmCIOREGS[i].offs));
487 break;
488 }
489}
490
/**
 * asd_dump_lseq_state -- dump one link sequencer's (LSEQ) state
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer (phy) index to dump
 *
 * Prints the LSEQ ARP2 processor registers, IOP registers, CIO
 * registers (mode-common ones once, then per-mode ones for modes 0-7)
 * and the LSEQ scratch RAM pages for the various modes.
 */
static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
{
	u32 moffs;
	int mode;

	asd_printk("LSEQ %d STATE\n", lseq);

	asd_printk("LSEQ%d: ARP2 REGISTERS\n", lseq);
	PRINT_LREG_32bit(asd_ha, lseq, ARP2CTL);
	PRINT_LREG_32bit(asd_ha, lseq, ARP2INT);
	PRINT_LREG_32bit(asd_ha, lseq, ARP2INTEN);
	PRINT_LREG_8bit(asd_ha, lseq, MODEPTR);
	PRINT_LREG_8bit(asd_ha, lseq, ALTMODE);
	PRINT_LREG_8bit(asd_ha, lseq, FLAG);
	PRINT_LREG_8bit(asd_ha, lseq, ARP2INTCTL);
	PRINT_LREG_16bit(asd_ha, lseq, STACK);
	PRINT_LREG_16bit(asd_ha, lseq, PRGMCNT);
	PRINT_LREG_16bit(asd_ha, lseq, ACCUM);
	PRINT_LREG_16bit(asd_ha, lseq, SINDEX);
	PRINT_LREG_16bit(asd_ha, lseq, DINDEX);
	PRINT_LREG_8bit(asd_ha, lseq, SINDIR);
	PRINT_LREG_8bit(asd_ha, lseq, DINDIR);
	PRINT_LREG_8bit(asd_ha, lseq, JUMLDIR);
	PRINT_LREG_8bit(asd_ha, lseq, ARP2HALTCODE);
	PRINT_LREG_16bit(asd_ha, lseq, CURRADDR);
	PRINT_LREG_16bit(asd_ha, lseq, LASTADDR);
	PRINT_LREG_16bit(asd_ha, lseq, NXTLADDR);

	asd_printk("LSEQ%d: IOP REGISTERS\n", lseq);

	PRINT_LREG_32bit(asd_ha, lseq, MODECTL);
	PRINT_LREG_32bit(asd_ha, lseq, DBGMODE);
	PRINT_LREG_32bit(asd_ha, lseq, CONTROL);
	PRINT_REG_32bit(asd_ha, BISTCTL0, LmBISTCTL0(lseq));
	PRINT_REG_32bit(asd_ha, BISTCTL1, LmBISTCTL1(lseq));

	asd_printk("LSEQ%d: CIO REGISTERS\n", lseq);
	asd_printk("Mode common:\n");

	/* Mode-common registers: print each once per mode window. */
	for (mode = 0; mode < 8; mode++) {
		u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
		int i;

		for (i = 0; LSEQmCIOREGS[i].name; i++)
			if (LSEQmCIOREGS[i].mode == MODE_COMMON)
				asd_print_lseq_cio_reg(asd_ha,lseq_cio_addr,i);
	}

	asd_printk("Mode unique:\n");
	/* Mode-specific registers: print only those valid in each mode. */
	for (mode = 0; mode < 8; mode++) {
		u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
		int i;

		asd_printk("Mode %d\n", mode);
		for (i = 0; LSEQmCIOREGS[i].name; i++) {
			if (!(LSEQmCIOREGS[i].mode & (1 << mode)))
				continue;
			asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i);
		}
	}

	asd_printk("SCRATCH MEMORY\n");

	asd_printk("LSEQ%d MIP 0 >>>>\n", lseq);
	PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_HEAD);
	PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL);
	PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER);
	PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS);
	PRINT_LMIP_qword(asd_ha, lseq, CONNECTION_STATE);
	PRINT_LMIP_word(asd_ha, lseq, CONCTL);
	PRINT_LMIP_byte(asd_ha, lseq, CONSTAT);
	PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES);
	PRINT_LMIP_word(asd_ha, lseq, REG1_ISR);
	PRINT_LMIP_word(asd_ha, lseq, REG2_ISR);
	PRINT_LMIP_word(asd_ha, lseq, REG3_ISR);
	PRINT_LMIP_qword(asd_ha, lseq,REG0_ISR);

	asd_printk("LSEQ%d MIP 1 >>>>\n", lseq);
	PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR0);
	PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR1);
	PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR2);
	PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR3);
	PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE0);
	PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE1);
	PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE2);
	PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE3);
	PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_HEAD);
	PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_TAIL);
	PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_BUF_AVAIL);
	PRINT_LMIP_dword(asd_ha, lseq, TIMEOUT_CONST);
	PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_SINDEX);
	PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_DINDEX);

	asd_printk("LSEQ%d MIP 2 >>>>\n", lseq);
	PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR0);
	PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR1);
	PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR2);
	PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR3);
	PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD0);
	PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD1);
	PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD2);
	PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD3);
	PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_HEAD);
	PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_TAIL);
	PRINT_LMIP_byte(asd_ha, lseq, EMPTY_BUFS_AVAIL);

	asd_printk("LSEQ%d MIP 3 >>>>\n", lseq);
	PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TMR_TOUT_CONST);
	PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMEOUT);
	PRINT_LMIP_dword(asd_ha, lseq, SRST_ASSERT_TIMEOUT);
	PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMEOUT);
	PRINT_LMIP_dword(asd_ha, lseq, ONE_MILLISEC_TIMEOUT);
	PRINT_LMIP_dword(asd_ha, lseq, TEN_MS_COMINIT_TIMEOUT);
	PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMEOUT);

	/* Mode-dependent page 0 for modes 0-2; each mode's copy is
	 * LSEQ_MODE_SCRATCH_SIZE bytes after the previous one.
	 */
	for (mode = 0; mode < 3; mode++) {
		asd_printk("LSEQ%d MDP 0 MODE %d >>>>\n", lseq, mode);
		moffs = mode * LSEQ_MODE_SCRATCH_SIZE;

		asd_printk(STR_16BIT, "RET_ADDR", 0,
			   asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)
					     + moffs));
		asd_printk(STR_16BIT, "REG0_MODE", 2,
			   asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)
					     + moffs));
		asd_printk(STR_16BIT, "MODE_FLAGS", 4,
			   asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)
					     + moffs));
		asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
			   asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)
					     + moffs));
		asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
			   asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)
					     + moffs));
		asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
			   asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)
					     + moffs));
		asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
			   asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)
					     + moffs));
	}

	/* Mode 5's page 0 copy lives at its own fixed offset. */
	asd_printk("LSEQ%d MDP 0 MODE 5 >>>>\n", lseq);
	moffs = LSEQ_MODE5_PAGE0_OFFSET;
	asd_printk(STR_16BIT, "RET_ADDR", 0,
		   asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + moffs));
	asd_printk(STR_16BIT, "REG0_MODE", 2,
		   asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + moffs));
	asd_printk(STR_16BIT, "MODE_FLAGS", 4,
		   asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + moffs));
	asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
		   asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + moffs));
	asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
		   asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + moffs));
	asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
		   asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + moffs));
	asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
		   asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + moffs));

	asd_printk("LSEQ%d MDP 0 MODE 0 >>>>\n", lseq);
	PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_DDB_SITE);
	PRINT_LMIP_word(asd_ha, lseq, EMPTY_TRANS_CTX);
	PRINT_LMIP_word(asd_ha, lseq, RESP_LEN);
	PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_SCB_SITE);
	PRINT_LMIP_dword(asd_ha, lseq, INTEN_SAVE);
	PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_FRM_LEN);
	PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_PROTOCOL);
	PRINT_LMIP_byte(asd_ha, lseq, RESP_STATUS);
	PRINT_LMIP_byte(asd_ha, lseq, LAST_LOADED_SGE);
	PRINT_LMIP_byte(asd_ha, lseq, SAVE_SCBPTR);

	asd_printk("LSEQ%d MDP 0 MODE 1 >>>>\n", lseq);
	PRINT_LMIP_word(asd_ha, lseq, Q_XMIT_HEAD);
	PRINT_LMIP_word(asd_ha, lseq, M1_EMPTY_TRANS_CTX);
	PRINT_LMIP_word(asd_ha, lseq, INI_CONN_TAG);
	PRINT_LMIP_byte(asd_ha, lseq, FAILED_OPEN_STATUS);
	PRINT_LMIP_byte(asd_ha, lseq, XMIT_REQUEST_TYPE);
	PRINT_LMIP_byte(asd_ha, lseq, M1_RESP_STATUS);
	PRINT_LMIP_byte(asd_ha, lseq, M1_LAST_LOADED_SGE);
	PRINT_LMIP_word(asd_ha, lseq, M1_SAVE_SCBPTR);

	asd_printk("LSEQ%d MDP 0 MODE 2 >>>>\n", lseq);
	PRINT_LMIP_word(asd_ha, lseq, PORT_COUNTER);
	PRINT_LMIP_word(asd_ha, lseq, PM_TABLE_PTR);
	PRINT_LMIP_word(asd_ha, lseq, SATA_INTERLOCK_TMR_SAVE);
	PRINT_LMIP_word(asd_ha, lseq, IP_BITL);
	PRINT_LMIP_word(asd_ha, lseq, COPY_SMP_CONN_TAG);
	PRINT_LMIP_byte(asd_ha, lseq, P0M2_OFFS1AH);

	asd_printk("LSEQ%d MDP 0 MODE 4/5 >>>>\n", lseq);
	PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_STATUS);
	PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_MODE);
	PRINT_LMIP_word(asd_ha, lseq, Q_LINK_HEAD);
	PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_ERR);
	PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_SIGNALS);
	PRINT_LMIP_byte(asd_ha, lseq, SAS_RESET_MODE);
	PRINT_LMIP_byte(asd_ha, lseq, LINK_RESET_RETRY_COUNT);
	PRINT_LMIP_byte(asd_ha, lseq, NUM_LINK_RESET_RETRIES);
	PRINT_LMIP_word(asd_ha, lseq, OOB_INT_ENABLES);
	PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_TIMEOUT);
	PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_DOWN_COUNT);

	asd_printk("LSEQ%d MDP 1 MODE 0 >>>>\n", lseq);
	PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR0);
	PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR1);

	asd_printk("LSEQ%d MDP 1 MODE 1 >>>>\n", lseq);
	PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR0);
	PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR1);

	asd_printk("LSEQ%d MDP 1 MODE 2 >>>>\n", lseq);
	PRINT_LMIP_dword(asd_ha, lseq, INVALID_DWORD_COUNT);
	PRINT_LMIP_dword(asd_ha, lseq, DISPARITY_ERROR_COUNT);
	PRINT_LMIP_dword(asd_ha, lseq, LOSS_OF_SYNC_COUNT);

	asd_printk("LSEQ%d MDP 1 MODE 4/5 >>>>\n", lseq);
	PRINT_LMIP_dword(asd_ha, lseq, FRAME_TYPE_MASK);
	PRINT_LMIP_dword(asd_ha, lseq, HASHED_SRC_ADDR_MASK_PRINT);
	PRINT_LMIP_byte(asd_ha, lseq, NUM_FILL_BYTES_MASK);
	PRINT_LMIP_word(asd_ha, lseq, TAG_MASK);
	PRINT_LMIP_word(asd_ha, lseq, TARGET_PORT_XFER_TAG);
	PRINT_LMIP_dword(asd_ha, lseq, DATA_OFFSET);

	asd_printk("LSEQ%d MDP 2 MODE 0 >>>>\n", lseq);
	PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMER_TERM_TS);
	PRINT_LMIP_byte(asd_ha, lseq, DEVICE_BITS);
	PRINT_LMIP_word(asd_ha, lseq, SDB_DDB);
	PRINT_LMIP_word(asd_ha, lseq, SDB_NUM_TAGS);
	PRINT_LMIP_word(asd_ha, lseq, SDB_CURR_TAG);

	asd_printk("LSEQ%d MDP 2 MODE 1 >>>>\n", lseq);
	PRINT_LMIP_qword(asd_ha, lseq, TX_ID_ADDR_FRAME);
	PRINT_LMIP_dword(asd_ha, lseq, OPEN_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, SRST_AS_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, LAST_LOADED_SG_EL);

	asd_printk("LSEQ%d MDP 2 MODE 2 >>>>\n", lseq);
	PRINT_LMIP_dword(asd_ha, lseq, CLOSE_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, BREAK_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, DWS_RESET_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, MCTL_TIMER_TERM_TS);

	asd_printk("LSEQ%d MDP 2 MODE 4/5 >>>>\n", lseq);
	PRINT_LMIP_dword(asd_ha, lseq, COMINIT_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, RCV_ID_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMER_TERM_TS);
	PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
}
740
/**
 * asd_dump_target_ddb -- dump a CSEQ DDB site
 * @asd_ha: pointer to host adapter structure
 * @site_no: site number of interest
 *
 * Prints the fields of the DDB at @site_no, interpreted as a target
 * port DDB (the SSP/SMP layout; flags2 is read via the STP/SATA
 * layout).  Silently returns if @site_no is out of range.
 */
void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
{
	if (site_no >= asd_ha->hw_prof.max_ddbs)
		return;

/* Field accessors: read a byte/word/dword of the DDB site at the
 * offset of the named member in the SSP/SMP target port layout
 * (DDB2_* uses the STP/SATA target port layout instead).
 */
#define DDB_FIELDB(__name)                                        \
	asd_ddbsite_read_byte(asd_ha, site_no,                    \
			      offsetof(struct asd_ddb_ssp_smp_target_port, __name))
#define DDB2_FIELDB(__name)                                       \
	asd_ddbsite_read_byte(asd_ha, site_no,                    \
			      offsetof(struct asd_ddb_stp_sata_target_port, __name))
#define DDB_FIELDW(__name)                                        \
	asd_ddbsite_read_word(asd_ha, site_no,                    \
			      offsetof(struct asd_ddb_ssp_smp_target_port, __name))

#define DDB_FIELDD(__name)                                         \
	asd_ddbsite_read_dword(asd_ha, site_no,                    \
			       offsetof(struct asd_ddb_ssp_smp_target_port, __name))

	asd_printk("DDB: 0x%02x\n", site_no);
	asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
	asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
	asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
	asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
	asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
	asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
	asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
	asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
	asd_printk("Pathway Blocked Count: 0x%02x\n",
		   DDB_FIELDB(pathway_blocked_count));
	asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
	asd_printk("more_compat_features: 0x%08x\n",
		   DDB_FIELDD(more_compat_features));
	asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
	asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
	asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
	asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail));
	asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail));
	asd_printk("Active Task Count: 0x%04x\n",
		   DDB_FIELDW(active_task_count));
	asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
	asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
	asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
}
790
/* asd_dump_ddb_0 -- dump DDB site 0, which holds the sequencer-shared
 * bookkeeping (free/used DDB queues, connection state, per-link port
 * map) laid out as struct asd_ddb_seq_shared.
 */
void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
{
/* Field accessors for site 0 using the asd_ddb_seq_shared layout;
 * DDB0_FIELDA reads the byte at offset _o within an array member.
 */
#define DDB0_FIELDB(__name)                                  \
	asd_ddbsite_read_byte(asd_ha, 0,                     \
			      offsetof(struct asd_ddb_seq_shared, __name))
#define DDB0_FIELDW(__name)                                  \
	asd_ddbsite_read_word(asd_ha, 0,                     \
			      offsetof(struct asd_ddb_seq_shared, __name))

#define DDB0_FIELDD(__name)                                  \
	asd_ddbsite_read_dword(asd_ha,0 ,                    \
			       offsetof(struct asd_ddb_seq_shared, __name))

#define DDB0_FIELDA(__name, _o)                              \
	asd_ddbsite_read_byte(asd_ha, 0,                     \
			      offsetof(struct asd_ddb_seq_shared, __name)+_o)


	asd_printk("DDB: 0\n");
	asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
	asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
	asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt));
	asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
	asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
	asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
	asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag));
	asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
	asd_printk("est_nexus_buf_thresh:%04x\n",
		   DDB0_FIELDW(est_nexus_buf_thresh));
	asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
	asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up));
	asd_printk("port_map_by_links:%02x %02x %02x %02x "
		   "%02x %02x %02x %02x\n",
		   DDB0_FIELDA(port_map_by_links, 0),
		   DDB0_FIELDA(port_map_by_links, 1),
		   DDB0_FIELDA(port_map_by_links, 2),
		   DDB0_FIELDA(port_map_by_links, 3),
		   DDB0_FIELDA(port_map_by_links, 4),
		   DDB0_FIELDA(port_map_by_links, 5),
		   DDB0_FIELDA(port_map_by_links, 6),
		   DDB0_FIELDA(port_map_by_links, 7));
}
833
/* Dump the task-related fields of one in-use SCB site.  The accessors
 * skip the SCB header and index into the initiate_ssp_task layout.
 */
static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
{

#define SCB_FIELDB(__name) \
	asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \
			      + offsetof(struct initiate_ssp_task, __name))
#define SCB_FIELDW(__name) \
	asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \
			      + offsetof(struct initiate_ssp_task, __name))
#define SCB_FIELDD(__name) \
	asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \
			       + offsetof(struct initiate_ssp_task, __name))

	asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
	asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
	asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
	asd_printk("Target Port Xfer Tag: 0x%04x.\n",
		   SCB_FIELDW(ssp_frame.tptt));
	/* NOTE(review): data_offs is printed with %08x but read with the
	 * 16-bit word accessor; if the field is 32 bits this dumps only
	 * half of it -- confirm against the ssp_frame layout. */
	asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs));
	asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
}
855
856/**
857 * asd_dump_scb_sites -- dump currently used CSEQ SCB sites
858 * @asd_ha: pointer to host adapter struct
859 */
860void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
861{
862 u16 site_no;
863
864 for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
865 u8 opcode;
866
867 if (!SCB_SITE_VALID(site_no))
868 continue;
869
870 /* We are only interested in SCB sites currently used.
871 */
872 opcode = asd_scbsite_read_byte(asd_ha, site_no,
873 offsetof(struct scb_header,
874 opcode));
875 if (opcode == 0xFF)
876 continue;
877
878 asd_printk("\nSCB: 0x%x\n", site_no);
879 asd_dump_scb_site(asd_ha, site_no);
880 }
881}
882
883/**
884 * ads_dump_seq_state -- dump CSEQ and LSEQ states
885 * @asd_ha: pointer to host adapter structure
886 * @lseq_mask: mask of LSEQs of interest
887 */
888void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask)
889{
890 int lseq;
891
892 asd_dump_cseq_state(asd_ha);
893
894 if (lseq_mask != 0)
895 for_each_sequencer(lseq_mask, lseq_mask, lseq)
896 asd_dump_lseq_state(asd_ha, lseq);
897}
898
/* Hex-dump the frame most recently received on @phy, 4 bytes per line.
 * @dl's status byte selects the label (STP FIS vs SAS IDENTIFY); the
 * frame buffer is read under the frame_rcvd lock.
 */
void asd_dump_frame_rcvd(struct asd_phy *phy,
			 struct done_list_struct *dl)
{
	unsigned long flags;
	int i;

	/* Bits of status_block[1] encode the protocol of the frame. */
	switch ((dl->status_block[1] & 0x70) >> 3) {
	case SAS_PROTO_STP:
		ASD_DPRINTK("STP proto device-to-host FIS:\n");
		break;
	default:
	case SAS_PROTO_SSP:
		ASD_DPRINTK("SAS proto IDENTIFY:\n");
		break;
	}
	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	/* NOTE(review): assumes frame_rcvd_size is a multiple of 4;
	 * otherwise the last line reads past the received length --
	 * confirm against the frame_rcvd buffer sizing. */
	for (i = 0; i < phy->sas_phy.frame_rcvd_size; i+=4)
		ASD_DPRINTK("%02x: %02x %02x %02x %02x\n",
			    i,
			    phy->frame_rcvd[i],
			    phy->frame_rcvd[i+1],
			    phy->frame_rcvd[i+2],
			    phy->frame_rcvd[i+3]);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
}
924
/* Print one aSCB's virtual/DMA addresses and its header fields; @ind is
 * the SCB's ordinal within the current dump run. */
static inline void asd_dump_scb(struct asd_ascb *ascb, int ind)
{
	asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
		   "index:%d, opcode:0x%02x\n",
		   ind, ascb->dma_scb.vaddr,
		   (unsigned long long)ascb->dma_scb.dma_handle,
		   (unsigned long long)
		   le64_to_cpu(ascb->scb->header.next_scb),
		   le16_to_cpu(ascb->scb->header.index),
		   ascb->scb->header.opcode);
}
936
937void asd_dump_scb_list(struct asd_ascb *ascb, int num)
938{
939 int i = 0;
940
941 asd_printk("dumping %d scbs:\n", num);
942
943 asd_dump_scb(ascb, i++);
944 --num;
945
946 if (num > 0 && !list_empty(&ascb->list)) {
947 struct list_head *el;
948
949 list_for_each(el, &ascb->list) {
950 struct asd_ascb *s = list_entry(el, struct asd_ascb,
951 list);
952 asd_dump_scb(s, i++);
953 if (--num <= 0)
954 break;
955 }
956 }
957}
958
959#endif /* ASD_DEBUG */
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h
new file mode 100644
index 000000000000..0c388e7da6bb
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.h
@@ -0,0 +1,52 @@
1/*
2 * Aic94xx SAS/SATA driver dump header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
#ifndef _AIC94XX_DUMP_H_
#define _AIC94XX_DUMP_H_

#ifdef ASD_DEBUG

/* Debug build: real dump routines, implemented in aic94xx_dump.c. */
void asd_dump_ddb_0(struct asd_ha_struct *asd_ha);
void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no);
void asd_dump_scb_sites(struct asd_ha_struct *asd_ha);
void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask);
void asd_dump_frame_rcvd(struct asd_phy *phy,
			 struct done_list_struct *dl);
void asd_dump_scb_list(struct asd_ascb *ascb, int num);
#else /* ASD_DEBUG */

/* Non-debug build: empty inline stubs so call sites need no #ifdefs. */
static inline void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) { }
static inline void asd_dump_target_ddb(struct asd_ha_struct *asd_ha,
				       u16 site_no) { }
static inline void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) { }
static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha,
				      u8 lseq_mask) { }
static inline void asd_dump_frame_rcvd(struct asd_phy *phy,
				       struct done_list_struct *dl) { }
static inline void asd_dump_scb_list(struct asd_ascb *ascb, int num) { }
#endif /* ASD_DEBUG */

#endif /* _AIC94XX_DUMP_H_ */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
new file mode 100644
index 000000000000..a24201351108
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -0,0 +1,1376 @@
1/*
2 * Aic94xx SAS/SATA driver hardware interface.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28#include <linux/delay.h>
29#include <linux/module.h>
30
31#include "aic94xx.h"
32#include "aic94xx_reg.h"
33#include "aic94xx_hwi.h"
34#include "aic94xx_seq.h"
35#include "aic94xx_dump.h"
36
37u32 MBAR0_SWB_SIZE;
38
39/* ---------- Initialization ---------- */
40
41static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
42{
43 extern char sas_addr_str[];
44 /* If the user has specified a WWN it overrides other settings
45 */
46 if (sas_addr_str[0] != '\0')
47 asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr,
48 sas_addr_str);
49 else if (asd_ha->hw_prof.sas_addr[0] != 0)
50 asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr);
51}
52
/* Copy the adapter-wide SAS address into the phy descriptors.
 *
 * NOTE(review): the guard below skips phys whose first address byte is
 * zero (i.e. phys with NO address), so only phys that already have an
 * address get overwritten.  That is the opposite of what the inline
 * comment claims -- confirm which policy is intended.
 */
static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
{
	int i;

	for (i = 0; i < ASD_MAX_PHYS; i++) {
		if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
			continue;
		/* Set a phy's address only if it has none.
		 */
		ASD_DPRINTK("setting phy%d addr to %llx\n", i,
			    SAS_ADDR(asd_ha->hw_prof.sas_addr));
		memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
		       asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
	}
}
68
69/* ---------- PHY initialization ---------- */
70
71static void asd_init_phy_identify(struct asd_phy *phy)
72{
73 phy->identify_frame = phy->id_frm_tok->vaddr;
74
75 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
76
77 phy->identify_frame->dev_type = SAS_END_DEV;
78 if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
79 phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
80 if (phy->sas_phy.role & PHY_ROLE_TARGET)
81 phy->identify_frame->target_bits = phy->sas_phy.tproto;
82 memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
83 SAS_ADDR_SIZE);
84 phy->identify_frame->phy_id = phy->sas_phy.id;
85}
86
87static int asd_init_phy(struct asd_phy *phy)
88{
89 struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
90 struct asd_sas_phy *sas_phy = &phy->sas_phy;
91
92 sas_phy->enabled = 1;
93 sas_phy->class = SAS;
94 sas_phy->iproto = SAS_PROTO_ALL;
95 sas_phy->tproto = 0;
96 sas_phy->type = PHY_TYPE_PHYSICAL;
97 sas_phy->role = PHY_ROLE_INITIATOR;
98 sas_phy->oob_mode = OOB_NOT_CONNECTED;
99 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
100
101 phy->id_frm_tok = asd_alloc_coherent(asd_ha,
102 sizeof(*phy->identify_frame),
103 GFP_KERNEL);
104 if (!phy->id_frm_tok) {
105 asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
106 return -ENOMEM;
107 } else
108 asd_init_phy_identify(phy);
109
110 memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));
111
112 return 0;
113}
114
115static int asd_init_phys(struct asd_ha_struct *asd_ha)
116{
117 u8 i;
118 u8 phy_mask = asd_ha->hw_prof.enabled_phys;
119
120 for (i = 0; i < ASD_MAX_PHYS; i++) {
121 struct asd_phy *phy = &asd_ha->phys[i];
122
123 phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
124
125 phy->sas_phy.enabled = 0;
126 phy->sas_phy.id = i;
127 phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
128 phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
129 phy->sas_phy.ha = &asd_ha->sas_ha;
130 phy->sas_phy.lldd_phy = phy;
131 }
132
133 /* Now enable and initialize only the enabled phys. */
134 for_each_phy(phy_mask, phy_mask, i) {
135 int err = asd_init_phy(&asd_ha->phys[i]);
136 if (err)
137 return err;
138 }
139
140 return 0;
141}
142
143/* ---------- Sliding windows ---------- */
144
/* Program the MBAR0 sliding windows A/B/C to the chip's internal
 * register regions and record their bases in io_handle[0].  Returns 0
 * on success or a PCI config-space access error code.
 */
static int asd_init_sw(struct asd_ha_struct *asd_ha)
{
	struct pci_dev *pcidev = asd_ha->pcidev;
	int err;
	u32 v;

	/* Unlock MBARs: read the key and, if non-zero, write it back. */
	err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
	if (err) {
		asd_printk("couldn't access conf. space of %s\n",
			   pci_name(pcidev));
		goto Err;
	}
	if (v)
		err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
	if (err) {
		asd_printk("couldn't write to MBAR_KEY of %s\n",
			   pci_name(pcidev));
		goto Err;
	}

	/* Set sliding windows A, B and C to point to proper internal
	 * memory regions.
	 */
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
			       REG_BASE_ADDR_CSEQCIO);
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
	asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
	asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
	asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
	/* Window B occupies the BAR from offset 0x80 to its end. */
	MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
	if (!asd_ha->iospace) {
		/* MBAR1 will point to OCM (On Chip Memory) */
		pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
		asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
	}
	spin_lock_init(&asd_ha->iolock);
Err:
	return err;
}
186
187/* ---------- SCB initialization ---------- */
188
189/**
190 * asd_init_scbs - manually allocate the first SCB.
191 * @asd_ha: pointer to host adapter structure
192 *
193 * This allocates the very first SCB which would be sent to the
194 * sequencer for execution. Its bus address is written to
195 * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of
196 * the _next_ scb to be DMA-ed to the host adapter is read from the last
197 * SCB DMA-ed to the host adapter, we have to always stay one step
198 * ahead of the sequencer and keep one SCB already allocated.
199 */
200static int asd_init_scbs(struct asd_ha_struct *asd_ha)
201{
202 struct asd_seq_data *seq = &asd_ha->seq;
203 int bitmap_bytes;
204
205 /* allocate the index array and bitmap */
206 asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
207 asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
208 sizeof(void *), GFP_KERNEL);
209 if (!asd_ha->seq.tc_index_array)
210 return -ENOMEM;
211
212 bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
213 bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
214 asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
215 if (!asd_ha->seq.tc_index_bitmap)
216 return -ENOMEM;
217
218 spin_lock_init(&seq->tc_index_lock);
219
220 seq->next_scb.size = sizeof(struct scb);
221 seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
222 &seq->next_scb.dma_handle);
223 if (!seq->next_scb.vaddr) {
224 kfree(asd_ha->seq.tc_index_bitmap);
225 kfree(asd_ha->seq.tc_index_array);
226 asd_ha->seq.tc_index_bitmap = NULL;
227 asd_ha->seq.tc_index_array = NULL;
228 return -ENOMEM;
229 }
230
231 seq->pending = 0;
232 spin_lock_init(&seq->pend_q_lock);
233 INIT_LIST_HEAD(&seq->pend_q);
234
235 return 0;
236}
237
238static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
239{
240 asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
241 asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
242 ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
243 asd_ha->hw_prof.max_scbs,
244 asd_ha->hw_prof.max_ddbs);
245}
246
247/* ---------- Done List initialization ---------- */
248
249static void asd_dl_tasklet_handler(unsigned long);
250
251static int asd_init_dl(struct asd_ha_struct *asd_ha)
252{
253 asd_ha->seq.actual_dl
254 = asd_alloc_coherent(asd_ha,
255 ASD_DL_SIZE * sizeof(struct done_list_struct),
256 GFP_KERNEL);
257 if (!asd_ha->seq.actual_dl)
258 return -ENOMEM;
259 asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
260 asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
261 asd_ha->seq.dl_next = 0;
262 tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
263 (unsigned long) asd_ha);
264
265 return 0;
266}
267
268/* ---------- EDB and ESCB init ---------- */
269
270static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, unsigned int gfp_flags)
271{
272 struct asd_seq_data *seq = &asd_ha->seq;
273 int i;
274
275 seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
276 if (!seq->edb_arr)
277 return -ENOMEM;
278
279 for (i = 0; i < seq->num_edbs; i++) {
280 seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
281 gfp_flags);
282 if (!seq->edb_arr[i])
283 goto Err_unroll;
284 memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
285 }
286
287 ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);
288
289 return 0;
290
291Err_unroll:
292 for (i-- ; i >= 0; i--)
293 asd_free_coherent(asd_ha, seq->edb_arr[i]);
294 kfree(seq->edb_arr);
295 seq->edb_arr = NULL;
296
297 return -ENOMEM;
298}
299
/* Allocate the empty-SCB pointer array and the ESCBs themselves.
 * asd_ascb_alloc_list() may deliver fewer than requested; num_escbs is
 * trimmed to what was actually allocated.  Returns 0 or -ENOMEM.
 */
static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
			   unsigned int gfp_flags)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	struct asd_ascb *escb;
	int i, escbs;

	seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
				gfp_flags);
	if (!seq->escb_arr)
		return -ENOMEM;

	escbs = seq->num_escbs;
	escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
	if (!escb) {
		asd_printk("couldn't allocate list of escbs\n");
		goto Err;
	}
	seq->num_escbs -= escbs;	  /* subtract what was not allocated */
	ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);

	/* Record each ESCB and mark it empty, advancing along the
	 * allocation list as we go. */
	for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
							       struct asd_ascb,
							       list)) {
		seq->escb_arr[i] = escb;
		escb->scb->header.opcode = EMPTY_SCB;
	}

	return 0;
Err:
	kfree(seq->escb_arr);
	seq->escb_arr = NULL;
	return -ENOMEM;

}
335
336static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
337{
338 struct asd_seq_data *seq = &asd_ha->seq;
339 int i, k, z = 0;
340
341 for (i = 0; i < seq->num_escbs; i++) {
342 struct asd_ascb *ascb = seq->escb_arr[i];
343 struct empty_scb *escb = &ascb->scb->escb;
344
345 ascb->edb_index = z;
346
347 escb->num_valid = ASD_EDBS_PER_SCB;
348
349 for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
350 struct sg_el *eb = &escb->eb[k];
351 struct asd_dma_tok *edb = seq->edb_arr[z++];
352
353 memset(eb, 0, sizeof(*eb));
354 eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
355 eb->size = cpu_to_le32(((u32) edb->size));
356 }
357 }
358}
359
/**
 * asd_init_escbs -- allocate and initialize empty scbs
 * @asd_ha: pointer to host adapter structure
 *
 * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers.
 * They transport sense data, etc.
 */
static int asd_init_escbs(struct asd_ha_struct *asd_ha)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int err = 0;

	/* Allocate two empty data buffers (edb) per sequencer. */
	int edbs = 2*(1+asd_ha->hw_prof.num_phys);

	/* Round the EDB count up to a whole number of ESCBs. */
	seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
	seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;

	err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
	if (err) {
		asd_printk("couldn't allocate edbs\n");
		return err;
	}

	err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
	if (err) {
		/* NOTE(review): the EDBs allocated above are not freed
		 * on this path -- presumably the caller's teardown
		 * reclaims them; confirm. */
		asd_printk("couldn't allocate escbs\n");
		return err;
	}

	asd_assign_edbs2escbs(asd_ha);
	/* In order to insure that normal SCBs do not overfill sequencer
	 * memory and leave no space for escbs (halting condition),
	 * we increment pending here by the number of escbs.  However,
	 * escbs are never pending.
	 */
	seq->pending = seq->num_escbs;
	seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;

	return 0;
}
401
402/* ---------- HW initialization ---------- */
403
404/**
405 * asd_chip_hardrst -- hard reset the chip
406 * @asd_ha: pointer to host adapter structure
407 *
408 * This takes 16 cycles and is synchronous to CFCLK, which runs
409 * at 200 MHz, so this should take at most 80 nanoseconds.
410 */
411int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
412{
413 int i;
414 int count = 100;
415 u32 reg;
416
417 for (i = 0 ; i < 4 ; i++) {
418 asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
419 }
420
421 do {
422 udelay(1);
423 reg = asd_read_reg_dword(asd_ha, CHIMINT);
424 if (reg & HARDRSTDET) {
425 asd_write_reg_dword(asd_ha, CHIMINT,
426 HARDRSTDET|PORRSTDET);
427 return 0;
428 }
429 } while (--count > 0);
430
431 return -ENODEV;
432}
433
434/**
435 * asd_init_chip -- initialize the chip
436 * @asd_ha: pointer to host adapter structure
437 *
438 * Hard resets the chip, disables HA interrupts, downloads the sequnecer
439 * microcode and starts the sequencers. The caller has to explicitly
440 * enable HA interrupts with asd_enable_ints(asd_ha).
441 */
442static int asd_init_chip(struct asd_ha_struct *asd_ha)
443{
444 int err;
445
446 err = asd_chip_hardrst(asd_ha);
447 if (err) {
448 asd_printk("couldn't hard reset %s\n",
449 pci_name(asd_ha->pcidev));
450 goto out;
451 }
452
453 asd_disable_ints(asd_ha);
454
455 err = asd_init_seqs(asd_ha);
456 if (err) {
457 asd_printk("couldn't init seqs for %s\n",
458 pci_name(asd_ha->pcidev));
459 goto out;
460 }
461
462 err = asd_start_seqs(asd_ha);
463 if (err) {
464 asd_printk("coudln't start seqs for %s\n",
465 pci_name(asd_ha->pcidev));
466 goto out;
467 }
468out:
469 return err;
470}
471
/* Number of extra DDBs that fit in the on-chip memory (OCM) region. */
#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))

/* User-tunable capacity limits; 0 means "use the hardware default". */
static int max_devs = 0;
module_param_named(max_devs, max_devs, int, S_IRUGO);
MODULE_PARM_DESC(max_devs, "\n"
	"\tMaximum number of SAS devices to support (not LUs).\n"
	"\tDefault: 2176, Maximum: 65663.\n");

static int max_cmnds = 0;
module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
MODULE_PARM_DESC(max_cmnds, "\n"
	"\tMaximum number of commands queuable.\n"
	"\tDefault: 512, Maximum: 66047.\n");
485
/* Extend device context memory into OCM: bias the DDB base down by the
 * current max_ddbs so DDB numbers past the on-chip capacity resolve
 * into the OCM region, then grow max_ddbs accordingly.
 */
static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
{
	unsigned long dma_addr = OCM_BASE_ADDR;
	u32 d;

	dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
	asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
	/* NOTE(review): bit 2 presumably selects the OCM domain for
	 * device contexts -- confirm against the register spec. */
	d |= 4;
	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
	asd_ha->hw_prof.max_ddbs += MAX_DEVS;
}
498
/* Extend the device context (DDB) memory into host memory when the
 * max_devs module parameter asks for more devices than fit on-chip.
 * Returns 0 (including the nothing-to-do case) or -ENOMEM; on failure
 * max_devs is clamped back to the hardware capacity.
 */
static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
{
	dma_addr_t dma_handle;
	unsigned long dma_addr;
	u32 d;
	int size;

	asd_extend_devctx_ocm(asd_ha);

	asd_ha->hw_prof.ddb_ext = NULL;
	/* Nothing to do if the request already fits, or exceeds the
	 * 16-bit DDB index range. */
	if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
		max_devs = asd_ha->hw_prof.max_ddbs;
		return 0;
	}

	/* One extra DDB's worth allows aligning the base address. */
	size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;

	asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
	if (!asd_ha->hw_prof.ddb_ext) {
		asd_printk("couldn't allocate memory for %d devices\n",
			   max_devs);
		max_devs = asd_ha->hw_prof.max_ddbs;
		return -ENOMEM;
	}
	/* Bias DEVCTXBASE down by the on-chip DDB count so that indices
	 * beyond max_ddbs land at the start of this host buffer. */
	dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
	dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
	dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
	dma_handle = (dma_addr_t) dma_addr;
	asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
	/* Clears the domain bit set by asd_extend_devctx_ocm(): device
	 * contexts now come from host memory. */
	d &= ~4;
	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);

	asd_ha->hw_prof.max_ddbs = max_devs;

	return 0;
}
536
/* Extend the command context (SCB) memory into host memory when the
 * max_cmnds module parameter asks for more commands than fit on-chip.
 * Mirrors asd_extend_devctx(); returns 0 or -ENOMEM, clamping
 * max_cmnds back to hardware capacity on failure.
 */
static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
{
	dma_addr_t dma_handle;
	unsigned long dma_addr;
	u32 d;
	int size;

	asd_ha->hw_prof.scb_ext = NULL;
	/* Nothing to do if the request already fits, or exceeds the
	 * 16-bit SCB index range. */
	if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
		max_cmnds = asd_ha->hw_prof.max_scbs;
		return 0;
	}

	/* One extra SCB's worth allows aligning the base address. */
	size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;

	asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
	if (!asd_ha->hw_prof.scb_ext) {
		asd_printk("couldn't allocate memory for %d commands\n",
			   max_cmnds);
		max_cmnds = asd_ha->hw_prof.max_scbs;
		return -ENOMEM;
	}
	/* Bias CMDCTXBASE down by the on-chip SCB count so that indices
	 * beyond max_scbs land at the start of this host buffer. */
	dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
	dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
	dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
	dma_handle = (dma_addr_t) dma_addr;
	asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
	/* Command contexts now come from host memory. */
	d &= ~1;
	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);

	asd_ha->hw_prof.max_scbs = max_cmnds;

	return 0;
}
572
/**
 * asd_init_ctxmem -- initialize context memory
 * @asd_ha: pointer to host adapter structure
 *
 * This function sets the maximum number of SCBs and
 * DDBs which can be used by the sequencer.  This is normally
 * 512 and 128 respectively.  If support for more SCBs or more DDBs
 * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
 * initialized here to extend context memory to point to host memory,
 * thus allowing unlimited support for SCBs and DDBs -- only limited
 * by host memory.
 *
 * Returns 0, or -ENOMEM when the DDB allocation bitmap can't be had.
 */
static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
{
	int bitmap_bytes;

	asd_get_max_scb_ddb(asd_ha);
	asd_extend_devctx(asd_ha);
	asd_extend_cmdctx(asd_ha);

	/* The kernel wants bitmaps to be unsigned long sized. */
	bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
	bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
	asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
	if (!asd_ha->hw_prof.ddb_bitmap)
		return -ENOMEM;
	spin_lock_init(&asd_ha->hw_prof.ddb_lock);

	return 0;
}
603
604int asd_init_hw(struct asd_ha_struct *asd_ha)
605{
606 int err;
607 u32 v;
608
609 err = asd_init_sw(asd_ha);
610 if (err)
611 return err;
612
613 err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
614 if (err) {
615 asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
616 pci_name(asd_ha->pcidev));
617 return err;
618 }
619 pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
620 v | SC_TMR_DIS);
621 if (err) {
622 asd_printk("couldn't disable split completion timer of %s\n",
623 pci_name(asd_ha->pcidev));
624 return err;
625 }
626
627 err = asd_read_ocm(asd_ha);
628 if (err) {
629 asd_printk("couldn't read ocm(%d)\n", err);
630 /* While suspicios, it is not an error that we
631 * couldn't read the OCM. */
632 }
633
634 err = asd_read_flash(asd_ha);
635 if (err) {
636 asd_printk("couldn't read flash(%d)\n", err);
637 /* While suspicios, it is not an error that we
638 * couldn't read FLASH memory.
639 */
640 }
641
642 asd_init_ctxmem(asd_ha);
643
644 asd_get_user_sas_addr(asd_ha);
645 if (!asd_ha->hw_prof.sas_addr[0]) {
646 asd_printk("No SAS Address provided for %s\n",
647 pci_name(asd_ha->pcidev));
648 err = -ENODEV;
649 goto Out;
650 }
651
652 asd_propagate_sas_addr(asd_ha);
653
654 err = asd_init_phys(asd_ha);
655 if (err) {
656 asd_printk("couldn't initialize phys for %s\n",
657 pci_name(asd_ha->pcidev));
658 goto Out;
659 }
660
661 err = asd_init_scbs(asd_ha);
662 if (err) {
663 asd_printk("couldn't initialize scbs for %s\n",
664 pci_name(asd_ha->pcidev));
665 goto Out;
666 }
667
668 err = asd_init_dl(asd_ha);
669 if (err) {
670 asd_printk("couldn't initialize the done list:%d\n",
671 err);
672 goto Out;
673 }
674
675 err = asd_init_escbs(asd_ha);
676 if (err) {
677 asd_printk("couldn't initialize escbs\n");
678 goto Out;
679 }
680
681 err = asd_init_chip(asd_ha);
682 if (err) {
683 asd_printk("couldn't init the chip\n");
684 goto Out;
685 }
686Out:
687 return err;
688}
689
690/* ---------- Chip reset ---------- */
691
/**
 * asd_chip_reset -- reset the host adapter, etc
 * @asd_ha: pointer to host adapter structure of interest
 *
 * Called from the ISR.  Hard reset the chip.  Let everything
 * timeout.  This should be no different than hot-unplugging the
 * host adapter.  Once everything times out we'll init the chip with
 * a call to asd_init_chip() and enable interrupts with asd_enable_ints().
 * XXX finish.
 */
static void asd_chip_reset(struct asd_ha_struct *asd_ha)
{
	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;

	ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
	asd_chip_hardrst(asd_ha);
	/* Notify libsas so it can recover outstanding commands. */
	sas_ha->notify_ha_event(sas_ha, HAE_RESET);
}
710
711/* ---------- Done List Routines ---------- */
712
/* Drain the done list: for each entry whose toggle bit matches the
 * expected phase, look up the owning aSCB, remove it from the pending
 * queue (empty SCBs are never pending) and invoke its completion
 * callback.  Stops at the first entry the sequencer hasn't written yet.
 */
static void asd_dl_tasklet_handler(unsigned long data)
{
	struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data;
	struct asd_seq_data *seq = &asd_ha->seq;
	unsigned long flags;

	while (1) {
		struct done_list_struct *dl = &seq->dl[seq->dl_next];
		struct asd_ascb *ascb;

		/* Phase mismatch: entry not yet written by hardware. */
		if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle)
			break;

		/* find the aSCB */
		spin_lock_irqsave(&seq->tc_index_lock, flags);
		ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index));
		spin_unlock_irqrestore(&seq->tc_index_lock, flags);
		if (unlikely(!ascb)) {
			ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n");
			goto next_1;
		} else if (ascb->scb->header.opcode == EMPTY_SCB) {
			/* Empty SCBs skip the pending-queue bookkeeping. */
			goto out;
		} else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) {
			/* Timer already fired -- it owns completion. */
			goto next_1;
		}
		spin_lock_irqsave(&seq->pend_q_lock, flags);
		list_del_init(&ascb->list);
		seq->pending--;
		spin_unlock_irqrestore(&seq->pend_q_lock, flags);
	out:
		ascb->tasklet_complete(ascb, dl);

	next_1:
		/* Advance; flip the expected toggle on wrap-around. */
		seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1);
		if (!seq->dl_next)
			seq->dl_toggle ^= DL_TOGGLE_MASK;
	}
}
751
752/* ---------- Interrupt Service Routines ---------- */
753
/**
 * asd_process_donelist_isr -- schedule processing of done list entries
 * @asd_ha: pointer to host adapter structure
 *
 * Defers the actual draining to asd_dl_tasklet_handler() in
 * tasklet context, keeping the ISR short.
 */
static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
{
	tasklet_schedule(&asd_ha->seq.dl_tasklet);
}
762
/**
 * asd_com_sas_isr -- process device communication interrupt (COMINT)
 * @asd_ha: pointer to host adapter structure
 *
 * Decodes and logs the COMSTAT error cause, then unconditionally
 * resets the chip -- these errors are treated as fatal.
 */
static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
{
	u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);

	/* clear COMSTAT int */
	asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF);

	if (comstat & CSBUFPERR) {
		asd_printk("%s: command/status buffer dma parity error\n",
			   pci_name(asd_ha->pcidev));
	} else if (comstat & CSERR) {
		int i;
		u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
		/* Low byte of DMAERR holds the command/status error code. */
		dmaerr &= 0xFF;
		asd_printk("%s: command/status dma error, DMAERR: 0x%02x, "
			   "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n",
			   pci_name(asd_ha->pcidev),
			   dmaerr,
			   asd_read_reg_dword(asd_ha, CSDMAADR),
			   asd_read_reg_dword(asd_ha, CSDMAADR+4));
		asd_printk("CSBUFFER:\n");
		/* Dump 8 rows of 16 bytes from the CS buffer window. */
		for (i = 0; i < 8; i++) {
			asd_printk("%08x %08x %08x %08x\n",
				   asd_read_reg_dword(asd_ha, CSBUFFER),
				   asd_read_reg_dword(asd_ha, CSBUFFER+4),
				   asd_read_reg_dword(asd_ha, CSBUFFER+8),
				   asd_read_reg_dword(asd_ha, CSBUFFER+12));
		}
		asd_dump_seq_state(asd_ha, 0);
	} else if (comstat & OVLYERR) {
		u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
		/* Second byte of DMAERR holds the overlay error code. */
		dmaerr = (dmaerr >> 8) & 0xFF;
		asd_printk("%s: overlay dma error:0x%x\n",
			   pci_name(asd_ha->pcidev),
			   dmaerr);
	}
	asd_chip_reset(asd_ha);
}
805
/* Report ARP2 (sequencer processor) errors for the central sequencer
 * and every link sequencer flagged in @dchstatus, translating halt
 * codes to symbolic names, then reset the chip.
 */
static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
{
	/* Indexed by the halt code in bits 23:16 of ARP2INT. */
	static const char *halt_code[256] = {
		"UNEXPECTED_INTERRUPT0",
		"UNEXPECTED_INTERRUPT1",
		"UNEXPECTED_INTERRUPT2",
		"UNEXPECTED_INTERRUPT3",
		"UNEXPECTED_INTERRUPT4",
		"UNEXPECTED_INTERRUPT5",
		"UNEXPECTED_INTERRUPT6",
		"UNEXPECTED_INTERRUPT7",
		"UNEXPECTED_INTERRUPT8",
		"UNEXPECTED_INTERRUPT9",
		"UNEXPECTED_INTERRUPT10",
		[11 ... 19] = "unknown[11,19]",
		"NO_FREE_SCB_AVAILABLE",
		"INVALID_SCB_OPCODE",
		"INVALID_MBX_OPCODE",
		"INVALID_ATA_STATE",
		"ATA_QUEUE_FULL",
		"ATA_TAG_TABLE_FAULT",
		"ATA_TAG_MASK_FAULT",
		"BAD_LINK_QUEUE_STATE",
		"DMA2CHIM_QUEUE_ERROR",
		"EMPTY_SCB_LIST_FULL",
		"unknown[30]",
		"IN_USE_SCB_ON_FREE_LIST",
		"BAD_OPEN_WAIT_STATE",
		"INVALID_STP_AFFILIATION",
		"unknown[34]",
		"EXEC_QUEUE_ERROR",
		"TOO_MANY_EMPTIES_NEEDED",
		"EMPTY_REQ_QUEUE_ERROR",
		"Q_MONIRTT_MGMT_ERROR",
		"TARGET_MODE_FLOW_ERROR",
		"DEVICE_QUEUE_NOT_FOUND",
		"START_IRTT_TIMER_ERROR",
		"ABORT_TASK_ILLEGAL_REQ",
		[43 ... 255] = "unknown[43,255]"
	};

	if (dchstatus & CSEQINT) {
		u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);

		/* Distinguish bus/parity faults from deliberate halts. */
		if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
			asd_printk("%s: CSEQ arp2int:0x%x\n",
				   pci_name(asd_ha->pcidev),
				   arp2int);
		} else if (arp2int & ARP2HALTC)
			asd_printk("%s: CSEQ halted: %s\n",
				   pci_name(asd_ha->pcidev),
				   halt_code[(arp2int>>16)&0xFF]);
		else
			asd_printk("%s: CARP2INT:0x%x\n",
				   pci_name(asd_ha->pcidev),
				   arp2int);
	}
	if (dchstatus & LSEQINT_MASK) {
		int lseq;
		u8  lseq_mask = dchstatus & LSEQINT_MASK;

		/* Same reporting, per flagged link sequencer. */
		for_each_sequencer(lseq_mask, lseq_mask, lseq) {
			u32 arp2int = asd_read_reg_dword(asd_ha,
							 LmARP2INT(lseq));
			if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
				       | ARP2CIOPERR)) {
				asd_printk("%s: LSEQ%d arp2int:0x%x\n",
					   pci_name(asd_ha->pcidev),
					   lseq, arp2int);
				/* XXX we should only do lseq reset */
			} else if (arp2int & ARP2HALTC)
				asd_printk("%s: LSEQ%d halted: %s\n",
					   pci_name(asd_ha->pcidev),
					   lseq,halt_code[(arp2int>>16)&0xFF]);
			else
				asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
					   pci_name(asd_ha->pcidev), lseq,
					   arp2int);
		}
	}
	asd_chip_reset(asd_ha);
}
888
889/**
890 * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
891 * @asd_ha: pointer to host adapter structure
892 */
893static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
894{
895 u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
896
897 if (dchstatus & CFIFTOERR) {
898 asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
899 asd_chip_reset(asd_ha);
900 } else
901 asd_arp2_err(asd_ha, dchstatus);
902}
903
/**
 * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
 * @asd_ha: pointer to host adapter structure
 *
 * Decodes SEEPROM format/checksum errors and CPI2 transfer errors,
 * then resets the chip.  Returns early (no reset) when the interrupt
 * fired without ASIERR set, or when no CPI2 master error is reported.
 */
static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
{
	u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);

	if (!(stat0r & ASIERR)) {
		/* Spurious EXSI interrupt: nothing to decode, no reset. */
		asd_printk("hmm, EXSI interrupted but no error?\n");
		return;
	}

	if (stat0r & ASIFMTERR) {
		asd_printk("ASI SEEPROM format error for %s\n",
			   pci_name(asd_ha->pcidev));
	} else if (stat0r & ASISEECHKERR) {
		u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
		asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
			   stat1r & CHECKSUM_MASK,
			   pci_name(asd_ha->pcidev));
	} else {
		u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);

		if (!(statr & CPI2ASIMSTERR_MASK)) {
			/* ASIERR set but no CPI2 master error recorded. */
			ASD_DPRINTK("hmm, ASIERR?\n");
			return;
		} else {
			u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
			u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);

			/* Unpack the CPI2 error fields from ASIERRSTATR. */
			asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
				   "count: 0x%x, byteen: 0x%x, targerr: 0x%x "
				   "master id: 0x%x, master err: 0x%x\n",
				   pci_name(asd_ha->pcidev),
				   addr, data,
				   (statr & CPI2ASIBYTECNT_MASK) >> 16,
				   (statr & CPI2ASIBYTEEN_MASK) >> 12,
				   (statr & CPI2ASITARGERR_MASK) >> 8,
				   (statr & CPI2ASITARGMID_MASK) >> 4,
				   (statr & CPI2ASIMSTERR_MASK));
		}
	}
	asd_chip_reset(asd_ha);
}
949
950/**
951 * asd_hst_pcix_isr -- process host interface interrupts
952 * @asd_ha: pointer to host adapter structure
953 *
954 * Asserted on PCIX errors: target abort, etc.
955 */
956static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
957{
958 u16 status;
959 u32 pcix_status;
960 u32 ecc_status;
961
962 pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status);
963 pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status);
964 pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status);
965
966 if (status & PCI_STATUS_DETECTED_PARITY)
967 asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev));
968 else if (status & PCI_STATUS_REC_MASTER_ABORT)
969 asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev));
970 else if (status & PCI_STATUS_REC_TARGET_ABORT)
971 asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev));
972 else if (status & PCI_STATUS_PARITY)
973 asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev));
974 else if (pcix_status & RCV_SCE) {
975 asd_printk("received split completion error for %s\n",
976 pci_name(asd_ha->pcidev));
977 pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
978 /* XXX: Abort task? */
979 return;
980 } else if (pcix_status & UNEXP_SC) {
981 asd_printk("unexpected split completion for %s\n",
982 pci_name(asd_ha->pcidev));
983 pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
984 /* ignore */
985 return;
986 } else if (pcix_status & SC_DISCARD)
987 asd_printk("split completion discarded for %s\n",
988 pci_name(asd_ha->pcidev));
989 else if (ecc_status & UNCOR_ECCERR)
990 asd_printk("uncorrectable ECC error for %s\n",
991 pci_name(asd_ha->pcidev));
992 asd_chip_reset(asd_ha);
993}
994
/**
 * asd_hw_isr -- host adapter interrupt service routine
 * @irq: ignored
 * @dev_id: pointer to host adapter structure
 * @regs: ignored
 *
 * The ISR processes done list entries and level 3 error handling.
 *
 * Returns IRQ_NONE when CHIMINT reads zero (shared-IRQ case: the
 * interrupt was not ours), IRQ_HANDLED otherwise.
 */
irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct asd_ha_struct *asd_ha = dev_id;
	u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT);

	if (!chimint)
		return IRQ_NONE;

	/* Ack the pending bits; the read-back presumably flushes the
	 * posted write before the sub-handlers run -- TODO confirm.
	 */
	asd_write_reg_dword(asd_ha, CHIMINT, chimint);
	(void) asd_read_reg_dword(asd_ha, CHIMINT);

	/* Dispatch each asserted source to its dedicated handler. */
	if (chimint & DLAVAIL)
		asd_process_donelist_isr(asd_ha);
	if (chimint & COMINT)
		asd_com_sas_isr(asd_ha);
	if (chimint & DEVINT)
		asd_dch_sas_isr(asd_ha);
	if (chimint & INITERR)
		asd_rbi_exsi_isr(asd_ha);
	if (chimint & HOSTERR)
		asd_hst_pcix_isr(asd_ha);

	return IRQ_HANDLED;
}
1027
1028/* ---------- SCB handling ---------- */
1029
/* Allocate and initialize a single aSCB: the host-side struct comes
 * from the asd_ascb_cache slab, the hardware SCB from the DMA pool.
 * A transaction context (tc) index is reserved under tc_index_lock and
 * written into the SCB header.  Returns NULL on any failure, with all
 * partial allocations undone.
 */
static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
					      unsigned int gfp_flags)
{
	extern kmem_cache_t *asd_ascb_cache;
	struct asd_seq_data *seq = &asd_ha->seq;
	struct asd_ascb *ascb;
	unsigned long flags;

	ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags);

	if (ascb) {
		memset(ascb, 0, sizeof(*ascb));
		ascb->dma_scb.size = sizeof(struct scb);
		ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
						     gfp_flags,
						     &ascb->dma_scb.dma_handle);
		if (!ascb->dma_scb.vaddr) {
			/* No DMA memory: release the slab object too. */
			kmem_cache_free(asd_ascb_cache, ascb);
			return NULL;
		}
		memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
		asd_init_ascb(asd_ha, ascb);

		spin_lock_irqsave(&seq->tc_index_lock, flags);
		ascb->tc_index = asd_tc_index_get(seq, ascb);
		spin_unlock_irqrestore(&seq->tc_index_lock, flags);
		if (ascb->tc_index == -1)
			goto undo;

		ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index);
	}

	return ascb;
undo:
	/* tc index exhausted: unwind the DMA and slab allocations. */
	dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
		      ascb->dma_scb.dma_handle);
	kmem_cache_free(asd_ascb_cache, ascb);
	ASD_DPRINTK("no index for ascb\n");
	return NULL;
}
1070
/**
 * asd_ascb_alloc_list -- allocate a list of aSCBs
 * @asd_ha: pointer to host adapter structure
 * @num: pointer to integer number of aSCBs
 * @gfp_flags: GFP_ flags.
 *
 * This is the only function which is used to allocate aSCBs.
 * It can allocate one or many. If more than one, then they form
 * a linked list in two ways: by their list field of the ascb struct
 * and by the next_scb field of the scb_header.
 *
 * Returns NULL if no memory was available, else pointer to a list
 * of ascbs. When this function returns, @num would be the number
 * of SCBs which were not able to be allocated, 0 if all requested
 * were able to be allocated.
 */
struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
				     *asd_ha, int *num,
				     unsigned int gfp_flags)
{
	struct asd_ascb *first = NULL;

	/* Decrement *num per success so the caller sees the shortfall. */
	for ( ; *num > 0; --*num) {
		struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags);

		if (!ascb)
			break;
		else if (!first)
			first = ascb;
		else {
			/* Tail of the circular list is first->list.prev;
			 * chain both the kernel list and the hardware
			 * next_scb DMA pointer through the new element.
			 */
			struct asd_ascb *last = list_entry(first->list.prev,
							   struct asd_ascb,
							   list);
			list_add_tail(&ascb->list, &first->list);
			last->scb->header.next_scb =
				cpu_to_le64(((u64)ascb->dma_scb.dma_handle));
		}
	}

	return first;
}
1112
/**
 * asd_swap_head_scb -- swap the head scb
 * @asd_ha: pointer to host adapter structure
 * @ascb: pointer to the head of an ascb list
 *
 * The sequencer knows the DMA address of the next SCB to be DMAed to
 * the host adapter, from initialization or from the last list DMAed.
 * seq->next_scb keeps the address of this SCB. The sequencer will
 * DMA to the host adapter this list of SCBs. But the head (first
 * element) of this list is not known to the sequencer. Here we swap
 * the head of the list with the known SCB (memcpy()).
 * Only one memcpy() is required per list so it is in our interest
 * to keep the list of SCB as long as possible so that the ratio
 * of number of memcpy calls to the number of SCB DMA-ed is as small
 * as possible.
 *
 * LOCKING: called with the pending list lock held.
 */
static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
				     struct asd_ascb *ascb)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	struct asd_ascb *last = list_entry(ascb->list.prev,
					   struct asd_ascb,
					   list);
	/* Save the head's DMA token; it becomes the new next_scb. */
	struct asd_dma_tok t = ascb->dma_scb;

	/* Copy the head's contents into the SCB the sequencer knows,
	 * then point the head at that known buffer.
	 */
	memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
	ascb->dma_scb = seq->next_scb;
	ascb->scb = ascb->dma_scb.vaddr;
	seq->next_scb = t;
	/* Close the hardware chain: the list tail links to the SCB the
	 * sequencer will fetch after this batch.
	 */
	last->scb->header.next_scb =
		cpu_to_le64(((u64)seq->next_scb.dma_handle));
}
1147
1148/**
1149 * asd_start_timers -- (add and) start timers of SCBs
1150 * @list: pointer to struct list_head of the scbs
1151 * @to: timeout in jiffies
1152 *
1153 * If an SCB in the @list has no timer function, assign the default
1154 * one, then start the timer of the SCB. This function is
1155 * intended to be called from asd_post_ascb_list(), just prior to
1156 * posting the SCBs to the sequencer.
1157 */
1158static inline void asd_start_scb_timers(struct list_head *list)
1159{
1160 struct asd_ascb *ascb;
1161 list_for_each_entry(ascb, list, list) {
1162 if (!ascb->uldd_timer) {
1163 ascb->timer.data = (unsigned long) ascb;
1164 ascb->timer.function = asd_ascb_timedout;
1165 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
1166 add_timer(&ascb->timer);
1167 }
1168 }
1169}
1170
/**
 * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter
 * @asd_ha: pointer to a host adapter structure
 * @ascb: pointer to the first aSCB in the list
 * @num: number of aSCBs in the list (to be posted)
 *
 * See queueing comment in asd_post_escb_list().
 *
 * Additional note on queuing: In order to minimize the ratio of memcpy()
 * to the number of ascbs sent, we try to batch-send as many ascbs as possible
 * in one go.
 * Two cases are possible:
 *    A) can_queue >= num,
 *    B) can_queue < num.
 * Case A: we can send the whole batch at once. Increment "pending"
 * in the beginning of this function, when it is checked, in order to
 * eliminate races when this function is called by multiple processes.
 * Case B: should never happen if the managing layer considers
 * lldd_queue_size.
 *
 * Returns 0 on success, -SAS_QUEUE_FULL when fewer than @num slots
 * remain (the batch is all-or-nothing).
 */
int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
		       int num)
{
	unsigned long flags;
	LIST_HEAD(list);
	int can_queue;

	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
	/* Reserve the slots while holding the lock (case A above). */
	can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
	if (can_queue >= num)
		asd_ha->seq.pending += num;
	else
		can_queue = 0;

	if (!can_queue) {
		spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
		asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
		return -SAS_QUEUE_FULL;
	}

	asd_swap_head_scb(asd_ha, ascb);

	/* Splice the circular ascb chain onto a local list head so it
	 * can be traversed/spliced with the standard list helpers.
	 */
	__list_add(&list, ascb->list.prev, &ascb->list);

	asd_start_scb_timers(&list);

	/* Bump the producer index and tell the sequencer via SCBPRO. */
	asd_ha->seq.scbpro += num;
	list_splice_init(&list, asd_ha->seq.pend_q.prev);
	asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);

	return 0;
}
1224
/**
 * asd_post_escb_list -- post a list of 1 or more empty scb
 * @asd_ha: pointer to a host adapter structure
 * @ascb: pointer to the first empty SCB in the list
 * @num: number of aSCBs in the list (to be posted)
 *
 * This is essentially the same as asd_post_ascb_list, but we do not
 * increment pending, add those to the pending list or get indexes.
 * See asd_init_escbs() and asd_init_post_escbs().
 *
 * Since sending a list of ascbs is a superset of sending a single
 * ascb, this function exists to generalize this.  More specifically,
 * when sending a list of those, we want to do only a _single_
 * memcpy() at swap head, as opposed to for each ascb sent (in the
 * case of sending them one by one). That is, we want to minimize the
 * ratio of memcpy() operations to the number of ascbs sent. The same
 * logic applies to asd_post_ascb_list().
 *
 * Always returns 0 (kept int for signature symmetry with
 * asd_post_ascb_list()).
 */
int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
		       int num)
{
	unsigned long flags;

	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
	asd_swap_head_scb(asd_ha, ascb);
	/* Advance the producer index and notify the sequencer. */
	asd_ha->seq.scbpro += num;
	asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);

	return 0;
}
1256
1257/* ---------- LED ---------- */
1258
1259/**
1260 * asd_turn_led -- turn on/off an LED
1261 * @asd_ha: pointer to host adapter structure
1262 * @phy_id: the PHY id whose LED we want to manupulate
1263 * @op: 1 to turn on, 0 to turn off
1264 */
1265void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1266{
1267 if (phy_id < ASD_MAX_PHYS) {
1268 u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
1269 if (op)
1270 v |= LEDPOL;
1271 else
1272 v &= ~LEDPOL;
1273 asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
1274 }
1275}
1276
1277/**
1278 * asd_control_led -- enable/disable an LED on the board
1279 * @asd_ha: pointer to host adapter structure
1280 * @phy_id: integer, the phy id
1281 * @op: integer, 1 to enable, 0 to disable the LED
1282 *
1283 * First we output enable the LED, then we set the source
1284 * to be an external module.
1285 */
1286void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1287{
1288 if (phy_id < ASD_MAX_PHYS) {
1289 u32 v;
1290
1291 v = asd_read_reg_dword(asd_ha, GPIOOER);
1292 if (op)
1293 v |= (1 << phy_id);
1294 else
1295 v &= ~(1 << phy_id);
1296 asd_write_reg_dword(asd_ha, GPIOOER, v);
1297
1298 v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
1299 if (op)
1300 v |= (1 << phy_id);
1301 else
1302 v &= ~(1 << phy_id);
1303 asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
1304 }
1305}
1306
1307/* ---------- PHY enable ---------- */
1308
/* Program the OOB (out-of-band) registers of one phy from its
 * manufacturing-sector defaults and arm the identify-frame DMA
 * address, then enable its LED.  Always returns 0.
 */
static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
{
	struct asd_phy *phy = &asd_ha->phys[phy_id];

	/* Mask secondary OOB interrupts and set the hot-plug delay
	 * before touching the phy control registers.
	 */
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
			   HOTPLUG_DELAY_TIMEOUT);

	/* Get defaults from manuf. sector */
	/* XXX we need defaults for those in case MS is broken. */
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0),
			   phy->phy_desc->phy_control_0);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1),
			   phy->phy_desc->phy_control_1);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2),
			   phy->phy_desc->phy_control_2);
	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3),
			   phy->phy_desc->phy_control_3);

	asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id),
			    ASD_COMINIT_TIMEOUT);

	/* DMA address of the IDENTIFY address frame to transmit. */
	asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id),
			   phy->id_frm_tok->dma_handle);

	asd_control_led(asd_ha, phy_id, 1);

	return 0;
}
1338
1339int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
1340{
1341 u8 phy_m;
1342 u8 i;
1343 int num = 0, k;
1344 struct asd_ascb *ascb;
1345 struct asd_ascb *ascb_list;
1346
1347 if (!phy_mask) {
1348 asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__);
1349 return 0;
1350 }
1351
1352 for_each_phy(phy_mask, phy_m, i) {
1353 num++;
1354 asd_enable_phy(asd_ha, i);
1355 }
1356
1357 k = num;
1358 ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL);
1359 if (!ascb_list) {
1360 asd_printk("no memory for control phy ascb list\n");
1361 return -ENOMEM;
1362 }
1363 num -= k;
1364
1365 ascb = ascb_list;
1366 for_each_phy(phy_mask, phy_m, i) {
1367 asd_build_control_phy(ascb, i, ENABLE_PHY);
1368 ascb = list_entry(ascb->list.next, struct asd_ascb, list);
1369 }
1370 ASD_DPRINTK("posting %d control phy scbs\n", num);
1371 k = asd_post_ascb_list(asd_ha, ascb_list, num);
1372 if (k)
1373 asd_ascb_free_list(ascb_list);
1374
1375 return k;
1376}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
new file mode 100644
index 000000000000..c7d505388fed
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -0,0 +1,397 @@
1/*
2 * Aic94xx SAS/SATA driver hardware interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_HWI_H_
28#define _AIC94XX_HWI_H_
29
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33
34#include <scsi/libsas.h>
35
36#include "aic94xx.h"
37#include "aic94xx_sas.h"
38
39/* Define ASD_MAX_PHYS to the maximum phys ever. Currently 8. */
40#define ASD_MAX_PHYS 8
41#define ASD_PCBA_SN_SIZE 12
42
43/* Those are to be further named properly, the "RAZORx" part, and
44 * subsequently included in include/linux/pci_ids.h.
45 */
46#define PCI_DEVICE_ID_ADAPTEC2_RAZOR10 0x410
47#define PCI_DEVICE_ID_ADAPTEC2_RAZOR12 0x412
48#define PCI_DEVICE_ID_ADAPTEC2_RAZOR1E 0x41E
49#define PCI_DEVICE_ID_ADAPTEC2_RAZOR30 0x430
50#define PCI_DEVICE_ID_ADAPTEC2_RAZOR32 0x432
51#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3E 0x43E
52#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3F 0x43F
53
/* One mapped PCI BAR of the adapter plus the internal window bases
 * used to address chip memory through it.
 */
struct asd_ha_addrspace {
	void __iomem  *addr;	/* ioremapped base of the BAR */
	unsigned long  start;       /* pci resource start */
	unsigned long  len;         /* pci resource len */
	unsigned long  flags;       /* pci resource flags */

	/* addresses internal to the host adapter */
	u32 swa_base; /* mmspace 1 (MBAR1) uses this only */
	u32 swb_base;
	u32 swc_base;
};
65
/* BIOS image info discovered during init (fields inferred from names;
 * confirm against the flash/NVRAM probing code).
 */
struct bios_struct {
	int present;	/* non-zero if a BIOS image was found */
	u8  maj;	/* presumably BIOS major version -- TODO confirm */
	u8  min;	/* presumably BIOS minor version -- TODO confirm */
	u32 bld;	/* presumably BIOS build number -- TODO confirm */
};
72
/* A counted array of fixed-size elements (NOTE(review): semantics of
 * num/size/area inferred from names; verify against the flash code).
 */
struct unit_element_struct {
	u16 num;	/* number of elements */
	u16 size;	/* size of one element */
	void *area;	/* backing storage */
};
78
/* Flash part description (field meanings inferred from names;
 * confirm against the flash access code).
 */
struct flash_struct {
	u32 bar;	/* BAR through which flash is accessed */
	int present;	/* non-zero if a flash part was detected */
	int wide;	/* presumably 16-bit vs 8-bit access -- TODO confirm */
	u8  manuf;	/* manufacturer id byte */
	u8  dev_id;	/* device id byte */
	u8  sec_prot;	/* presumably sector protection -- TODO confirm */

	u32 dir_offs;	/* offset of the flash directory */
};
89
/* Per-phy static configuration, loaded from CTRL-A settings. */
struct asd_phy_desc {
	/* From CTRL-A settings, then set to what is appropriate */
	u8 sas_addr[SAS_ADDR_SIZE];
	u8 max_sas_lrate;	/* link-rate limits for SAS ... */
	u8 min_sas_lrate;
	u8 max_sata_lrate;	/* ... and for SATA */
	u8 min_sata_lrate;
	u8 flags;		/* ASD_* flag bits below */
#define ASD_CRC_DIS  1
#define ASD_SATA_SPINUP_HOLD  2

	u8 phy_control_0; /* mode 5 reg 0x160 */
	u8 phy_control_1; /* mode 5 reg 0x161 */
	u8 phy_control_2; /* mode 5 reg 0x162 */
	u8 phy_control_3; /* mode 5 reg 0x163 */
};
106
/* A DMA-coherent buffer: CPU virtual address, bus address and size.
 * Allocated/freed via asd_alloc_coherent()/asd_free_coherent().
 */
struct asd_dma_tok {
	void *vaddr;		/* kernel virtual address */
	dma_addr_t dma_handle;	/* bus address for the device */
	size_t size;		/* buffer size in bytes */
};
112
/* Capabilities and resources discovered for one adapter. */
struct hw_profile {
	struct bios_struct bios;
	struct unit_element_struct ue;
	struct flash_struct flash;

	u8 sas_addr[SAS_ADDR_SIZE];	/* adapter SAS address */
	char pcba_sn[ASD_PCBA_SN_SIZE+1];	/* PCBA serial number, NUL-terminated */

	u8 enabled_phys;		  /* mask of enabled phys */
	struct asd_phy_desc phy_desc[ASD_MAX_PHYS];
	u32 max_scbs;		  /* absolute sequencer scb queue size */
	struct asd_dma_tok *scb_ext;	/* external SCB site memory, if any */
	u32 max_ddbs;
	struct asd_dma_tok *ddb_ext;	/* external DDB site memory, if any */

	spinlock_t ddb_lock;	/* protects ddb_bitmap */
	void  *ddb_bitmap;

	int num_phys;		  /* ENABLEABLE */
	int max_phys;		  /* REPORTED + ENABLEABLE */

	unsigned addr_range;	  /* max # of addrs; max # of possible ports */
	unsigned port_name_base;
	unsigned dev_name_base;
	unsigned sata_name_base;
};
139
/* Host-side wrapper around one hardware SCB ("aSCB"): list linkage,
 * the DMA token of the SCB itself, completion callback and timer.
 */
struct asd_ascb {
	struct list_head list;	/* circular chain of a posted batch */
	struct asd_ha_struct *ha;

	struct scb *scb;	  /* equals dma_scb->vaddr */
	struct asd_dma_tok dma_scb;
	struct asd_dma_tok *sg_arr;	/* scatter-gather table, if any */

	void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *);
	u8     uldd_timer:1;	/* set when the upper layer owns the timeout */

	/* internally generated command */
	struct timer_list timer;
	struct completion completion;
	u8        tag_valid:1;
	__be16    tag;		  /* error recovery only */

	/* If this is an Empty SCB, index of first edb in seq->edb_arr. */
	int edb_index;

	/* Used by the timer timeout function. */
	int tc_index;

	void *uldd_task;	/* opaque upper-layer cookie */
};
165
166#define ASD_DL_SIZE_BITS 0x8
167#define ASD_DL_SIZE (1<<(2+ASD_DL_SIZE_BITS))
168#define ASD_DEF_DL_TOGGLE 0x01
169
/* Per-adapter sequencer bookkeeping: the pending SCB queue, the
 * transaction-context index allocator, and the done-list state.
 */
struct asd_seq_data {
	spinlock_t pend_q_lock;	/* protects scbpro, pending, pend_q, next_scb */
	u16    scbpro;		/* producer index written to the SCBPRO register */
	int    pending;		/* number of SCBs outstanding */
	struct list_head pend_q;
	int    can_queue;         /* per adapter */
	struct asd_dma_tok next_scb; /* next scb to be delivered to CSEQ */

	spinlock_t tc_index_lock;	/* protects the tc index allocator below */
	void **tc_index_array;
	void  *tc_index_bitmap;
	int   tc_index_bitmap_bits;

	struct tasklet_struct dl_tasklet;
	struct done_list_struct *dl; /* array of done list entries, equals */
	struct asd_dma_tok *actual_dl; /* actual_dl->vaddr */
	int    dl_toggle;	/* expected toggle bit of valid entries */
	int    dl_next;		/* next done-list slot to examine */

	int    num_edbs;
	struct asd_dma_tok **edb_arr;	/* empty data buffers */
	int    num_escbs;
	struct asd_ascb **escb_arr; /* array of pointers to escbs */
};
194
/* This is the Host Adapter structure. It describes the hardware
 * SAS adapter.
 */
struct asd_ha_struct {
	struct pci_dev   *pcidev;
	const char       *name;

	struct sas_ha_struct sas_ha;	/* libsas per-HA state */

	u8                revision_id;	/* PCI revision id */

	int               iospace;	/* which io_handle is in use */
	spinlock_t        iolock;	/* serializes register access */
	struct asd_ha_addrspace io_handle[2];

	struct hw_profile hw_prof;

	struct asd_phy    phys[ASD_MAX_PHYS];
	struct asd_sas_port   ports[ASD_MAX_PHYS];

	struct dma_pool  *scb_pool;	/* pool of hardware SCBs */

	struct asd_seq_data  seq; /* sequencer related */
};
219
/* ---------- Common macros ---------- */

/* Split a dma_addr_t into 32-bit halves; HI is 0 on 32-bit dma_addr_t. */
#define ASD_BUSADDR_LO(__dma_handle) ((u32)(__dma_handle))
#define ASD_BUSADDR_HI(__dma_handle) (((sizeof(dma_addr_t))==8)     \
                                    ? ((u32)((__dma_handle) >> 32)) \
                                    : ((u32)0))

#define dev_to_asd_ha(__dev)  pci_get_drvdata(to_pci_dev(__dev))
#define SCB_SITE_VALID(__site_no) (((__site_no) & 0xF0FF) != 0x00FF \
				  && ((__site_no) & 0xF0FF) > 0x001F)
/* For each bit set in __lseq_mask, set __lseq to equal the bit
 * position of the set bit and execute the statement following.
 * __mc is the temporary mask, used as a mask "counter".
 *
 * NOTE(review): the trailing bare `if` makes these macros swallow an
 * `else` that immediately follows the loop body (dangling-else);
 * callers should brace the body.
 */
#define for_each_sequencer(__lseq_mask, __mc, __lseq)                        \
	for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
		if (((__mc) & 1))
#define for_each_phy(__lseq_mask, __mc, __lseq)                              \
	for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
		if (((__mc) & 1))

#define PHY_ENABLED(_HA, _I) ((_HA)->hw_prof.enabled_phys & (1<<(_I)))
242
243/* ---------- DMA allocs ---------- */
244
/* Allocate an (uninitialized) DMA token from its slab cache. */
static inline struct asd_dma_tok *asd_dmatok_alloc(unsigned int flags)
{
	return kmem_cache_alloc(asd_dma_token_cache, flags);
}
249
/* Return a DMA token to its slab cache (does not free token->vaddr). */
static inline void asd_dmatok_free(struct asd_dma_tok *token)
{
	kmem_cache_free(asd_dma_token_cache, token);
}
254
255static inline struct asd_dma_tok *asd_alloc_coherent(struct asd_ha_struct *
256 asd_ha, size_t size,
257 unsigned int flags)
258{
259 struct asd_dma_tok *token = asd_dmatok_alloc(flags);
260 if (token) {
261 token->size = size;
262 token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev,
263 token->size,
264 &token->dma_handle,
265 flags);
266 if (!token->vaddr) {
267 asd_dmatok_free(token);
268 token = NULL;
269 }
270 }
271 return token;
272}
273
274static inline void asd_free_coherent(struct asd_ha_struct *asd_ha,
275 struct asd_dma_tok *token)
276{
277 if (token) {
278 dma_free_coherent(&asd_ha->pcidev->dev, token->size,
279 token->vaddr, token->dma_handle);
280 asd_dmatok_free(token);
281 }
282}
283
/* Initialize the host-side fields of a freshly allocated aSCB;
 * dma_scb must already be set (asd_ascb_alloc() does this).
 */
static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
				 struct asd_ascb *ascb)
{
	INIT_LIST_HEAD(&ascb->list);
	ascb->scb = ascb->dma_scb.vaddr;	/* CPU view of the SCB */
	ascb->ha = asd_ha;
	ascb->timer.function = NULL;	/* no timeout handler yet */
	init_timer(&ascb->timer);
	ascb->tc_index = -1;	/* no tc index reserved yet */
	init_completion(&ascb->completion);
}
295
/* Release a previously reserved transaction-context index.
 * Must be called with the tc_index_lock held!
 */
static inline void asd_tc_index_release(struct asd_seq_data *seq, int index)
{
	seq->tc_index_array[index] = NULL;
	clear_bit(index, seq->tc_index_bitmap);
}
303
/* Reserve the lowest free transaction-context index and associate it
 * with @ptr.  Returns the index, or -1 when none are free.
 * Must be called with the tc_index_lock held!
 */
static inline int asd_tc_index_get(struct asd_seq_data *seq, void *ptr)
{
	int index;

	index = find_first_zero_bit(seq->tc_index_bitmap,
				    seq->tc_index_bitmap_bits);
	if (index == seq->tc_index_bitmap_bits)
		return -1;

	seq->tc_index_array[index] = ptr;
	set_bit(index, seq->tc_index_bitmap);

	return index;
}
320
/* Look up the pointer registered for @index (NULL if released).
 * Must be called with the tc_index_lock held!
 */
static inline void *asd_tc_index_find(struct asd_seq_data *seq, int index)
{
	return seq->tc_index_array[index];
}
327
/**
 * asd_ascb_free -- free a single aSCB after it has completed
 * @ascb: pointer to the aSCB of interest
 *
 * This frees an aSCB after it has been executed/completed by
 * the sequencer.  The aSCB must already be unlinked from any list
 * (BUG_ON otherwise).  A NULL @ascb is a no-op.
 */
static inline void asd_ascb_free(struct asd_ascb *ascb)
{
	if (ascb) {
		struct asd_ha_struct *asd_ha = ascb->ha;
		unsigned long flags;

		BUG_ON(!list_empty(&ascb->list));
		/* Release the tc index, the hardware SCB, then the
		 * host-side struct -- reverse of asd_ascb_alloc().
		 */
		spin_lock_irqsave(&ascb->ha->seq.tc_index_lock, flags);
		asd_tc_index_release(&ascb->ha->seq, ascb->tc_index);
		spin_unlock_irqrestore(&ascb->ha->seq.tc_index_lock, flags);
		dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
			      ascb->dma_scb.dma_handle);
		kmem_cache_free(asd_ascb_cache, ascb);
	}
}
350
/**
 * asd_ascb_free_list -- free a list of ascbs
 * @ascb_list: a list of ascbs
 *
 * This function will free a list of ascbs allocated by asd_ascb_alloc_list.
 * It is used when say the scb queueing function returned QUEUE_FULL,
 * and we do not need the ascbs any more.
 */
static inline void asd_ascb_free_list(struct asd_ascb *ascb_list)
{
	LIST_HEAD(list);
	struct list_head *n, *pos;

	/* Splice the headless circular ascb chain onto a local list
	 * head so the standard safe iterator can be used; each entry
	 * is unlinked before asd_ascb_free() (which BUG_ONs otherwise).
	 */
	__list_add(&list, ascb_list->list.prev, &ascb_list->list);
	list_for_each_safe(pos, n, &list) {
		list_del_init(pos);
		asd_ascb_free(list_entry(pos, struct asd_ascb, list));
	}
}
370
371/* ---------- Function declarations ---------- */
372
373int asd_init_hw(struct asd_ha_struct *asd_ha);
374irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs);
375
376
377struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
378 *asd_ha, int *num,
379 unsigned int gfp_mask);
380
381int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
382 int num);
383int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
384 int num);
385
386int asd_init_post_escbs(struct asd_ha_struct *asd_ha);
387void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc);
388void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
389void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
390int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask);
391void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
392 u8 subfunc);
393
394void asd_ascb_timedout(unsigned long data);
395int asd_chip_hardrst(struct asd_ha_struct *asd_ha);
396
397#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
new file mode 100644
index 000000000000..ee2ccad70487
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -0,0 +1,866 @@
1/*
2 * Aic94xx SAS/SATA driver initialization.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/config.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/kernel.h>
31#include <linux/pci.h>
32#include <linux/delay.h>
33
34#include <scsi/scsi_host.h>
35
36#include "aic94xx.h"
37#include "aic94xx_reg.h"
38#include "aic94xx_hwi.h"
39#include "aic94xx_seq.h"
40
41/* The format is "version.release.patchlevel" */
42#define ASD_DRIVER_VERSION "1.0.2"
43
44static int use_msi = 0;
45module_param_named(use_msi, use_msi, int, S_IRUGO);
46MODULE_PARM_DESC(use_msi, "\n"
47 "\tEnable(1) or disable(0) using PCI MSI.\n"
48 "\tDefault: 0");
49
50static int lldd_max_execute_num = 0;
51module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
52MODULE_PARM_DESC(collector, "\n"
53 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
54 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
55 "\tThe aic94xx SAS LLDD supports both modes.\n"
56 "\tDefault: 0 (Direct Mode).\n");
57
58char sas_addr_str[2*SAS_ADDR_SIZE + 1] = "";
59
60static struct scsi_transport_template *aic94xx_transport_template;
61
/* SCSI host template.  Nearly every operation is delegated to the
 * generic SAS layer (the sas_* entry points); can_queue is only a
 * placeholder here and is raised at probe time from the sequencer's
 * actual queue depth. */
static struct scsi_host_template aic94xx_sht = {
	.module			= THIS_MODULE,
	/* .name is initialized */
	.name			= "aic94xx",
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= sas_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
};
80
/*
 * asd_map_memio -- reserve and ioremap the two memory BARs.
 * @asd_ha: pointer to host adapter structure
 *
 * Iterates i = 0, 2: PCI BAR 0 -> io_handle[0], PCI BAR 2 -> io_handle[1]
 * (stride of 2 -- presumably because the BARs are 64-bit and occupy two
 * BAR slots each; TODO confirm against the data sheet).
 * Returns 0 on success or a negative error; on failure everything
 * already reserved/mapped is undone.
 */
static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
{
	int err, i;
	struct asd_ha_addrspace *io_handle;

	asd_ha->iospace = 0;
	for (i = 0; i < 3; i += 2) {	/* BARs 0 and 2 -> handles 0 and 1 */
		io_handle = &asd_ha->io_handle[i==0?0:1];
		io_handle->start = pci_resource_start(asd_ha->pcidev, i);
		io_handle->len = pci_resource_len(asd_ha->pcidev, i);
		io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
		err = -ENODEV;
		if (!io_handle->start || !io_handle->len) {
			asd_printk("MBAR%d start or length for %s is 0.\n",
				   i==0?0:1, pci_name(asd_ha->pcidev));
			goto Err;
		}
		err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
		if (err) {
			asd_printk("couldn't reserve memory region for %s\n",
				   pci_name(asd_ha->pcidev));
			goto Err;
		}
		/* Honor the cacheability hint from the resource flags. */
		if (io_handle->flags & IORESOURCE_CACHEABLE)
			io_handle->addr = ioremap(io_handle->start,
						  io_handle->len);
		else
			io_handle->addr = ioremap_nocache(io_handle->start,
							  io_handle->len);
		if (!io_handle->addr) {
			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
				   pci_name(asd_ha->pcidev));
			goto Err_unreq;
		}
	}

	return 0;
Err_unreq:
	/* The current BAR's region was reserved but never mapped. */
	pci_release_region(asd_ha->pcidev, i);
Err:
	/* If the second iteration (i == 2) failed, undo BAR 0 too. */
	if (i > 0) {
		io_handle = &asd_ha->io_handle[0];
		iounmap(io_handle->addr);
		pci_release_region(asd_ha->pcidev, 0);
	}
	return err;
}
128
129static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha)
130{
131 struct asd_ha_addrspace *io_handle;
132
133 io_handle = &asd_ha->io_handle[1];
134 iounmap(io_handle->addr);
135 pci_release_region(asd_ha->pcidev, 2);
136
137 io_handle = &asd_ha->io_handle[0];
138 iounmap(io_handle->addr);
139 pci_release_region(asd_ha->pcidev, 0);
140}
141
142static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
143{
144 int i = PCI_IOBAR_OFFSET, err;
145 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
146
147 asd_ha->iospace = 1;
148 io_handle->start = pci_resource_start(asd_ha->pcidev, i);
149 io_handle->len = pci_resource_len(asd_ha->pcidev, i);
150 io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
151 io_handle->addr = (void __iomem *) io_handle->start;
152 if (!io_handle->start || !io_handle->len) {
153 asd_printk("couldn't get IO ports for %s\n",
154 pci_name(asd_ha->pcidev));
155 return -ENODEV;
156 }
157 err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
158 if (err) {
159 asd_printk("couldn't reserve io space for %s\n",
160 pci_name(asd_ha->pcidev));
161 }
162
163 return err;
164}
165
166static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha)
167{
168 pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
169}
170
171static int __devinit asd_map_ha(struct asd_ha_struct *asd_ha)
172{
173 int err;
174 u16 cmd_reg;
175
176 err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg);
177 if (err) {
178 asd_printk("couldn't read command register of %s\n",
179 pci_name(asd_ha->pcidev));
180 goto Err;
181 }
182
183 err = -ENODEV;
184 if (cmd_reg & PCI_COMMAND_MEMORY) {
185 if ((err = asd_map_memio(asd_ha)))
186 goto Err;
187 } else if (cmd_reg & PCI_COMMAND_IO) {
188 if ((err = asd_map_ioport(asd_ha)))
189 goto Err;
190 asd_printk("%s ioport mapped -- upgrade your hardware\n",
191 pci_name(asd_ha->pcidev));
192 } else {
193 asd_printk("no proper device access to %s\n",
194 pci_name(asd_ha->pcidev));
195 goto Err;
196 }
197
198 return 0;
199Err:
200 return err;
201}
202
203static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha)
204{
205 if (asd_ha->iospace)
206 asd_unmap_ioport(asd_ha);
207 else
208 asd_unmap_memio(asd_ha);
209}
210
/* PCI revision ID -> human-readable chip revision name.
 * Unlisted indices are NULL (unknown revisions). */
static const char *asd_dev_rev[30] = {
	[0] = "A0",
	[1] = "A1",
	[8] = "B0",
};
216
217static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
218{
219 int err, i;
220
221 err = pci_read_config_byte(asd_ha->pcidev, PCI_REVISION_ID,
222 &asd_ha->revision_id);
223 if (err) {
224 asd_printk("couldn't read REVISION ID register of %s\n",
225 pci_name(asd_ha->pcidev));
226 goto Err;
227 }
228 err = -ENODEV;
229 if (asd_ha->revision_id < AIC9410_DEV_REV_B0) {
230 asd_printk("%s is revision %s (%X), which is not supported\n",
231 pci_name(asd_ha->pcidev),
232 asd_dev_rev[asd_ha->revision_id],
233 asd_ha->revision_id);
234 goto Err;
235 }
236 /* Provide some sane default values. */
237 asd_ha->hw_prof.max_scbs = 512;
238 asd_ha->hw_prof.max_ddbs = 128;
239 asd_ha->hw_prof.num_phys = ASD_MAX_PHYS;
240 /* All phys are enabled, by default. */
241 asd_ha->hw_prof.enabled_phys = 0xFF;
242 for (i = 0; i < ASD_MAX_PHYS; i++) {
243 asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
244 SAS_LINK_RATE_3_0_GBPS;
245 asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
246 SAS_LINK_RATE_1_5_GBPS;
247 asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
248 SAS_LINK_RATE_1_5_GBPS;
249 asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
250 SAS_LINK_RATE_1_5_GBPS;
251 }
252
253 return 0;
254Err:
255 return err;
256}
257
258static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha)
259{
260 int err = asd_common_setup(asd_ha);
261
262 if (err)
263 return err;
264
265 asd_ha->hw_prof.addr_range = 8;
266 asd_ha->hw_prof.port_name_base = 0;
267 asd_ha->hw_prof.dev_name_base = 8;
268 asd_ha->hw_prof.sata_name_base = 16;
269
270 return 0;
271}
272
273static int __devinit asd_aic9405_setup(struct asd_ha_struct *asd_ha)
274{
275 int err = asd_common_setup(asd_ha);
276
277 if (err)
278 return err;
279
280 asd_ha->hw_prof.addr_range = 4;
281 asd_ha->hw_prof.port_name_base = 0;
282 asd_ha->hw_prof.dev_name_base = 4;
283 asd_ha->hw_prof.sata_name_base = 8;
284
285 return 0;
286}
287
288static ssize_t asd_show_dev_rev(struct device *dev,
289 struct device_attribute *attr, char *buf)
290{
291 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
292 return snprintf(buf, PAGE_SIZE, "%s\n",
293 asd_dev_rev[asd_ha->revision_id]);
294}
295static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
296
/* sysfs "bios_build": the adapter BIOS build number. */
static ssize_t asd_show_dev_bios_build(struct device *dev,
				       struct device_attribute *attr,char *buf)
{
	struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld);
}
static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL);
304
/* sysfs "pcba_sn": the printed circuit board assembly serial number. */
static ssize_t asd_show_dev_pcba_sn(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
	return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn);
}
static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
312
/* Create the per-device sysfs attributes (revision, bios_build, pcba_sn).
 * NOTE(review): device_create_file() return values are ignored -- a
 * failure just leaves that attribute missing; consider checking them. */
static void asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
{
	device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
	device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
	device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
}
319
/* Remove the per-device sysfs attributes created by
 * asd_create_dev_attrs(). */
static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
{
	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
}
326
/* The first entry, 0, is used for dynamic ids, the rest for devices
 * we know about.  Indexed by the driver_data field of the PCI id table.
 */
static struct asd_pcidev_struct {
	const char * name;	/* printed at probe time */
	int (*setup)(struct asd_ha_struct *asd_ha);	/* chip-specific init */
} asd_pcidev_data[] = {
	/* Id 0 is used for dynamic ids. */
	{ .name  = "Adaptec AIC-94xx SAS/SATA Host Adapter",
	  .setup = asd_aic9410_setup
	},
	{ .name  = "Adaptec AIC-9410W SAS/SATA Host Adapter",
	  .setup = asd_aic9410_setup
	},
	{ .name  = "Adaptec AIC-9405W SAS/SATA Host Adapter",
	  .setup = asd_aic9405_setup
	},
};
345
346static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
347{
348 asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool",
349 &asd_ha->pcidev->dev,
350 sizeof(struct scb),
351 8, 0);
352 if (!asd_ha->scb_pool) {
353 asd_printk("couldn't create scb pool\n");
354 return -ENOMEM;
355 }
356
357 return 0;
358}
359
360/**
361 * asd_free_edbs -- free empty data buffers
362 * asd_ha: pointer to host adapter structure
363 */
364static inline void asd_free_edbs(struct asd_ha_struct *asd_ha)
365{
366 struct asd_seq_data *seq = &asd_ha->seq;
367 int i;
368
369 for (i = 0; i < seq->num_edbs; i++)
370 asd_free_coherent(asd_ha, seq->edb_arr[i]);
371 kfree(seq->edb_arr);
372 seq->edb_arr = NULL;
373}
374
375static inline void asd_free_escbs(struct asd_ha_struct *asd_ha)
376{
377 struct asd_seq_data *seq = &asd_ha->seq;
378 int i;
379
380 for (i = 0; i < seq->num_escbs; i++) {
381 if (!list_empty(&seq->escb_arr[i]->list))
382 list_del_init(&seq->escb_arr[i]->list);
383
384 asd_ascb_free(seq->escb_arr[i]);
385 }
386 kfree(seq->escb_arr);
387 seq->escb_arr = NULL;
388}
389
390static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
391{
392 int i;
393
394 if (asd_ha->hw_prof.ddb_ext)
395 asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext);
396 if (asd_ha->hw_prof.scb_ext)
397 asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
398
399 if (asd_ha->hw_prof.ddb_bitmap)
400 kfree(asd_ha->hw_prof.ddb_bitmap);
401 asd_ha->hw_prof.ddb_bitmap = NULL;
402
403 for (i = 0; i < ASD_MAX_PHYS; i++) {
404 struct asd_phy *phy = &asd_ha->phys[i];
405
406 asd_free_coherent(asd_ha, phy->id_frm_tok);
407 }
408 if (asd_ha->seq.escb_arr)
409 asd_free_escbs(asd_ha);
410 if (asd_ha->seq.edb_arr)
411 asd_free_edbs(asd_ha);
412 if (asd_ha->hw_prof.ue.area) {
413 kfree(asd_ha->hw_prof.ue.area);
414 asd_ha->hw_prof.ue.area = NULL;
415 }
416 if (asd_ha->seq.tc_index_array) {
417 kfree(asd_ha->seq.tc_index_array);
418 kfree(asd_ha->seq.tc_index_bitmap);
419 asd_ha->seq.tc_index_array = NULL;
420 asd_ha->seq.tc_index_bitmap = NULL;
421 }
422 if (asd_ha->seq.actual_dl) {
423 asd_free_coherent(asd_ha, asd_ha->seq.actual_dl);
424 asd_ha->seq.actual_dl = NULL;
425 asd_ha->seq.dl = NULL;
426 }
427 if (asd_ha->seq.next_scb.vaddr) {
428 dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr,
429 asd_ha->seq.next_scb.dma_handle);
430 asd_ha->seq.next_scb.vaddr = NULL;
431 }
432 dma_pool_destroy(asd_ha->scb_pool);
433 asd_ha->scb_pool = NULL;
434}
435
436kmem_cache_t *asd_dma_token_cache;
437kmem_cache_t *asd_ascb_cache;
438
439static int asd_create_global_caches(void)
440{
441 if (!asd_dma_token_cache) {
442 asd_dma_token_cache
443 = kmem_cache_create(ASD_DRIVER_NAME "_dma_token",
444 sizeof(struct asd_dma_tok),
445 0,
446 SLAB_HWCACHE_ALIGN,
447 NULL, NULL);
448 if (!asd_dma_token_cache) {
449 asd_printk("couldn't create dma token cache\n");
450 return -ENOMEM;
451 }
452 }
453
454 if (!asd_ascb_cache) {
455 asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb",
456 sizeof(struct asd_ascb),
457 0,
458 SLAB_HWCACHE_ALIGN,
459 NULL, NULL);
460 if (!asd_ascb_cache) {
461 asd_printk("couldn't create ascb cache\n");
462 goto Err;
463 }
464 }
465
466 return 0;
467Err:
468 kmem_cache_destroy(asd_dma_token_cache);
469 asd_dma_token_cache = NULL;
470 return -ENOMEM;
471}
472
/* Destroy the global slab caches.  The NULL checks allow calling this
 * after a partial asd_create_global_caches() (kmem_cache_destroy() may
 * not tolerate NULL on this kernel -- keep them). */
static void asd_destroy_global_caches(void)
{
	if (asd_dma_token_cache)
		kmem_cache_destroy(asd_dma_token_cache);
	asd_dma_token_cache = NULL;

	if (asd_ascb_cache)
		kmem_cache_destroy(asd_ascb_cache);
	asd_ascb_cache = NULL;
}
483
484static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
485{
486 int i;
487 struct asd_sas_phy **sas_phys =
488 kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_phy), GFP_KERNEL);
489 struct asd_sas_port **sas_ports =
490 kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_port), GFP_KERNEL);
491
492 if (!sas_phys || !sas_ports) {
493 kfree(sas_phys);
494 kfree(sas_ports);
495 return -ENOMEM;
496 }
497
498 asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
499 asd_ha->sas_ha.lldd_module = THIS_MODULE;
500 asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
501
502 for (i = 0; i < ASD_MAX_PHYS; i++) {
503 sas_phys[i] = &asd_ha->phys[i].sas_phy;
504 sas_ports[i] = &asd_ha->ports[i];
505 }
506
507 asd_ha->sas_ha.sas_phy = sas_phys;
508 asd_ha->sas_ha.sas_port= sas_ports;
509 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
510
511 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
512
513 return sas_register_ha(&asd_ha->sas_ha);
514}
515
/*
 * asd_unregister_sas_ha -- detach this HA from the SAS and SCSI layers.
 * @asd_ha: pointer to host adapter structure
 *
 * Order matters: unregister from the SAS layer first so no new work
 * arrives, then remove and drop the Scsi_Host, and only then free the
 * phy/port pointer tables the SAS layer was using.  Note this also
 * releases the Scsi_Host reference (scsi_host_put).
 * Returns the sas_unregister_ha() result.
 */
static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
{
	int err;

	err = sas_unregister_ha(&asd_ha->sas_ha);

	sas_remove_host(asd_ha->sas_ha.core.shost);
	scsi_remove_host(asd_ha->sas_ha.core.shost);
	scsi_host_put(asd_ha->sas_ha.core.shost);

	kfree(asd_ha->sas_ha.sas_phy);
	kfree(asd_ha->sas_ha.sas_port);

	return err;
}
531
532static int __devinit asd_pci_probe(struct pci_dev *dev,
533 const struct pci_device_id *id)
534{
535 struct asd_pcidev_struct *asd_dev;
536 unsigned asd_id = (unsigned) id->driver_data;
537 struct asd_ha_struct *asd_ha;
538 struct Scsi_Host *shost;
539 int err;
540
541 if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) {
542 asd_printk("wrong driver_data in PCI table\n");
543 return -ENODEV;
544 }
545
546 if ((err = pci_enable_device(dev))) {
547 asd_printk("couldn't enable device %s\n", pci_name(dev));
548 return err;
549 }
550
551 pci_set_master(dev);
552
553 err = -ENOMEM;
554
555 shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *));
556 if (!shost)
557 goto Err;
558
559 asd_dev = &asd_pcidev_data[asd_id];
560
561 asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL);
562 if (!asd_ha) {
563 asd_printk("out of memory\n");
564 goto Err;
565 }
566 asd_ha->pcidev = dev;
567 asd_ha->sas_ha.pcidev = asd_ha->pcidev;
568 asd_ha->sas_ha.lldd_ha = asd_ha;
569
570 asd_ha->name = asd_dev->name;
571 asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
572
573 SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
574 asd_ha->sas_ha.core.shost = shost;
575 shost->transportt = aic94xx_transport_template;
576 shost->max_id = ~0;
577 shost->max_lun = ~0;
578 shost->max_cmd_len = 16;
579
580 err = scsi_add_host(shost, &dev->dev);
581 if (err) {
582 scsi_host_put(shost);
583 goto Err_free;
584 }
585
586
587
588 err = asd_dev->setup(asd_ha);
589 if (err)
590 goto Err_free;
591
592 err = -ENODEV;
593 if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)
594 && !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK))
595 ;
596 else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK)
597 && !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK))
598 ;
599 else {
600 asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
601 goto Err_free;
602 }
603
604 pci_set_drvdata(dev, asd_ha);
605
606 err = asd_map_ha(asd_ha);
607 if (err)
608 goto Err_free;
609
610 err = asd_create_ha_caches(asd_ha);
611 if (err)
612 goto Err_unmap;
613
614 err = asd_init_hw(asd_ha);
615 if (err)
616 goto Err_free_cache;
617
618 asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled "
619 "phys, flash %s, BIOS %s%d\n",
620 pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr),
621 asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys,
622 asd_ha->hw_prof.num_phys,
623 asd_ha->hw_prof.flash.present ? "present" : "not present",
624 asd_ha->hw_prof.bios.present ? "build " : "not present",
625 asd_ha->hw_prof.bios.bld);
626
627 shost->can_queue = asd_ha->seq.can_queue;
628
629 if (use_msi)
630 pci_enable_msi(asd_ha->pcidev);
631
632 err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, SA_SHIRQ,
633 ASD_DRIVER_NAME, asd_ha);
634 if (err) {
635 asd_printk("couldn't get irq %d for %s\n",
636 asd_ha->pcidev->irq, pci_name(asd_ha->pcidev));
637 goto Err_irq;
638 }
639 asd_enable_ints(asd_ha);
640
641 err = asd_init_post_escbs(asd_ha);
642 if (err) {
643 asd_printk("couldn't post escbs for %s\n",
644 pci_name(asd_ha->pcidev));
645 goto Err_escbs;
646 }
647 ASD_DPRINTK("escbs posted\n");
648
649 asd_create_dev_attrs(asd_ha);
650
651 err = asd_register_sas_ha(asd_ha);
652 if (err)
653 goto Err_reg_sas;
654
655 err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
656 if (err) {
657 asd_printk("coudln't enable phys, err:%d\n", err);
658 goto Err_en_phys;
659 }
660 ASD_DPRINTK("enabled phys\n");
661 /* give the phy enabling interrupt event time to come in (1s
662 * is empirically about all it takes) */
663 ssleep(1);
664 /* Wait for discovery to finish */
665 scsi_flush_work(asd_ha->sas_ha.core.shost);
666
667 return 0;
668Err_en_phys:
669 asd_unregister_sas_ha(asd_ha);
670Err_reg_sas:
671 asd_remove_dev_attrs(asd_ha);
672Err_escbs:
673 asd_disable_ints(asd_ha);
674 free_irq(dev->irq, asd_ha);
675Err_irq:
676 if (use_msi)
677 pci_disable_msi(dev);
678 asd_chip_hardrst(asd_ha);
679Err_free_cache:
680 asd_destroy_ha_caches(asd_ha);
681Err_unmap:
682 asd_unmap_ha(asd_ha);
683Err_free:
684 kfree(asd_ha);
685 scsi_remove_host(shost);
686Err:
687 pci_disable_device(dev);
688 return err;
689}
690
691static void asd_free_queues(struct asd_ha_struct *asd_ha)
692{
693 unsigned long flags;
694 LIST_HEAD(pending);
695 struct list_head *n, *pos;
696
697 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
698 asd_ha->seq.pending = 0;
699 list_splice_init(&asd_ha->seq.pend_q, &pending);
700 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
701
702 if (!list_empty(&pending))
703 ASD_DPRINTK("Uh-oh! Pending is not empty!\n");
704
705 list_for_each_safe(pos, n, &pending) {
706 struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
707 list_del_init(pos);
708 ASD_DPRINTK("freeing from pending\n");
709 asd_ascb_free(ascb);
710 }
711}
712
/* Turn off the LEDs of every enabled phy.
 * NOTE(review): phy_mask is passed as both arguments of for_each_phy();
 * presumably (mask, scratch copy) -- confirm against the macro. */
static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
{
	u8 phy_mask = asd_ha->hw_prof.enabled_phys;
	u8 i;

	for_each_phy(phy_mask, phy_mask, i) {
		asd_turn_led(asd_ha, i, 0);
		asd_control_led(asd_ha, i, 0);
	}
}
723
724static void __devexit asd_pci_remove(struct pci_dev *dev)
725{
726 struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
727
728 if (!asd_ha)
729 return;
730
731 asd_unregister_sas_ha(asd_ha);
732
733 asd_disable_ints(asd_ha);
734
735 asd_remove_dev_attrs(asd_ha);
736
737 /* XXX more here as needed */
738
739 free_irq(dev->irq, asd_ha);
740 if (use_msi)
741 pci_disable_msi(asd_ha->pcidev);
742 asd_turn_off_leds(asd_ha);
743 asd_chip_hardrst(asd_ha);
744 asd_free_queues(asd_ha);
745 asd_destroy_ha_caches(asd_ha);
746 asd_unmap_ha(asd_ha);
747 kfree(asd_ha);
748 pci_disable_device(dev);
749 return;
750}
751
/* sysfs driver attribute "version": the driver version string. */
static ssize_t asd_version_show(struct device_driver *driver, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
}
static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
757
/* Create the driver-level sysfs attributes (currently just "version").
 * NOTE(review): the driver_create_file() return value is ignored. */
static void asd_create_driver_attrs(struct device_driver *driver)
{
	driver_create_file(driver, &driver_attr_version);
}
762
/* Remove the driver-level sysfs attributes. */
static void asd_remove_driver_attrs(struct device_driver *driver)
{
	driver_remove_file(driver, &driver_attr_version);
}
767
/* libsas -> LLDD dispatch table: port/device events, task execution,
 * task management and phy control implemented by this driver. */
static struct sas_domain_function_template aic94xx_transport_functions = {
	.lldd_port_formed	= asd_update_port_links,

	.lldd_dev_found		= asd_dev_found,
	.lldd_dev_gone		= asd_dev_gone,

	.lldd_execute_task	= asd_execute_task,

	.lldd_abort_task	= asd_abort_task,
	.lldd_abort_task_set	= asd_abort_task_set,
	.lldd_clear_aca		= asd_clear_aca,
	.lldd_clear_task_set	= asd_clear_task_set,
	.lldd_I_T_nexus_reset	= NULL,		/* not implemented */
	.lldd_lu_reset		= asd_lu_reset,
	.lldd_query_task	= asd_query_task,

	.lldd_clear_nexus_port	= asd_clear_nexus_port,
	.lldd_clear_nexus_ha	= asd_clear_nexus_ha,

	.lldd_control_phy	= asd_control_phy,
};
789
/* Supported PCI IDs.  driver_data is the index into asd_pcidev_data[]:
 * 1 selects the AIC-9410 setup, 2 the AIC-9405 setup. */
static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR10),
	 0, 0, 1},
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR12),
	 0, 0, 1},
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR1E),
	 0, 0, 1},
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR30),
	 0, 0, 2},
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR32),
	 0, 0, 2},
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3E),
	 0, 0, 2},
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3F),
	 0, 0, 2},
	{}
};
807
808MODULE_DEVICE_TABLE(pci, aic94xx_pci_table);
809
/* PCI driver glue binding the id table to probe/remove. */
static struct pci_driver aic94xx_pci_driver = {
	.name		= ASD_DRIVER_NAME,
	.id_table	= aic94xx_pci_table,
	.probe		= asd_pci_probe,
	.remove		= __devexit_p(asd_pci_remove),
};
816
817static int __init aic94xx_init(void)
818{
819 int err;
820
821
822 asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION,
823 ASD_DRIVER_VERSION);
824
825 err = asd_create_global_caches();
826 if (err)
827 return err;
828
829 aic94xx_transport_template =
830 sas_domain_attach_transport(&aic94xx_transport_functions);
831 if (!aic94xx_transport_template)
832 goto out_destroy_caches;
833
834 err = pci_register_driver(&aic94xx_pci_driver);
835 if (err)
836 goto out_release_transport;
837
838 asd_create_driver_attrs(&aic94xx_pci_driver.driver);
839
840 return err;
841
842 out_release_transport:
843 sas_release_transport(aic94xx_transport_template);
844 out_destroy_caches:
845 asd_destroy_global_caches();
846
847 return err;
848}
849
/* Module exit point: undo aic94xx_init() in reverse order --
 * driver attrs, PCI driver, SAS transport, global caches. */
static void __exit aic94xx_exit(void)
{
	asd_remove_driver_attrs(&aic94xx_pci_driver.driver);
	pci_unregister_driver(&aic94xx_pci_driver);
	sas_release_transport(aic94xx_transport_template);
	asd_destroy_global_caches();
	asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION,
		   ASD_DRIVER_VERSION);
}
859
860module_init(aic94xx_init);
861module_exit(aic94xx_exit);
862
863MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
864MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION);
865MODULE_LICENSE("GPL v2");
866MODULE_VERSION(ASD_DRIVER_VERSION);
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c
new file mode 100644
index 000000000000..f210dac3203d
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.c
@@ -0,0 +1,332 @@
1/*
2 * Aic94xx SAS/SATA driver register access.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28#include "aic94xx_reg.h"
29#include "aic94xx.h"
30
/* Writing to device address space.
 * Offset comes before value to remind that the operation of
 * this function is *offs = val.
 */
static inline void asd_write_byte(struct asd_ha_struct *asd_ha,
				  unsigned long offs, u8 val)
{
	/* Port I/O is the rare fallback; note it truncates the offset
	 * to 8 bits. */
	if (unlikely(asd_ha->iospace))
		outb(val,
		     (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
	else
		writeb(val, asd_ha->io_handle[0].addr + offs);
	wmb();	/* order this store before any later stores */
}
45
/* Same as asd_write_byte() but for a 16-bit value. */
static inline void asd_write_word(struct asd_ha_struct *asd_ha,
				  unsigned long offs, u16 val)
{
	if (unlikely(asd_ha->iospace))
		outw(val,
		     (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
	else
		writew(val, asd_ha->io_handle[0].addr + offs);
	wmb();	/* order this store before any later stores */
}
56
/* Same as asd_write_byte() but for a 32-bit value. */
static inline void asd_write_dword(struct asd_ha_struct *asd_ha,
				   unsigned long offs, u32 val)
{
	if (unlikely(asd_ha->iospace))
		outl(val,
		     (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
	else
		writel(val, asd_ha->io_handle[0].addr + offs);
	wmb();	/* order this store before any later stores */
}
67
/* Reading from device address space.
 */
static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha,
			       unsigned long offs)
{
	u8 val;
	/* Port I/O truncates the offset to 8 bits, mirroring the writers. */
	if (unlikely(asd_ha->iospace))
		val = inb((unsigned long) asd_ha->io_handle[0].addr
			  + (offs & 0xFF));
	else
		val = readb(asd_ha->io_handle[0].addr + offs);
	rmb();	/* order this load before any later loads */
	return val;
}
82
/* Same as asd_read_byte() but for a 16-bit value. */
static inline u16 asd_read_word(struct asd_ha_struct *asd_ha,
				unsigned long offs)
{
	u16 val;
	if (unlikely(asd_ha->iospace))
		val = inw((unsigned long)asd_ha->io_handle[0].addr
			  + (offs & 0xFF));
	else
		val = readw(asd_ha->io_handle[0].addr + offs);
	rmb();	/* order this load before any later loads */
	return val;
}
95
/* Same as asd_read_byte() but for a 32-bit value. */
static inline u32 asd_read_dword(struct asd_ha_struct *asd_ha,
				 unsigned long offs)
{
	u32 val;
	if (unlikely(asd_ha->iospace))
		val = inl((unsigned long) asd_ha->io_handle[0].addr
			  + (offs & 0xFF));
	else
		val = readl(asd_ha->io_handle[0].addr + offs);
	rmb();	/* order this load before any later loads */
	return val;
}
108
/* Offset of sliding window A within MBAR0: SWA starts the region. */
static inline u32 asd_mem_offs_swa(void)
{
	return 0;
}
113
/* SWC immediately follows SWA within MBAR0. */
static inline u32 asd_mem_offs_swc(void)
{
	return asd_mem_offs_swa() + MBAR0_SWA_SIZE;
}
118
/* SWB follows SWC plus a 0x20-byte gap (the PCI config space copy at
 * 0x60-0x7F; see the sliding-window layout comment further down). */
static inline u32 asd_mem_offs_swb(void)
{
	return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20;
}
123
/* We know that the register wanted is in the range
 * of the sliding window.
 *
 * ASD_READ_SW/ASD_WRITE_SW generate the typed accessors
 * asd_{read,write}_{swa,swb,swc}_{byte,word,dword}(): each translates
 * an absolute chip register address into an MBAR0 offset using the
 * window's current base (io_handle->swa_base/swb_base/swc_base) and
 * the window's fixed offset within MBAR0.
 */
#define ASD_READ_SW(ww, type, ord) \
static inline type asd_read_##ww##_##ord (struct asd_ha_struct *asd_ha,\
					  u32 reg) \
{ \
	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
	u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
	return asd_read_##ord (asd_ha, (unsigned long) map_offs); \
}

#define ASD_WRITE_SW(ww, type, ord) \
static inline void asd_write_##ww##_##ord (struct asd_ha_struct *asd_ha,\
					   u32 reg, type val) \
{ \
	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
	u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
	asd_write_##ord (asd_ha, (unsigned long) map_offs, val); \
}

ASD_READ_SW(swa, u8,  byte);
ASD_READ_SW(swa, u16, word);
ASD_READ_SW(swa, u32, dword);

ASD_READ_SW(swb, u8,  byte);
ASD_READ_SW(swb, u16, word);
ASD_READ_SW(swb, u32, dword);

ASD_READ_SW(swc, u8,  byte);
ASD_READ_SW(swc, u16, word);
ASD_READ_SW(swc, u32, dword);

ASD_WRITE_SW(swa, u8,  byte);
ASD_WRITE_SW(swa, u16, word);
ASD_WRITE_SW(swa, u32, dword);

ASD_WRITE_SW(swb, u8,  byte);
ASD_WRITE_SW(swb, u16, word);
ASD_WRITE_SW(swb, u32, dword);

ASD_WRITE_SW(swc, u8,  byte);
ASD_WRITE_SW(swc, u16, word);
ASD_WRITE_SW(swc, u32, dword);
168
169/*
170 * A word about sliding windows:
171 * MBAR0 is divided into sliding windows A, C and B, in that order.
172 * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes.
173 * SWC starts at offset 0x58 of MBAR0, up to 0x60, with size 0x8 bytes.
174 * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F.
175 * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0.
176 * See asd_init_sw() in aic94xx_hwi.c
177 *
178 * We map the most common registers we'd access of the internal 4GB
179 * host adapter memory space. If a register/internal memory location
180 * is wanted which is not mapped, we slide SWB, by paging it,
181 * see asd_move_swb() in aic94xx_reg.c.
182 */
183
/**
 * asd_move_swb -- move sliding window B
 * @asd_ha: pointer to host adapter structure
 * @reg: register desired to be within range of the new window
 *
 * Re-pages SWB by writing a new MBAR0_SWB_SIZE-aligned base into the
 * PCI config register that controls the window, and caches that base
 * in io_handle[0].swb_base for the accessors' range checks.
 */
static inline void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg)
{
	u32 base = reg & ~(MBAR0_SWB_SIZE-1);
	pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base);
	asd_ha->io_handle[0].swb_base = base;
}
195
/* Byte write without taking the io lock; the string accessors below
 * hold asd_ha->iolock across the whole transfer.  Picks whichever
 * sliding window covers @reg, re-paging SWB when none does. */
static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val)
{
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
	/* Sanity: reg must lie within the adapter's internal space. */
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
	if (io_handle->swa_base <= reg
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
		asd_write_swa_byte (asd_ha, reg,val);
	else if (io_handle->swb_base <= reg
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
		asd_write_swb_byte (asd_ha, reg, val);
	else if (io_handle->swc_base <= reg
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
		asd_write_swc_byte (asd_ha, reg, val);
	else {
		/* Ok, we have to move SWB */
		asd_move_swb(asd_ha, reg);
		asd_write_swb_byte (asd_ha, reg, val);
	}
}
215
/*
 * ASD_WRITE_REG -- generate asd_write_reg_{byte,word,dword}().
 * Public register write: takes asd_ha->iolock so that re-paging SWB
 * for an out-of-window register cannot race with another access.
 */
#define ASD_WRITE_REG(type, ord) \
void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\
{ \
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
	unsigned long flags; \
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
	spin_lock_irqsave(&asd_ha->iolock, flags); \
	if (io_handle->swa_base <= reg \
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
		asd_write_swa_##ord (asd_ha, reg,val); \
	else if (io_handle->swb_base <= reg \
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
		asd_write_swb_##ord (asd_ha, reg, val); \
	else if (io_handle->swc_base <= reg \
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
		asd_write_swc_##ord (asd_ha, reg, val); \
	else { \
		/* Ok, we have to move SWB */ \
		asd_move_swb(asd_ha, reg); \
		asd_write_swb_##ord (asd_ha, reg, val); \
	} \
	spin_unlock_irqrestore(&asd_ha->iolock, flags); \
}

ASD_WRITE_REG(u8, byte);
ASD_WRITE_REG(u16,word);
ASD_WRITE_REG(u32,dword);
243
/* Byte read counterpart of __asd_write_reg_byte(): no locking here;
 * used by asd_read_reg_string() which holds asd_ha->iolock. */
static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg)
{
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
	u8 val;
	/* Sanity: reg must lie within the adapter's internal space. */
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
	if (io_handle->swa_base <= reg
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
		val = asd_read_swa_byte (asd_ha, reg);
	else if (io_handle->swb_base <= reg
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
		val = asd_read_swb_byte (asd_ha, reg);
	else if (io_handle->swc_base <= reg
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
		val = asd_read_swc_byte (asd_ha, reg);
	else {
		/* Ok, we have to move SWB */
		asd_move_swb(asd_ha, reg);
		val = asd_read_swb_byte (asd_ha, reg);
	}
	return val;
}
265
/*
 * ASD_READ_REG -- generate asd_read_reg_{byte,word,dword}().
 * Public register read: takes asd_ha->iolock so that re-paging SWB
 * for an out-of-window register cannot race with another access.
 */
#define ASD_READ_REG(type, ord) \
type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg) \
{ \
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
	type val; \
	unsigned long flags; \
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
	spin_lock_irqsave(&asd_ha->iolock, flags); \
	if (io_handle->swa_base <= reg \
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
		val = asd_read_swa_##ord (asd_ha, reg); \
	else if (io_handle->swb_base <= reg \
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
		val = asd_read_swb_##ord (asd_ha, reg); \
	else if (io_handle->swc_base <= reg \
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
		val = asd_read_swc_##ord (asd_ha, reg); \
	else { \
		/* Ok, we have to move SWB */ \
		asd_move_swb(asd_ha, reg); \
		val = asd_read_swb_##ord (asd_ha, reg); \
	} \
	spin_unlock_irqrestore(&asd_ha->iolock, flags); \
	return val; \
}

ASD_READ_REG(u8, byte);
ASD_READ_REG(u16,word);
ASD_READ_REG(u32,dword);
295
296/**
297 * asd_read_reg_string -- read a string of bytes from io space memory
298 * @asd_ha: pointer to host adapter structure
299 * @dst: pointer to a destination buffer where data will be written to
300 * @offs: start offset (register) to read from
301 * @count: number of bytes to read
302 */
303void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
304 u32 offs, int count)
305{
306 u8 *p = dst;
307 unsigned long flags;
308
309 spin_lock_irqsave(&asd_ha->iolock, flags);
310 for ( ; count > 0; count--, offs++, p++)
311 *p = __asd_read_reg_byte(asd_ha, offs);
312 spin_unlock_irqrestore(&asd_ha->iolock, flags);
313}
314
315/**
316 * asd_write_reg_string -- write a string of bytes to io space memory
317 * @asd_ha: pointer to host adapter structure
318 * @src: pointer to source buffer where data will be read from
319 * @offs: start offset (register) to write to
320 * @count: number of bytes to write
321 */
322void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
323 u32 offs, int count)
324{
325 u8 *p = src;
326 unsigned long flags;
327
328 spin_lock_irqsave(&asd_ha->iolock, flags);
329 for ( ; count > 0; count--, offs++, p++)
330 __asd_write_reg_byte(asd_ha, offs, *p);
331 spin_unlock_irqrestore(&asd_ha->iolock, flags);
332}
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h
new file mode 100644
index 000000000000..2279307fd27e
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.h
@@ -0,0 +1,302 @@
1/*
2 * Aic94xx SAS/SATA driver hardware registers definitions.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
#ifndef _AIC94XX_REG_H_
#define _AIC94XX_REG_H_

#include <asm/io.h>
#include "aic94xx_hwi.h"

/* Values */
#define AIC9410_DEV_REV_B0	0x8	/* PCI revision id of the B0 chip */

/* MBAR0, SWA, SWB, SWC, internal memory space addresses.
 * These are internal chip addresses; the register accessors map them
 * through the MBAR0 software windows. */
#define REG_BASE_ADDR		0xB8000000
#define REG_BASE_ADDR_CSEQCIO	0xB8002000
#define REG_BASE_ADDR_EXSI	0xB8042800

#define MBAR0_SWA_SIZE		0x58
/* SWB window size is a runtime variable, not a constant -- presumably
 * it depends on the chip/BAR configuration discovered at probe time;
 * defined in a .c file of this driver. */
extern u32 MBAR0_SWB_SIZE;
#define MBAR0_SWC_SIZE		0x8

/* MBAR1, points to On Chip Memory */
#define OCM_BASE_ADDR		0xA0000000
#define OCM_MAX_SIZE		0x20000

/* Smallest address possible to reference */
#define ALL_BASE_ADDR		OCM_BASE_ADDR

/* PCI configuration space registers */
#define PCI_IOBAR_OFFSET	4

#define PCI_CONF_MBAR1		0x6C
#define PCI_CONF_MBAR0_SWA	0x70
#define PCI_CONF_MBAR0_SWB	0x74
#define PCI_CONF_MBAR0_SWC	0x78
#define PCI_CONF_MBAR_KEY	0x7C
#define PCI_CONF_FLSH_BAR	0xB8

#include "aic94xx_reg_def.h"

/* Window-agnostic register accessors, defined in aic94xx_reg.c.  The
 * implementation selects (and if necessary moves) the proper MBAR0
 * software window for @reg and serializes access on the adapter's
 * I/O lock. */
u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg);
u16 asd_read_reg_word(struct asd_ha_struct *asd_ha, u32 reg);
u32 asd_read_reg_dword(struct asd_ha_struct *asd_ha, u32 reg);

void asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val);
void asd_write_reg_word(struct asd_ha_struct *asd_ha, u32 reg, u16 val);
void asd_write_reg_dword(struct asd_ha_struct *asd_ha, u32 reg, u32 val);

/* Bulk byte-string transfers; the whole transfer is done under the
 * adapter I/O lock. */
void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
			 u32 offs, int count);
void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
			  u32 offs, int count);
76
/*
 * ASD_READ_OCM -- generate a typed reader for On Chip Memory (MBAR1).
 * io_handle[1] is the mapping of MBAR1; read{b,w,l} performs the MMIO
 * access and the rmb() orders this read before subsequent reads.
 */
#define ASD_READ_OCM(type, ord, S) \
static inline type asd_read_ocm_##ord (struct asd_ha_struct *asd_ha, \
				       u32 offs) \
{ \
	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
	type val = read##S (io_handle->addr + (unsigned long) offs); \
	rmb(); \
	return val; \
}

/* asd_read_ocm_byte(), asd_read_ocm_word(), asd_read_ocm_dword() */
ASD_READ_OCM(u8, byte, b);
ASD_READ_OCM(u16,word, w);
ASD_READ_OCM(u32,dword,l);
90
/*
 * ASD_WRITE_OCM -- generate a typed writer for On Chip Memory (MBAR1).
 * NOTE(review): unlike the reader there is no barrier or read-back
 * here, so the write may still be posted when this returns -- verify
 * callers don't rely on completion ordering.
 */
#define ASD_WRITE_OCM(type, ord, S) \
static inline void asd_write_ocm_##ord (struct asd_ha_struct *asd_ha, \
					u32 offs, type val) \
{ \
	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
	write##S (val, io_handle->addr + (unsigned long) offs); \
	return; \
}

/* asd_write_ocm_byte(), asd_write_ocm_word(), asd_write_ocm_dword() */
ASD_WRITE_OCM(u8, byte, b);
ASD_WRITE_OCM(u16,word, w);
ASD_WRITE_OCM(u32,dword,l);
103
/*
 * ASD_DDBSITE_READ -- generate a typed reader of a DDB site via the
 * CIO bus: select the DDB window at @offs through ALTCIOADR, point
 * ADDBPTR at the site, then read the value from CTXACCESS.
 * NOTE(review): the two setup writes and the read are not locked here;
 * presumably callers serialize DDB-site access -- verify.
 */
#define ASD_DDBSITE_READ(type, ord) \
static inline type asd_ddbsite_read_##ord (struct asd_ha_struct *asd_ha, \
					   u16 ddb_site_no, \
					   u16 offs) \
{ \
	asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
	asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
	return asd_read_reg_##ord (asd_ha, CTXACCESS); \
}

ASD_DDBSITE_READ(u32, dword);
ASD_DDBSITE_READ(u16, word);
116
117static inline u8 asd_ddbsite_read_byte(struct asd_ha_struct *asd_ha,
118 u16 ddb_site_no,
119 u16 offs)
120{
121 if (offs & 1)
122 return asd_ddbsite_read_word(asd_ha, ddb_site_no,
123 offs & ~1) >> 8;
124 else
125 return asd_ddbsite_read_word(asd_ha, ddb_site_no,
126 offs) & 0xFF;
127}
128
129
/*
 * ASD_DDBSITE_WRITE -- generate a typed writer of a DDB site via the
 * CIO bus: select the DDB window at @offs through ALTCIOADR, point
 * ADDBPTR at the site, then write the value to CTXACCESS.
 * NOTE(review): not locked here; presumably callers serialize
 * DDB-site access -- verify.
 */
#define ASD_DDBSITE_WRITE(type, ord) \
static inline void asd_ddbsite_write_##ord (struct asd_ha_struct *asd_ha, \
					    u16 ddb_site_no, \
					    u16 offs, type val) \
{ \
	asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
	asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
	asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
}

ASD_DDBSITE_WRITE(u32, dword);
ASD_DDBSITE_WRITE(u16, word);
142
143static inline void asd_ddbsite_write_byte(struct asd_ha_struct *asd_ha,
144 u16 ddb_site_no,
145 u16 offs, u8 val)
146{
147 u16 base = offs & ~1;
148 u16 rval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
149 if (offs & 1)
150 rval = (val << 8) | (rval & 0xFF);
151 else
152 rval = (rval & 0xFF00) | val;
153 asd_ddbsite_write_word(asd_ha, ddb_site_no, base, rval);
154}
155
156
/*
 * ASD_SCBSITE_READ -- generate a typed reader of an SCB site via the
 * CIO bus: select the SCB window at @offs through ALTCIOADR, point
 * ASCBPTR at the site, then read the value from CTXACCESS.
 * NOTE(review): not locked here; presumably callers serialize
 * SCB-site access -- verify.
 */
#define ASD_SCBSITE_READ(type, ord) \
static inline type asd_scbsite_read_##ord (struct asd_ha_struct *asd_ha, \
					   u16 scb_site_no, \
					   u16 offs) \
{ \
	asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
	asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
	return asd_read_reg_##ord (asd_ha, CTXACCESS); \
}

ASD_SCBSITE_READ(u32, dword);
ASD_SCBSITE_READ(u16, word);
169
170static inline u8 asd_scbsite_read_byte(struct asd_ha_struct *asd_ha,
171 u16 scb_site_no,
172 u16 offs)
173{
174 if (offs & 1)
175 return asd_scbsite_read_word(asd_ha, scb_site_no,
176 offs & ~1) >> 8;
177 else
178 return asd_scbsite_read_word(asd_ha, scb_site_no,
179 offs) & 0xFF;
180}
181
182
/*
 * ASD_SCBSITE_WRITE -- generate a typed writer of an SCB site via the
 * CIO bus: select the SCB window at @offs through ALTCIOADR, point
 * ASCBPTR at the site, then write the value to CTXACCESS.
 * NOTE(review): not locked here; presumably callers serialize
 * SCB-site access -- verify.
 */
#define ASD_SCBSITE_WRITE(type, ord) \
static inline void asd_scbsite_write_##ord (struct asd_ha_struct *asd_ha, \
					    u16 scb_site_no, \
					    u16 offs, type val) \
{ \
	asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
	asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
	asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
}

ASD_SCBSITE_WRITE(u32, dword);
ASD_SCBSITE_WRITE(u16, word);
195
196static inline void asd_scbsite_write_byte(struct asd_ha_struct *asd_ha,
197 u16 scb_site_no,
198 u16 offs, u8 val)
199{
200 u16 base = offs & ~1;
201 u16 rval = asd_scbsite_read_word(asd_ha, scb_site_no, base);
202 if (offs & 1)
203 rval = (val << 8) | (rval & 0xFF);
204 else
205 rval = (rval & 0xFF00) | val;
206 asd_scbsite_write_word(asd_ha, scb_site_no, base, rval);
207}
208
/**
 * asd_ddbsite_update_word -- atomically update a word in a ddb site
 * @asd_ha: pointer to host adapter structure
 * @ddb_site_no: the DDB site number
 * @offs: the offset into the DDB
 * @oldval: old value found in that offset
 * @newval: the new value to replace it
 *
 * This function is used when the sequencers are running and we need to
 * update a DDB site atomically without expensive pausing and unpausing
 * of the sequencers and accessing the DDB site through the CIO bus.
 *
 * Return 0 on success; -EFAULT on parity error; -EAGAIN if the old value
 * is different than the current value at that offset.
 */
static inline int asd_ddbsite_update_word(struct asd_ha_struct *asd_ha,
					  u16 ddb_site_no, u16 offs,
					  u16 oldval, u16 newval)
{
	u8  done;
	/* Cheap early-out: if the word already differs from @oldval, the
	 * hardware compare-exchange below is guaranteed to lose. */
	u16 oval = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs);
	if (oval != oldval)
		return -EAGAIN;
	/* Arm the atomic engine with the expected and replacement values,
	 * then poll ATOMICSTATCTL until it reports completion.
	 * NOTE(review): busy-wait with no timeout -- presumably the
	 * hardware guarantees completion; verify. */
	asd_write_reg_word(asd_ha, AOLDDATA, oldval);
	asd_write_reg_word(asd_ha, ANEWDATA, newval);
	do {
		done = asd_read_reg_byte(asd_ha, ATOMICSTATCTL);
	} while (!(done & ATOMICDONE));
	if (done & ATOMICERR)
		return -EFAULT;	  /* parity error */
	else if (done & ATOMICWIN)
		return 0;	  /* success */
	else
		return -EAGAIN;	  /* oldval different than current value */
}
244
/* Atomically replace one byte of a DDB site by delegating to
 * asd_ddbsite_update_word() on the aligned word containing @offs.
 * Returns 0 on success, -EFAULT on parity error, -EAGAIN if the byte
 * no longer equals @_oldval. */
static inline int asd_ddbsite_update_byte(struct asd_ha_struct *asd_ha,
					  u16 ddb_site_no, u16 offs,
					  u8 _oldval, u8 _newval)
{
	u16 base = offs & ~1;
	u16 oval;
	u16 nval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
	if (offs & 1) {
		if ((nval >> 8) != _oldval)
			return -EAGAIN;
		/* Replace the high byte.  Building oval from the already
		 * modified nval is safe: only the *low* byte of nval is
		 * reused here, and it still holds the value read from
		 * hardware. */
		nval = (_newval << 8) | (nval & 0xFF);
		oval = (_oldval << 8) | (nval & 0xFF);
	} else {
		if ((nval & 0xFF) != _oldval)
			return -EAGAIN;
		/* Replace the low byte; the high byte of nval is untouched
		 * by the first assignment, so oval is the original word
		 * with the low byte set to @_oldval, as required. */
		nval = (nval & 0xFF00) | _newval;
		oval = (nval & 0xFF00) | _oldval;
	}
	return asd_ddbsite_update_word(asd_ha, ddb_site_no, base, oval, nval);
}
265
266static inline void asd_write_reg_addr(struct asd_ha_struct *asd_ha, u32 reg,
267 dma_addr_t dma_handle)
268{
269 asd_write_reg_dword(asd_ha, reg, ASD_BUSADDR_LO(dma_handle));
270 asd_write_reg_dword(asd_ha, reg+4, ASD_BUSADDR_HI(dma_handle));
271}
272
273static inline u32 asd_get_cmdctx_size(struct asd_ha_struct *asd_ha)
274{
275 /* DCHREVISION returns 0, possibly broken */
276 u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
277 return ctxmemsize ? 65536 : 32768;
278}
279
280static inline u32 asd_get_devctx_size(struct asd_ha_struct *asd_ha)
281{
282 u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
283 return ctxmemsize ? 8192 : 4096;
284}
285
/* Mask all host (CHIM) interrupt sources by writing the reset-enable
 * bits of CHIMINTEN. */
static inline void asd_disable_ints(struct asd_ha_struct *asd_ha)
{
	asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
}
290
/* Unmask the interrupt sources this driver handles: COM SAS error
 * conditions, the DCH CFIFTOERR condition, and finally the host (CHIM)
 * interrupt enables -- CHIMINTEN last so nothing fires before the
 * error sources are configured. */
static inline void asd_enable_ints(struct asd_ha_struct *asd_ha)
{
	/* Enable COM SAS interrupt on errors, COMSTAT */
	asd_write_reg_dword(asd_ha, COMSTATEN,
			    EN_CSBUFPERR | EN_CSERR | EN_OVLYERR);
	/* Enable DCH SAS CFIFTOERR */
	asd_write_reg_dword(asd_ha, DCHSTATUS, EN_CFIFTOERR);
	/* Enable Host Device interrupts */
	asd_write_reg_dword(asd_ha, CHIMINTEN, SET_CHIMINTEN);
}
301
302#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
new file mode 100644
index 000000000000..b79f45f3ad47
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -0,0 +1,2398 @@
1/*
 2 * Aic94xx SAS/SATA driver hardware registers definitions.
3 *
4 * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * Luben Tuikov: Some register value updates to make it work with the window
9 * agnostic register r/w functions. Some register corrections, sizes,
10 * etc.
11 *
12 * This file is licensed under GPLv2.
13 *
14 * This file is part of the aic94xx driver.
15 *
16 * The aic94xx driver is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; version 2 of the
19 * License.
20 *
21 * The aic94xx driver is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with the aic94xx driver; if not, write to the Free Software
28 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 *
30 * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $
31 *
32 */
33
34#ifndef _ADP94XX_REG_DEF_H_
35#define _ADP94XX_REG_DEF_H_
36
37/*
38 * Common definitions.
39 */
40#define CSEQ_MODE_PAGE_SIZE 0x200 /* CSEQ mode page size */
41#define LmSEQ_MODE_PAGE_SIZE 0x200 /* LmSEQ mode page size */
42#define LmSEQ_HOST_REG_SIZE 0x4000 /* LmSEQ Host Register size */
43
44/********************* COM_SAS registers definition *************************/
45
46/* The base is REG_BASE_ADDR, defined in aic94xx_reg.h.
47 */
48
49/*
50 * CHIM Registers, Address Range : (0x00-0xFF)
51 */
52#define COMBIST (REG_BASE_ADDR + 0x00)
53
54/* bits 31:24 */
55#define L7BLKRST 0x80000000
56#define L6BLKRST 0x40000000
57#define L5BLKRST 0x20000000
58#define L4BLKRST 0x10000000
59#define L3BLKRST 0x08000000
60#define L2BLKRST 0x04000000
61#define L1BLKRST 0x02000000
62#define L0BLKRST 0x01000000
63#define LmBLKRST 0xFF000000
64#define LmBLKRST_COMBIST(phyid) (1 << (24 + phyid))
65
66#define OCMBLKRST 0x00400000
67#define CTXMEMBLKRST 0x00200000
68#define CSEQBLKRST 0x00100000
69#define EXSIBLKRST 0x00040000
70#define DPIBLKRST 0x00020000
71#define DFIFBLKRST 0x00010000
72#define HARDRST 0x00000200
73#define COMBLKRST 0x00000100
74#define FRCDFPERR 0x00000080
75#define FRCCIOPERR 0x00000020
76#define FRCBISTERR 0x00000010
77#define COMBISTEN 0x00000004
78#define COMBISTDONE 0x00000002 /* ro */
79#define COMBISTFAIL 0x00000001 /* ro */
80
81#define COMSTAT (REG_BASE_ADDR + 0x04)
82
83#define REQMBXREAD 0x00000040
84#define RSPMBXAVAIL 0x00000020
85#define CSBUFPERR 0x00000008
86#define OVLYERR 0x00000004
87#define CSERR 0x00000002
88#define OVLYDMADONE 0x00000001
89
90#define COMSTAT_MASK (REQMBXREAD | RSPMBXAVAIL | \
91 CSBUFPERR | OVLYERR | CSERR |\
92 OVLYDMADONE)
93
94#define COMSTATEN (REG_BASE_ADDR + 0x08)
95
96#define EN_REQMBXREAD 0x00000040
97#define EN_RSPMBXAVAIL 0x00000020
98#define EN_CSBUFPERR 0x00000008
99#define EN_OVLYERR 0x00000004
100#define EN_CSERR 0x00000002
101#define EN_OVLYDONE 0x00000001
102
103#define SCBPRO (REG_BASE_ADDR + 0x0C)
104
105#define SCBCONS_MASK 0xFFFF0000
106#define SCBPRO_MASK 0x0000FFFF
107
108#define CHIMREQMBX (REG_BASE_ADDR + 0x10)
109
110#define CHIMRSPMBX (REG_BASE_ADDR + 0x14)
111
112#define CHIMINT (REG_BASE_ADDR + 0x18)
113
114#define EXT_INT0 0x00000800
115#define EXT_INT1 0x00000400
116#define PORRSTDET 0x00000200
117#define HARDRSTDET 0x00000100
118#define DLAVAILQ 0x00000080 /* ro */
119#define HOSTERR 0x00000040
120#define INITERR 0x00000020
121#define DEVINT 0x00000010
122#define COMINT 0x00000008
123#define DEVTIMER2 0x00000004
124#define DEVTIMER1 0x00000002
125#define DLAVAIL 0x00000001
126
127#define CHIMINT_MASK (HOSTERR | INITERR | DEVINT | COMINT |\
128 DEVTIMER2 | DEVTIMER1 | DLAVAIL)
129
130#define DEVEXCEPT_MASK (HOSTERR | INITERR | DEVINT | COMINT)
131
132#define CHIMINTEN (REG_BASE_ADDR + 0x1C)
133
134#define RST_EN_EXT_INT1 0x01000000
135#define RST_EN_EXT_INT0 0x00800000
136#define RST_EN_HOSTERR 0x00400000
137#define RST_EN_INITERR 0x00200000
138#define RST_EN_DEVINT 0x00100000
139#define RST_EN_COMINT 0x00080000
140#define RST_EN_DEVTIMER2 0x00040000
141#define RST_EN_DEVTIMER1 0x00020000
142#define RST_EN_DLAVAIL 0x00010000
143#define SET_EN_EXT_INT1 0x00000100
144#define SET_EN_EXT_INT0 0x00000080
145#define SET_EN_HOSTERR 0x00000040
146#define SET_EN_INITERR 0x00000020
147#define SET_EN_DEVINT 0x00000010
148#define SET_EN_COMINT 0x00000008
149#define SET_EN_DEVTIMER2 0x00000004
150#define SET_EN_DEVTIMER1 0x00000002
151#define SET_EN_DLAVAIL 0x00000001
152
153#define RST_CHIMINTEN (RST_EN_HOSTERR | RST_EN_INITERR | \
154 RST_EN_DEVINT | RST_EN_COMINT | \
155 RST_EN_DEVTIMER2 | RST_EN_DEVTIMER1 |\
156 RST_EN_DLAVAIL)
157
158#define SET_CHIMINTEN (SET_EN_HOSTERR | SET_EN_INITERR |\
159 SET_EN_DEVINT | SET_EN_COMINT |\
160 SET_EN_DLAVAIL)
161
162#define OVLYDMACTL (REG_BASE_ADDR + 0x20)
163
164#define OVLYADR_MASK 0x07FF0000
165#define OVLYLSEQ_MASK 0x0000FF00
166#define OVLYCSEQ 0x00000080
167#define OVLYHALTERR 0x00000040
168#define PIOCMODE 0x00000020
169#define RESETOVLYDMA 0x00000008 /* wo */
170#define STARTOVLYDMA 0x00000004
171#define STOPOVLYDMA 0x00000002 /* wo */
172#define OVLYDMAACT 0x00000001 /* ro */
173
174#define OVLYDMACNT (REG_BASE_ADDR + 0x24)
175
176#define OVLYDOMAIN1 0x20000000 /* ro */
177#define OVLYDOMAIN0 0x10000000
178#define OVLYBUFADR_MASK 0x007F0000
179#define OVLYDMACNT_MASK 0x00003FFF
180
181#define OVLYDMAADR (REG_BASE_ADDR + 0x28)
182
183#define DMAERR (REG_BASE_ADDR + 0x30)
184
185#define OVLYERRSTAT_MASK 0x0000FF00 /* ro */
186#define CSERRSTAT_MASK 0x000000FF /* ro */
187
188#define SPIODATA (REG_BASE_ADDR + 0x34)
189
190/* 0x38 - 0x3C are reserved */
191
192#define T1CNTRLR (REG_BASE_ADDR + 0x40)
193
194#define T1DONE 0x00010000 /* ro */
195#define TIMER64 0x00000400
196#define T1ENABLE 0x00000200
197#define T1RELOAD 0x00000100
198#define T1PRESCALER_MASK 0x00000003
199
200#define T1CMPR (REG_BASE_ADDR + 0x44)
201
202#define T1CNTR (REG_BASE_ADDR + 0x48)
203
204#define T2CNTRLR (REG_BASE_ADDR + 0x4C)
205
206#define T2DONE 0x00010000 /* ro */
207#define T2ENABLE 0x00000200
208#define T2RELOAD 0x00000100
209#define T2PRESCALER_MASK 0x00000003
210
211#define T2CMPR (REG_BASE_ADDR + 0x50)
212
213#define T2CNTR (REG_BASE_ADDR + 0x54)
214
215/* 0x58h - 0xFCh are reserved */
216
217/*
218 * DCH_SAS Registers, Address Range : (0x800-0xFFF)
219 */
220#define CMDCTXBASE (REG_BASE_ADDR + 0x800)
221
222#define DEVCTXBASE (REG_BASE_ADDR + 0x808)
223
224#define CTXDOMAIN (REG_BASE_ADDR + 0x810)
225
226#define DEVCTXDOMAIN1 0x00000008 /* ro */
227#define DEVCTXDOMAIN0 0x00000004
228#define CMDCTXDOMAIN1 0x00000002 /* ro */
229#define CMDCTXDOMAIN0 0x00000001
230
231#define DCHCTL (REG_BASE_ADDR + 0x814)
232
233#define OCMBISTREPAIR 0x00080000
234#define OCMBISTEN 0x00040000
235#define OCMBISTDN 0x00020000 /* ro */
236#define OCMBISTFAIL 0x00010000 /* ro */
237#define DDBBISTEN 0x00004000
238#define DDBBISTDN 0x00002000 /* ro */
239#define DDBBISTFAIL 0x00001000 /* ro */
240#define SCBBISTEN 0x00000400
241#define SCBBISTDN 0x00000200 /* ro */
242#define SCBBISTFAIL 0x00000100 /* ro */
243
244#define MEMSEL_MASK 0x000000E0
245#define MEMSEL_CCM_LSEQ 0x00000000
246#define MEMSEL_CCM_IOP 0x00000020
247#define MEMSEL_CCM_SASCTL 0x00000040
248#define MEMSEL_DCM_LSEQ 0x00000060
249#define MEMSEL_DCM_IOP 0x00000080
250#define MEMSEL_OCM 0x000000A0
251
252#define FRCERR 0x00000010
253#define AUTORLS 0x00000001
254
255#define DCHREVISION (REG_BASE_ADDR + 0x818)
256
257#define DCHREVISION_MASK 0x000000FF
258
259#define DCHSTATUS (REG_BASE_ADDR + 0x81C)
260
261#define EN_CFIFTOERR 0x00020000
262#define CFIFTOERR 0x00000200
263#define CSEQINT 0x00000100 /* ro */
264#define LSEQ7INT 0x00000080 /* ro */
265#define LSEQ6INT 0x00000040 /* ro */
266#define LSEQ5INT 0x00000020 /* ro */
267#define LSEQ4INT 0x00000010 /* ro */
268#define LSEQ3INT 0x00000008 /* ro */
269#define LSEQ2INT 0x00000004 /* ro */
270#define LSEQ1INT 0x00000002 /* ro */
271#define LSEQ0INT 0x00000001 /* ro */
272
273#define LSEQINT_MASK (LSEQ7INT | LSEQ6INT | LSEQ5INT |\
274 LSEQ4INT | LSEQ3INT | LSEQ2INT |\
275 LSEQ1INT | LSEQ0INT)
276
277#define DCHDFIFDEBUG (REG_BASE_ADDR + 0x820)
278#define ENFAIRMST 0x00FF0000
279#define DISWRMST9 0x00000200
280#define DISWRMST8 0x00000100
281#define DISRDMST 0x000000FF
282
283#define ATOMICSTATCTL (REG_BASE_ADDR + 0x824)
284/* 8 bit wide */
285#define AUTOINC 0x80
286#define ATOMICERR 0x04
287#define ATOMICWIN 0x02
288#define ATOMICDONE 0x01
289
290
291#define ALTCIOADR (REG_BASE_ADDR + 0x828)
292/* 16 bit; bits 8:0 define CIO addr space of CSEQ */
293
294#define ASCBPTR (REG_BASE_ADDR + 0x82C)
295/* 16 bit wide */
296
297#define ADDBPTR (REG_BASE_ADDR + 0x82E)
298/* 16 bit wide */
299
300#define ANEWDATA (REG_BASE_ADDR + 0x830)
301/* 16 bit */
302
303#define AOLDDATA (REG_BASE_ADDR + 0x834)
304/* 16 bit */
305
306#define CTXACCESS (REG_BASE_ADDR + 0x838)
307/* 32 bit */
308
309/* 0x83Ch - 0xFFCh are reserved */
310
311/*
312 * ARP2 External Processor Registers, Address Range : (0x00-0x1F)
313 */
314#define ARP2CTL 0x00
315
316#define FRCSCRPERR 0x00040000
317#define FRCARP2PERR 0x00020000
318#define FRCARP2ILLOPC 0x00010000
319#define ENWAITTO 0x00008000
320#define PERRORDIS 0x00004000
321#define FAILDIS 0x00002000
322#define CIOPERRDIS 0x00001000
323#define BREAKEN3 0x00000800
324#define BREAKEN2 0x00000400
325#define BREAKEN1 0x00000200
326#define BREAKEN0 0x00000100
327#define EPAUSE 0x00000008
328#define PAUSED 0x00000004 /* ro */
329#define STEP 0x00000002
330#define ARP2RESET 0x00000001 /* wo */
331
332#define ARP2INT 0x04
333
334#define HALTCODE_MASK 0x00FF0000 /* ro */
335#define ARP2WAITTO 0x00000100
336#define ARP2HALTC 0x00000080
337#define ARP2ILLOPC 0x00000040
338#define ARP2PERR 0x00000020
339#define ARP2CIOPERR 0x00000010
340#define ARP2BREAK3 0x00000008
341#define ARP2BREAK2 0x00000004
342#define ARP2BREAK1 0x00000002
343#define ARP2BREAK0 0x00000001
344
345#define ARP2INTEN 0x08
346
347#define EN_ARP2WAITTO 0x00000100
348#define EN_ARP2HALTC 0x00000080
349#define EN_ARP2ILLOPC 0x00000040
350#define EN_ARP2PERR 0x00000020
351#define EN_ARP2CIOPERR 0x00000010
352#define EN_ARP2BREAK3 0x00000008
353#define EN_ARP2BREAK2 0x00000004
354#define EN_ARP2BREAK1 0x00000002
355#define EN_ARP2BREAK0 0x00000001
356
357#define ARP2BREAKADR01 0x0C
358
359#define BREAKADR1_MASK 0x0FFF0000
360#define BREAKADR0_MASK 0x00000FFF
361
362#define ARP2BREAKADR23 0x10
363
364#define BREAKADR3_MASK 0x0FFF0000
365#define BREAKADR2_MASK 0x00000FFF
366
367/* 0x14h - 0x1Ch are reserved */
368
369/*
370 * ARP2 Registers, Address Range : (0x00-0x1F)
371 * The definitions have the same address offset for CSEQ and LmSEQ
372 * CIO Bus Registers.
373 */
374#define MODEPTR 0x00
375
376#define DSTMODE 0xF0
377#define SRCMODE 0x0F
378
379#define ALTMODE 0x01
380
381#define ALTDMODE 0xF0
382#define ALTSMODE 0x0F
383
384#define ATOMICXCHG 0x02
385
386#define FLAG 0x04
387
388#define INTCODE_MASK 0xF0
389#define ALTMODEV2 0x04
390#define CARRY_INT 0x02
391#define CARRY 0x01
392
393#define ARP2INTCTL 0x05
394
395#define PAUSEDIS 0x80
396#define RSTINTCTL 0x40
397#define POPALTMODE 0x08
398#define ALTMODEV 0x04
399#define INTMASK 0x02
400#define IRET 0x01
401
402#define STACK 0x06
403
404#define FUNCTION1 0x07
405
406#define PRGMCNT 0x08
407
408#define ACCUM 0x0A
409
410#define SINDEX 0x0C
411
412#define DINDEX 0x0E
413
414#define ALLONES 0x10
415
416#define ALLZEROS 0x11
417
418#define SINDIR 0x12
419
420#define DINDIR 0x13
421
422#define JUMLDIR 0x14
423
424#define ARP2HALTCODE 0x15
425
426#define CURRADDR 0x16
427
428#define LASTADDR 0x18
429
430#define NXTLADDR 0x1A
431
432#define DBGPORTPTR 0x1C
433
434#define DBGPORT 0x1D
435
436/*
437 * CIO Registers.
438 * The definitions have the same address offset for CSEQ and LmSEQ
439 * CIO Bus Registers.
440 */
441#define MnSCBPTR 0x20
442
443#define MnDDBPTR 0x22
444
445#define SCRATCHPAGE 0x24
446
447#define MnSCRATCHPAGE 0x25
448
449#define SCRATCHPAGESV 0x26
450
451#define MnSCRATCHPAGESV 0x27
452
453#define MnDMAERRS 0x46
454
455#define MnSGDMAERRS 0x47
456
457#define MnSGBUF 0x53
458
459#define MnSGDMASTAT 0x5b
460
461#define MnDDMACTL 0x5c /* RAZOR.rspec.fm rev 1.5 is wrong */
462
463#define MnDDMASTAT 0x5d /* RAZOR.rspec.fm rev 1.5 is wrong */
464
465#define MnDDMAMODE 0x5e /* RAZOR.rspec.fm rev 1.5 is wrong */
466
467#define MnDMAENG 0x60
468
469#define MnPIPECTL 0x61
470
471#define MnSGBADR 0x65
472
473#define MnSCB_SITE 0x100
474
475#define MnDDB_SITE 0x180
476
477/*
478 * The common definitions below have the same address offset for both
479 * CSEQ and LmSEQ.
480 */
481#define BISTCTL0 0x4C
482
483#define BISTCTL1 0x50
484
485#define MAPPEDSCR 0x800
486
487/*
488 * CSEQ Host Register, Address Range : (0x000-0xFFC)
489 */
490#define CSEQ_HOST_REG_BASE_ADR 0xB8001000
491
492#define CARP2CTL (CSEQ_HOST_REG_BASE_ADR + ARP2CTL)
493
494#define CARP2INT (CSEQ_HOST_REG_BASE_ADR + ARP2INT)
495
496#define CARP2INTEN (CSEQ_HOST_REG_BASE_ADR + ARP2INTEN)
497
498#define CARP2BREAKADR01 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR01)
499
500#define CARP2BREAKADR23 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR23)
501
502#define CBISTCTL (CSEQ_HOST_REG_BASE_ADR + BISTCTL1)
503
504#define CSEQRAMBISTEN 0x00000040
505#define CSEQRAMBISTDN 0x00000020 /* ro */
506#define CSEQRAMBISTFAIL 0x00000010 /* ro */
507#define CSEQSCRBISTEN 0x00000004
508#define CSEQSCRBISTDN 0x00000002 /* ro */
509#define CSEQSCRBISTFAIL 0x00000001 /* ro */
510
511#define CMAPPEDSCR (CSEQ_HOST_REG_BASE_ADR + MAPPEDSCR)
512
513/*
514 * CSEQ CIO Bus Registers, Address Range : (0x0000-0x1FFC)
515 * 16 modes, each mode is 512 bytes.
 516 * Unless specified, the registers are valid for all modes.
517 */
518#define CSEQ_CIO_REG_BASE_ADR REG_BASE_ADDR_CSEQCIO
519
520#define CSEQm_CIO_REG(Mode, Reg) \
521 (CSEQ_CIO_REG_BASE_ADR + \
522 ((u32) (Mode) * CSEQ_MODE_PAGE_SIZE) + (u32) (Reg))
523
524#define CMODEPTR (CSEQ_CIO_REG_BASE_ADR + MODEPTR)
525
526#define CALTMODE (CSEQ_CIO_REG_BASE_ADR + ALTMODE)
527
528#define CATOMICXCHG (CSEQ_CIO_REG_BASE_ADR + ATOMICXCHG)
529
530#define CFLAG (CSEQ_CIO_REG_BASE_ADR + FLAG)
531
532#define CARP2INTCTL (CSEQ_CIO_REG_BASE_ADR + ARP2INTCTL)
533
534#define CSTACK (CSEQ_CIO_REG_BASE_ADR + STACK)
535
536#define CFUNCTION1 (CSEQ_CIO_REG_BASE_ADR + FUNCTION1)
537
538#define CPRGMCNT (CSEQ_CIO_REG_BASE_ADR + PRGMCNT)
539
540#define CACCUM (CSEQ_CIO_REG_BASE_ADR + ACCUM)
541
542#define CSINDEX (CSEQ_CIO_REG_BASE_ADR + SINDEX)
543
544#define CDINDEX (CSEQ_CIO_REG_BASE_ADR + DINDEX)
545
546#define CALLONES (CSEQ_CIO_REG_BASE_ADR + ALLONES)
547
548#define CALLZEROS (CSEQ_CIO_REG_BASE_ADR + ALLZEROS)
549
550#define CSINDIR (CSEQ_CIO_REG_BASE_ADR + SINDIR)
551
552#define CDINDIR (CSEQ_CIO_REG_BASE_ADR + DINDIR)
553
554#define CJUMLDIR (CSEQ_CIO_REG_BASE_ADR + JUMLDIR)
555
556#define CARP2HALTCODE (CSEQ_CIO_REG_BASE_ADR + ARP2HALTCODE)
557
558#define CCURRADDR (CSEQ_CIO_REG_BASE_ADR + CURRADDR)
559
560#define CLASTADDR (CSEQ_CIO_REG_BASE_ADR + LASTADDR)
561
562#define CNXTLADDR (CSEQ_CIO_REG_BASE_ADR + NXTLADDR)
563
564#define CDBGPORTPTR (CSEQ_CIO_REG_BASE_ADR + DBGPORTPTR)
565
566#define CDBGPORT (CSEQ_CIO_REG_BASE_ADR + DBGPORT)
567
568#define CSCRATCHPAGE (CSEQ_CIO_REG_BASE_ADR + SCRATCHPAGE)
569
570#define CMnSCBPTR(Mode) CSEQm_CIO_REG(Mode, MnSCBPTR)
571
572#define CMnDDBPTR(Mode) CSEQm_CIO_REG(Mode, MnDDBPTR)
573
574#define CMnSCRATCHPAGE(Mode) CSEQm_CIO_REG(Mode, MnSCRATCHPAGE)
575
576#define CLINKCON (CSEQ_CIO_REG_BASE_ADR + 0x28)
577
578#define CCIOAACESS (CSEQ_CIO_REG_BASE_ADR + 0x2C)
579
580/* mode 0-7 */
581#define MnREQMBX 0x30
582#define CMnREQMBX(Mode) CSEQm_CIO_REG(Mode, 0x30)
583
584/* mode 8 */
585#define CSEQCON CSEQm_CIO_REG(8, 0x30)
586
587/* mode 0-7 */
588#define MnRSPMBX 0x34
589#define CMnRSPMBX(Mode) CSEQm_CIO_REG(Mode, 0x34)
590
591/* mode 8 */
592#define CSEQCOMCTL CSEQm_CIO_REG(8, 0x34)
593
594/* mode 8 */
595#define CSEQCOMSTAT CSEQm_CIO_REG(8, 0x35)
596
597/* mode 8 */
598#define CSEQCOMINTEN CSEQm_CIO_REG(8, 0x36)
599
600/* mode 8 */
601#define CSEQCOMDMACTL CSEQm_CIO_REG(8, 0x37)
602
603#define CSHALTERR 0x10
604#define RESETCSDMA 0x08 /* wo */
605#define STARTCSDMA 0x04
606#define STOPCSDMA 0x02 /* wo */
607#define CSDMAACT 0x01 /* ro */
608
609/* mode 0-7 */
610#define MnINT 0x38
611#define CMnINT(Mode) CSEQm_CIO_REG(Mode, 0x38)
612
613#define CMnREQMBXE 0x02
614#define CMnRSPMBXF 0x01
615#define CMnINT_MASK 0x00000003
616
617/* mode 8 */
618#define CSEQREQMBX CSEQm_CIO_REG(8, 0x38)
619
620/* mode 0-7 */
621#define MnINTEN 0x3C
622#define CMnINTEN(Mode) CSEQm_CIO_REG(Mode, 0x3C)
623
624#define EN_CMnRSPMBXF 0x01
625
626/* mode 8 */
627#define CSEQRSPMBX CSEQm_CIO_REG(8, 0x3C)
628
629/* mode 8 */
630#define CSDMAADR CSEQm_CIO_REG(8, 0x40)
631
632/* mode 8 */
633#define CSDMACNT CSEQm_CIO_REG(8, 0x48)
634
635/* mode 8 */
636#define CSEQDLCTL CSEQm_CIO_REG(8, 0x4D)
637
638#define DONELISTEND 0x10
639#define DONELISTSIZE_MASK 0x0F
640#define DONELISTSIZE_8ELEM 0x01
641#define DONELISTSIZE_16ELEM 0x02
642#define DONELISTSIZE_32ELEM 0x03
643#define DONELISTSIZE_64ELEM 0x04
644#define DONELISTSIZE_128ELEM 0x05
645#define DONELISTSIZE_256ELEM 0x06
646#define DONELISTSIZE_512ELEM 0x07
647#define DONELISTSIZE_1024ELEM 0x08
648#define DONELISTSIZE_2048ELEM 0x09
649#define DONELISTSIZE_4096ELEM 0x0A
650#define DONELISTSIZE_8192ELEM 0x0B
651#define DONELISTSIZE_16384ELEM 0x0C
652
653/* mode 8 */
654#define CSEQDLOFFS CSEQm_CIO_REG(8, 0x4E)
655
656/* mode 11 */
657#define CM11INTVEC0 CSEQm_CIO_REG(11, 0x50)
658
659/* mode 11 */
660#define CM11INTVEC1 CSEQm_CIO_REG(11, 0x52)
661
662/* mode 11 */
663#define CM11INTVEC2 CSEQm_CIO_REG(11, 0x54)
664
665#define CCONMSK (CSEQ_CIO_REG_BASE_ADR + 0x60)
666
667#define CCONEXIST (CSEQ_CIO_REG_BASE_ADR + 0x61)
668
669#define CCONMODE (CSEQ_CIO_REG_BASE_ADR + 0x62)
670
671#define CTIMERCALC (CSEQ_CIO_REG_BASE_ADR + 0x64)
672
673#define CINTDIS (CSEQ_CIO_REG_BASE_ADR + 0x68)
674
675/* mode 8, 32x32 bits, 128 bytes of mapped buffer */
676#define CSBUFFER CSEQm_CIO_REG(8, 0x80)
677
678#define CSCRATCH (CSEQ_CIO_REG_BASE_ADR + 0x1C0)
679
680/* mode 0-8 */
681#define CMnSCRATCH(Mode) CSEQm_CIO_REG(Mode, 0x1E0)
682
683/*
684 * CSEQ Mapped Instruction RAM Page, Address Range : (0x0000-0x1FFC)
685 */
686#define CSEQ_RAM_REG_BASE_ADR 0xB8004000
687
688/*
689 * The common definitions below have the same address offset for all the Link
690 * sequencers.
691 */
692#define MODECTL 0x40
693
694#define DBGMODE 0x44
695
696#define CONTROL 0x48
697#define LEDTIMER 0x00010000
698#define LEDTIMERS_10us 0x00000000
699#define LEDTIMERS_1ms 0x00000800
700#define LEDTIMERS_100ms 0x00001000
701#define LEDMODE_TXRX 0x00000000
702#define LEDMODE_CONNECTED 0x00000200
703#define LEDPOL 0x00000100
704
705#define LSEQRAM 0x1000
706
707/*
708 * LmSEQ Host Registers, Address Range : (0x0000-0x3FFC)
709 */
710#define LSEQ0_HOST_REG_BASE_ADR 0xB8020000
711#define LSEQ1_HOST_REG_BASE_ADR 0xB8024000
712#define LSEQ2_HOST_REG_BASE_ADR 0xB8028000
713#define LSEQ3_HOST_REG_BASE_ADR 0xB802C000
714#define LSEQ4_HOST_REG_BASE_ADR 0xB8030000
715#define LSEQ5_HOST_REG_BASE_ADR 0xB8034000
716#define LSEQ6_HOST_REG_BASE_ADR 0xB8038000
717#define LSEQ7_HOST_REG_BASE_ADR 0xB803C000
718
719#define LmARP2CTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
720 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
721 ARP2CTL)
722
723#define LmARP2INT(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
724 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
725 ARP2INT)
726
727#define LmARP2INTEN(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
728 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
729 ARP2INTEN)
730
731#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
732 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
733 DBGMODE)
734
735#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
736 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
737 CONTROL)
738
739#define LmARP2BREAKADR01(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
740 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
741 ARP2BREAKADR01)
742
743#define LmARP2BREAKADR23(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
744 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
745 ARP2BREAKADR23)
746
747#define LmMODECTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
748 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
749 MODECTL)
750
751#define LmAUTODISCI 0x08000000
752#define LmDSBLBITLT 0x04000000
753#define LmDSBLANTT 0x02000000
754#define LmDSBLCRTT 0x01000000
755#define LmDSBLCONT 0x00000100
756#define LmPRIMODE 0x00000080
757#define LmDSBLHOLD 0x00000040
758#define LmDISACK 0x00000020
759#define LmBLIND48 0x00000010
760#define LmRCVMODE_MASK 0x0000000C
761#define LmRCVMODE_PLD 0x00000000
762#define LmRCVMODE_HPC 0x00000004
763
764#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
765 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
766 DBGMODE)
767
768#define LmFRCPERR 0x80000000
769#define LmMEMSEL_MASK 0x30000000
770#define LmFRCRBPERR 0x00000000
771#define LmFRCTBPERR 0x10000000
772#define LmFRCSGBPERR 0x20000000
773#define LmFRCARBPERR 0x30000000
774#define LmRCVIDW 0x00080000
775#define LmINVDWERR 0x00040000
776#define LmRCVDISP 0x00004000
777#define LmDISPERR 0x00002000
778#define LmDSBLDSCR 0x00000800
779#define LmDSBLSCR 0x00000400
780#define LmFRCNAK 0x00000200
781#define LmFRCROFS 0x00000100
782#define LmFRCCRC 0x00000080
783#define LmFRMTYPE_MASK 0x00000070
784#define LmSG_DATA 0x00000000
785#define LmSG_COMMAND 0x00000010
786#define LmSG_TASK 0x00000020
787#define LmSG_TGTXFER 0x00000030
788#define LmSG_RESPONSE 0x00000040
789#define LmSG_IDENADDR 0x00000050
790#define LmSG_OPENADDR 0x00000060
791#define LmDISCRCGEN 0x00000008
792#define LmDISCRCCHK 0x00000004
793#define LmSSXMTFRM 0x00000002
794#define LmSSRCVFRM 0x00000001
795
796#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
797 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
798 CONTROL)
799
800#define LmSTEPXMTFRM 0x00000002
801#define LmSTEPRCVFRM 0x00000001
802
803#define LmBISTCTL0(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
804 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
805 BISTCTL0)
806
807#define ARBBISTEN 0x40000000
808#define ARBBISTDN 0x20000000 /* ro */
809#define ARBBISTFAIL 0x10000000 /* ro */
810#define TBBISTEN 0x00000400
811#define TBBISTDN 0x00000200 /* ro */
812#define TBBISTFAIL 0x00000100 /* ro */
813#define RBBISTEN 0x00000040
814#define RBBISTDN 0x00000020 /* ro */
815#define RBBISTFAIL 0x00000010 /* ro */
816#define SGBISTEN 0x00000004
817#define SGBISTDN 0x00000002 /* ro */
818#define SGBISTFAIL 0x00000001 /* ro */
819
820#define LmBISTCTL1(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
821 ((LinkNum)*LmSEQ_HOST_REG_SIZE) +\
822 BISTCTL1)
823
824#define LmRAMPAGE1 0x00000200
825#define LmRAMPAGE0 0x00000100
826#define LmIMEMBISTEN 0x00000040
827#define LmIMEMBISTDN 0x00000020 /* ro */
828#define LmIMEMBISTFAIL 0x00000010 /* ro */
829#define LmSCRBISTEN 0x00000004
830#define LmSCRBISTDN 0x00000002 /* ro */
831#define LmSCRBISTFAIL 0x00000001 /* ro */
832#define LmRAMPAGE (LmRAMPAGE1 + LmRAMPAGE0)
833#define LmRAMPAGE_LSHIFT 0x8
834
835#define LmSCRATCH(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
836 ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
837 MAPPEDSCR)
838
839#define LmSEQRAM(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
840 ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
841 LSEQRAM)
842
843/*
844 * LmSEQ CIO Bus Register, Address Range : (0x0000-0xFFC)
845 * 8 modes, each mode is 512 bytes.
 * Unless specified, the registers are valid for all modes.
847 */
848#define LmSEQ_CIOBUS_REG_BASE 0x2000
849
850#define LmSEQ_PHY_BASE(Mode, LinkNum) \
851 (LSEQ0_HOST_REG_BASE_ADR + \
852 (LmSEQ_HOST_REG_SIZE * (u32) (LinkNum)) + \
853 LmSEQ_CIOBUS_REG_BASE + \
854 ((u32) (Mode) * LmSEQ_MODE_PAGE_SIZE))
855
856#define LmSEQ_PHY_REG(Mode, LinkNum, Reg) \
857 (LmSEQ_PHY_BASE(Mode, LinkNum) + (u32) (Reg))
858
859#define LmMODEPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, MODEPTR)
860
861#define LmALTMODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALTMODE)
862
863#define LmATOMICXCHG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ATOMICXCHG)
864
865#define LmFLAG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FLAG)
866
867#define LmARP2INTCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2INTCTL)
868
869#define LmSTACK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, STACK)
870
871#define LmFUNCTION1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FUNCTION1)
872
873#define LmPRGMCNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, PRGMCNT)
874
875#define LmACCUM(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ACCUM)
876
877#define LmSINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDEX)
878
879#define LmDINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDEX)
880
881#define LmALLONES(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLONES)
882
883#define LmALLZEROS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLZEROS)
884
885#define LmSINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDIR)
886
887#define LmDINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDIR)
888
889#define LmJUMLDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, JUMLDIR)
890
891#define LmARP2HALTCODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2HALTCODE)
892
893#define LmCURRADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, CURRADDR)
894
895#define LmLASTADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, LASTADDR)
896
897#define LmNXTLADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, NXTLADDR)
898
899#define LmDBGPORTPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORTPTR)
900
901#define LmDBGPORT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORT)
902
903#define LmSCRATCHPAGE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SCRATCHPAGE)
904
905#define LmMnSCRATCHPAGE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, \
906 MnSCRATCHPAGE)
907
908#define LmTIMERCALC(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x28)
909
910#define LmREQMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x30)
911
912#define LmRSPMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x34)
913
914#define LmMnINT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x38)
915
916#define CTXMEMSIZE 0x80000000 /* ro */
917#define LmACKREQ 0x08000000
918#define LmNAKREQ 0x04000000
919#define LmMnXMTERR 0x02000000
920#define LmM5OOBSVC 0x01000000
921#define LmHWTINT 0x00800000
922#define LmMnCTXDONE 0x00100000
923#define LmM2REQMBXF 0x00080000
924#define LmM2RSPMBXE 0x00040000
925#define LmMnDMAERR 0x00020000
926#define LmRCVPRIM 0x00010000
927#define LmRCVERR 0x00008000
928#define LmADDRRCV 0x00004000
929#define LmMnHDRMISS 0x00002000
930#define LmMnWAITSCB 0x00001000
931#define LmMnRLSSCB 0x00000800
932#define LmMnSAVECTX 0x00000400
933#define LmMnFETCHSG 0x00000200
934#define LmMnLOADCTX 0x00000100
935#define LmMnCFGICL 0x00000080
936#define LmMnCFGSATA 0x00000040
937#define LmMnCFGEXPSATA 0x00000020
938#define LmMnCFGCMPLT 0x00000010
939#define LmMnCFGRBUF 0x00000008
940#define LmMnSAVETTR 0x00000004
941#define LmMnCFGRDAT 0x00000002
942#define LmMnCFGHDR 0x00000001
943
944#define LmMnINTEN(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x3C)
945
946#define EN_LmACKREQ 0x08000000
947#define EN_LmNAKREQ 0x04000000
948#define EN_LmMnXMTERR 0x02000000
949#define EN_LmM5OOBSVC 0x01000000
950#define EN_LmHWTINT 0x00800000
951#define EN_LmMnCTXDONE 0x00100000
952#define EN_LmM2REQMBXF 0x00080000
953#define EN_LmM2RSPMBXE 0x00040000
954#define EN_LmMnDMAERR 0x00020000
955#define EN_LmRCVPRIM 0x00010000
956#define EN_LmRCVERR 0x00008000
957#define EN_LmADDRRCV 0x00004000
958#define EN_LmMnHDRMISS 0x00002000
959#define EN_LmMnWAITSCB 0x00001000
960#define EN_LmMnRLSSCB 0x00000800
961#define EN_LmMnSAVECTX 0x00000400
962#define EN_LmMnFETCHSG 0x00000200
963#define EN_LmMnLOADCTX 0x00000100
964#define EN_LmMnCFGICL 0x00000080
965#define EN_LmMnCFGSATA 0x00000040
966#define EN_LmMnCFGEXPSATA 0x00000020
967#define EN_LmMnCFGCMPLT 0x00000010
968#define EN_LmMnCFGRBUF 0x00000008
969#define EN_LmMnSAVETTR 0x00000004
970#define EN_LmMnCFGRDAT 0x00000002
971#define EN_LmMnCFGHDR 0x00000001
972
973#define LmM0INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmMnCFGRBUF | \
974 EN_LmMnSAVETTR | EN_LmMnCFGRDAT | \
975 EN_LmMnCFGHDR | EN_LmRCVERR | \
976 EN_LmADDRRCV | EN_LmMnHDRMISS | \
977 EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
978 EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
979 EN_LmHWTINT | EN_LmMnCTXDONE | \
980 EN_LmRCVPRIM | EN_LmMnCFGSATA | \
981 EN_LmMnCFGEXPSATA | EN_LmMnDMAERR)
982
983#define LmM1INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmADDRRCV | \
984 EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
985 EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
986 EN_LmMnXMTERR | EN_LmHWTINT | \
987 EN_LmMnCTXDONE | EN_LmRCVPRIM | \
988 EN_LmRCVERR | EN_LmMnDMAERR)
989
990#define LmM2INTEN_MASK (EN_LmADDRRCV | EN_LmHWTINT | \
991 EN_LmM2REQMBXF | EN_LmRCVPRIM | \
992 EN_LmRCVERR)
993
994#define LmM5INTEN_MASK (EN_LmADDRRCV | EN_LmM5OOBSVC | \
995 EN_LmHWTINT | EN_LmRCVPRIM | \
996 EN_LmRCVERR)
997
998#define LmXMTPRIMD(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x40)
999
1000#define LmXMTPRIMCS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x44)
1001
1002#define LmCONSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x45)
1003
1004#define LmMnDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x46)
1005
1006#define LmMnSGDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x47)
1007
1008#define LmM0EXPHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x48)
1009
1010#define LmM1SASALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x48)
1011#define SAS_ALIGN_DEFAULT 0xFF
1012
1013#define LmM0MSKHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x49)
1014
1015#define LmM1STPALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x49)
1016#define STP_ALIGN_DEFAULT 0x1F
1017
1018#define LmM0RCVHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4A)
1019
1020#define LmM1XMTHDRP(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4A)
1021
1022#define LmM0ICLADR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4B)
1023
1024#define LmM1ALIGNMODE(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4B)
1025
1026#define LmDISALIGN 0x20
1027#define LmROTSTPALIGN 0x10
1028#define LmSTPALIGN 0x08
1029#define LmROTNOTIFY 0x04
1030#define LmDUALALIGN 0x02
1031#define LmROTALIGN 0x01
1032
1033#define LmM0EXPRCVNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4C)
1034
1035#define LmM1XMTCNT(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4C)
1036
1037#define LmMnBUFSTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x4E)
1038
1039#define LmMnBUFPERR 0x01
1040
1041/* mode 0-1 */
1042#define LmMnXFRLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x59)
1043
1044#define LmMnXFRLVL_128 0x05
1045#define LmMnXFRLVL_256 0x04
1046#define LmMnXFRLVL_512 0x03
1047#define LmMnXFRLVL_1024 0x02
1048#define LmMnXFRLVL_1536 0x01
1049#define LmMnXFRLVL_2048 0x00
1050
1051 /* mode 0-1 */
1052#define LmMnSGDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5A)
1053
1054#define LmMnRESETSG 0x04
1055#define LmMnSTOPSG 0x02
1056#define LmMnSTARTSG 0x01
1057
1058/* mode 0-1 */
1059#define LmMnSGDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5B)
1060
1061/* mode 0-1 */
1062#define LmMnDDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5C)
1063
1064#define LmMnFLUSH 0x40 /* wo */
1065#define LmMnRLSRTRY 0x20 /* wo */
1066#define LmMnDISCARD 0x10 /* wo */
1067#define LmMnRESETDAT 0x08 /* wo */
1068#define LmMnSUSDAT 0x04 /* wo */
1069#define LmMnSTOPDAT 0x02 /* wo */
1070#define LmMnSTARTDAT 0x01 /* wo */
1071
1072/* mode 0-1 */
1073#define LmMnDDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5D)
1074
1075#define LmMnDPEMPTY 0x80
1076#define LmMnFLUSHING 0x40
1077#define LmMnDDMAREQ 0x20
1078#define LmMnHDMAREQ 0x10
1079#define LmMnDATFREE 0x08
1080#define LmMnDATSUS 0x04
1081#define LmMnDATACT 0x02
1082#define LmMnDATEN 0x01
1083
1084/* mode 0-1 */
1085#define LmMnDDMAMODE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5E)
1086
1087#define LmMnDMATYPE_NORMAL 0x0000
1088#define LmMnDMATYPE_HOST_ONLY_TX 0x0001
1089#define LmMnDMATYPE_DEVICE_ONLY_TX 0x0002
1090#define LmMnDMATYPE_INVALID 0x0003
1091#define LmMnDMATYPE_MASK 0x0003
1092
1093#define LmMnDMAWRAP 0x0004
1094#define LmMnBITBUCKET 0x0008
1095#define LmMnDISHDR 0x0010
1096#define LmMnSTPCRC 0x0020
1097#define LmXTEST 0x0040
1098#define LmMnDISCRC 0x0080
1099#define LmMnENINTLK 0x0100
1100#define LmMnADDRFRM 0x0400
1101#define LmMnENXMTCRC 0x0800
1102
1103/* mode 0-1 */
1104#define LmMnXFRCNT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x70)
1105
1106/* mode 0-1 */
1107#define LmMnDPSEL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7B)
1108#define LmMnDPSEL_MASK 0x07
1109#define LmMnEOLPRE 0x40
1110#define LmMnEOSPRE 0x80
1111
1112/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
1113/* Receive Mode n = 0 */
1114#define LmMnHRADDR 0x00
1115#define LmMnHBYTECNT 0x01
1116#define LmMnHREWIND 0x02
1117#define LmMnDWADDR 0x03
1118#define LmMnDSPACECNT 0x04
1119#define LmMnDFRMSIZE 0x05
1120
1121/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
1122/* Transmit Mode n = 1 */
1123#define LmMnHWADDR 0x00
1124#define LmMnHSPACECNT 0x01
1125/* #define LmMnHREWIND 0x02 */
1126#define LmMnDRADDR 0x03
1127#define LmMnDBYTECNT 0x04
1128/* #define LmMnDFRMSIZE 0x05 */
1129
1130/* mode 0-1 */
1131#define LmMnDPACC(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x78)
1132#define LmMnDPACC_MASK 0x00FFFFFF
1133
1134/* mode 0-1 */
1135#define LmMnHOLDLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7D)
1136
1137#define LmPRMSTAT0(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x80)
1138#define LmPRMSTAT0BYTE0 0x80
1139#define LmPRMSTAT0BYTE1 0x81
1140#define LmPRMSTAT0BYTE2 0x82
1141#define LmPRMSTAT0BYTE3 0x83
1142
1143#define LmFRAMERCVD 0x80000000
1144#define LmXFRRDYRCVD 0x40000000
1145#define LmUNKNOWNP 0x20000000
1146#define LmBREAK 0x10000000
1147#define LmDONE 0x08000000
1148#define LmOPENACPT 0x04000000
1149#define LmOPENRJCT 0x02000000
1150#define LmOPENRTRY 0x01000000
1151#define LmCLOSERV1 0x00800000
1152#define LmCLOSERV0 0x00400000
1153#define LmCLOSENORM 0x00200000
1154#define LmCLOSECLAF 0x00100000
1155#define LmNOTIFYRV2 0x00080000
1156#define LmNOTIFYRV1 0x00040000
1157#define LmNOTIFYRV0 0x00020000
1158#define LmNOTIFYSPIN 0x00010000
1159#define LmBROADRV4 0x00008000
1160#define LmBROADRV3 0x00004000
1161#define LmBROADRV2 0x00002000
1162#define LmBROADRV1 0x00001000
1163#define LmBROADSES 0x00000800
1164#define LmBROADRVCH1 0x00000400
1165#define LmBROADRVCH0 0x00000200
1166#define LmBROADCH 0x00000100
1167#define LmAIPRVWP 0x00000080
1168#define LmAIPWP 0x00000040
1169#define LmAIPWD 0x00000020
1170#define LmAIPWC 0x00000010
1171#define LmAIPRV2 0x00000008
1172#define LmAIPRV1 0x00000004
1173#define LmAIPRV0 0x00000002
1174#define LmAIPNRML 0x00000001
1175
1176#define LmBROADCAST_MASK (LmBROADCH | LmBROADRVCH0 | \
1177 LmBROADRVCH1)
1178
1179#define LmPRMSTAT1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x84)
1180#define LmPRMSTAT1BYTE0 0x84
1181#define LmPRMSTAT1BYTE1 0x85
1182#define LmPRMSTAT1BYTE2 0x86
1183#define LmPRMSTAT1BYTE3 0x87
1184
1185#define LmFRMRCVDSTAT 0x80000000
1186#define LmBREAK_DET 0x04000000
1187#define LmCLOSE_DET 0x02000000
1188#define LmDONE_DET 0x01000000
1189#define LmXRDY 0x00040000
1190#define LmSYNCSRST 0x00020000
1191#define LmSYNC 0x00010000
1192#define LmXHOLD 0x00008000
1193#define LmRRDY 0x00004000
1194#define LmHOLD 0x00002000
1195#define LmROK 0x00001000
1196#define LmRIP 0x00000800
1197#define LmCRBLK 0x00000400
1198#define LmACK 0x00000200
1199#define LmNAK 0x00000100
1200#define LmHARDRST 0x00000080
1201#define LmERROR 0x00000040
1202#define LmRERR 0x00000020
1203#define LmPMREQP 0x00000010
1204#define LmPMREQS 0x00000008
1205#define LmPMACK 0x00000004
1206#define LmPMNAK 0x00000002
1207#define LmDMAT 0x00000001
1208
1209/* mode 1 */
1210#define LmMnSATAFS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7E)
1211#define LmMnXMTSIZE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x93)
1212
1213/* mode 0 */
1214#define LmMnFRMERR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xB0)
1215
1216#define LmACRCERR 0x00000800
1217#define LmPHYOVRN 0x00000400
1218#define LmOBOVRN 0x00000200
1219#define LmMnZERODATA 0x00000100
1220#define LmSATAINTLK 0x00000080
1221#define LmMnCRCERR 0x00000020
1222#define LmRRDYOVRN 0x00000010
1223#define LmMISSSOAF 0x00000008
1224#define LmMISSSOF 0x00000004
1225#define LmMISSEOAF 0x00000002
1226#define LmMISSEOF 0x00000001
1227
1228#define LmFRMERREN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xB4)
1229
/*
 * Frame-error interrupt enable bits for LmFRMERREN (0xB4).  Each EN_*
 * bit mirrors the corresponding status bit in LmMnFRMERR (0xB0).
 */
#define EN_LmACRCERR			0x00000800
#define EN_LmPHYOVRN			0x00000400
#define EN_LmOBOVRN			0x00000200
#define EN_LmMnZERODATA			0x00000100
#define EN_LmSATAINTLK			0x00000080
#define EN_LmFRMBAD			0x00000040
#define EN_LmMnCRCERR			0x00000020
#define EN_LmRRDYOVRN			0x00000010
#define EN_LmMISSSOAF			0x00000008
#define EN_LmMISSSOF			0x00000004
#define EN_LmMISSEOAF			0x00000002
#define EN_LmMISSEOF			0x00000001

/*
 * Default frame-error enable mask.  Fixed to use EN_LmPHYOVRN rather
 * than the status-register bit LmPHYOVRN (same numeric value,
 * 0x00000400), so the mask is composed purely of EN_* enable bits,
 * consistent with the sibling masks (LmM0INTEN_MASK, LmHWTSTATEN_MASK,
 * etc.).  The mask value itself is unchanged.  EN_LmFRMBAD and
 * EN_LmMISSSOAF remain deliberately excluded, preserving the original
 * set of enabled conditions.
 */
#define LmFRMERREN_MASK			(EN_LmSATAINTLK | EN_LmMnCRCERR | \
					 EN_LmRRDYOVRN | EN_LmMISSSOF | \
					 EN_LmMISSEOAF | EN_LmMISSEOF | \
					 EN_LmACRCERR | EN_LmPHYOVRN | \
					 EN_LmOBOVRN | EN_LmMnZERODATA)
1248
1249#define LmHWTSTATEN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC5)
1250
1251#define EN_LmDONETO 0x80
1252#define EN_LmINVDISP 0x40
1253#define EN_LmINVDW 0x20
1254#define EN_LmDWSEVENT 0x08
1255#define EN_LmCRTTTO 0x04
1256#define EN_LmANTTTO 0x02
1257#define EN_LmBITLTTO 0x01
1258
1259#define LmHWTSTATEN_MASK (EN_LmINVDISP | EN_LmINVDW | \
1260 EN_LmDWSEVENT | EN_LmCRTTTO | \
1261 EN_LmANTTTO | EN_LmDONETO | \
1262 EN_LmBITLTTO)
1263
1264#define LmHWTSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC7)
1265
1266#define LmDONETO 0x80
1267#define LmINVDISP 0x40
1268#define LmINVDW 0x20
1269#define LmDWSEVENT 0x08
1270#define LmCRTTTO 0x04
1271#define LmANTTTO 0x02
1272#define LmBITLTTO 0x01
1273
1274#define LmMnDATABUFADR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xC8)
1275#define LmDATABUFADR_MASK 0x0FFF
1276
1277#define LmMnDATABUF(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xCA)
1278
1279#define LmPRIMSTAT0EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE0)
1280
1281#define EN_LmUNKNOWNP 0x20000000
1282#define EN_LmBREAK 0x10000000
1283#define EN_LmDONE 0x08000000
1284#define EN_LmOPENACPT 0x04000000
1285#define EN_LmOPENRJCT 0x02000000
1286#define EN_LmOPENRTRY 0x01000000
1287#define EN_LmCLOSERV1 0x00800000
1288#define EN_LmCLOSERV0 0x00400000
1289#define EN_LmCLOSENORM 0x00200000
1290#define EN_LmCLOSECLAF 0x00100000
1291#define EN_LmNOTIFYRV2 0x00080000
1292#define EN_LmNOTIFYRV1 0x00040000
1293#define EN_LmNOTIFYRV0 0x00020000
1294#define EN_LmNOTIFYSPIN 0x00010000
1295#define EN_LmBROADRV4 0x00008000
1296#define EN_LmBROADRV3 0x00004000
1297#define EN_LmBROADRV2 0x00002000
1298#define EN_LmBROADRV1 0x00001000
1299#define EN_LmBROADRV0 0x00000800
1300#define EN_LmBROADRVCH1 0x00000400
1301#define EN_LmBROADRVCH0 0x00000200
1302#define EN_LmBROADCH 0x00000100
1303#define EN_LmAIPRVWP 0x00000080
1304#define EN_LmAIPWP 0x00000040
1305#define EN_LmAIPWD 0x00000020
1306#define EN_LmAIPWC 0x00000010
1307#define EN_LmAIPRV2 0x00000008
1308#define EN_LmAIPRV1 0x00000004
1309#define EN_LmAIPRV0 0x00000002
1310#define EN_LmAIPNRML 0x00000001
1311
1312#define LmPRIMSTAT0EN_MASK (EN_LmBREAK | \
1313 EN_LmDONE | EN_LmOPENACPT | \
1314 EN_LmOPENRJCT | EN_LmOPENRTRY | \
1315 EN_LmCLOSERV1 | EN_LmCLOSERV0 | \
1316 EN_LmCLOSENORM | EN_LmCLOSECLAF | \
1317 EN_LmBROADRV4 | EN_LmBROADRV3 | \
1318 EN_LmBROADRV2 | EN_LmBROADRV1 | \
1319 EN_LmBROADRV0 | EN_LmBROADRVCH1 | \
1320 EN_LmBROADRVCH0 | EN_LmBROADCH | \
1321 EN_LmAIPRVWP | EN_LmAIPWP | \
1322 EN_LmAIPWD | EN_LmAIPWC | \
1323 EN_LmAIPRV2 | EN_LmAIPRV1 | \
1324 EN_LmAIPRV0 | EN_LmAIPNRML)
1325
1326#define LmPRIMSTAT1EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE4)
1327
1328#define EN_LmXRDY 0x00040000
1329#define EN_LmSYNCSRST 0x00020000
1330#define EN_LmSYNC 0x00010000
1331#define EN_LmXHOLD 0x00008000
1332#define EN_LmRRDY 0x00004000
1333#define EN_LmHOLD 0x00002000
1334#define EN_LmROK 0x00001000
1335#define EN_LmRIP 0x00000800
1336#define EN_LmCRBLK 0x00000400
1337#define EN_LmACK 0x00000200
1338#define EN_LmNAK 0x00000100
1339#define EN_LmHARDRST 0x00000080
1340#define EN_LmERROR 0x00000040
1341#define EN_LmRERR 0x00000020
1342#define EN_LmPMREQP 0x00000010
1343#define EN_LmPMREQS 0x00000008
1344#define EN_LmPMACK 0x00000004
1345#define EN_LmPMNAK 0x00000002
1346#define EN_LmDMAT 0x00000001
1347
1348#define LmPRIMSTAT1EN_MASK (EN_LmHARDRST | \
1349 EN_LmSYNCSRST | \
1350 EN_LmPMREQP | EN_LmPMREQS | \
1351 EN_LmPMACK | EN_LmPMNAK)
1352
1353#define LmSMSTATE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE8)
1354
1355#define LmSMSTATEBRK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xEC)
1356
1357#define LmSMDBGCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xF0)
1358
1359
1360/*
1361 * LmSEQ CIO Bus Mode 3 Register.
1362 * Mode 3: Configuration and Setup, IOP Context SCB.
1363 */
1364#define LmM3SATATIMER(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x48)
1365
1366#define LmM3INTVEC0(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x90)
1367
1368#define LmM3INTVEC1(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x92)
1369
1370#define LmM3INTVEC2(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x94)
1371
1372#define LmM3INTVEC3(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x96)
1373
1374#define LmM3INTVEC4(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x98)
1375
1376#define LmM3INTVEC5(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9A)
1377
1378#define LmM3INTVEC6(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9C)
1379
1380#define LmM3INTVEC7(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9E)
1381
1382#define LmM3INTVEC8(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA4)
1383
1384#define LmM3INTVEC9(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA6)
1385
1386#define LmM3INTVEC10(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB0)
1387
1388#define LmM3FRMGAP(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB4)
1389
1390#define LmBITL_TIMER(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA2)
1391
1392#define LmWWN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA8)
1393
1394
1395/*
1396 * LmSEQ CIO Bus Mode 5 Registers.
1397 * Mode 5: Phy/OOB Control and Status.
1398 */
1399#define LmSEQ_OOB_REG(phy_id, reg) LmSEQ_PHY_REG(5, (phy_id), (reg))
1400
1401#define OOB_BFLTR 0x100
1402
1403#define BFLTR_THR_MASK 0xF0
1404#define BFLTR_TC_MASK 0x0F
1405
1406#define OOB_INIT_MIN 0x102
1407
1408#define OOB_INIT_MAX 0x104
1409
1410#define OOB_INIT_NEG 0x106
1411
1412#define OOB_SAS_MIN 0x108
1413
1414#define OOB_SAS_MAX 0x10A
1415
1416#define OOB_SAS_NEG 0x10C
1417
1418#define OOB_WAKE_MIN 0x10E
1419
1420#define OOB_WAKE_MAX 0x110
1421
1422#define OOB_WAKE_NEG 0x112
1423
1424#define OOB_IDLE_MAX 0x114
1425
1426#define OOB_BURST_MAX 0x116
1427
1428#define OOB_DATA_KBITS 0x126
1429
1430#define OOB_ALIGN_0_DATA 0x12C
1431
1432#define OOB_ALIGN_1_DATA 0x130
1433
1434#define D10_2_DATA_k 0x00
1435#define SYNC_DATA_k 0x02
1436#define ALIGN_1_DATA_k 0x04
1437#define ALIGN_0_DATA_k 0x08
1438#define BURST_DATA_k 0x10
1439
1440#define OOB_PHY_RESET_COUNT 0x13C
1441
1442#define OOB_SIG_GEN 0x140
1443
/* OOB_SIG_GEN (0x140) control bits. */
#define START_OOB			0x80
#define START_DWS			0x40
/* Bits 5:4 select the ALIGN count (values 0x30/0x20/0x10/0x00).
 * NOTE(review): ALIGN_CNT4 encodes as 0x00 while CNT1..CNT3 ascend
 * 0x10..0x30 — the name/value pairing looks inconsistent; confirm
 * against the controller spec before relying on the names. */
#define ALIGN_CNT3			0x30
#define ALIGN_CNT2			0x20
#define ALIGN_CNT1			0x10
#define ALIGN_CNT4			0x00
#define STOP_DWS			0x08
#define SEND_COMSAS			0x04
#define SEND_COMINIT			0x02
#define SEND_COMWAKE			0x01
1454
1455#define OOB_XMIT 0x141
1456
1457#define TX_ENABLE 0x80
1458#define XMIT_OOB_BURST 0x10
1459#define XMIT_D10_2 0x08
1460#define XMIT_SYNC 0x04
1461#define XMIT_ALIGN_1 0x02
1462#define XMIT_ALIGN_0 0x01
1463
1464#define FUNCTION_MASK 0x142
1465
1466#define SAS_MODE_DIS 0x80
1467#define SATA_MODE_DIS 0x40
1468#define SPINUP_HOLD_DIS 0x20
1469#define HOT_PLUG_DIS 0x10
1470#define SATA_PS_DIS 0x08
1471#define FUNCTION_MASK_DEFAULT (SPINUP_HOLD_DIS | SATA_PS_DIS)
1472
1473#define OOB_MODE 0x143
1474
1475#define SAS_MODE 0x80
1476#define SATA_MODE 0x40
1477#define SLOW_CLK 0x20
1478#define FORCE_XMIT_15 0x08
1479#define PHY_SPEED_60 0x04
1480#define PHY_SPEED_30 0x02
1481#define PHY_SPEED_15 0x01
1482
1483#define CURRENT_STATUS 0x144
1484
1485#define CURRENT_OOB_DONE 0x80
1486#define CURRENT_LOSS_OF_SIGNAL 0x40
1487#define CURRENT_SPINUP_HOLD 0x20
1488#define CURRENT_HOT_PLUG_CNCT 0x10
1489#define CURRENT_GTO_TIMEOUT 0x08
1490#define CURRENT_OOB_TIMEOUT 0x04
1491#define CURRENT_DEVICE_PRESENT 0x02
1492#define CURRENT_OOB_ERROR 0x01
1493
1494#define CURRENT_OOB1_ERROR (CURRENT_HOT_PLUG_CNCT | \
1495 CURRENT_GTO_TIMEOUT)
1496
1497#define CURRENT_OOB2_ERROR (CURRENT_HOT_PLUG_CNCT | \
1498 CURRENT_OOB_ERROR)
1499
1500#define DEVICE_ADDED_W_CNT (CURRENT_OOB_DONE | \
1501 CURRENT_HOT_PLUG_CNCT | \
1502 CURRENT_DEVICE_PRESENT)
1503
1504#define DEVICE_ADDED_WO_CNT (CURRENT_OOB_DONE | \
1505 CURRENT_DEVICE_PRESENT)
1506
1507#define DEVICE_REMOVED CURRENT_LOSS_OF_SIGNAL
1508
1509#define CURRENT_PHY_MASK (CURRENT_OOB_DONE | \
1510 CURRENT_LOSS_OF_SIGNAL | \
1511 CURRENT_SPINUP_HOLD | \
1512 CURRENT_HOT_PLUG_CNCT | \
1513 CURRENT_GTO_TIMEOUT | \
1514 CURRENT_DEVICE_PRESENT | \
1515 CURRENT_OOB_ERROR )
1516
1517#define CURRENT_ERR_MASK (CURRENT_LOSS_OF_SIGNAL | \
1518 CURRENT_GTO_TIMEOUT | \
1519 CURRENT_OOB_TIMEOUT | \
1520 CURRENT_OOB_ERROR )
1521
1522#define SPEED_MASK 0x145
1523
1524#define SATA_SPEED_30_DIS 0x10
1525#define SATA_SPEED_15_DIS 0x08
1526#define SAS_SPEED_60_DIS 0x04
1527#define SAS_SPEED_30_DIS 0x02
1528#define SAS_SPEED_15_DIS 0x01
1529#define SAS_SPEED_MASK_DEFAULT 0x00
1530
1531#define OOB_TIMER_ENABLE 0x14D
1532
1533#define HOT_PLUG_EN 0x80
1534#define RCD_EN 0x40
1535#define COMTIMER_EN 0x20
1536#define SNTT_EN 0x10
1537#define SNLT_EN 0x04
1538#define SNWT_EN 0x02
1539#define ALIGN_EN 0x01
1540
1541#define OOB_STATUS 0x14E
1542
1543#define OOB_DONE 0x80
1544#define LOSS_OF_SIGNAL 0x40 /* ro */
1545#define SPINUP_HOLD 0x20
1546#define HOT_PLUG_CNCT 0x10 /* ro */
1547#define GTO_TIMEOUT 0x08 /* ro */
1548#define OOB_TIMEOUT 0x04 /* ro */
1549#define DEVICE_PRESENT 0x02 /* ro */
1550#define OOB_ERROR 0x01 /* ro */
1551
1552#define OOB_STATUS_ERROR_MASK (LOSS_OF_SIGNAL | GTO_TIMEOUT | \
1553 OOB_TIMEOUT | OOB_ERROR)
1554
1555#define OOB_STATUS_CLEAR 0x14F
1556
1557#define OOB_DONE_CLR 0x80
1558#define LOSS_OF_SIGNAL_CLR 0x40
1559#define SPINUP_HOLD_CLR 0x20
1560#define HOT_PLUG_CNCT_CLR 0x10
1561#define GTO_TIMEOUT_CLR 0x08
1562#define OOB_TIMEOUT_CLR 0x04
1563#define OOB_ERROR_CLR 0x01
1564
1565#define HOT_PLUG_DELAY 0x150
1566/* In 5 ms units. 20 = 100 ms. */
1567#define HOTPLUG_DELAY_TIMEOUT 20
1568
1569
1570#define INT_ENABLE_2 0x15A
1571
1572#define OOB_DONE_EN 0x80
1573#define LOSS_OF_SIGNAL_EN 0x40
1574#define SPINUP_HOLD_EN 0x20
1575#define HOT_PLUG_CNCT_EN 0x10
1576#define GTO_TIMEOUT_EN 0x08
1577#define OOB_TIMEOUT_EN 0x04
1578#define DEVICE_PRESENT_EN 0x02
1579#define OOB_ERROR_EN 0x01
1580
1581#define PHY_CONTROL_0 0x160
1582
1583#define PHY_LOWPWREN_TX 0x80
1584#define PHY_LOWPWREN_RX 0x40
1585#define SPARE_REG_160_B5 0x20
1586#define OFFSET_CANCEL_RX 0x10
1587
1588/* bits 3:2 */
1589#define PHY_RXCOMCENTER_60V 0x00
1590#define PHY_RXCOMCENTER_70V 0x04
1591#define PHY_RXCOMCENTER_80V 0x08
1592#define PHY_RXCOMCENTER_90V 0x0C
1593#define PHY_RXCOMCENTER_MASK 0x0C
1594
1595#define PHY_RESET 0x02
1596#define SAS_DEFAULT_SEL 0x01
1597
1598#define PHY_CONTROL_1 0x161
1599
1600/* bits 2:0 */
1601#define SATA_PHY_DETLEVEL_50mv 0x00
1602#define SATA_PHY_DETLEVEL_75mv 0x01
1603#define SATA_PHY_DETLEVEL_100mv 0x02
1604#define SATA_PHY_DETLEVEL_125mv 0x03
1605#define SATA_PHY_DETLEVEL_150mv 0x04
1606#define SATA_PHY_DETLEVEL_175mv 0x05
1607#define SATA_PHY_DETLEVEL_200mv 0x06
1608#define SATA_PHY_DETLEVEL_225mv 0x07
1609#define SATA_PHY_DETLEVEL_MASK 0x07
1610
/* bits 5:3 */
#define SAS_PHY_DETLEVEL_50mv		0x00
#define SAS_PHY_DETLEVEL_75mv		0x08
#define SAS_PHY_DETLEVEL_100mv		0x10
/* NOTE(review): the values 0x11, 0x21 and 0x31 below set bit 0, which
 * lies outside the declared bits-5:3 field (SAS_PHY_DETLEVEL_MASK is
 * 0x38); a uniform encoding would be 0x18/0x28/0x38.  Possibly an
 * intentional hardware quirk — confirm against the controller spec
 * before changing. */
#define SAS_PHY_DETLEVEL_125mv		0x11
#define SAS_PHY_DETLEVEL_150mv		0x20
#define SAS_PHY_DETLEVEL_175mv		0x21
#define SAS_PHY_DETLEVEL_200mv		0x30
#define SAS_PHY_DETLEVEL_225mv		0x31
#define SAS_PHY_DETLEVEL_MASK		0x38
1621
1622#define PHY_CONTROL_2 0x162
1623
1624/* bits 7:5 */
1625#define SATA_PHY_DRV_400mv 0x00
1626#define SATA_PHY_DRV_450mv 0x20
1627#define SATA_PHY_DRV_500mv 0x40
1628#define SATA_PHY_DRV_550mv 0x60
1629#define SATA_PHY_DRV_600mv 0x80
1630#define SATA_PHY_DRV_650mv 0xA0
1631#define SATA_PHY_DRV_725mv 0xC0
1632#define SATA_PHY_DRV_800mv 0xE0
1633#define SATA_PHY_DRV_MASK 0xE0
1634
1635/* bits 4:3 */
1636#define SATA_PREEMP_0 0x00
1637#define SATA_PREEMP_1 0x08
1638#define SATA_PREEMP_2 0x10
1639#define SATA_PREEMP_3 0x18
1640#define SATA_PREEMP_MASK 0x18
1641
1642#define SATA_CMSH1P5 0x04
1643
1644/* bits 1:0 */
1645#define SATA_SLEW_0 0x00
1646#define SATA_SLEW_1 0x01
1647#define SATA_SLEW_2 0x02
1648#define SATA_SLEW_3 0x03
1649#define SATA_SLEW_MASK 0x03
1650
1651#define PHY_CONTROL_3 0x163
1652
1653/* bits 7:5 */
1654#define SAS_PHY_DRV_400mv 0x00
1655#define SAS_PHY_DRV_450mv 0x20
1656#define SAS_PHY_DRV_500mv 0x40
1657#define SAS_PHY_DRV_550mv 0x60
1658#define SAS_PHY_DRV_600mv 0x80
1659#define SAS_PHY_DRV_650mv 0xA0
1660#define SAS_PHY_DRV_725mv 0xC0
1661#define SAS_PHY_DRV_800mv 0xE0
1662#define SAS_PHY_DRV_MASK 0xE0
1663
1664/* bits 4:3 */
1665#define SAS_PREEMP_0 0x00
1666#define SAS_PREEMP_1 0x08
1667#define SAS_PREEMP_2 0x10
1668#define SAS_PREEMP_3 0x18
1669#define SAS_PREEMP_MASK 0x18
1670
1671#define SAS_CMSH1P5 0x04
1672
1673/* bits 1:0 */
1674#define SAS_SLEW_0 0x00
1675#define SAS_SLEW_1 0x01
1676#define SAS_SLEW_2 0x02
1677#define SAS_SLEW_3 0x03
1678#define SAS_SLEW_MASK 0x03
1679
1680#define PHY_CONTROL_4 0x168
1681
1682#define PHY_DONE_CAL_TX 0x80
1683#define PHY_DONE_CAL_RX 0x40
1684#define RX_TERM_LOAD_DIS 0x20
1685#define TX_TERM_LOAD_DIS 0x10
1686#define AUTO_TERM_CAL_DIS 0x08
1687#define PHY_SIGDET_FLTR_EN 0x04
1688#define OSC_FREQ 0x02
1689#define PHY_START_CAL 0x01
1690
1691/*
 * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
1693 */
1694#define PCIX_REG_BASE_ADR 0xB8040000
1695
1696#define PCIC_VENDOR_ID 0x00
1697
1698#define PCIC_DEVICE_ID 0x02
1699
1700#define PCIC_COMMAND 0x04
1701
1702#define INT_DIS 0x0400
1703#define FBB_EN 0x0200 /* ro */
1704#define SERR_EN 0x0100
1705#define STEP_EN 0x0080 /* ro */
1706#define PERR_EN 0x0040
1707#define VGA_EN 0x0020 /* ro */
1708#define MWI_EN 0x0010
1709#define SPC_EN 0x0008
1710#define MST_EN 0x0004
1711#define MEM_EN 0x0002
1712#define IO_EN 0x0001
1713
1714#define PCIC_STATUS 0x06
1715
1716#define PERR_DET 0x8000
1717#define SERR_GEN 0x4000
1718#define MABT_DET 0x2000
1719#define TABT_DET 0x1000
1720#define TABT_GEN 0x0800
1721#define DPERR_DET 0x0100
1722#define CAP_LIST 0x0010
1723#define INT_STAT 0x0008
1724
1725#define PCIC_DEVREV_ID 0x08
1726
1727#define PCIC_CLASS_CODE 0x09
1728
1729#define PCIC_CACHELINE_SIZE 0x0C
1730
1731#define PCIC_MBAR0 0x10
1732
1733#define PCIC_MBAR0_OFFSET 0
1734
1735#define PCIC_MBAR1 0x18
1736
1737#define PCIC_MBAR1_OFFSET 2
1738
1739#define PCIC_IOBAR 0x20
1740
1741#define PCIC_IOBAR_OFFSET 4
1742
1743#define PCIC_SUBVENDOR_ID 0x2C
1744
1745#define PCIC_SUBSYTEM_ID 0x2E
1746
1747#define PCIX_STATUS 0x44
1748#define RCV_SCE 0x20000000
1749#define UNEXP_SC 0x00080000
1750#define SC_DISCARD 0x00040000
1751
1752#define ECC_CTRL_STAT 0x48
1753#define UNCOR_ECCERR 0x00000008
1754
1755#define PCIC_PM_CSR 0x5C
1756
1757#define PWR_STATE_D0 0
1758#define PWR_STATE_D1 1 /* not supported */
1759#define PWR_STATE_D2 2 /* not supported */
1760#define PWR_STATE_D3 3
1761
1762#define PCIC_BASE1 0x6C /* internal use only */
1763
1764#define BASE1_RSVD 0xFFFFFFF8
1765
1766#define PCIC_BASEA 0x70 /* internal use only */
1767
1768#define BASEA_RSVD 0xFFFFFFC0
1769#define BASEA_START 0
1770
1771#define PCIC_BASEB 0x74 /* internal use only */
1772
1773#define BASEB_RSVD 0xFFFFFF80
1774#define BASEB_IOMAP_MASK 0x7F
1775#define BASEB_START 0x80
1776
1777#define PCIC_BASEC 0x78 /* internal use only */
1778
1779#define BASEC_RSVD 0xFFFFFFFC
1780#define BASEC_MASK 0x03
1781#define BASEC_START 0x58
1782
1783#define PCIC_MBAR_KEY 0x7C /* internal use only */
1784
1785#define MBAR_KEY_MASK 0xFFFFFFFF
1786
1787#define PCIC_HSTPCIX_CNTRL 0xA0
1788
1789#define REWIND_DIS 0x0800
1790#define SC_TMR_DIS 0x04000000
1791
1792#define PCIC_MBAR0_MASK 0xA8
1793#define PCIC_MBAR0_SIZE_MASK 0x1FFFE000
1794#define PCIC_MBAR0_SIZE_SHIFT 13
1795#define PCIC_MBAR0_SIZE(val) \
1796 (((val) & PCIC_MBAR0_SIZE_MASK) >> PCIC_MBAR0_SIZE_SHIFT)
1797
1798#define PCIC_FLASH_MBAR 0xB8
1799
1800#define PCIC_INTRPT_STAT 0xD4
1801
1802#define PCIC_TP_CTRL 0xFC
1803
1804/*
 * EXSI Registers, Address Range: (0x00-0xFC)
1806 */
1807#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI
1808
1809#define EXSICNFGR (EXSI_REG_BASE_ADR + 0x00)
1810
1811#define OCMINITIALIZED 0x80000000
1812#define ASIEN 0x00400000
1813#define HCMODE 0x00200000
1814#define PCIDEF 0x00100000
1815#define COMSTOCK 0x00080000
1816#define SEEPROMEND 0x00040000
1817#define MSTTIMEN 0x00020000
1818#define XREGEX 0x00000200
1819#define NVRAMW 0x00000100
1820#define NVRAMEX 0x00000080
1821#define SRAMW 0x00000040
1822#define SRAMEX 0x00000020
1823#define FLASHW 0x00000010
1824#define FLASHEX 0x00000008
1825#define SEEPROMCFG 0x00000004
1826#define SEEPROMTYP 0x00000002
1827#define SEEPROMEX 0x00000001
1828
1829
1830#define EXSICNTRLR (EXSI_REG_BASE_ADR + 0x04)
1831
1832#define MODINT_EN 0x00000001
1833
1834
1835#define PMSTATR (EXSI_REG_BASE_ADR + 0x10)
1836
1837#define FLASHRST 0x00000002
1838#define FLASHRDY 0x00000001
1839
1840
1841#define FLCNFGR (EXSI_REG_BASE_ADR + 0x14)
1842
1843#define FLWEH_MASK 0x30000000
1844#define FLWESU_MASK 0x0C000000
1845#define FLWEPW_MASK 0x03F00000
1846#define FLOEH_MASK 0x000C0000
1847#define FLOESU_MASK 0x00030000
1848#define FLOEPW_MASK 0x0000FC00
1849#define FLCSH_MASK 0x00000300
1850#define FLCSSU_MASK 0x000000C0
1851#define FLCSPW_MASK 0x0000003F
1852
1853#define SRCNFGR (EXSI_REG_BASE_ADR + 0x18)
1854
1855#define SRWEH_MASK 0x30000000
1856#define SRWESU_MASK 0x0C000000
1857#define SRWEPW_MASK 0x03F00000
1858
1859#define SROEH_MASK 0x000C0000
1860#define SROESU_MASK 0x00030000
1861#define SROEPW_MASK 0x0000FC00
1862#define SRCSH_MASK 0x00000300
1863#define SRCSSU_MASK 0x000000C0
1864#define SRCSPW_MASK 0x0000003F
1865
1866#define NVCNFGR (EXSI_REG_BASE_ADR + 0x1C)
1867
1868#define NVWEH_MASK 0x30000000
1869#define NVWESU_MASK 0x0C000000
1870#define NVWEPW_MASK 0x03F00000
1871#define NVOEH_MASK 0x000C0000
1872#define NVOESU_MASK 0x00030000
1873#define NVOEPW_MASK 0x0000FC00
1874#define NVCSH_MASK 0x00000300
1875#define NVCSSU_MASK 0x000000C0
1876#define NVCSPW_MASK 0x0000003F
1877
1878#define XRCNFGR (EXSI_REG_BASE_ADR + 0x20)
1879
1880#define XRWEH_MASK 0x30000000
1881#define XRWESU_MASK 0x0C000000
1882#define XRWEPW_MASK 0x03F00000
1883#define XROEH_MASK 0x000C0000
1884#define XROESU_MASK 0x00030000
1885#define XROEPW_MASK 0x0000FC00
1886#define XRCSH_MASK 0x00000300
1887#define XRCSSU_MASK 0x000000C0
1888#define XRCSPW_MASK 0x0000003F
1889
1890#define XREGADDR (EXSI_REG_BASE_ADR + 0x24)
1891
1892#define XRADDRINCEN 0x80000000
1893#define XREGADD_MASK 0x007FFFFF
1894
1895
1896#define XREGDATAR (EXSI_REG_BASE_ADR + 0x28)
1897
1898#define XREGDATA_MASK 0x0000FFFF
1899
1900#define GPIOOER (EXSI_REG_BASE_ADR + 0x40)
1901
1902#define GPIOODENR (EXSI_REG_BASE_ADR + 0x44)
1903
1904#define GPIOINVR (EXSI_REG_BASE_ADR + 0x48)
1905
1906#define GPIODATAOR (EXSI_REG_BASE_ADR + 0x4C)
1907
1908#define GPIODATAIR (EXSI_REG_BASE_ADR + 0x50)
1909
1910#define GPIOCNFGR (EXSI_REG_BASE_ADR + 0x54)
1911
1912#define GPIO_EXTSRC 0x00000001
1913
1914#define SCNTRLR (EXSI_REG_BASE_ADR + 0xA0)
1915
1916#define SXFERDONE 0x00000100
1917#define SXFERCNT_MASK 0x000000E0
1918#define SCMDTYP_MASK 0x0000001C
1919#define SXFERSTART 0x00000002
1920#define SXFEREN 0x00000001
1921
1922#define SRATER (EXSI_REG_BASE_ADR + 0xA4)
1923
1924#define SADDRR (EXSI_REG_BASE_ADR + 0xA8)
1925
1926#define SADDR_MASK 0x0000FFFF
1927
1928#define SDATAOR (EXSI_REG_BASE_ADR + 0xAC)
1929
1930#define SDATAOR0 (EXSI_REG_BASE_ADR + 0xAC)
1931#define SDATAOR1 (EXSI_REG_BASE_ADR + 0xAD)
1932#define SDATAOR2 (EXSI_REG_BASE_ADR + 0xAE)
1933#define SDATAOR3 (EXSI_REG_BASE_ADR + 0xAF)
1934
1935#define SDATAIR (EXSI_REG_BASE_ADR + 0xB0)
1936
1937#define SDATAIR0 (EXSI_REG_BASE_ADR + 0xB0)
1938#define SDATAIR1 (EXSI_REG_BASE_ADR + 0xB1)
1939#define SDATAIR2 (EXSI_REG_BASE_ADR + 0xB2)
1940#define SDATAIR3 (EXSI_REG_BASE_ADR + 0xB3)
1941
1942#define ASISTAT0R (EXSI_REG_BASE_ADR + 0xD0)
1943#define ASIFMTERR 0x00000400
1944#define ASISEECHKERR 0x00000200
1945#define ASIERR 0x00000100
1946
1947#define ASISTAT1R (EXSI_REG_BASE_ADR + 0xD4)
1948#define CHECKSUM_MASK 0x0000FFFF
1949
1950#define ASIERRADDR (EXSI_REG_BASE_ADR + 0xD8)
1951#define ASIERRDATAR (EXSI_REG_BASE_ADR + 0xDC)
1952#define ASIERRSTATR (EXSI_REG_BASE_ADR + 0xE0)
1953#define CPI2ASIBYTECNT_MASK 0x00070000
1954#define CPI2ASIBYTEEN_MASK 0x0000F000
1955#define CPI2ASITARGERR_MASK 0x00000F00
1956#define CPI2ASITARGMID_MASK 0x000000F0
1957#define CPI2ASIMSTERR_MASK 0x0000000F
1958
1959/*
1960 * XSRAM, External SRAM (DWord and any BE pattern accessible)
1961 */
1962#define XSRAM_REG_BASE_ADDR 0xB8100000
1963#define XSRAM_SIZE 0x100000
1964
1965/*
1966 * NVRAM Registers, Address Range: (0x00000 - 0x3FFFF).
1967 */
1968#define NVRAM_REG_BASE_ADR 0xBF800000
1969#define NVRAM_MAX_BASE_ADR 0x003FFFFF
1970
1971/* OCM base address */
1972#define OCM_BASE_ADDR 0xA0000000
1973#define OCM_MAX_SIZE 0x20000
1974
1975/*
1976 * Sequencers (Central and Link) Scratch RAM page definitions.
1977 */
1978
1979/*
1980 * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024
1981 * byte memory. It is dword accessible and has byte parity
1982 * protection. The CSEQ accesses it in 32 byte windows, either as mode
1983 * dependent or mode independent memory. Each mode has 96 bytes,
1984 * (three 32 byte pages 0-2, not contiguous), leaving 128 bytes of
1985 * Mode Independent memory (four 32 byte pages 3-7). Note that mode
1986 * dependent scratch memory, Mode 8, page 0-3 overlaps mode
1987 * independent scratch memory, pages 0-3.
1988 * - 896 bytes of mode dependent scratch, 96 bytes per Modes 0-7, and
1989 * 128 bytes in mode 8,
1990 * - 259 bytes of mode independent scratch, common to modes 0-15.
1991 *
1992 * Sequencer scratch RAM is 1024 bytes. This scratch memory is
1993 * divided into mode dependent and mode independent scratch with this
1994 * memory further subdivided into pages of size 32 bytes. There are 5
1995 * pages (160 bytes) of mode independent scratch and 3 pages of
1996 * dependent scratch memory for modes 0-7 (768 bytes). Mode 8 pages
1997 * 0-2 dependent scratch overlap with pages 0-2 of mode independent
1998 * scratch memory.
1999 *
2000 * The host accesses this scratch in a different manner from the
2001 * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE
2002 * and CMnSCRPAGE to access the scratch memory. A flat mapping of the
2003 * scratch memory is available for software convenience and to prevent
2004 * corruption while the sequencer is running. This memory is mapped
2005 * onto addresses 800h - BFFh, total of 400h bytes.
2006 *
2007 * These addresses are mapped as follows:
2008 *
2009 * 800h-83Fh Mode Dependent Scratch Mode 0 Pages 0-1
2010 * 840h-87Fh Mode Dependent Scratch Mode 1 Pages 0-1
2011 * 880h-8BFh Mode Dependent Scratch Mode 2 Pages 0-1
2012 * 8C0h-8FFh Mode Dependent Scratch Mode 3 Pages 0-1
2013 * 900h-93Fh Mode Dependent Scratch Mode 4 Pages 0-1
2014 * 940h-97Fh Mode Dependent Scratch Mode 5 Pages 0-1
2015 * 980h-9BFh Mode Dependent Scratch Mode 6 Pages 0-1
2016 * 9C0h-9FFh Mode Dependent Scratch Mode 7 Pages 0-1
2017 * A00h-A5Fh Mode Dependent Scratch Mode 8 Pages 0-2
2018 * Mode Independent Scratch Pages 0-2
2019 * A60h-A7Fh Mode Dependent Scratch Mode 8 Page 3
2020 * Mode Independent Scratch Page 3
2021 * A80h-AFFh Mode Independent Scratch Pages 4-7
2022 * B00h-B1Fh Mode Dependent Scratch Mode 0 Page 2
2023 * B20h-B3Fh Mode Dependent Scratch Mode 1 Page 2
2024 * B40h-B5Fh Mode Dependent Scratch Mode 2 Page 2
2025 * B60h-B7Fh Mode Dependent Scratch Mode 3 Page 2
2026 * B80h-B9Fh Mode Dependent Scratch Mode 4 Page 2
2027 * BA0h-BBFh Mode Dependent Scratch Mode 5 Page 2
2028 * BC0h-BDFh Mode Dependent Scratch Mode 6 Page 2
2029 * BE0h-BFFh Mode Dependent Scratch Mode 7 Page 2
2030 */
2031
2032/* General macros */
2033#define CSEQ_PAGE_SIZE 32 /* Scratch page size (in bytes) */
2034
2035/* All macros start with offsets from base + 0x800 (CMAPPEDSCR).
2036 * Mode dependent scratch page 0, mode 0.
2037 * For modes 1-7 you have to do arithmetic. */
2038#define CSEQ_LRM_SAVE_SINDEX (CMAPPEDSCR + 0x0000)
2039#define CSEQ_LRM_SAVE_SCBPTR (CMAPPEDSCR + 0x0002)
2040#define CSEQ_Q_LINK_HEAD (CMAPPEDSCR + 0x0004)
2041#define CSEQ_Q_LINK_TAIL (CMAPPEDSCR + 0x0006)
2042#define CSEQ_LRM_SAVE_SCRPAGE (CMAPPEDSCR + 0x0008)
2043
2044/* Mode dependent scratch page 0 mode 8 macros. */
2045#define CSEQ_RET_ADDR (CMAPPEDSCR + 0x0200)
2046#define CSEQ_RET_SCBPTR (CMAPPEDSCR + 0x0202)
2047#define CSEQ_SAVE_SCBPTR (CMAPPEDSCR + 0x0204)
2048#define CSEQ_EMPTY_TRANS_CTX (CMAPPEDSCR + 0x0206)
2049#define CSEQ_RESP_LEN (CMAPPEDSCR + 0x0208)
2050#define CSEQ_TMF_SCBPTR (CMAPPEDSCR + 0x020A)
2051#define CSEQ_GLOBAL_PREV_SCB (CMAPPEDSCR + 0x020C)
2052#define CSEQ_GLOBAL_HEAD (CMAPPEDSCR + 0x020E)
2053#define CSEQ_CLEAR_LU_HEAD (CMAPPEDSCR + 0x0210)
2054#define CSEQ_TMF_OPCODE (CMAPPEDSCR + 0x0212)
2055#define CSEQ_SCRATCH_FLAGS (CMAPPEDSCR + 0x0213)
2056#define CSEQ_HSB_SITE (CMAPPEDSCR + 0x021A)
2057#define CSEQ_FIRST_INV_SCB_SITE (CMAPPEDSCR + 0x021C)
2058#define CSEQ_FIRST_INV_DDB_SITE (CMAPPEDSCR + 0x021E)
2059
2060/* Mode dependent scratch page 1 mode 8 macros. */
2061#define CSEQ_LUN_TO_CLEAR (CMAPPEDSCR + 0x0220)
2062#define CSEQ_LUN_TO_CHECK (CMAPPEDSCR + 0x0228)
2063
2064/* Mode dependent scratch page 2 mode 8 macros */
2065#define CSEQ_HQ_NEW_POINTER (CMAPPEDSCR + 0x0240)
2066#define CSEQ_HQ_DONE_BASE (CMAPPEDSCR + 0x0248)
2067#define CSEQ_HQ_DONE_POINTER (CMAPPEDSCR + 0x0250)
2068#define CSEQ_HQ_DONE_PASS (CMAPPEDSCR + 0x0254)
2069
2070/* Mode independent scratch page 4 macros. */
2071#define CSEQ_Q_EXE_HEAD (CMAPPEDSCR + 0x0280)
2072#define CSEQ_Q_EXE_TAIL (CMAPPEDSCR + 0x0282)
2073#define CSEQ_Q_DONE_HEAD (CMAPPEDSCR + 0x0284)
2074#define CSEQ_Q_DONE_TAIL (CMAPPEDSCR + 0x0286)
2075#define CSEQ_Q_SEND_HEAD (CMAPPEDSCR + 0x0288)
2076#define CSEQ_Q_SEND_TAIL (CMAPPEDSCR + 0x028A)
2077#define CSEQ_Q_DMA2CHIM_HEAD (CMAPPEDSCR + 0x028C)
2078#define CSEQ_Q_DMA2CHIM_TAIL (CMAPPEDSCR + 0x028E)
2079#define CSEQ_Q_COPY_HEAD (CMAPPEDSCR + 0x0290)
2080#define CSEQ_Q_COPY_TAIL (CMAPPEDSCR + 0x0292)
2081#define CSEQ_REG0 (CMAPPEDSCR + 0x0294)
2082#define CSEQ_REG1 (CMAPPEDSCR + 0x0296)
2083#define CSEQ_REG2 (CMAPPEDSCR + 0x0298)
2084#define CSEQ_LINK_CTL_Q_MAP (CMAPPEDSCR + 0x029C)
2085#define CSEQ_MAX_CSEQ_MODE (CMAPPEDSCR + 0x029D)
2086#define CSEQ_FREE_LIST_HACK_COUNT (CMAPPEDSCR + 0x029E)
2087
2088/* Mode independent scratch page 5 macros. */
2089#define CSEQ_EST_NEXUS_REQ_QUEUE (CMAPPEDSCR + 0x02A0)
2090#define CSEQ_EST_NEXUS_REQ_COUNT (CMAPPEDSCR + 0x02A8)
2091#define CSEQ_Q_EST_NEXUS_HEAD (CMAPPEDSCR + 0x02B0)
2092#define CSEQ_Q_EST_NEXUS_TAIL (CMAPPEDSCR + 0x02B2)
2093#define CSEQ_NEED_EST_NEXUS_SCB (CMAPPEDSCR + 0x02B4)
2094#define CSEQ_EST_NEXUS_REQ_HEAD (CMAPPEDSCR + 0x02B6)
2095#define CSEQ_EST_NEXUS_REQ_TAIL (CMAPPEDSCR + 0x02B7)
2096#define CSEQ_EST_NEXUS_SCB_OFFSET (CMAPPEDSCR + 0x02B8)
2097
2098/* Mode independent scratch page 6 macros. */
2099#define CSEQ_INT_ROUT_RET_ADDR0 (CMAPPEDSCR + 0x02C0)
2100#define CSEQ_INT_ROUT_RET_ADDR1 (CMAPPEDSCR + 0x02C2)
2101#define CSEQ_INT_ROUT_SCBPTR (CMAPPEDSCR + 0x02C4)
2102#define CSEQ_INT_ROUT_MODE (CMAPPEDSCR + 0x02C6)
2103#define CSEQ_ISR_SCRATCH_FLAGS (CMAPPEDSCR + 0x02C7)
2104#define CSEQ_ISR_SAVE_SINDEX (CMAPPEDSCR + 0x02C8)
2105#define CSEQ_ISR_SAVE_DINDEX (CMAPPEDSCR + 0x02CA)
2106#define CSEQ_Q_MONIRTT_HEAD (CMAPPEDSCR + 0x02D0)
2107#define CSEQ_Q_MONIRTT_TAIL (CMAPPEDSCR + 0x02D2)
2108#define CSEQ_FREE_SCB_MASK (CMAPPEDSCR + 0x02D5)
2109#define CSEQ_BUILTIN_FREE_SCB_HEAD (CMAPPEDSCR + 0x02D6)
2110#define CSEQ_BUILTIN_FREE_SCB_TAIL (CMAPPEDSCR + 0x02D8)
2111#define CSEQ_EXTENDED_FREE_SCB_HEAD (CMAPPEDSCR + 0x02DA)
2112#define CSEQ_EXTENDED_FREE_SCB_TAIL (CMAPPEDSCR + 0x02DC)
2113
2114/* Mode independent scratch page 7 macros. */
2115#define CSEQ_EMPTY_REQ_QUEUE (CMAPPEDSCR + 0x02E0)
2116#define CSEQ_EMPTY_REQ_COUNT (CMAPPEDSCR + 0x02E8)
2117#define CSEQ_Q_EMPTY_HEAD (CMAPPEDSCR + 0x02F0)
2118#define CSEQ_Q_EMPTY_TAIL (CMAPPEDSCR + 0x02F2)
2119#define CSEQ_NEED_EMPTY_SCB (CMAPPEDSCR + 0x02F4)
2120#define CSEQ_EMPTY_REQ_HEAD (CMAPPEDSCR + 0x02F6)
2121#define CSEQ_EMPTY_REQ_TAIL (CMAPPEDSCR + 0x02F7)
2122#define CSEQ_EMPTY_SCB_OFFSET (CMAPPEDSCR + 0x02F8)
2123#define CSEQ_PRIMITIVE_DATA (CMAPPEDSCR + 0x02FA)
2124#define CSEQ_TIMEOUT_CONST (CMAPPEDSCR + 0x02FC)
2125
2126/***************************************************************************
2127* Link m Sequencer scratch RAM is 512 bytes.
2128* This scratch memory is divided into mode dependent and mode
2129* independent scratch with this memory further subdivided into
2130* pages of size 32 bytes. There are 4 pages (128 bytes) of
2131* mode independent scratch and 4 pages of dependent scratch
2132* memory for modes 0-2 (384 bytes).
2133*
2134* The host accesses this scratch in a different manner from the
2135* link sequencer. The sequencer has to use LSEQ registers
2136* LmSCRPAGE and LmMnSCRPAGE to access the scratch memory. A flat
2137* mapping of the scratch memory is available for software
2138* convenience and to prevent corruption while the sequencer is
2139* running. This memory is mapped onto addresses 800h - 9FFh.
2140*
2141* These addresses are mapped as follows:
2142*
2143* 800h-85Fh Mode Dependent Scratch Mode 0 Pages 0-2
2144* 860h-87Fh Mode Dependent Scratch Mode 0 Page 3
2145* Mode Dependent Scratch Mode 5 Page 0
2146* 880h-8DFh Mode Dependent Scratch Mode 1 Pages 0-2
2147* 8E0h-8FFh Mode Dependent Scratch Mode 1 Page 3
2148* Mode Dependent Scratch Mode 5 Page 1
2149* 900h-95Fh Mode Dependent Scratch Mode 2 Pages 0-2
2150* 960h-97Fh Mode Dependent Scratch Mode 2 Page 3
2151* Mode Dependent Scratch Mode 5 Page 2
2152* 980h-9DFh Mode Independent Scratch Pages 0-3
2153* 9E0h-9FFh Mode Independent Scratch Page 3
2154* Mode Dependent Scratch Mode 5 Page 3
2155*
2156****************************************************************************/
2157/* General macros */
2158#define LSEQ_MODE_SCRATCH_SIZE 0x80 /* Size of scratch RAM per mode */
2159#define LSEQ_PAGE_SIZE 0x20 /* Scratch page size (in bytes) */
2160#define LSEQ_MODE5_PAGE0_OFFSET 0x60
2161
2162/* Common mode dependent scratch page 0 macros for modes 0,1,2, and 5 */
2163/* Indexed using LSEQ_MODE_SCRATCH_SIZE * mode, for modes 0,1,2. */
2164#define LmSEQ_RET_ADDR(LinkNum) (LmSCRATCH(LinkNum) + 0x0000)
2165#define LmSEQ_REG0_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0002)
2166#define LmSEQ_MODE_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0004)
2167
2168/* Mode flag macros (byte 0) */
2169#define SAS_SAVECTX_OCCURRED 0x80
2170#define SAS_OOBSVC_OCCURRED 0x40
2171#define SAS_OOB_DEVICE_PRESENT 0x20
2172#define SAS_CFGHDR_OCCURRED 0x10
2173#define SAS_RCV_INTS_ARE_DISABLED 0x08
2174#define SAS_OOB_HOT_PLUG_CNCT 0x04
2175#define SAS_AWAIT_OPEN_CONNECTION 0x02
2176#define SAS_CFGCMPLT_OCCURRED 0x01
2177
2178/* Mode flag macros (byte 1) */
2179#define SAS_RLSSCB_OCCURRED 0x80
2180#define SAS_FORCED_HEADER_MISS 0x40
2181
2182#define LmSEQ_RET_ADDR2(LinkNum) (LmSCRATCH(LinkNum) + 0x0006)
2183#define LmSEQ_RET_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0008)
2184#define LmSEQ_OPCODE_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000B)
2185#define LmSEQ_DATA_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000C)
2186
2187/* Mode dependent scratch page 0 macros for mode 0 (non-common) */
2188/* Absolute offsets */
2189#define LmSEQ_FIRST_INV_DDB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x000E)
2190#define LmSEQ_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0010)
2191#define LmSEQ_RESP_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x0012)
2192#define LmSEQ_FIRST_INV_SCB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x0014)
2193#define LmSEQ_INTEN_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0016)
2194#define LmSEQ_LINK_RST_FRM_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x001A)
2195#define LmSEQ_LINK_RST_PROTOCOL(LinkNum) (LmSCRATCH(LinkNum) + 0x001B)
2196#define LmSEQ_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x001C)
2197#define LmSEQ_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x001D)
2198#define LmSEQ_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x001E)
2199
2200/* Mode dependent scratch page 0 macros for mode 1 (non-common) */
2201/* Absolute offsets */
2202#define LmSEQ_Q_XMIT_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x008E)
2203#define LmSEQ_M1_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0090)
2204#define LmSEQ_INI_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0092)
2205#define LmSEQ_FAILED_OPEN_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009A)
2206#define LmSEQ_XMIT_REQUEST_TYPE(LinkNum) (LmSCRATCH(LinkNum) + 0x009B)
2207#define LmSEQ_M1_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009C)
2208#define LmSEQ_M1_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x009D)
2209#define LmSEQ_M1_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x009E)
2210
2211/* Mode dependent scratch page 0 macros for mode 2 (non-common) */
2212#define LmSEQ_PORT_COUNTER(LinkNum) (LmSCRATCH(LinkNum) + 0x010E)
2213#define LmSEQ_PM_TABLE_PTR(LinkNum) (LmSCRATCH(LinkNum) + 0x0110)
2214#define LmSEQ_SATA_INTERLOCK_TMR_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0112)
2215#define LmSEQ_IP_BITL(LinkNum) (LmSCRATCH(LinkNum) + 0x0114)
2216#define LmSEQ_COPY_SMP_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0116)
2217#define LmSEQ_P0M2_OFFS1AH(LinkNum) (LmSCRATCH(LinkNum) + 0x011A)
2218
2219/* Mode dependent scratch page 0 macros for modes 4/5 (non-common) */
2220/* Absolute offsets */
2221#define LmSEQ_SAVED_OOB_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x006E)
2222#define LmSEQ_SAVED_OOB_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x006F)
2223#define LmSEQ_Q_LINK_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0070)
2224#define LmSEQ_LINK_RST_ERR(LinkNum) (LmSCRATCH(LinkNum) + 0x0072)
2225#define LmSEQ_SAVED_OOB_SIGNALS(LinkNum) (LmSCRATCH(LinkNum) + 0x0073)
2226#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074)
2227#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075)
2228#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076)
2229#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x007A)
2230#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C)
2231#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E)
2232
2233/* Mode dependent scratch page 1, mode 0 and mode 1 */
2234#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020)
2235#define LmSEQ_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0030)
2236#define LmSEQ_M1_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x00A0)
2237#define LmSEQ_M1_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x00B0)
2238
2239/* Mode dependent scratch page 1 macros for mode 2 */
2240/* Absolute offsets */
2241#define LmSEQ_INVALID_DWORD_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0120)
2242#define LmSEQ_DISPARITY_ERROR_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0124)
2243#define LmSEQ_LOSS_OF_SYNC_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0128)
2244
2245/* Mode dependent scratch page 1 macros for mode 4/5 */
2246#define LmSEQ_FRAME_TYPE_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E0)
2247#define LmSEQ_HASHED_DEST_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E1)
2248#define LmSEQ_HASHED_SRC_ADDR_MASK_PRINT(LinkNum) (LmSCRATCH(LinkNum) + 0x00E4)
2249#define LmSEQ_HASHED_SRC_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E5)
2250#define LmSEQ_NUM_FILL_BYTES_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00EB)
2251#define LmSEQ_TAG_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00F0)
2252#define LmSEQ_TARGET_PORT_XFER_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x00F2)
2253#define LmSEQ_DATA_OFFSET(LinkNum) (LmSCRATCH(LinkNum) + 0x00F4)
2254
2255/* Mode dependent scratch page 2 macros for mode 0 */
2256/* Absolute offsets */
2257#define LmSEQ_SMP_RCV_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0040)
2258#define LmSEQ_DEVICE_BITS(LinkNum) (LmSCRATCH(LinkNum) + 0x005B)
2259#define LmSEQ_SDB_DDB(LinkNum) (LmSCRATCH(LinkNum) + 0x005C)
2260#define LmSEQ_SDB_NUM_TAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x005E)
2261#define LmSEQ_SDB_CURR_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x005F)
2262
2263/* Mode dependent scratch page 2 macros for mode 1 */
2264/* Absolute offsets */
2265/* byte 0 bits 1-0 are domain select. */
2266#define LmSEQ_TX_ID_ADDR_FRAME(LinkNum) (LmSCRATCH(LinkNum) + 0x00C0)
2267#define LmSEQ_OPEN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00C8)
2268#define LmSEQ_SRST_AS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00CC)
2269#define LmSEQ_LAST_LOADED_SG_EL(LinkNum) (LmSCRATCH(LinkNum) + 0x00D4)
2270
2271/* Mode dependent scratch page 2 macros for mode 2 */
2272/* Absolute offsets */
2273#define LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0140)
2274#define LmSEQ_CLOSE_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0144)
2275#define LmSEQ_BREAK_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0148)
2276#define LmSEQ_DWS_RESET_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x014C)
2277#define LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(LinkNum) \
2278 (LmSCRATCH(LinkNum) + 0x0150)
2279#define LmSEQ_MCTL_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0154)
2280
2281/* Mode dependent scratch page 2 macros for mode 5 */
2282#define LmSEQ_COMINIT_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0160)
2283#define LmSEQ_RCV_ID_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0164)
2284#define LmSEQ_RCV_FIS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0168)
2285#define LmSEQ_DEV_PRES_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x016C)
2286
2287/* Mode dependent scratch page 3 macros for modes 0 and 1 */
2288/* None defined */
2289
2290/* Mode dependent scratch page 3 macros for modes 2 and 5 */
2291/* None defined */
2292
2293/* Mode Independent Scratch page 0 macros. */
2294#define LmSEQ_Q_TGTXFR_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0180)
2295#define LmSEQ_Q_TGTXFR_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x0182)
2296#define LmSEQ_LINK_NUMBER(LinkNum) (LmSCRATCH(LinkNum) + 0x0186)
2297#define LmSEQ_SCRATCH_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0187)
2298/*
2299 * Currently only bit 0, SAS_DWSAQD, is used.
2300 */
2301#define SAS_DWSAQD 0x01 /*
2302 * DWSSTATUS: DWSAQD
2303 * bit last read in ISR.
2304 */
2305#define LmSEQ_CONNECTION_STATE(LinkNum) (LmSCRATCH(LinkNum) + 0x0188)
2306/* Connection states (byte 0) */
2307#define SAS_WE_OPENED_CS 0x01
2308#define SAS_DEVICE_OPENED_CS 0x02
2309#define SAS_WE_SENT_DONE_CS 0x04
2310#define SAS_DEVICE_SENT_DONE_CS 0x08
2311#define SAS_WE_SENT_CLOSE_CS 0x10
2312#define SAS_DEVICE_SENT_CLOSE_CS 0x20
2313#define SAS_WE_SENT_BREAK_CS 0x40
2314#define SAS_DEVICE_SENT_BREAK_CS 0x80
2315/* Connection states (byte 1) */
2316#define SAS_OPN_TIMEOUT_OR_OPN_RJCT_CS 0x01
2317#define SAS_AIP_RECEIVED_CS 0x02
2318#define SAS_CREDIT_TIMEOUT_OCCURRED_CS 0x04
2319#define SAS_ACKNAK_TIMEOUT_OCCURRED_CS 0x08
2320#define SAS_SMPRSP_TIMEOUT_OCCURRED_CS 0x10
2321#define SAS_DONE_TIMEOUT_OCCURRED_CS 0x20
2322/* Connection states (byte 2) */
2323#define SAS_SMP_RESPONSE_RECEIVED_CS 0x01
2324#define SAS_INTLK_TIMEOUT_OCCURRED_CS 0x02
2325#define SAS_DEVICE_SENT_DMAT_CS 0x04
2326#define SAS_DEVICE_SENT_SYNCSRST_CS 0x08
2327#define SAS_CLEARING_AFFILIATION_CS 0x20
2328#define SAS_RXTASK_ACTIVE_CS 0x40
2329#define SAS_TXTASK_ACTIVE_CS 0x80
2330/* Connection states (byte 3) */
2331#define SAS_PHY_LOSS_OF_SIGNAL_CS 0x01
2332#define SAS_DWS_TIMER_EXPIRED_CS 0x02
2333#define SAS_LINK_RESET_NOT_COMPLETE_CS 0x04
2334#define SAS_PHY_DISABLED_CS 0x08
2335#define SAS_LINK_CTL_TASK_ACTIVE_CS 0x10
2336#define SAS_PHY_EVENT_TASK_ACTIVE_CS 0x20
2337#define SAS_DEVICE_SENT_ID_FRAME_CS 0x40
2338#define SAS_DEVICE_SENT_REG_FIS_CS 0x40
/* NOTE(review): the two 0x40 flags above share one bit — presumably the
 * same event bit reused for SAS (IDENTIFY address frame) vs. SATA
 * (register FIS) devices; confirm against the sequencer firmware docs. */
2339#define SAS_DEVICE_SENT_HARD_RESET_CS 0x80
2340#define SAS_PHY_IS_DOWN_FLAGS (SAS_PHY_LOSS_OF_SIGNAL_CS|\
2341 SAS_DWS_TIMER_EXPIRED_CS |\
2342 SAS_LINK_RESET_NOT_COMPLETE_CS|\
2343 SAS_PHY_DISABLED_CS)
2344
2345#define SAS_LINK_CTL_PHY_EVENT_FLAGS (SAS_LINK_CTL_TASK_ACTIVE_CS |\
2346 SAS_PHY_EVENT_TASK_ACTIVE_CS |\
2347 SAS_DEVICE_SENT_ID_FRAME_CS |\
2348 SAS_DEVICE_SENT_HARD_RESET_CS)
2349
2350#define LmSEQ_CONCTL(LinkNum) (LmSCRATCH(LinkNum) + 0x018C)
2351#define LmSEQ_CONSTAT(LinkNum) (LmSCRATCH(LinkNum) + 0x018E)
2352#define LmSEQ_CONNECTION_MODES(LinkNum) (LmSCRATCH(LinkNum) + 0x018F)
2353#define LmSEQ_REG1_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0192)
2354#define LmSEQ_REG2_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0194)
2355#define LmSEQ_REG3_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0196)
2356#define LmSEQ_REG0_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0198)
2357
2358/* Mode independent scratch page 1 macros. */
2359#define LmSEQ_EST_NEXUS_SCBPTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A0)
2360#define LmSEQ_EST_NEXUS_SCBPTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A2)
2361#define LmSEQ_EST_NEXUS_SCBPTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01A4)
2362#define LmSEQ_EST_NEXUS_SCBPTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01A6)
2363#define LmSEQ_EST_NEXUS_SCB_OPCODE0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A8)
2364#define LmSEQ_EST_NEXUS_SCB_OPCODE1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A9)
2365#define LmSEQ_EST_NEXUS_SCB_OPCODE2(LinkNum) (LmSCRATCH(LinkNum) + 0x01AA)
2366#define LmSEQ_EST_NEXUS_SCB_OPCODE3(LinkNum) (LmSCRATCH(LinkNum) + 0x01AB)
2367#define LmSEQ_EST_NEXUS_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01AC)
2368#define LmSEQ_EST_NEXUS_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AD)
2369#define LmSEQ_EST_NEXUS_BUF_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AE)
2370#define LmSEQ_TIMEOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01B8)
2371#define LmSEQ_ISR_SAVE_SINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BC)
2372#define LmSEQ_ISR_SAVE_DINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BE)
2373
2374/* Mode independent scratch page 2 macros. */
2375#define LmSEQ_EMPTY_SCB_PTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C0)
2376#define LmSEQ_EMPTY_SCB_PTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C2)
2377#define LmSEQ_EMPTY_SCB_PTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01C4)
2378#define LmSEQ_EMPTY_SCB_PTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01C6)
2379#define LmSEQ_EMPTY_SCB_OPCD0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C8)
2380#define LmSEQ_EMPTY_SCB_OPCD1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C9)
2381#define LmSEQ_EMPTY_SCB_OPCD2(LinkNum) (LmSCRATCH(LinkNum) + 0x01CA)
2382#define LmSEQ_EMPTY_SCB_OPCD3(LinkNum) (LmSCRATCH(LinkNum) + 0x01CB)
2383#define LmSEQ_EMPTY_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01CC)
2384#define LmSEQ_EMPTY_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CD)
2385#define LmSEQ_EMPTY_BUFS_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CE)
2386#define LmSEQ_ATA_SCR_REGS(LinkNum) (LmSCRATCH(LinkNum) + 0x01D4)
2387
2388/* Mode independent scratch page 3 macros. */
2389#define LmSEQ_DEV_PRES_TMR_TOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01E0)
2390#define LmSEQ_SATA_INTERLOCK_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E4)
2391#define LmSEQ_STP_SHUTDOWN_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E8)
2392#define LmSEQ_SRST_ASSERT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01EC)
2393#define LmSEQ_RCV_FIS_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F0)
2394#define LmSEQ_ONE_MILLISEC_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F4)
2395#define LmSEQ_TEN_MS_COMINIT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F8)
2396#define LmSEQ_SMP_RCV_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01FC)
2397
2398#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
new file mode 100644
index 000000000000..64d231712345
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -0,0 +1,785 @@
1/*
2 * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_SAS_H_
28#define _AIC94XX_SAS_H_
29
30#include <scsi/libsas.h>
31
32/* ---------- DDBs ---------- */
33/* DDBs are device descriptor blocks which describe a device in the
34 * domain that this sequencer can maintain low-level connections for
35 * us. They are 64 bytes.
36 */
37
38struct asd_ddb_ssp_smp_target_port {
/* DDB for an SSP/SMP target port. Fixed, packed hardware layout (64 bytes
 * per the DDB comment above); __le*/__be* types give the declared byte
 * order of each field. Fields named _r_* carry no meaning in this header —
 * presumably reserved/padding; do not reorder or resize any field. */
39 u8 conn_type; /* byte 0 */
40#define DDB_TP_CONN_TYPE 0x81 /* Initiator port and addr frame type 0x01 */
41
42 u8 conn_rate;
43 __be16 init_conn_tag;
44 u8 dest_sas_addr[8]; /* bytes 4-11 */
45
46 __le16 send_queue_head;
47 u8 sq_suspended;
48 u8 ddb_type; /* DDB_TYPE_TARGET */
49#define DDB_TYPE_UNUSED 0xFF
50#define DDB_TYPE_TARGET 0xFE
51#define DDB_TYPE_INITIATOR 0xFD
52#define DDB_TYPE_PM_PORT 0xFC
53
54 __le16 _r_a;
55 __be16 awt_def;
56
57 u8 compat_features; /* byte 20 */
58 u8 pathway_blocked_count;
59 __be16 arb_wait_time;
60 __be32 more_compat_features; /* byte 24 */
61
62 u8 conn_mask;
63 u8 flags; /* concurrent conn:2,2 and open:0(1) */
64#define CONCURRENT_CONN_SUPP 0x04
65#define OPEN_REQUIRED 0x01
66
67 u16 _r_b;
68 __le16 exec_queue_tail;
69 __le16 send_queue_tail;
70 __le16 sister_ddb;
71
72 __le16 _r_c;
73
74 u8 max_concurrent_conn;
75 u8 num_concurrent_conn;
76 u8 num_contexts;
77
78 u8 _r_d;
79
80 __le16 active_task_count;
81
82 u8 _r_e[9];
83
84 u8 itnl_reason; /* I_T nexus loss reason */
85
86 __le16 _r_f;
87
88 __le16 itnl_timeout;
89#define ITNL_TIMEOUT_CONST 0x7D0 /* 2 seconds */
90
91 __le32 itnl_timestamp;
92} __attribute__ ((packed));
93
94struct asd_ddb_stp_sata_target_port {
/* DDB for an STP/SATA target port. The leading fields parallel
 * asd_ddb_ssp_smp_target_port; the tail adds SATA-specific state
 * (NCQ tag allocation, SATA status bytes). Packed hardware layout —
 * do not reorder or resize any field. */
95 u8 conn_type; /* byte 0 */
96 u8 conn_rate;
97 __be16 init_conn_tag;
98 u8 dest_sas_addr[8]; /* bytes 4-11 */
99
100 __le16 send_queue_head;
101 u8 sq_suspended;
102 u8 ddb_type; /* DDB_TYPE_TARGET */
103
104 __le16 _r_a;
105
106 __be16 awt_def;
107 u8 compat_features; /* byte 20 */
108 u8 pathway_blocked_count;
109 __be16 arb_wait_time;
110 __be32 more_compat_features; /* byte 24 */
111
112 u8 conn_mask;
113 u8 flags; /* concurrent conn:2,2 and open:0(1) */
114#define SATA_MULTIPORT 0x80
115#define SUPPORTS_AFFIL 0x40
116#define STP_AFFIL_POL 0x20
117
118 u8 _r_b;
119 u8 flags2; /* STP close policy:0 */
120#define STP_CL_POL_NO_TX 0x00
121#define STP_CL_POL_BTW_CMDS 0x01
122
123 __le16 exec_queue_tail;
124 __le16 send_queue_tail;
125 __le16 sister_ddb;
126 __le16 ata_cmd_scbptr;
127 __le32 sata_tag_alloc_mask;
128 __le16 active_task_count;
129 __le16 _r_c;
130 __le32 sata_sactive; /* presumably shadows the SATA SActive register — confirm */
131 u8 num_sata_tags;
132 u8 sata_status;
133 u8 sata_ending_status;
134 u8 itnl_reason; /* I_T nexus loss reason */
135 __le16 ncq_data_scb_ptr;
136 __le16 itnl_timeout;
137 __le32 itnl_timestamp;
138} __attribute__ ((packed));
139
140/* This struct asd_ddb_init_port, describes the device descriptor block
141 * of an initiator port (when the sequencer is operating in target mode).
142 * Bytes [0,11] and [20,27] are from the OPEN address frame.
143 * The sequencer allocates an initiator port DDB entry.
144 * Packed hardware layout — do not reorder or resize any field.
145 */
145struct asd_ddb_init_port {
146 u8 conn_type; /* byte 0 */
147 u8 conn_rate;
148 __be16 init_conn_tag; /* BE */
149 u8 dest_sas_addr[8];
150 __le16 send_queue_head; /* LE, byte 12 */
151 u8 sq_suspended;
152 u8 ddb_type; /* DDB_TYPE_INITIATOR */
153 __le16 _r_a;
154 __be16 awt_def; /* BE */
155 u8 compat_features;
156 u8 pathway_blocked_count;
157 __be16 arb_wait_time; /* BE */
158 __be32 more_compat_features; /* BE */
159 u8 conn_mask;
160 u8 flags; /* == 5; bit meanings are not shown in this header — confirm */
161 u16 _r_b;
162 __le16 exec_queue_tail; /* execution queue tail */
163 __le16 send_queue_tail;
164 __le16 sister_ddb;
165 __le16 init_resp_timeout; /* initiator response timeout */
166 __le32 _r_c;
167 __le16 active_tasks; /* active task count */
168 __le16 init_list; /* initiator list link pointer */
169 __le32 _r_d;
170 u8 max_conn_to[3]; /* from Conn-Disc mode page, in us, LE */
171 u8 itnl_reason; /* I_T nexus loss reason */
172 __le16 bus_inact_to; /* from Conn-Disc mode page, in 100 us, LE */
173 __le16 itnl_to; /* from the Protocol Specific Port Ctrl MP */
174 __le32 itnl_timestamp;
175} __attribute__ ((packed));
176
177/* This struct asd_ddb_sata_tag, describes a look-up table to be used
178 * by the sequencers. SATA II, IDENTIFY DEVICE data, word 76, bit 8:
179 * NCQ support. This table is used by the sequencers to find the
180 * corresponding SCB, given a SATA II tag value.
181 */
182struct asd_ddb_sata_tag {
183 __le16 scb_pointer[32]; /* indexed by the SATA NCQ tag value (32 entries) */
184} __attribute__ ((packed));
185
186/* This struct asd_ddb_sata_pm_table, describes a port number to
187 * connection handle look-up table. SATA targets attached to a port
188 * multiplier require a 4-bit port number value. There is one DDB
189 * entry of this type for each SATA port multiplier (sister DDB).
190 * Given a SATA PM port number, this table gives us the SATA PM Port
191 * DDB of the SATA port multiplier port (i.e. the SATA target
192 * discovered on the port).
193 */
194struct asd_ddb_sata_pm_table {
195 __le16 ddb_pointer[16]; /* indexed by the 4-bit SATA PM port number */
196 __le16 _r_a[16]; /* presumably reserved — pads the entry */
197} __attribute__ ((packed));
198
199/* This struct asd_ddb_sata_pm_port, describes the SATA port multiplier
200 * port format DDB. Packed hardware layout — do not reorder or resize
201 * any field; _r_* fields are presumably reserved/padding.
201 */
202struct asd_ddb_sata_pm_port {
203 u8 _r_a[15];
204 u8 ddb_type;
205 u8 _r_b[13];
206 u8 pm_port_flags;
/* NOTE(review): PM_PORT_MASK selects the high nibble — presumably the
 * 4-bit PM port number (see asd_ddb_sata_pm_table); PM_PORT_SET looks
 * like a valid/in-use flag — confirm against the firmware interface. */
207#define PM_PORT_MASK 0xF0
208#define PM_PORT_SET 0x02
209 u8 _r_c[6];
210 __le16 sister_ddb;
211 __le16 ata_cmd_scbptr;
212 __le32 sata_tag_alloc_mask;
213 __le16 active_task_count;
214 __le16 parent_ddb;
215 __le32 sata_sactive;
216 u8 num_sata_tags;
217 u8 sata_status;
218 u8 sata_ending_status;
219 u8 _r_d[9];
220} __attribute__ ((packed));
221
222/* This struct asd_ddb_seq_shared, describes a DDB shared by the
223 * central and link sequencers. port_map_by_links is indexed phy
224 * number [0,7]; each byte is a bit mask of all the phys that are in
225 * the same port as the indexed phy.
226 */
struct asd_ddb_seq_shared {
	__le16 q_free_ddb_head;		/* head of the free DDB queue */
	__le16 q_free_ddb_tail;		/* tail of the free DDB queue */
	__le16 q_free_ddb_cnt;		/* number of free DDBs */
	__le16 q_used_ddb_head;		/* head of the used DDB queue */
	__le16 q_used_ddb_tail;		/* tail of the used DDB queue */
	__le16 shared_mem_lock;
	__le16 smp_conn_tag;
	__le16 est_nexus_buf_cnt;	/* establish-nexus buffer count */
	__le16 est_nexus_buf_thresh;	/* establish-nexus buffer threshold */
	u32    _r_a;			/* reserved */
	u8     settable_max_contexts;
	u8     _r_b[23];		/* reserved */
	u8     conn_not_active;
	u8     phy_is_up;		/* presumably a bit mask of up phys — confirm vs sequencer docs */
	u8     _r_c[8];			/* reserved */
	u8     port_map_by_links[8];	/* per-phy mask of phys in the same port (see above) */
} __attribute__ ((packed));
245
246/* ---------- SG Element ---------- */
247
248/* This struct sg_el, describes the hardware scatter gather buffer
249 * element. All entries are little endian. In an SCB, there are 2 of
250 * this, plus one more, called a link element of this indicating a
251 * sublist if needed.
252 *
253 * A link element has only the bus address set and the flags (DS) bit
254 * valid. The bus address points to the start of the sublist.
255 *
256 * If a sublist is needed, then that sublist should also include the 2
257 * sg_el embedded in the SCB, in which case next_sg_offset is 32,
258 * since sizeof(sg_el) = 16; EOS should be 1 and EOL 0 in this case.
259 */
struct sg_el {
	__le64 bus_addr;	/* bus address of data buffer (or sublist for a link element) */
	__le32 size;		/* buffer size in bytes */
	__le16 _r;		/* reserved */
	u8 next_sg_offs;	/* offset of the next SG element; 32 when sublist includes the 2 embedded sg_el */
	u8 flags;		/* DS (data space) bits 5:4, list (EOL/EOS) bits 7:6 */
#define ASD_SG_EL_DS_MASK   0x30
#define ASD_SG_EL_DS_OCM    0x10
#define ASD_SG_EL_DS_HM     0x00
#define ASD_SG_EL_LIST_MASK 0xC0
#define ASD_SG_EL_LIST_EOL  0x40	/* end of list */
#define ASD_SG_EL_LIST_EOS  0x80	/* end of sublist */
} __attribute__ ((packed));
273
274/* ---------- SCBs ---------- */
275
276/* An SCB (sequencer control block) is comprised of a common header
277 * and a task part, for a total of 128 bytes. All fields are in LE
278 * order, unless otherwise noted.
279 */
280
281/* This struct scb_header, defines the SCB header format.
282 */
struct scb_header {
	__le64 next_scb;	/* bus address of the next SCB in the list */
	__le16 index;		/* transaction context */
	u8     opcode;		/* one of the SCB opcodes defined below */
} __attribute__ ((packed));
288
289/* SCB opcodes: Execution queue
290 */
291#define INITIATE_SSP_TASK 0x00
292#define INITIATE_LONG_SSP_TASK 0x01
293#define INITIATE_BIDIR_SSP_TASK 0x02
294#define ABORT_TASK 0x03
295#define INITIATE_SSP_TMF 0x04
296#define SSP_TARG_GET_DATA 0x05
297#define SSP_TARG_GET_DATA_GOOD 0x06
298#define SSP_TARG_SEND_RESP 0x07
299#define QUERY_SSP_TASK 0x08
300#define INITIATE_ATA_TASK 0x09
301#define INITIATE_ATAPI_TASK 0x0a
302#define CONTROL_ATA_DEV 0x0b
303#define INITIATE_SMP_TASK 0x0c
304#define SMP_TARG_SEND_RESP 0x0f
305
306/* SCB opcodes: Send Queue
307 */
308#define SSP_TARG_SEND_DATA 0x40
309#define SSP_TARG_SEND_DATA_GOOD 0x41
310
311/* SCB opcodes: Link Queue
312 */
313#define CONTROL_PHY 0x80
314#define SEND_PRIMITIVE 0x81
315#define INITIATE_LINK_ADM_TASK 0x82
316
317/* SCB opcodes: other
318 */
319#define EMPTY_SCB 0xc0
320#define INITIATE_SEQ_ADM_TASK 0xc1
321#define EST_ICL_TARG_WINDOW 0xc2
322#define COPY_MEM 0xc3
323#define CLEAR_NEXUS 0xc4
324#define INITIATE_DDB_ADM_TASK 0xc6
325#define ESTABLISH_NEXUS_ESCB 0xd0
326
327#define LUN_SIZE 8
328
329/* See SAS spec, task IU
330 */
struct ssp_task_iu {
	u8     lun[LUN_SIZE];	/* BE */
	u16    _r_a;		/* reserved */
	u8     tmf;		/* task management function code */
	u8     _r_b;		/* reserved */
	__be16 tag;		/* BE */
	u8     _r_c[14];	/* reserved */
} __attribute__ ((packed));
339
340/* See SAS spec, command IU
341 */
struct ssp_command_iu {
	u8     lun[LUN_SIZE];
	u8     _r_a;		/* reserved */
	u8     efb_prio_attr;	  /* enable first burst, task prio & attr */
#define EFB_MASK        0x80
#define TASK_PRIO_MASK	0x78
#define TASK_ATTR_MASK  0x07

	u8    _r_b;		/* reserved */
	u8    add_cdb_len;	  /* in dwords, since bit 0,1 are reserved */
	union {
		u8     cdb[16];	/* inline CDB, used when it fits in 16 bytes */
		struct {
			__le64 long_cdb_addr;	  /* bus address, LE */
			__le32 long_cdb_size;	  /* LE */
			u8     _r_c[3];
			u8     eol_ds;		  /* eol:6,6, ds:5,4 */
		} long_cdb;	  /* sequencer extension */
	};
} __attribute__ ((packed));
362
/* See SAS spec, XFER_RDY IU. */
struct xfer_rdy_iu {
	__be32 requested_offset;  /* BE */
	__be32 write_data_len;	  /* BE */
	__be32 _r_a;		  /* reserved */
} __attribute__ ((packed));
368
369/* ---------- SCB tasks ---------- */
370
371/* This is both ssp_task and long_ssp_task
372 */
struct initiate_ssp_task {
	u8     proto_conn_rate;	  /* proto:6,4, conn_rate:3,0 */
	__le32 total_xfer_len;	  /* total bytes to transfer */
	struct ssp_frame_hdr  ssp_frame;
	struct ssp_command_iu ssp_cmd;
	__le16 sister_scb;	  /* 0xFFFF */
	__le16 conn_handle;	  /* index to DDB for the intended target */
	u8     data_dir;	  /* :1,0 */
#define DATA_DIR_NONE   0x00
#define DATA_DIR_IN     0x01
#define DATA_DIR_OUT    0x02
#define DATA_DIR_BYRECIPIENT 0x03

	u8     _r_a;		  /* reserved */
	u8     retry_count;
	u8     _r_b[5];		  /* reserved */
	struct sg_el sg_element[3]; /* 2 real and 1 link */
} __attribute__ ((packed));
391
392/* This defines both ata_task and atapi_task.
393 * ata: C bit of FIS should be 1,
394 * atapi: C bit of FIS should be 1, and command register should be 0xA0,
395 * to indicate a packet command.
396 */
struct initiate_ata_task {
	u8     proto_conn_rate;	  /* proto:6,4, conn_rate:3,0 */
	__le32 total_xfer_len;
	struct host_to_dev_fis fis;	/* register FIS sent to the device */
	__le32 data_offs;
	u8     atapi_packet[16];	/* valid for atapi_task only */
	u8     _r_a[12];	  /* reserved */
	__le16 sister_scb;
	__le16 conn_handle;
	u8     ata_flags;	  /* CSMI:6,6, DTM:4,4, QT:3,3, data dir:1,0 */
#define CSMI_TASK           0x40
#define DATA_XFER_MODE_DMA  0x10
#define ATA_Q_TYPE_MASK     0x08
#define ATA_Q_TYPE_UNTAGGED 0x00
#define ATA_Q_TYPE_NCQ      0x08

	u8     _r_b;		  /* reserved */
	u8     retry_count;
	u8     _r_c;		  /* reserved */
	u8     flags;
#define STP_AFFIL_POLICY   0x20
#define SET_AFFIL_POLICY   0x10
#define RET_PARTIAL_SGLIST 0x02

	u8     _r_d[3];		  /* reserved */
	struct sg_el sg_element[3];
} __attribute__ ((packed));
424
struct initiate_smp_task {
	u8     proto_conn_rate;	  /* proto:6,4, conn_rate:3,0 */
	u8     _r_a[40];	  /* reserved */
	struct sg_el smp_req;	  /* SG element for the SMP request frame */
	__le16 sister_scb;
	__le16 conn_handle;
	u8     _r_c[8];		  /* reserved */
	struct sg_el smp_resp;	  /* SG element for the SMP response frame */
	u8     _r_d[32];	  /* reserved */
} __attribute__ ((packed));
435
struct control_phy {
	u8 phy_id;		/* phy to operate on */
	u8 sub_func;		/* operation, one of the codes below */
#define DISABLE_PHY            0x00
#define ENABLE_PHY             0x01
#define RELEASE_SPINUP_HOLD    0x02
#define ENABLE_PHY_NO_SAS_OOB  0x03
#define ENABLE_PHY_NO_SATA_OOB 0x04
#define PHY_NO_OP              0x05
#define EXECUTE_HARD_RESET     0x81

	u8 func_mask;
	u8 speed_mask;		/* SAS/SATA speed disable mask */
	u8 hot_plug_delay;
	u8 port_type;
	u8 flags;
#define DEV_PRES_TIMER_OVERRIDE_ENABLE 0x01
#define DISABLE_PHY_IF_OOB_FAILS       0x02

	__le32 timeout_override;
	u8     link_reset_retries;
	u8     _r_a[47];	/* reserved */
	__le16 conn_handle;
	u8     _r_b[56];	/* reserved */
} __attribute__ ((packed));
461
struct control_ata_dev {
	u8     proto_conn_rate;	  /* proto:6,4, conn_rate:3,0 */
	__le32 _r_a;		  /* reserved */
	struct host_to_dev_fis fis;
	u8     _r_b[32];	  /* reserved */
	__le16 sister_scb;
	__le16 conn_handle;
	u8     ata_flags;	  /* 0 */
	u8     _r_c[55];	  /* reserved */
} __attribute__ ((packed));
472
/* Empty SCB: a list of empty data buffers (EDBs) handed to the
 * sequencer for it to deposit received frames/events into.
 */
struct empty_scb {
	u8     num_valid;	/* number of still-valid EDBs in eb[] */
	__le32 _r_a;		/* reserved */
#define ASD_EDBS_PER_SCB 7
/* header+data+CRC+DMA suffix data */
#define ASD_EDB_SIZE (24+1024+4+16)
	struct sg_el eb[ASD_EDBS_PER_SCB];
#define ELEMENT_NOT_VALID  0xC0
} __attribute__ ((packed));
482
struct initiate_link_adm {
	u8 phy_id;		/* phy to operate on */
	u8 sub_func;		/* operation, one of the codes below */
#define GET_LINK_ERROR_COUNT      0x00
#define RESET_LINK_ERROR_COUNT    0x01
#define ENABLE_NOTIFY_SPINUP_INTS 0x02

	u8     _r_a[57];	/* reserved */
	__le16 conn_handle;
	u8     _r_b[56];	/* reserved */
} __attribute__ ((packed));
494
struct copy_memory {
	u8     _r_a;		/* reserved */
	__le16 xfer_len;	/* bytes to copy */
	__le16 _r_b;		/* reserved */
	__le64 src_busaddr;	/* source bus address */
	u8     src_ds;		/* See definition of sg_el */
	u8     _r_c[45];	/* reserved */
	__le16 conn_handle;
	__le64 _r_d;		/* reserved */
	__le64 dest_busaddr;	/* destination bus address */
	u8     dest_ds;		/* See definition of sg_el */
	u8     _r_e[39];	/* reserved */
} __attribute__ ((packed));
508
struct abort_task {
	u8     proto_conn_rate;	  /* proto:6,4, conn_rate:3,0 */
	__le32 _r_a;		  /* reserved */
	struct ssp_frame_hdr ssp_frame;
	struct ssp_task_iu ssp_task;
	__le16 sister_scb;
	__le16 conn_handle;
	u8     flags;	/* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */
#define SUSPEND_DATA_TRANS 0x04

	u8     _r_b;		  /* reserved */
	u8     retry_count;
	u8     _r_c[5];		  /* reserved */
	__le16 index;  /* Transaction context of task to be queried */
	__le16 itnl_to;		  /* I_T nexus loss timeout */
	u8     _r_d[44];	  /* reserved */
} __attribute__ ((packed));
526
struct clear_nexus {
	u8     nexus;		/* scope of the clear operation, codes below */
#define NEXUS_ADAPTER  0x00
#define NEXUS_PORT     0x01
#define NEXUS_I_T      0x02
#define NEXUS_I_T_L    0x03
#define NEXUS_TAG      0x04
#define NEXUS_TRANS_CX 0x05
#define NEXUS_SATA_TAG 0x06
#define NEXUS_T_L      0x07
#define NEXUS_L        0x08
#define NEXUS_T_TAG    0x09

	__le32 _r_a;		/* reserved */
	u8     flags;		/* queue selection / tx control, codes below */
#define SUSPEND_TX 0x80
#define RESUME_TX  0x40
#define SEND_Q     0x04
#define EXEC_Q     0x02
#define NOTINQ     0x01

	u8     _r_b[3];		/* reserved */
	u8     conn_mask;
	u8     _r_c[19];	/* reserved */
	struct ssp_task_iu ssp_task; /* LUN and TAG */
	__le16 _r_d;		/* reserved */
	__le16 conn_handle;
	__le64 _r_e;		/* reserved */
	__le16 index;  /* Transaction context of task to be cleared */
	__le16 context;		  /* Clear nexus context */
	u8     _r_f[44];	/* reserved */
} __attribute__ ((packed));
559
struct initiate_ssp_tmf {
	u8     proto_conn_rate;	  /* proto:6,4, conn_rate:3,0 */
	__le32 _r_a;		  /* reserved */
	struct ssp_frame_hdr ssp_frame;
	struct ssp_task_iu ssp_task;	/* carries the TMF code */
	__le16 sister_scb;
	__le16 conn_handle;
	u8     flags;	/* itnl override and suspend data tx */
#define OVERRIDE_ITNL_TIMER  8

	u8     _r_b;		  /* reserved */
	u8     retry_count;
	u8     _r_c[5];		  /* reserved */
	__le16 index;  /* Transaction context of task to be queried */
	__le16 itnl_to;		  /* I_T nexus loss timeout */
	u8     _r_d[44];	  /* reserved */
} __attribute__ ((packed));
577
578/* Transmits an arbitrary primitive on the link.
579 * Used for NOTIFY and BROADCAST.
580 */
struct send_prim {
	u8     phy_id;		/* phy to transmit on */
	u8     wait_transmit; 	/* :0,0 */
	u8     xmit_flags;	/* size (high nibble) and timing (low) flags */
#define XMTPSIZE_MASK      0xF0
#define XMTPSIZE_SINGLE    0x10
#define XMTPSIZE_REPEATED  0x20
#define XMTPSIZE_CONT      0x20	/* NOTE(review): same value as XMTPSIZE_REPEATED — presumably intentional, confirm */
#define XMTPSIZE_TRIPLE    0x30
#define XMTPSIZE_REDUNDANT 0x60
#define XMTPSIZE_INF       0

#define XMTCONTEN          0x04
#define XMTPFRM            0x02	/* Transmit at the next frame boundary */
#define XMTPIMM            0x01	/* Transmit immediately */

	__le16 _r_a;		/* reserved */
	u8     prim[4];		/* K, D0, D1, D2 */
	u8     _r_b[50];	/* reserved */
	__le16 conn_handle;
	u8     _r_c[56];	/* reserved */
} __attribute__ ((packed));
603
604/* This describes both SSP Target Get Data and SSP Target Get Data And
605 * Send Good Response SCBs. Used when the sequencer is operating in
606 * target mode...
607 */
struct ssp_targ_get_data {
	u8     proto_conn_rate;	  /* proto:6,4, conn_rate:3,0 */
	__le32 total_xfer_len;
	struct ssp_frame_hdr ssp_frame;
	struct xfer_rdy_iu  xfer_rdy;
	u8     lun[LUN_SIZE];
	__le64 _r_a;		  /* reserved */
	__le16 sister_scb;
	__le16 conn_handle;
	u8     data_dir;	  /* 01b */
	u8     _r_b;		  /* reserved */
	u8     retry_count;
	u8     _r_c[5];		  /* reserved */
	struct sg_el sg_element[3];
} __attribute__ ((packed));
623
624/* ---------- The actual SCB struct ---------- */
625
/* An SCB is a fixed-size header followed by a union of all task
 * formats; header.opcode selects which union member is valid.
 */
struct scb {
	struct scb_header header;
	union {
		struct initiate_ssp_task ssp_task;
		struct initiate_ata_task ata_task;
		struct initiate_smp_task smp_task;
		struct control_phy       control_phy;
		struct control_ata_dev   control_ata_dev;
		struct empty_scb         escb;
		struct initiate_link_adm link_adm;
		struct copy_memory       cp_mem;
		struct abort_task        abort_task;
		struct clear_nexus       clear_nexus;
		struct initiate_ssp_tmf  ssp_tmf;
	};
} __attribute__ ((packed));
642
643/* ---------- Done List ---------- */
644/* The done list entry opcode field is defined below.
645 * The mnemonic encoding and meaning is as follows:
646 * TC - Task Complete, status was received and acknowledged
647 * TF - Task Failed, indicates an error prior to receiving acknowledgment
648 * for the command:
649 * - no conn,
650 * - NACK or R_ERR received in response to this command,
651 * - credit blocked or not available, or in the case of SMP request,
652 * - no SMP response was received.
653 * In these four cases it is known that the target didn't receive the
654 * command.
655 * TI - Task Interrupted, error after the command was acknowledged. It is
656 * known that the command was received by the target.
657 * TU - Task Unacked, command was transmitted but neither ACK (R_OK) nor NAK
658 * (R_ERR) was received due to loss of signal, broken connection, loss of
659 * dword sync or other reason. The application client should send the
660 * appropriate task query.
661 * TA - Task Aborted, see TF.
662 * _RESP - The completion includes an empty buffer containing status.
663 * TO - Timeout.
664 */
665#define TC_NO_ERROR 0x00
666#define TC_UNDERRUN 0x01
667#define TC_OVERRUN 0x02
668#define TF_OPEN_TO 0x03
669#define TF_OPEN_REJECT 0x04
670#define TI_BREAK 0x05
671#define TI_PROTO_ERR 0x06
672#define TC_SSP_RESP 0x07
673#define TI_PHY_DOWN 0x08
674#define TF_PHY_DOWN 0x09
675#define TC_LINK_ADM_RESP 0x0a
676#define TC_CSMI 0x0b
677#define TC_ATA_RESP 0x0c
678#define TU_PHY_DOWN 0x0d
679#define TU_BREAK 0x0e
680#define TI_SATA_TO 0x0f
681#define TI_NAK 0x10
682#define TC_CONTROL_PHY 0x11
683#define TF_BREAK 0x12
684#define TC_RESUME 0x13
685#define TI_ACK_NAK_TO 0x14
686#define TF_SMPRSP_TO 0x15
687#define TF_SMP_XMIT_RCV_ERR 0x16
688#define TC_PARTIAL_SG_LIST 0x17
689#define TU_ACK_NAK_TO 0x18
690#define TU_SATA_TO 0x19
691#define TF_NAK_RECV 0x1a
692#define TA_I_T_NEXUS_LOSS 0x1b
693#define TC_ATA_R_ERR_RECV 0x1c
694#define TF_TMF_NO_CTX 0x1d
695#define TA_ON_REQ 0x1e
696#define TF_TMF_NO_TAG 0x1f
697#define TF_TMF_TAG_FREE 0x20
698#define TF_TMF_TASK_DONE 0x21
699#define TF_TMF_NO_CONN_HANDLE 0x22
700#define TC_TASK_CLEARED 0x23
701#define TI_SYNCS_RECV 0x24
702#define TU_SYNCS_RECV 0x25
703#define TF_IRTT_TO 0x26
704#define TF_NO_SMP_CONN 0x27
705#define TF_IU_SHORT 0x28
706#define TF_DATA_OFFS_ERR 0x29
707#define TF_INV_CONN_HANDLE 0x2a
708#define TF_REQUESTED_N_PENDING 0x2b
709
710/* 0xc1 - 0xc7: empty buffer received,
711 0xd1 - 0xd7: establish nexus empty buffer received
712*/
713/* This is the ESCB mask */
714#define ESCB_RECVD 0xC0
715
716
717/* This struct done_list_struct defines the done list entry.
718 * All fields are LE.
719 */
struct done_list_struct {
	__le16 index;		  /* aka transaction context */
	u8     opcode;		  /* completion code: see the TC_, TF_, TI_, TU_, TA_ codes above */
	u8     status_block[4];	  /* opcode-specific status bytes */
	u8     toggle;		  /* bit 0 */
#define DL_TOGGLE_MASK  0x01
} __attribute__ ((packed));
727
728/* ---------- PHYS ---------- */
729
/* Driver-private per-phy state wrapping the libsas phy. */
struct asd_phy {
	struct asd_sas_phy        sas_phy;	/* libsas view of this phy */
	struct asd_phy_desc   *phy_desc; /* hw profile */

	struct sas_identify_frame *identify_frame;	/* frame sent during OOB */
	struct asd_dma_tok  *id_frm_tok;		/* DMA token backing identify_frame */

	u8         frame_rcvd[ASD_EDB_SIZE];	/* copy of the last received frame */
};
739
740
741#define ASD_SCB_SIZE sizeof(struct scb)
742#define ASD_DDB_SIZE sizeof(struct asd_ddb_ssp_smp_target_port)
743
/* Define this to 0 if you do not want NOTIFY (ENABLE SPINUP) sent.
 * Default: 0x10 (it's a mask)
 */
747#define ASD_NOTIFY_ENABLE_SPINUP 0x10
748
749/* If enabled, set this to the interval between transmission
750 * of NOTIFY (ENABLE SPINUP). In units of 200 us.
751 */
752#define ASD_NOTIFY_TIMEOUT 2500
753
754/* Initial delay after OOB, before we transmit NOTIFY (ENABLE SPINUP).
755 * If 0, transmit immediately. In milliseconds.
756 */
757#define ASD_NOTIFY_DOWN_COUNT 0
758
759/* Device present timer timeout constant, 10 ms. */
760#define ASD_DEV_PRESENT_TIMEOUT 0x2710
761
762#define ASD_SATA_INTERLOCK_TIMEOUT 0
763
764/* How long to wait before shutting down an STP connection, unless
765 * an STP target sent frame(s). 50 usec.
766 * IGNORED by the sequencer (i.e. value 0 always).
767 */
768#define ASD_STP_SHUTDOWN_TIMEOUT 0x0
769
770/* ATA soft reset timer timeout. 5 usec. */
771#define ASD_SRST_ASSERT_TIMEOUT 0x05
772
773/* 31 sec */
774#define ASD_RCV_FIS_TIMEOUT 0x01D905C0
775
776#define ASD_ONE_MILLISEC_TIMEOUT 0x03e8
777
778/* COMINIT timer */
779#define ASD_TEN_MILLISEC_TIMEOUT 0x2710
780#define ASD_COMINIT_TIMEOUT ASD_TEN_MILLISEC_TIMEOUT
781
782/* 1 sec */
783#define ASD_SMP_RCV_TIMEOUT 0x000F4240
784
785#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
new file mode 100644
index 000000000000..7ee49b51b724
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -0,0 +1,758 @@
1/*
2 * Aic94xx SAS/SATA driver SCB management.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28
29#include "aic94xx.h"
30#include "aic94xx_reg.h"
31#include "aic94xx_hwi.h"
32#include "aic94xx_seq.h"
33
34#include "aic94xx_dump.h"
35
36/* ---------- EMPTY SCB ---------- */
37
38#define DL_PHY_MASK 7
39#define BYTES_DMAED 0
40#define PRIMITIVE_RECVD 0x08
41#define PHY_EVENT 0x10
42#define LINK_RESET_ERROR 0x18
43#define TIMER_EVENT 0x20
44#define REQ_TASK_ABORT 0xF0
45#define REQ_DEVICE_RESET 0xF1
46#define SIGNAL_NCQ_ERROR 0xF2
47#define CLEAR_NCQ_ERROR 0xF3
48
49#define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE \
50 | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
51 | CURRENT_OOB_ERROR)
52
53static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode)
54{
55 struct sas_phy *sas_phy = phy->sas_phy.phy;
56
57 switch (oob_mode & 7) {
58 case PHY_SPEED_60:
59 /* FIXME: sas transport class doesn't have this */
60 phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
61 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
62 break;
63 case PHY_SPEED_30:
64 phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
65 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
66 break;
67 case PHY_SPEED_15:
68 phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
69 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
70 break;
71 }
72 sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
73 sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
74 sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
75 sas_phy->maximum_linkrate = phy->phy_desc->max_sas_lrate;
76 sas_phy->minimum_linkrate = phy->phy_desc->min_sas_lrate;
77
78 if (oob_mode & SAS_MODE)
79 phy->sas_phy.oob_mode = SAS_OOB_MODE;
80 else if (oob_mode & SATA_MODE)
81 phy->sas_phy.oob_mode = SATA_OOB_MODE;
82}
83
/* asd_phy_event_tasklet -- handle a PHY_EVENT empty-SCB notification
 * @ascb: the empty SCB that carried the event
 * @dl: done list entry; status_block[0] holds the phy id,
 *      status_block[1] the OOB status bits, status_block[2] the OOB mode.
 *
 * Translates a sequencer phy event into the corresponding libsas
 * notification (signal loss, OOB done, spinup hold, or OOB error)
 * and updates the phy LED accordingly.
 */
static inline void asd_phy_event_tasklet(struct asd_ascb *ascb,
					 struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
	int phy_id = dl->status_block[0] & DL_PHY_MASK;
	struct asd_phy *phy = &asd_ha->phys[phy_id];

	u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS;
	u8 oob_mode   = dl->status_block[2];

	switch (oob_status) {
	case CURRENT_LOSS_OF_SIGNAL:
		/* directly attached device was removed */
		ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
		asd_turn_led(asd_ha, phy_id, 0);
		sas_phy_disconnected(&phy->sas_phy);
		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
		break;
	case CURRENT_OOB_DONE:
		/* hot plugged device */
		asd_turn_led(asd_ha, phy_id, 1);
		get_lrate_mode(phy, oob_mode);
		ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
			    phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
		break;
	case CURRENT_SPINUP_HOLD:
		/* hot plug SATA, no COMWAKE sent */
		asd_turn_led(asd_ha, phy_id, 1);
		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
		break;
	case CURRENT_GTO_TIMEOUT:
	case CURRENT_OOB_ERROR:
		ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id,
			    dl->status_block[1]);
		asd_turn_led(asd_ha, phy_id, 0);
		sas_phy_disconnected(&phy->sas_phy);
		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
		break;
	}
}
126
/* If phys are enabled sparsely, this will do the right thing. */
/* ord_phy -- return the ordinal of @phy among the enabled phys of @asd_ha.
 * Iterates the enabled-phy mask (via the project's for_each_phy macro)
 * counting enabled phys until @phy is found; returns 0 if not found.
 * Used to derive a unique per-phy offset for generated SATA SAS addresses.
 */
static inline unsigned ord_phy(struct asd_ha_struct *asd_ha,
			       struct asd_phy *phy)
{
	u8  enabled_mask = asd_ha->hw_prof.enabled_phys;
	int i, k = 0;

	for_each_phy(enabled_mask, enabled_mask, i) {
		if (&asd_ha->phys[i] == phy)
			return k;
		k++;
	}
	return 0;
}
141
142/**
143 * asd_get_attached_sas_addr -- extract/generate attached SAS address
144 * phy: pointer to asd_phy
145 * sas_addr: pointer to buffer where the SAS address is to be written
146 *
147 * This function extracts the SAS address from an IDENTIFY frame
148 * received. If OOB is SATA, then a SAS address is generated from the
149 * HA tables.
150 *
151 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
152 * buffer.
153 */
154static inline void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr)
155{
156 if (phy->sas_phy.frame_rcvd[0] == 0x34
157 && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
158 struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
159 /* FIS device-to-host */
160 u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr);
161
162 addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy);
163 *(__be64 *)sas_addr = cpu_to_be64(addr);
164 } else {
165 struct sas_identify_frame *idframe =
166 (void *) phy->sas_phy.frame_rcvd;
167 memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
168 }
169}
170
/* asd_bytes_dmaed_tasklet -- handle a BYTES_DMAED empty-SCB event
 * @ascb: the empty SCB that carried the event
 * @dl: done list entry; status_block[2..3] encode the received frame size
 * @edb_id: index of the empty data buffer holding the received frame
 * @phy_id: phy on which the frame was received
 *
 * Copies the received frame out of the EDB into the phy's frame buffer
 * (under frame_rcvd_lock), derives the attached SAS address, and tells
 * libsas that identify bytes arrived on this port.
 */
static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
					   struct done_list_struct *dl,
					   int edb_id, int phy_id)
{
	unsigned long flags;
	int edb_el = edb_id + ascb->edb_index;
	struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
	struct asd_phy *phy = &ascb->ha->phys[phy_id];
	struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
	/* frame size: 3 high bits from status_block[3], 8 low bits from [2] */
	u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];

	/* clamp to the phy's frame buffer so the memcpy cannot overflow */
	size = min(size, (u16) sizeof(phy->frame_rcvd));

	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size);
	phy->sas_phy.frame_rcvd_size = size;
	asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
	asd_dump_frame_rcvd(phy, dl);
	sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
}
192
/* asd_link_reset_err_tasklet -- handle a LINK_RESET_ERROR empty-SCB event
 * @ascb: the empty SCB that carried the event
 * @dl: done list entry; status_block[1] is the error code,
 *      status_block[2] the number of retries the sequencer has left
 *
 * Logs the link reset error, notifies libsas that the port saw a link
 * reset error, and — when the sequencer has exhausted its retries —
 * posts a CONTROL PHY (ENABLE_PHY) SCB to restart the link reset
 * sequence from the host side.
 */
static inline void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
					      struct done_list_struct *dl,
					      int phy_id)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
	u8 lr_error = dl->status_block[1];
	u8 retries_left = dl->status_block[2];

	switch (lr_error) {
	case 0:
		ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id);
		break;
	case 1:
		ASD_DPRINTK("phy%d: Loss of signal\n", phy_id);
		break;
	case 2:
		ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id);
		break;
	case 3:
		ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id);
		break;
	default:
		ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n",
			    phy_id, lr_error);
		break;
	}

	asd_turn_led(asd_ha, phy_id, 0);
	sas_phy_disconnected(sas_phy);
	sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);

	if (retries_left == 0) {
		int num = 1;
		/* tasklet context: must not sleep, hence GFP_ATOMIC */
		struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
							  GFP_ATOMIC);
		if (!cp) {
			asd_printk("%s: out of memory\n", __FUNCTION__);
			goto out;
		}
		ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
			    phy_id);
		asd_build_control_phy(cp, phy_id, ENABLE_PHY);
		if (asd_post_ascb_list(ascb->ha, cp, 1) != 0)
			asd_ascb_free(cp);
	}
out:
	;	/* a label must precede a statement */
}
243
/* asd_primitive_rcvd_tasklet -- handle a PRIMITIVE_RECVD empty-SCB event
 * @ascb: the empty SCB that carried the event
 * @dl: done list entry; status_block[1] is the primitive status register
 *      id (low 2 bits select the byte), status_block[2] the register value
 *
 * Decodes which SAS primitive was received (BROADCAST variants, HARD_RESET)
 * and forwards the corresponding port event to libsas.
 */
static inline void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
					      struct done_list_struct *dl,
					      int phy_id)
{
	unsigned long flags;
	struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
	u8 reg  = dl->status_block[1];
	/* shift the register value into byte position (reg & 3) */
	u32 cont = dl->status_block[2] << ((reg & 3)*8);

	reg &= ~3;
	switch (reg) {
	case LmPRMSTAT0BYTE0:
		switch (cont) {
		case LmBROADCH:
		case LmBROADRVCH0:
		case LmBROADRVCH1:
		case LmBROADSES:
			ASD_DPRINTK("phy%d: BROADCAST change received:%d\n",
				    phy_id, cont);
			/* record which primitive fired, under the prim lock */
			spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
			sas_phy->sas_prim = ffs(cont);
			spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
			sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD);
			break;

		case LmUNKNOWNP:
			ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id);
			break;

		default:
			ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
				    phy_id, reg, cont);
			break;
		}
		break;
	case LmPRMSTAT1BYTE0:
		switch (cont) {
		case LmHARDRST:
			ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n",
				    phy_id);
			/* The sequencer disables all phys on that port.
			 * We have to re-enable the phys ourselves. */
			sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
			break;

		default:
			ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
				    phy_id, reg, cont);
			break;
		}
		break;
	default:
		ASD_DPRINTK("unknown primitive register:0x%x\n",
			    dl->status_block[1]);
		break;
	}
}
302
303/**
304 * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB
305 * @ascb: pointer to Empty SCB
306 * @edb_id: index [0,6] to the empty data buffer which is to be invalidated
307 *
308 * After an EDB has been invalidated, if all EDBs in this ESCB have been
309 * invalidated, the ESCB is posted back to the sequencer.
310 * Context is tasklet/IRQ.
311 */
312void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
313{
314 struct asd_seq_data *seq = &ascb->ha->seq;
315 struct empty_scb *escb = &ascb->scb->escb;
316 struct sg_el *eb = &escb->eb[edb_id];
317 struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id];
318
319 memset(edb->vaddr, 0, ASD_EDB_SIZE);
320 eb->flags |= ELEMENT_NOT_VALID;
321 escb->num_valid--;
322
323 if (escb->num_valid == 0) {
324 int i;
325 /* ASD_DPRINTK("reposting escb: vaddr: 0x%p, "
326 "dma_handle: 0x%08llx, next: 0x%08llx, "
327 "index:%d, opcode:0x%02x\n",
328 ascb->dma_scb.vaddr,
329 (u64)ascb->dma_scb.dma_handle,
330 le64_to_cpu(ascb->scb->header.next_scb),
331 le16_to_cpu(ascb->scb->header.index),
332 ascb->scb->header.opcode);
333 */
334 escb->num_valid = ASD_EDBS_PER_SCB;
335 for (i = 0; i < ASD_EDBS_PER_SCB; i++)
336 escb->eb[i].flags = 0;
337 if (!list_empty(&ascb->list))
338 list_del_init(&ascb->list);
339 i = asd_post_escb_list(ascb->ha, ascb, 1);
340 if (i)
341 asd_printk("couldn't post escb, err:%d\n", i);
342 }
343}
344
/* escb_tasklet_complete -- completion handler for Empty SCBs
 * @ascb: the empty SCB whose done-list entry arrived
 * @dl: the done list entry
 *
 * Dispatches the event carried in the empty data buffer (BYTES_DMAED,
 * PRIMITIVE_RECVD, PHY_EVENT, LINK_RESET_ERROR, TIMER_EVENT, ...) to the
 * matching tasklet helper, then invalidates the consumed EDB (which
 * reposts the ESCB once all its EDBs are used).
 */
static void escb_tasklet_complete(struct asd_ascb *ascb,
				  struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
	int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */
	u8  sb_opcode = dl->status_block[0];
	int phy_id = sb_opcode & DL_PHY_MASK;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];

	if (edb > 6 || edb < 0) {
		/* out-of-range EDB index: dump diagnostics but fall through */
		ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
			    edb, dl->opcode);
		ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
			    sb_opcode, phy_id);
		ASD_DPRINTK("escb: vaddr: 0x%p, "
			    "dma_handle: 0x%llx, next: 0x%llx, "
			    "index:%d, opcode:0x%02x\n",
			    ascb->dma_scb.vaddr,
			    (unsigned long long)ascb->dma_scb.dma_handle,
			    (unsigned long long)
			    le64_to_cpu(ascb->scb->header.next_scb),
			    le16_to_cpu(ascb->scb->header.index),
			    ascb->scb->header.opcode);
	}

	/* low bits carried the phy id; the rest is the event code */
	sb_opcode &= ~DL_PHY_MASK;

	switch (sb_opcode) {
	case BYTES_DMAED:
		ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id);
		asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
		break;
	case PRIMITIVE_RECVD:
		ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__,
			    phy_id);
		asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
		break;
	case PHY_EVENT:
		ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id);
		asd_phy_event_tasklet(ascb, dl);
		break;
	case LINK_RESET_ERROR:
		ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__,
			    phy_id);
		asd_link_reset_err_tasklet(ascb, dl, phy_id);
		break;
	case TIMER_EVENT:
		ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
			    __FUNCTION__, phy_id);
		asd_turn_led(asd_ha, phy_id, 0);
		/* the device is gone */
		sas_phy_disconnected(sas_phy);
		sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
		break;
	case REQ_TASK_ABORT:
		ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
			    phy_id);
		break;
	case REQ_DEVICE_RESET:
		ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
			    phy_id);
		break;
	case SIGNAL_NCQ_ERROR:
		ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
			    phy_id);
		break;
	case CLEAR_NCQ_ERROR:
		ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
			    phy_id);
		break;
	default:
		ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
			    phy_id, sb_opcode);
		ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
			    edb, dl->opcode);
		ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
			    sb_opcode, phy_id);
		ASD_DPRINTK("escb: vaddr: 0x%p, "
			    "dma_handle: 0x%llx, next: 0x%llx, "
			    "index:%d, opcode:0x%02x\n",
			    ascb->dma_scb.vaddr,
			    (unsigned long long)ascb->dma_scb.dma_handle,
			    (unsigned long long)
			    le64_to_cpu(ascb->scb->header.next_scb),
			    le16_to_cpu(ascb->scb->header.index),
			    ascb->scb->header.opcode);

		break;
	}

	asd_invalidate_edb(ascb, edb);
}
438
439int asd_init_post_escbs(struct asd_ha_struct *asd_ha)
440{
441 struct asd_seq_data *seq = &asd_ha->seq;
442 int i;
443
444 for (i = 0; i < seq->num_escbs; i++)
445 seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete;
446
447 ASD_DPRINTK("posting %d escbs\n", i);
448 return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs);
449}
450
451/* ---------- CONTROL PHY ---------- */
452
453#define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE \
454 | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
455 | CURRENT_OOB_ERROR)
456
457/**
458 * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb
459 * @ascb: pointer to an ascb
460 * @dl: pointer to the done list entry
461 *
462 * This function completes a CONTROL PHY scb and frees the ascb.
463 * A note on LEDs:
464 * - an LED blinks if there is IO though it,
465 * - if a device is connected to the LED, it is lit,
466 * - if no device is connected to the LED, is is dimmed (off).
467 */
468static void control_phy_tasklet_complete(struct asd_ascb *ascb,
469 struct done_list_struct *dl)
470{
471 struct asd_ha_struct *asd_ha = ascb->ha;
472 struct scb *scb = ascb->scb;
473 struct control_phy *control_phy = &scb->control_phy;
474 u8 phy_id = control_phy->phy_id;
475 struct asd_phy *phy = &ascb->ha->phys[phy_id];
476
477 u8 status = dl->status_block[0];
478 u8 oob_status = dl->status_block[1];
479 u8 oob_mode = dl->status_block[2];
480 /* u8 oob_signals= dl->status_block[3]; */
481
482 if (status != 0) {
483 ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
484 __FUNCTION__, phy_id, status);
485 goto out;
486 }
487
488 switch (control_phy->sub_func) {
489 case DISABLE_PHY:
490 asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
491 asd_turn_led(asd_ha, phy_id, 0);
492 asd_control_led(asd_ha, phy_id, 0);
493 ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id);
494 break;
495
496 case ENABLE_PHY:
497 asd_control_led(asd_ha, phy_id, 1);
498 if (oob_status & CURRENT_OOB_DONE) {
499 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
500 get_lrate_mode(phy, oob_mode);
501 asd_turn_led(asd_ha, phy_id, 1);
502 ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
503 __FUNCTION__, phy_id,phy->sas_phy.linkrate,
504 phy->sas_phy.iproto);
505 } else if (oob_status & CURRENT_SPINUP_HOLD) {
506 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
507 asd_turn_led(asd_ha, phy_id, 1);
508 ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__,
509 phy_id);
510 } else if (oob_status & CURRENT_ERR_MASK) {
511 asd_turn_led(asd_ha, phy_id, 0);
512 ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
513 __FUNCTION__, phy_id, oob_status);
514 } else if (oob_status & (CURRENT_HOT_PLUG_CNCT
515 | CURRENT_DEVICE_PRESENT)) {
516 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
517 asd_turn_led(asd_ha, phy_id, 1);
518 ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
519 __FUNCTION__, phy_id);
520 } else {
521 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
522 asd_turn_led(asd_ha, phy_id, 0);
523 ASD_DPRINTK("%s: phy%d: no device present: "
524 "oob_status:0x%x\n",
525 __FUNCTION__, phy_id, oob_status);
526 }
527 break;
528 case RELEASE_SPINUP_HOLD:
529 case PHY_NO_OP:
530 case EXECUTE_HARD_RESET:
531 ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__,
532 phy_id, control_phy->sub_func);
533 /* XXX finish */
534 break;
535 default:
536 ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__,
537 phy_id, control_phy->sub_func);
538 break;
539 }
540out:
541 asd_ascb_free(ascb);
542}
543
/* Build the CONTROL PHY speed_mask from the phy descriptor's min/max
 * link rates.  A set bit DISABLES the corresponding rate.  Each switch
 * below deliberately relies on case fall-through: the first pair
 * enables every rate at or below the maximum, the second pair
 * re-disables the rates below the minimum.
 */
static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
{
	/* disable all speeds, then enable defaults */
	*speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS
		| SATA_SPEED_30_DIS | SATA_SPEED_15_DIS;

	switch (pd->max_sas_lrate) {
	case SAS_LINK_RATE_6_0_GBPS:
		*speed_mask &= ~SAS_SPEED_60_DIS;
		/* fall through */
	default:
	case SAS_LINK_RATE_3_0_GBPS:
		*speed_mask &= ~SAS_SPEED_30_DIS;
		/* fall through */
	case SAS_LINK_RATE_1_5_GBPS:
		*speed_mask &= ~SAS_SPEED_15_DIS;
	}

	switch (pd->min_sas_lrate) {
	case SAS_LINK_RATE_6_0_GBPS:
		*speed_mask |= SAS_SPEED_30_DIS;
		/* fall through */
	case SAS_LINK_RATE_3_0_GBPS:
		*speed_mask |= SAS_SPEED_15_DIS;
		/* fall through */
	default:
	case SAS_LINK_RATE_1_5_GBPS:
		/* nothing to do */
		;
	}

	switch (pd->max_sata_lrate) {
	case SAS_LINK_RATE_3_0_GBPS:
		*speed_mask &= ~SATA_SPEED_30_DIS;
		/* fall through */
	default:
	case SAS_LINK_RATE_1_5_GBPS:
		*speed_mask &= ~SATA_SPEED_15_DIS;
	}

	switch (pd->min_sata_lrate) {
	case SAS_LINK_RATE_3_0_GBPS:
		*speed_mask |= SATA_SPEED_15_DIS;
		/* fall through */
	default:
	case SAS_LINK_RATE_1_5_GBPS:
		/* nothing to do */
		;
	}
}
588
/**
 * asd_build_control_phy -- build a CONTROL PHY SCB
 * @ascb: pointer to an ascb
 * @phy_id: phy id to control, integer
 * @subfunc: subfunction, what to actually do to the phy
 *
 * This function builds a CONTROL PHY scb.  No allocation of any kind
 * is performed.  @ascb is allocated with the list function.
 * The caller can override the ascb->tasklet_complete to point
 * to its own callback function.  It must call asd_ascb_free()
 * at its tasklet complete function.
 * See the default implementation.
 */
void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
{
	struct asd_phy *phy = &ascb->ha->phys[phy_id];
	struct scb *scb = ascb->scb;
	struct control_phy *control_phy = &scb->control_phy;

	scb->header.opcode = CONTROL_PHY;
	control_phy->phy_id = (u8) phy_id;
	control_phy->sub_func = subfunc;

	switch (subfunc) {
	case EXECUTE_HARD_RESET:  /* 0x81 */
	case ENABLE_PHY:          /* 0x01 */
		/* decide hot plug delay */
		control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT;

		/* decide speed mask */
		set_speed_mask(&control_phy->speed_mask, phy->phy_desc);

		/* initiator port settings are in the hi nibble */
		if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
			control_phy->port_type = SAS_PROTO_ALL << 4;
		else if (phy->sas_phy.role == PHY_ROLE_TARGET)
			control_phy->port_type = SAS_PROTO_ALL;
		else
			control_phy->port_type =
				(SAS_PROTO_ALL << 4) | SAS_PROTO_ALL;

		/* link reset retries, this should be nominal */
		control_phy->link_reset_retries = 10;
		/* fall through -- func_mask below also applies to
		 * enable and hard reset */

	case RELEASE_SPINUP_HOLD: /* 0x02 */
		/* decide the func_mask */
		control_phy->func_mask = FUNCTION_MASK_DEFAULT;
		if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD)
			control_phy->func_mask &= ~SPINUP_HOLD_DIS;
		else
			control_phy->func_mask |= SPINUP_HOLD_DIS;
	}

	control_phy->conn_handle = cpu_to_le16(0xFFFF);

	ascb->tasklet_complete = control_phy_tasklet_complete;
}
646
647/* ---------- INITIATE LINK ADM TASK ---------- */
648
649static void link_adm_tasklet_complete(struct asd_ascb *ascb,
650 struct done_list_struct *dl)
651{
652 u8 opcode = dl->opcode;
653 struct initiate_link_adm *link_adm = &ascb->scb->link_adm;
654 u8 phy_id = link_adm->phy_id;
655
656 if (opcode != TC_NO_ERROR) {
657 asd_printk("phy%d: link adm task 0x%x completed with error "
658 "0x%x\n", phy_id, link_adm->sub_func, opcode);
659 }
660 ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n",
661 phy_id, link_adm->sub_func, opcode);
662
663 asd_ascb_free(ascb);
664}
665
666void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
667 u8 subfunc)
668{
669 struct scb *scb = ascb->scb;
670 struct initiate_link_adm *link_adm = &scb->link_adm;
671
672 scb->header.opcode = INITIATE_LINK_ADM_TASK;
673
674 link_adm->phy_id = phy_id;
675 link_adm->sub_func = subfunc;
676 link_adm->conn_handle = cpu_to_le16(0xFFFF);
677
678 ascb->tasklet_complete = link_adm_tasklet_complete;
679}
680
681/* ---------- SCB timer ---------- */
682
/**
 * asd_ascb_timedout -- called when a pending SCB's timer has expired
 * @data: unsigned long, a pointer to the ascb in question
 *
 * This is the default timeout function which does the most necessary.
 * Upper layers can implement their own timeout function, say to free
 * resources they have with this SCB, and then call this one at the
 * end of their timeout function.  To do this, one should initialize
 * the ascb->timer.{function, data, expires} prior to calling the post
 * function.  The timer is started by the post function.
 */
void asd_ascb_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;
	struct asd_seq_data *seq = &ascb->ha->seq;
	unsigned long flags;

	ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode);

	/* unlink from the pending queue under its lock */
	spin_lock_irqsave(&seq->pend_q_lock, flags);
	seq->pending--;
	list_del_init(&ascb->list);
	spin_unlock_irqrestore(&seq->pend_q_lock, flags);

	asd_ascb_free(ascb);
}
709
710/* ---------- CONTROL PHY ---------- */
711
/* Given the spec value, return a driver value.  Sparse table indexed
 * by enum phy_func; only the designated slots are meaningful, so
 * callers must not index it with any other function value.
 */
static const int phy_func_table[] = {
	[PHY_FUNC_NOP] = PHY_NO_OP,
	[PHY_FUNC_LINK_RESET] = ENABLE_PHY,
	[PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET,
	[PHY_FUNC_DISABLE] = DISABLE_PHY,
	[PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD,
};
720
721int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg)
722{
723 struct asd_ha_struct *asd_ha = phy->ha->lldd_ha;
724 struct asd_phy_desc *pd = asd_ha->phys[phy->id].phy_desc;
725 struct asd_ascb *ascb;
726 struct sas_phy_linkrates *rates;
727 int res = 1;
728
729 switch (func) {
730 case PHY_FUNC_CLEAR_ERROR_LOG:
731 return -ENOSYS;
732 case PHY_FUNC_SET_LINK_RATE:
733 rates = arg;
734 if (rates->minimum_linkrate) {
735 pd->min_sas_lrate = rates->minimum_linkrate;
736 pd->min_sata_lrate = rates->minimum_linkrate;
737 }
738 if (rates->maximum_linkrate) {
739 pd->max_sas_lrate = rates->maximum_linkrate;
740 pd->max_sata_lrate = rates->maximum_linkrate;
741 }
742 func = PHY_FUNC_LINK_RESET;
743 break;
744 default:
745 break;
746 }
747
748 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
749 if (!ascb)
750 return -ENOMEM;
751
752 asd_build_control_phy(ascb, phy->id, phy_func_table[func]);
753 res = asd_post_ascb_list(asd_ha, ascb , 1);
754 if (res)
755 asd_ascb_free(ascb);
756
757 return res;
758}
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
new file mode 100644
index 000000000000..83574b5b4e69
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -0,0 +1,1089 @@
1/*
2 * Aic94xx SAS/SATA driver access to shared data structures and memory
3 * maps.
4 *
5 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * This file is licensed under GPLv2.
9 *
10 * This file is part of the aic94xx driver.
11 *
12 * The aic94xx driver is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; version 2 of the
15 * License.
16 *
17 * The aic94xx driver is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with the aic94xx driver; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 *
26 */
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30
31#include "aic94xx.h"
32#include "aic94xx_reg.h"
33
34/* ---------- OCM stuff ---------- */
35
/* One entry in the on-chip memory (OCM) directory.  offs and size are
 * 24-bit little-endian values, decoded with THREE_TO_NUM below.
 */
struct asd_ocm_dir_ent {
	u8 type;
	u8 offs[3];
	u8 _r1;
	u8 size[3];
} __attribute__ ((packed));

/* OCM directory header at OCM offset 0.  The signature bytes are
 * checked as 'M','O' by asd_read_ocm_dir(); at most 15 entries.
 */
struct asd_ocm_dir {
	char sig[2];
	u8 _r1[2];
	u8 major;	   /* 0 */
	u8 minor;	   /* 0 */
	u8 _r2;
	u8 num_de;
	struct asd_ocm_dir_ent entry[15];
} __attribute__ ((packed));
52
53#define OCM_DE_OCM_DIR 0x00
54#define OCM_DE_WIN_DRVR 0x01
55#define OCM_DE_BIOS_CHIM 0x02
56#define OCM_DE_RAID_ENGN 0x03
57#define OCM_DE_BIOS_INTL 0x04
58#define OCM_DE_BIOS_CHIM_OSM 0x05
59#define OCM_DE_BIOS_CHIM_DYNAMIC 0x06
60#define OCM_DE_ADDC2C_RES0 0x07
61#define OCM_DE_ADDC2C_RES1 0x08
62#define OCM_DE_ADDC2C_RES2 0x09
63#define OCM_DE_ADDC2C_RES3 0x0A
64
65#define OCM_INIT_DIR_ENTRIES 5
/***************************************************************************
*  OCM directory default
***************************************************************************/
static struct asd_ocm_dir OCMDirInit =
{
	.sig = {0x4D, 0x4F},	/* signature: reads "MO" in memory order */
	.num_de = OCM_INIT_DIR_ENTRIES,	/* no. of directory entries */
};

/***************************************************************************
*  OCM directory entries default.  offs and size are 24-bit
*  little-endian byte arrays (see THREE_TO_NUM), e.g. {0, 4} == 0x400.
***************************************************************************/
static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] =
{
	{
		.type = (OCM_DE_ADDC2C_RES0),	/* Entry type */
		.offs = {128},			/* Offset */
		.size = {0, 4},			/* size */
	},
	{
		.type = (OCM_DE_ADDC2C_RES1),	/* Entry type */
		.offs = {128, 4},		/* Offset */
		.size = {0, 4},			/* size */
	},
	{
		.type = (OCM_DE_ADDC2C_RES2),	/* Entry type */
		.offs = {128, 8},		/* Offset */
		.size = {0, 4},			/* size */
	},
	{
		.type = (OCM_DE_ADDC2C_RES3),	/* Entry type */
		.offs = {128, 12},		/* Offset */
		.size = {0, 4},			/* size */
	},
	{
		.type = (OCM_DE_WIN_DRVR),	/* Entry type */
		.offs = {128, 16},		/* Offset */
		.size = {128, 235, 1},		/* size */
	},
};
106
/* BIOS_CHIM OCM segment header, written by the BIOS.  The signature is
 * accepted as either "SOIB" or "IPSA" (see asd_get_bios_chim); the
 * unit-element array of ue_num entries of ue_size bytes each follows
 * immediately after this header.
 */
struct asd_bios_chim_struct {
	char sig[4];
	u8 major;	   /* 1 */
	u8 minor;	   /* 0 */
	u8 bios_major;
	u8 bios_minor;
	__le32 bios_build;
	u8 flags;	   /* BC_BIOS_PRESENT indicates a BIOS is present */
	u8 pci_slot;
	__le16 ue_num;
	__le16 ue_size;
	u8 _r[14];
	/* The unit element array is right here.
	 */
} __attribute__ ((packed));
122
123/**
124 * asd_read_ocm_seg - read an on chip memory (OCM) segment
125 * @asd_ha: pointer to the host adapter structure
126 * @buffer: where to write the read data
127 * @offs: offset into OCM where to read from
128 * @size: how many bytes to read
129 *
130 * Return the number of bytes not read. Return 0 on success.
131 */
132static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
133 u32 offs, int size)
134{
135 u8 *p = buffer;
136 if (unlikely(asd_ha->iospace))
137 asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
138 else {
139 for ( ; size > 0; size--, offs++, p++)
140 *p = asd_read_ocm_byte(asd_ha, offs);
141 }
142 return size;
143}
144
145static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha,
146 struct asd_ocm_dir *dir, u32 offs)
147{
148 int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir));
149 if (err) {
150 ASD_DPRINTK("couldn't read ocm segment\n");
151 return err;
152 }
153
154 if (dir->sig[0] != 'M' || dir->sig[1] != 'O') {
155 ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n",
156 dir->sig[0], dir->sig[1]);
157 return -ENOENT;
158 }
159 if (dir->major != 0) {
160 asd_printk("unsupported major version of ocm dir:0x%x\n",
161 dir->major);
162 return -ENOENT;
163 }
164 dir->num_de &= 0xf;
165 return 0;
166}
167
168/**
169 * asd_write_ocm_seg - write an on chip memory (OCM) segment
170 * @asd_ha: pointer to the host adapter structure
171 * @buffer: where to read the write data
172 * @offs: offset into OCM to write to
173 * @size: how many bytes to write
174 *
175 * Return the number of bytes not written. Return 0 on success.
176 */
177static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
178 u32 offs, int size)
179{
180 u8 *p = buffer;
181 if (unlikely(asd_ha->iospace))
182 asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
183 else {
184 for ( ; size > 0; size--, offs++, p++)
185 asd_write_ocm_byte(asd_ha, offs, *p);
186 }
187 return;
188}
189
190#define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16))
191
192static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type,
193 u32 *offs, u32 *size)
194{
195 int i;
196 struct asd_ocm_dir_ent *ent;
197
198 for (i = 0; i < dir->num_de; i++) {
199 if (dir->entry[i].type == type)
200 break;
201 }
202 if (i >= dir->num_de)
203 return -ENOENT;
204 ent = &dir->entry[i];
205 *offs = (u32) THREE_TO_NUM(ent->offs);
206 *size = (u32) THREE_TO_NUM(ent->size);
207 return 0;
208}
209
#define OCM_BIOS_CHIM_DE  2
#define BC_BIOS_PRESENT	  1

/* Read the BIOS_CHIM segment out of the OCM: record BIOS presence and
 * version in the hw profile and load the trailing unit-element array
 * into hw_prof.ue.area.  Returns 0 on success, negative error
 * otherwise (the ue array is dropped on a partial read).
 */
static int asd_get_bios_chim(struct asd_ha_struct *asd_ha,
			     struct asd_ocm_dir *dir)
{
	int err;
	struct asd_bios_chim_struct *bc_struct;
	u32 offs, size;

	err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size);
	if (err) {
		ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n");
		goto out;
	}
	err = -ENOMEM;
	bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL);
	if (!bc_struct) {
		asd_printk("no memory for bios_chim struct\n");
		goto out;
	}
	err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs,
			       sizeof(*bc_struct));
	if (err) {
		ASD_DPRINTK("couldn't read ocm segment\n");
		goto out2;
	}
	/* either signature spelling is accepted */
	if (strncmp(bc_struct->sig, "SOIB", 4)
	    && strncmp(bc_struct->sig, "IPSA", 4)) {
		ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n",
			    bc_struct->sig[0], bc_struct->sig[1],
			    bc_struct->sig[2], bc_struct->sig[3]);
		err = -ENOENT;
		goto out2;
	}
	if (bc_struct->major != 1) {
		asd_printk("BIOS_CHIM unsupported major version:0x%x\n",
			   bc_struct->major);
		err = -ENOENT;
		goto out2;
	}
	if (bc_struct->flags & BC_BIOS_PRESENT) {
		asd_ha->hw_prof.bios.present = 1;
		asd_ha->hw_prof.bios.maj = bc_struct->bios_major;
		asd_ha->hw_prof.bios.min = bc_struct->bios_minor;
		asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build);
		ASD_DPRINTK("BIOS present (%d,%d), %d\n",
			    asd_ha->hw_prof.bios.maj,
			    asd_ha->hw_prof.bios.min,
			    asd_ha->hw_prof.bios.bld);
	}
	asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num);
	asd_ha->hw_prof.ue.size= le16_to_cpu(bc_struct->ue_size);
	ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num,
		    asd_ha->hw_prof.ue.size);
	/* the unit-element array immediately follows the header */
	size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size;
	if (size > 0) {
		err = -ENOMEM;
		asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL);
		if (!asd_ha->hw_prof.ue.area)
			goto out2;
		err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area,
				       offs + sizeof(*bc_struct), size);
		if (err) {
			/* partial read: discard the ue array entirely */
			kfree(asd_ha->hw_prof.ue.area);
			asd_ha->hw_prof.ue.area = NULL;
			asd_ha->hw_prof.ue.num = 0;
			asd_ha->hw_prof.ue.size = 0;
			ASD_DPRINTK("couldn't read ue entries(%d)\n", err);
		}
	}
out2:
	kfree(bc_struct);
out:
	return err;
}
286
287static void
288asd_hwi_initialize_ocm_dir (struct asd_ha_struct *asd_ha)
289{
290 int i;
291
292 /* Zero OCM */
293 for (i = 0; i < OCM_MAX_SIZE; i += 4)
294 asd_write_ocm_dword(asd_ha, i, 0);
295
296 /* Write Dir */
297 asd_write_ocm_seg(asd_ha, &OCMDirInit, 0,
298 sizeof(struct asd_ocm_dir));
299
300 /* Write Dir Entries */
301 for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++)
302 asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i],
303 sizeof(struct asd_ocm_dir) +
304 (i * sizeof(struct asd_ocm_dir_ent))
305 , sizeof(struct asd_ocm_dir_ent));
306
307}
308
309static int
310asd_hwi_check_ocm_access (struct asd_ha_struct *asd_ha)
311{
312 struct pci_dev *pcidev = asd_ha->pcidev;
313 u32 reg;
314 int err = 0;
315 u32 v;
316
317 /* check if OCM has been initialized by BIOS */
318 reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
319
320 if (!(reg & OCMINITIALIZED)) {
321 err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v);
322 if (err) {
323 asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n",
324 pci_name(pcidev));
325 goto out;
326 }
327
328 printk(KERN_INFO "OCM is not initialized by BIOS,"
329 "reinitialize it and ignore it, current IntrptStatus"
330 "is 0x%x\n", v);
331
332 if (v)
333 err = pci_write_config_dword(pcidev,
334 PCIC_INTRPT_STAT, v);
335 if (err) {
336 asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n",
337 pci_name(pcidev));
338 goto out;
339 }
340
341 asd_hwi_initialize_ocm_dir(asd_ha);
342
343 }
344out:
345 return err;
346}
347
348/**
349 * asd_read_ocm - read on chip memory (OCM)
350 * @asd_ha: pointer to the host adapter structure
351 */
352int asd_read_ocm(struct asd_ha_struct *asd_ha)
353{
354 int err;
355 struct asd_ocm_dir *dir;
356
357 if (asd_hwi_check_ocm_access(asd_ha))
358 return -1;
359
360 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
361 if (!dir) {
362 asd_printk("no memory for ocm dir\n");
363 return -ENOMEM;
364 }
365
366 err = asd_read_ocm_dir(asd_ha, dir, 0);
367 if (err)
368 goto out;
369
370 err = asd_get_bios_chim(asd_ha, dir);
371out:
372 kfree(dir);
373 return err;
374}
375
376/* ---------- FLASH stuff ---------- */
377
378#define FLASH_RESET 0xF0
379
380#define FLASH_SIZE 0x200000
381#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** "
382#define FLASH_NEXT_ENTRY_OFFS 0x2000
383#define FLASH_MAX_DIR_ENTRIES 32
384
385#define FLASH_DE_TYPE_MASK 0x3FFFFFFF
386#define FLASH_DE_MS 0x120
387#define FLASH_DE_CTRL_A_USER 0xE0
388
/* One flash directory entry; all multi-byte fields little-endian. */
struct asd_flash_de {
	__le32 type;
	__le32 offs;
	__le32 pad_size;
	__le32 image_size;
	__le32 chksum;
	u8 _r[12];
	u8 version[32];
} __attribute__ ((packed));

/* The flash directory, located by scanning flash for
 * FLASH_DIR_COOKIE (see asd_find_flash_dir).
 */
struct asd_flash_dir {
	u8 cookie[32];
	__le32 rev;		  /* 2 */
	__le32 chksum;
	__le32 chksum_antidote;
	__le32 bld;
	u8 bld_id[32];	  /* build id data */
	u8 ver_data[32];  /* date and time of build */
	__le32 ae_mask;
	__le32 v_mask;
	__le32 oc_mask;
	u8 _r[20];
	struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES];
} __attribute__ ((packed));

/* Manufacturing sector header.  The u16 fields are stored
 * little-endian in flash and byte-swapped to CPU order IN PLACE by
 * asd_validate_ms() -- that is why they are plain u16, not __le16.
 * The linked list of sub-segments starts at linked_list.
 */
struct asd_manuf_sec {
	char sig[2];	  /* 'S', 'M' */
	u16 offs_next;
	u8 maj;		  /* 0 */
	u8 min;		  /* 0 */
	u16 chksum;
	u16 size;
	u8 _r[6];
	u8 sas_addr[SAS_ADDR_SIZE];
	u8 pcba_sn[ASD_PCBA_SN_SIZE];
	/* Here start the other segments */
	u8 linked_list[0];
} __attribute__ ((packed));
427
/* Per-phy manufacturing settings; the phy_control_* bytes are raw
 * values for mode 5 registers 0x160-0x163, copied into the hw profile
 * by asd_ms_get_phy_params().
 */
struct asd_manuf_phy_desc {
	u8 state;	  /* low 4 bits */
#define MS_PHY_STATE_ENABLEABLE	0
#define MS_PHY_STATE_REPORTED	1
#define MS_PHY_STATE_HIDDEN	2
	u8 phy_id;
	u16 _r;
	u8 phy_control_0; /* mode 5 reg 0x160 */
	u8 phy_control_1; /* mode 5 reg 0x161 */
	u8 phy_control_2; /* mode 5 reg 0x162 */
	u8 phy_control_3; /* mode 5 reg 0x163 */
} __attribute__ ((packed));

/* 'P','M' phy-parameter sub-segment of the manufacturing sector. */
struct asd_manuf_phy_param {
	char sig[2];	  /* 'P', 'M' */
	u16 next;
	u8 maj;		  /* 0 */
	u8 min;		  /* 2 */
	u8 num_phy_desc;  /* 8 */
	u8 phy_desc_size; /* 8 */
	u8 _r[3];
	u8 usage_model_id;
	u32 _r2;
	struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS];
} __attribute__ ((packed));
453
454#if 0
455static const char *asd_sb_type[] = {
456 "unknown",
457 "SGPIO",
458 [2 ... 0x7F] = "unknown",
459 [0x80] = "ADPT_I2C",
460 [0x81 ... 0xFF] = "VENDOR_UNIQUExx"
461};
462#endif
463
/* Sideband descriptor inside a connector descriptor. */
struct asd_ms_sb_desc {
	u8 type;
	u8 node_desc_index;
	u8 conn_desc_index;
	u8 _recvd[0];
} __attribute__ ((packed));
470
471#if 0
472static const char *asd_conn_type[] = {
473 [0 ... 7] = "unknown",
474 "SFF8470",
475 "SFF8482",
476 "SFF8484",
477 [0x80] = "PCIX_DAUGHTER0",
478 [0x81] = "SAS_DAUGHTER0",
479 [0x82 ... 0xFF] = "VENDOR_UNIQUExx"
480};
481
482static const char *asd_conn_location[] = {
483 "unknown",
484 "internal",
485 "external",
486 "board_to_board",
487};
488#endif
489
/* Connector descriptor; trailing flexible array of sideband descs. */
struct asd_ms_conn_desc {
	u8 type;
	u8 location;
	u8 num_sideband_desc;
	u8 size_sideband_desc;
	u32 _resvd;
	u8 name[16];
	struct asd_ms_sb_desc sb_desc[0];
} __attribute__ ((packed));

/* Node phy descriptor; attachment-specific data follows inline. */
struct asd_nd_phy_desc {
	u8 vp_attch_type;
	u8 attch_specific[0];
} __attribute__ ((packed));
504
505#if 0
506static const char *asd_node_type[] = {
507 "IOP",
508 "IO_CONTROLLER",
509 "EXPANDER",
510 "PORT_MULTIPLIER",
511 "PORT_MULTIPLEXER",
512 "MULTI_DROP_I2C_BUS",
513};
514#endif
515
/* Node descriptor in the connector map; trailing phy descriptors. */
struct asd_ms_node_desc {
	u8 type;
	u8 num_phy_desc;
	u8 size_phy_desc;
	u8 _resvd;
	u8 name[16];
	struct asd_nd_phy_desc phy_desc[0];
} __attribute__ ((packed));

/* Connector map ('M','C') sub-segment header.
 * NOTE(review): conn_desc and node_desc are both zero-length arrays at
 * the same offset; presumably num_conn * conn_size bytes of connector
 * descriptors come first, followed by node descriptors -- confirm
 * before indexing node_desc directly.
 */
struct asd_ms_conn_map {
	char sig[2];	  /* 'M', 'C' */
	__le16 next;
	u8 maj;		  /* 0 */
	u8 min;		  /* 0 */
	__le16 cm_size;	  /* size of this struct */
	u8 num_conn;
	u8 conn_size;
	u8 num_nodes;
	u8 usage_model_id;
	u32 _resvd;
	struct asd_ms_conn_desc conn_desc[0];
	struct asd_ms_node_desc node_desc[0];
} __attribute__ ((packed));

/* Per-phy entry of the CTRL-A user phy settings segment. */
struct asd_ctrla_phy_entry {
	u8 sas_addr[SAS_ADDR_SIZE];
	u8 sas_link_rates;  /* max in hi bits, min in low bits */
	u8 flags;
	u8 sata_link_rates;
	u8 _r[5];
} __attribute__ ((packed));

/* CTRL-A phy settings segment header. */
struct asd_ctrla_phy_settings {
	u8 id0;		  /* P'h'y */
	u8 _r;
	u16 next;
	u8 num_phys;	  /* number of PHYs in the PCI function */
	u8 _r2[3];
	struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS];
} __attribute__ ((packed));

/* Generic linked-list element header walked by asd_find_ll_by_id(). */
struct asd_ll_el {
	u8 id0;
	u8 id1;
	__le16 next;	  /* offset of next element, relative to list base */
	u8 something_here[0];
} __attribute__ ((packed));
563
564static int asd_poll_flash(struct asd_ha_struct *asd_ha)
565{
566 int c;
567 u8 d;
568
569 for (c = 5000; c > 0; c--) {
570 d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
571 d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
572 if (!d)
573 return 0;
574 udelay(5);
575 }
576 return -ENOENT;
577}
578
579static int asd_reset_flash(struct asd_ha_struct *asd_ha)
580{
581 int err;
582
583 err = asd_poll_flash(asd_ha);
584 if (err)
585 return err;
586 asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET);
587 err = asd_poll_flash(asd_ha);
588
589 return err;
590}
591
/* Copy @size bytes from flash offset @offs into @buffer via a string
 * register read.  Always returns 0; kept int for symmetry with
 * asd_read_ocm_seg() at the call sites.
 */
static inline int asd_read_flash_seg(struct asd_ha_struct *asd_ha,
				     void *buffer, u32 offs, int size)
{
	asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs,
			    size);
	return 0;
}
599
600/**
601 * asd_find_flash_dir - finds and reads the flash directory
602 * @asd_ha: pointer to the host adapter structure
603 * @flash_dir: pointer to flash directory structure
604 *
605 * If found, the flash directory segment will be copied to
606 * @flash_dir. Return 1 if found, 0 if not.
607 */
608static int asd_find_flash_dir(struct asd_ha_struct *asd_ha,
609 struct asd_flash_dir *flash_dir)
610{
611 u32 v;
612 for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) {
613 asd_read_flash_seg(asd_ha, flash_dir, v,
614 sizeof(FLASH_DIR_COOKIE)-1);
615 if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE,
616 sizeof(FLASH_DIR_COOKIE)-1) == 0) {
617 asd_ha->hw_prof.flash.dir_offs = v;
618 asd_read_flash_seg(asd_ha, flash_dir, v,
619 sizeof(*flash_dir));
620 return 1;
621 }
622 }
623 return 0;
624}
625
626static int asd_flash_getid(struct asd_ha_struct *asd_ha)
627{
628 int err = 0;
629 u32 reg;
630
631 reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
632
633 if (!(reg & FLASHEX)) {
634 ASD_DPRINTK("flash doesn't exist\n");
635 return -ENOENT;
636 }
637 if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR,
638 &asd_ha->hw_prof.flash.bar)) {
639 asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n",
640 pci_name(asd_ha->pcidev));
641 return -ENOENT;
642 }
643 asd_ha->hw_prof.flash.present = 1;
644 asd_ha->hw_prof.flash.wide = reg & FLASHW ? 1 : 0;
645 err = asd_reset_flash(asd_ha);
646 if (err) {
647 ASD_DPRINTK("couldn't reset flash(%d)\n", err);
648 return err;
649 }
650 return 0;
651}
652
653static u16 asd_calc_flash_chksum(u16 *p, int size)
654{
655 u16 chksum = 0;
656
657 while (size-- > 0)
658 chksum += *p++;
659
660 return chksum;
661}
662
663
664static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type,
665 u32 *offs, u32 *size)
666{
667 int i;
668 struct asd_flash_de *de;
669
670 for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) {
671 u32 type = le32_to_cpu(flash_dir->dir_entry[i].type);
672
673 type &= FLASH_DE_TYPE_MASK;
674 if (type == entry_type)
675 break;
676 }
677 if (i >= FLASH_MAX_DIR_ENTRIES)
678 return -ENOENT;
679 de = &flash_dir->dir_entry[i];
680 *offs = le32_to_cpu(de->offs);
681 *size = le32_to_cpu(de->pad_size);
682 return 0;
683}
684
/* Validate the manufacturing sector header: signature 'S','M' and
 * major version 0.  Byte-swaps the u16 header fields from their
 * on-flash little-endian format to CPU order IN PLACE (hence the
 * __force casts), so this must run exactly once per buffer.  A bad
 * checksum is only warned about, not treated as fatal.
 */
static int asd_validate_ms(struct asd_manuf_sec *ms)
{
	if (ms->sig[0] != 'S' || ms->sig[1] != 'M') {
		ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n",
			    ms->sig[0], ms->sig[1]);
		return -ENOENT;
	}
	if (ms->maj != 0) {
		asd_printk("unsupported manuf. sector. major version:%x\n",
			   ms->maj);
		return -ENOENT;
	}
	ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next);
	ms->chksum = le16_to_cpu((__force __le16) ms->chksum);
	ms->size = le16_to_cpu((__force __le16) ms->size);

	/* the 16-bit word sum over the whole sector should be 0 */
	if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) {
		asd_printk("failed manuf sector checksum\n");
	}

	return 0;
}
707
/* Copy the adapter SAS address from the manufacturing sector into the
 * hw profile.  Always returns 0.
 */
static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha,
			       struct asd_manuf_sec *ms)
{
	memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE);
	return 0;
}
714
/* Copy the PCBA serial number and NUL-terminate it.  Writes index
 * ASD_PCBA_SN_SIZE, so hw_prof.pcba_sn must be at least
 * ASD_PCBA_SN_SIZE + 1 bytes -- TODO confirm against its declaration.
 * Always returns 0.
 */
static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha,
			      struct asd_manuf_sec *ms)
{
	memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE);
	asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0';
	return 0;
}
722
/**
 * asd_find_ll_by_id - find a linked list entry by its id
 * @start: void pointer to the first element in the linked list
 * @id0: the first byte of the id (offs 0)
 * @id1: the second byte of the id (offs 1)
 *
 * @start has to be the _base_ element start, since the
 * linked list entries's offset is from this pointer.
 * Some linked list entries use only the first id, in which case
 * you can pass 0xFF for the second.
 */
static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
{
	struct asd_ll_el *el = start;

	do {
		/* Deliberately unusual switch: if id1 == 0xFF, control
		 * jumps straight to the id0 comparison (only the first
		 * id byte is matched); any other id1 enters at default,
		 * matches el->id1, and on success falls into the same
		 * id0 comparison.
		 */
		switch (id1) {
		default:
			if (el->id1 == id1)
		case 0xFF:
				if (el->id0 == id0)
					return el;
		}
		/* next offsets are relative to @start; the list is
		 * circular, so wrapping back to @start terminates */
		el = start + le16_to_cpu(el->next);
	} while (el != start);

	return NULL;
}
751
/**
 * asd_ms_get_phy_params - get phy parameters from the manufacturing sector
 * @asd_ha: pointer to the host adapter structure
 * @manuf_sec: pointer to the manufacturing sector
 *
 * The manufacturing sector contains also the linked list of sub-segments,
 * since when it was read, its size was taken from the flash directory,
 * not from the structure size.
 *
 * HIDDEN phys do not count in the total count.  REPORTED phys cannot
 * be enabled but are reported and counted towards the total.
 * ENABLEABLE phys are enabled by default and count towards the total.
 * The absolute total phy number is ASD_MAX_PHYS.  hw_prof->num_phys
 * merely specifies the number of phys the host adapter decided to
 * report.  E.g., it is possible for phys 0, 1 and 2 to be HIDDEN,
 * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLEABLE.
 * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2
 * are actually enabled (enabled by default, max number of phys
 * enableable in this case).
 */
static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha,
				 struct asd_manuf_sec *manuf_sec)
{
	int i;
	int en_phys = 0;
	int rep_phys = 0;
	struct asd_manuf_phy_param *phy_param;
	struct asd_manuf_phy_param dflt_phy_param;

	phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M');
	if (!phy_param) {
		/* no 'PM' sub-segment: build a default parameter set */
		ASD_DPRINTK("ms: no phy parameters found\n");
		ASD_DPRINTK("ms: Creating default phy parameters\n");
		dflt_phy_param.sig[0] = 'P';
		dflt_phy_param.sig[1] = 'M';
		dflt_phy_param.maj = 0;
		dflt_phy_param.min = 2;
		dflt_phy_param.num_phy_desc = 8;
		dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc);
		for (i =0; i < ASD_MAX_PHYS; i++) {
			/* state 0 == MS_PHY_STATE_ENABLEABLE */
			dflt_phy_param.phy_desc[i].state = 0;
			dflt_phy_param.phy_desc[i].phy_id = i;
			dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6;
			dflt_phy_param.phy_desc[i].phy_control_1 = 0x10;
			dflt_phy_param.phy_desc[i].phy_control_2 = 0x43;
			dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb;
		}

		phy_param = &dflt_phy_param;

	}

	if (phy_param->maj != 0) {
		asd_printk("unsupported manuf. phy param major version:0x%x\n",
			   phy_param->maj);
		return -ENOENT;
	}

	ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc);
	asd_ha->hw_prof.enabled_phys = 0;
	for (i = 0; i < phy_param->num_phy_desc; i++) {
		struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i];
		switch (pd->state & 0xF) {
		case MS_PHY_STATE_HIDDEN:
			/* hidden phys get no settings recorded at all */
			ASD_DPRINTK("ms: phy%d: HIDDEN\n", i);
			continue;
		case MS_PHY_STATE_REPORTED:
			ASD_DPRINTK("ms: phy%d: REPORTED\n", i);
			asd_ha->hw_prof.enabled_phys &= ~(1 << i);
			rep_phys++;
			continue;
		case MS_PHY_STATE_ENABLEABLE:
			ASD_DPRINTK("ms: phy%d: ENEBLEABLE\n", i);
			asd_ha->hw_prof.enabled_phys |= (1 << i);
			en_phys++;
			break;
		}
		/* only ENABLEABLE phys fall through to have their mode 5
		 * phy control register values recorded */
		asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0;
		asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1;
		asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2;
		asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3;
	}
	asd_ha->hw_prof.max_phys = rep_phys + en_phys;
	asd_ha->hw_prof.num_phys = en_phys;
	ASD_DPRINTK("ms: max_phys:0x%x, num_phys:0x%x\n",
		    asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys);
	ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys);
	return 0;
}
841
842static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha,
843 struct asd_manuf_sec *manuf_sec)
844{
845 struct asd_ms_conn_map *cm;
846
847 cm = asd_find_ll_by_id(manuf_sec, 'M', 'C');
848 if (!cm) {
849 ASD_DPRINTK("ms: no connector map found\n");
850 return 0;
851 }
852
853 if (cm->maj != 0) {
854 ASD_DPRINTK("ms: unsupported: connector map major version 0x%x"
855 "\n", cm->maj);
856 return -ENOENT;
857 }
858
859 /* XXX */
860
861 return 0;
862}
863
864
/**
 * asd_process_ms - find and extract information from the manufacturing sector
 * @asd_ha: pointer to the host adapter structure
 * @flash_dir: pointer to the flash directory
 *
 * Reads the manufacturing sector from flash into a temporary buffer,
 * validates it, then extracts the SAS address, the PCBA serial number,
 * the phy parameters and the connector map into asd_ha->hw_prof.
 * Returns 0 on success, negative error code on failure.
 */
static int asd_process_ms(struct asd_ha_struct *asd_ha,
			  struct asd_flash_dir *flash_dir)
{
	int err;
	struct asd_manuf_sec *manuf_sec;
	u32 offs, size;

	/* Locate the manufacturing sector entry in the flash directory. */
	err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size);
	if (err) {
		ASD_DPRINTK("Couldn't find the manuf. sector\n");
		goto out;
	}

	/* Empty sector: nothing to parse; err is still 0 here. */
	if (size == 0)
		goto out;

	err = -ENOMEM;
	manuf_sec = kmalloc(size, GFP_KERNEL);
	if (!manuf_sec) {
		ASD_DPRINTK("no mem for manuf sector\n");
		goto out;
	}

	/* Pull the whole sector into the buffer, then parse it below. */
	err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size);
	if (err) {
		ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n",
			    offs, size);
		goto out2;
	}

	err = asd_validate_ms(manuf_sec);
	if (err) {
		ASD_DPRINTK("couldn't validate manuf sector\n");
		goto out2;
	}

	err = asd_ms_get_sas_addr(asd_ha, manuf_sec);
	if (err) {
		ASD_DPRINTK("couldn't read the SAS_ADDR\n");
		goto out2;
	}
	ASD_DPRINTK("manuf sect SAS_ADDR %llx\n",
		    SAS_ADDR(asd_ha->hw_prof.sas_addr));

	err = asd_ms_get_pcba_sn(asd_ha, manuf_sec);
	if (err) {
		ASD_DPRINTK("couldn't read the PCBA SN\n");
		goto out2;
	}
	ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn);

	err = asd_ms_get_phy_params(asd_ha, manuf_sec);
	if (err) {
		ASD_DPRINTK("ms: couldn't get phy parameters\n");
		goto out2;
	}

	err = asd_ms_get_connector_map(asd_ha, manuf_sec);
	if (err) {
		ASD_DPRINTK("ms: couldn't get connector map\n");
		goto out2;
	}

out2:
	kfree(manuf_sec);
out:
	return err;
}
938
/* Apply per-phy CTRL-A settings to the host adapter profile.  Phys not
 * set in hw_prof.enabled_phys are skipped; a phy whose SAS address is
 * all zeroes is removed from enabled_phys.  Always returns 0.
 */
static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha,
					  struct asd_ctrla_phy_settings *ps)
{
	int i;
	for (i = 0; i < ps->num_phys; i++) {
		struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i];

		if (!PHY_ENABLED(asd_ha, i))
			continue;
		/* NOTE(review): reads the 8-byte sas_addr field through a
		 * u64 pointer; assumes suitable alignment of the entry --
		 * confirm.  Only the all-zero test is done, so byte order
		 * does not matter here. */
		if (*(u64 *)pe->sas_addr == 0) {
			asd_ha->hw_prof.enabled_phys &= ~(1 << i);
			continue;
		}
		/* This is the SAS address which should be sent in IDENTIFY. */
		memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr,
		       SAS_ADDR_SIZE);
		/* Link-rate bytes pack max rate in the high nibble and min
		 * rate in the low nibble. */
		asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
			(pe->sas_link_rates & 0xF0) >> 4;
		asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
			(pe->sas_link_rates & 0x0F);
		asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
			(pe->sata_link_rates & 0xF0) >> 4;
		asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
			(pe->sata_link_rates & 0x0F);
		asd_ha->hw_prof.phy_desc[i].flags = pe->flags;
		ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x,"
			    " sata rate:0x%x-0x%x, flags:0x%x\n",
			    i,
			    SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr),
			    asd_ha->hw_prof.phy_desc[i].max_sas_lrate,
			    asd_ha->hw_prof.phy_desc[i].min_sas_lrate,
			    asd_ha->hw_prof.phy_desc[i].max_sata_lrate,
			    asd_ha->hw_prof.phy_desc[i].min_sata_lrate,
			    asd_ha->hw_prof.phy_desc[i].flags);
	}

	return 0;
}
977
978/**
979 * asd_process_ctrl_a_user - process CTRL-A user settings
980 * @asd_ha: pointer to the host adapter structure
981 * @flash_dir: pointer to the flash directory
982 */
983static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
984 struct asd_flash_dir *flash_dir)
985{
986 int err, i;
987 u32 offs, size;
988 struct asd_ll_el *el;
989 struct asd_ctrla_phy_settings *ps;
990 struct asd_ctrla_phy_settings dflt_ps;
991
992 err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size);
993 if (err) {
994 ASD_DPRINTK("couldn't find CTRL-A user settings section\n");
995 ASD_DPRINTK("Creating default CTRL-A user settings section\n");
996
997 dflt_ps.id0 = 'h';
998 dflt_ps.num_phys = 8;
999 for (i =0; i < ASD_MAX_PHYS; i++) {
1000 memcpy(dflt_ps.phy_ent[i].sas_addr,
1001 asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
1002 dflt_ps.phy_ent[i].sas_link_rates = 0x98;
1003 dflt_ps.phy_ent[i].flags = 0x0;
1004 dflt_ps.phy_ent[i].sata_link_rates = 0x0;
1005 }
1006
1007 size = sizeof(struct asd_ctrla_phy_settings);
1008 ps = &dflt_ps;
1009 }
1010
1011 if (size == 0)
1012 goto out;
1013
1014 err = -ENOMEM;
1015 el = kmalloc(size, GFP_KERNEL);
1016 if (!el) {
1017 ASD_DPRINTK("no mem for ctrla user settings section\n");
1018 goto out;
1019 }
1020
1021 err = asd_read_flash_seg(asd_ha, (void *)el, offs, size);
1022 if (err) {
1023 ASD_DPRINTK("couldn't read ctrla phy settings section\n");
1024 goto out2;
1025 }
1026
1027 err = -ENOENT;
1028 ps = asd_find_ll_by_id(el, 'h', 0xFF);
1029 if (!ps) {
1030 ASD_DPRINTK("couldn't find ctrla phy settings struct\n");
1031 goto out2;
1032 }
1033
1034 err = asd_process_ctrla_phy_settings(asd_ha, ps);
1035 if (err) {
1036 ASD_DPRINTK("couldn't process ctrla phy settings\n");
1037 goto out2;
1038 }
1039out2:
1040 kfree(el);
1041out:
1042 return err;
1043}
1044
1045/**
1046 * asd_read_flash - read flash memory
1047 * @asd_ha: pointer to the host adapter structure
1048 */
1049int asd_read_flash(struct asd_ha_struct *asd_ha)
1050{
1051 int err;
1052 struct asd_flash_dir *flash_dir;
1053
1054 err = asd_flash_getid(asd_ha);
1055 if (err)
1056 return err;
1057
1058 flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL);
1059 if (!flash_dir)
1060 return -ENOMEM;
1061
1062 err = -ENOENT;
1063 if (!asd_find_flash_dir(asd_ha, flash_dir)) {
1064 ASD_DPRINTK("couldn't find flash directory\n");
1065 goto out;
1066 }
1067
1068 if (le32_to_cpu(flash_dir->rev) != 2) {
1069 asd_printk("unsupported flash dir version:0x%x\n",
1070 le32_to_cpu(flash_dir->rev));
1071 goto out;
1072 }
1073
1074 err = asd_process_ms(asd_ha, flash_dir);
1075 if (err) {
1076 ASD_DPRINTK("couldn't process manuf sector settings\n");
1077 goto out;
1078 }
1079
1080 err = asd_process_ctrl_a_user(asd_ha, flash_dir);
1081 if (err) {
1082 ASD_DPRINTK("couldn't process CTRL-A user settings\n");
1083 goto out;
1084 }
1085
1086out:
1087 kfree(flash_dir);
1088 return err;
1089}
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
new file mode 100644
index 000000000000..d9b6da5fd06c
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -0,0 +1,1404 @@
1/*
2 * Aic94xx SAS/SATA driver sequencer interface.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * Parts of this code adapted from David Chaw's adp94xx_seq.c.
8 *
9 * This file is licensed under GPLv2.
10 *
11 * This file is part of the aic94xx driver.
12 *
13 * The aic94xx driver is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; version 2 of the
16 * License.
17 *
18 * The aic94xx driver is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with the aic94xx driver; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 *
27 */
28
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/module.h>
32#include <linux/firmware.h>
33#include "aic94xx_reg.h"
34#include "aic94xx_hwi.h"
35
36#include "aic94xx_seq.h"
37#include "aic94xx_dump.h"
38
39/* It takes no more than 0.05 us for an instruction
40 * to complete. So waiting for 1 us should be more than
41 * plenty.
42 */
43#define PAUSE_DELAY 1
44#define PAUSE_TRIES 1000
45
46static const struct firmware *sequencer_fw;
47static const char *sequencer_version;
48static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
49 cseq_idle_loop, lseq_idle_loop;
50static u8 *cseq_code, *lseq_code;
51static u32 cseq_code_size, lseq_code_size;
52
53static u16 first_scb_site_no = 0xFFFF;
54static u16 last_scb_site_no;
55
56/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */
57
58/**
59 * asd_pause_cseq - pause the central sequencer
60 * @asd_ha: pointer to host adapter structure
61 *
62 * Return 0 on success, negative on failure.
63 */
64int asd_pause_cseq(struct asd_ha_struct *asd_ha)
65{
66 int count = PAUSE_TRIES;
67 u32 arp2ctl;
68
69 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
70 if (arp2ctl & PAUSED)
71 return 0;
72
73 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
74 do {
75 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
76 if (arp2ctl & PAUSED)
77 return 0;
78 udelay(PAUSE_DELAY);
79 } while (--count > 0);
80
81 ASD_DPRINTK("couldn't pause CSEQ\n");
82 return -1;
83}
84
85/**
86 * asd_unpause_cseq - unpause the central sequencer.
87 * @asd_ha: pointer to host adapter structure.
88 *
89 * Return 0 on success, negative on error.
90 */
91int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
92{
93 u32 arp2ctl;
94 int count = PAUSE_TRIES;
95
96 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
97 if (!(arp2ctl & PAUSED))
98 return 0;
99
100 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
101 do {
102 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
103 if (!(arp2ctl & PAUSED))
104 return 0;
105 udelay(PAUSE_DELAY);
106 } while (--count > 0);
107
108 ASD_DPRINTK("couldn't unpause the CSEQ\n");
109 return -1;
110}
111
112/**
113 * asd_seq_pause_lseq - pause a link sequencer
114 * @asd_ha: pointer to a host adapter structure
115 * @lseq: link sequencer of interest
116 *
117 * Return 0 on success, negative on error.
118 */
119static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
120{
121 u32 arp2ctl;
122 int count = PAUSE_TRIES;
123
124 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
125 if (arp2ctl & PAUSED)
126 return 0;
127
128 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
129 do {
130 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
131 if (arp2ctl & PAUSED)
132 return 0;
133 udelay(PAUSE_DELAY);
134 } while (--count > 0);
135
136 ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
137 return -1;
138}
139
140/**
141 * asd_pause_lseq - pause the link sequencer(s)
142 * @asd_ha: pointer to host adapter structure
143 * @lseq_mask: mask of link sequencers of interest
144 *
145 * Return 0 on success, negative on failure.
146 */
147int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
148{
149 int lseq;
150 int err = 0;
151
152 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
153 err = asd_seq_pause_lseq(asd_ha, lseq);
154 if (err)
155 return err;
156 }
157
158 return err;
159}
160
161/**
162 * asd_seq_unpause_lseq - unpause a link sequencer
163 * @asd_ha: pointer to host adapter structure
164 * @lseq: link sequencer of interest
165 *
166 * Return 0 on success, negative on error.
167 */
168static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
169{
170 u32 arp2ctl;
171 int count = PAUSE_TRIES;
172
173 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
174 if (!(arp2ctl & PAUSED))
175 return 0;
176
177 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
178 do {
179 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
180 if (!(arp2ctl & PAUSED))
181 return 0;
182 udelay(PAUSE_DELAY);
183 } while (--count > 0);
184
185 ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
186 return 0;
187}
188
189
190/**
191 * asd_unpause_lseq - unpause the link sequencer(s)
192 * @asd_ha: pointer to host adapter structure
193 * @lseq_mask: mask of link sequencers of interest
194 *
195 * Return 0 on success, negative on failure.
196 */
197int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
198{
199 int lseq;
200 int err = 0;
201
202 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
203 err = asd_seq_unpause_lseq(asd_ha, lseq);
204 if (err)
205 return err;
206 }
207
208 return err;
209}
210
211/* ---------- Downloading CSEQ/LSEQ microcode ---------- */
212
213static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
214 u32 size)
215{
216 u32 addr = CSEQ_RAM_REG_BASE_ADR;
217 const u32 *prog = (u32 *) _prog;
218 u32 i;
219
220 for (i = 0; i < size; i += 4, prog++, addr += 4) {
221 u32 val = asd_read_reg_dword(asd_ha, addr);
222
223 if (le32_to_cpu(*prog) != val) {
224 asd_printk("%s: cseq verify failed at %u "
225 "read:0x%x, wanted:0x%x\n",
226 pci_name(asd_ha->pcidev),
227 i, val, le32_to_cpu(*prog));
228 return -1;
229 }
230 }
231 ASD_DPRINTK("verified %d bytes, passed\n", size);
232 return 0;
233}
234
235/**
236 * asd_verify_lseq - verify the microcode of a link sequencer
237 * @asd_ha: pointer to host adapter structure
238 * @_prog: pointer to the microcode
239 * @size: size of the microcode in bytes
240 * @lseq: link sequencer of interest
241 *
242 * The link sequencer code is accessed in 4 KB pages, which are selected
243 * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
244 * The 10 KB LSEQm instruction code is mapped, page at a time, at
245 * LmSEQRAM address.
246 */
247static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
248 u32 size, int lseq)
249{
250#define LSEQ_CODEPAGE_SIZE 4096
251 int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
252 u32 page;
253 const u32 *prog = (u32 *) _prog;
254
255 for (page = 0; page < pages; page++) {
256 u32 i;
257
258 asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
259 page << LmRAMPAGE_LSHIFT);
260 for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
261 i += 4, prog++, size-=4) {
262
263 u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);
264
265 if (le32_to_cpu(*prog) != val) {
266 asd_printk("%s: LSEQ%d verify failed "
267 "page:%d, offs:%d\n",
268 pci_name(asd_ha->pcidev),
269 lseq, page, i);
270 return -1;
271 }
272 }
273 }
274 ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
275 (int)((u8 *)prog-_prog));
276 return 0;
277}
278
279/**
280 * asd_verify_seq -- verify CSEQ/LSEQ microcode
281 * @asd_ha: pointer to host adapter structure
282 * @prog: pointer to microcode
283 * @size: size of the microcode
284 * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
285 *
286 * Return 0 if microcode is correct, negative on mismatch.
287 */
288static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
289 u32 size, u8 lseq_mask)
290{
291 if (lseq_mask == 0)
292 return asd_verify_cseq(asd_ha, prog, size);
293 else {
294 int lseq, err;
295
296 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
297 err = asd_verify_lseq(asd_ha, prog, size, lseq);
298 if (err)
299 return err;
300 }
301 }
302
303 return 0;
304}
305#define ASD_DMA_MODE_DOWNLOAD
306#ifdef ASD_DMA_MODE_DOWNLOAD
307/* This is the size of the CSEQ Mapped instruction page */
308#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
/* Download sequencer microcode via overlay DMA, then verify it.
 * @lseq_mask == 0 targets the CSEQ; otherwise each set bit selects an
 * LSEQ.  Interrupts are saved/masked around the transfer and restored
 * before returning.  Returns 0 on success, negative on error.
 */
static int asd_download_seq(struct asd_ha_struct *asd_ha,
			    const u8 * const prog, u32 size, u8 lseq_mask)
{
	u32 comstaten;
	u32 reg;
	int page;
	const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
	struct asd_dma_tok *token;
	int err = 0;

	/* The overlay engine moves whole dwords only. */
	if (size % 4) {
		asd_printk("sequencer program not multiple of 4\n");
		return -1;
	}

	/* Sequencers must be quiescent while their RAM is overwritten. */
	asd_pause_cseq(asd_ha);
	asd_pause_lseq(asd_ha, 0xFF);

	/* save, disable and clear interrupts */
	comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
	asd_write_reg_dword(asd_ha, COMSTATEN, 0);
	asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);

	asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
	asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);

	/* One coherent bounce buffer is reused for every page. */
	token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
	if (!token) {
		asd_printk("out of memory for dma SEQ download\n");
		err = -ENOMEM;
		goto out;
	}
	ASD_DPRINTK("dma-ing %d bytes\n", size);

	for (page = 0; page < pages; page++) {
		int i;
		u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
			       (u32)MAX_DMA_OVLY_COUNT);

		memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
		asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
		asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
		/* RESETOVLYDMA only on the first page of the transfer. */
		reg = !page ? RESETOVLYDMA : 0;
		reg |= (STARTOVLYDMA | OVLYHALTERR);
		reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
		/* Start DMA. */
		asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

		/* Poll for completion of this page; overall success is
		 * checked once, below, via COMSTAT. */
		for (i = PAUSE_TRIES*100; i > 0; i--) {
			u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
			if (!(dmadone & OVLYDMAACT))
				break;
			udelay(PAUSE_DELAY);
		}
	}

	reg = asd_read_reg_dword(asd_ha, COMSTAT);
	if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
	    || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
		asd_printk("%s: error DMA-ing sequencer code\n",
			   pci_name(asd_ha->pcidev));
		err = -ENODEV;
	}

	asd_free_coherent(asd_ha, token);
 out:
	/* Restore the saved interrupt enables. */
	asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);

	/* On success, read back and verify what was downloaded. */
	return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
}
379#else /* ASD_DMA_MODE_DOWNLOAD */
/* PIO-mode variant of the sequencer download (alternate to the DMA
 * implementation above, selected by ASD_DMA_MODE_DOWNLOAD).  Writes the
 * image one dword at a time through SPIODATA, then verifies it.
 */
static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
			    u32 size, u8 lseq_mask)
{
	int i;
	u32 reg = 0;
	const u32 *prog = (u32 *) _prog;

	/* The overlay engine moves whole dwords only. */
	if (size % 4) {
		asd_printk("sequencer program not multiple of 4\n");
		return -1;
	}

	/* Sequencers must be quiescent while their RAM is overwritten. */
	asd_pause_cseq(asd_ha);
	asd_pause_lseq(asd_ha, 0xFF);

	reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
	reg |= PIOCMODE;

	asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

	ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
		    lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");

	/* NOTE(review): *prog is written without byte-order conversion,
	 * while the verify path compares via le32_to_cpu() -- confirm
	 * behavior on big-endian hosts. */
	for (i = 0; i < size; i += 4, prog++)
		asd_write_reg_dword(asd_ha, SPIODATA, *prog);

	/* Leave PIO mode and re-arm halt-on-error before verifying. */
	reg = (reg & ~PIOCMODE) | OVLYHALTERR;
	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

	return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
}
412#endif /* ASD_DMA_MODE_DOWNLOAD */
413
414/**
415 * asd_seq_download_seqs - download the sequencer microcode
416 * @asd_ha: pointer to host adapter structure
417 *
418 * Download the central and link sequencer microcode.
419 */
420static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
421{
422 int err;
423
424 if (!asd_ha->hw_prof.enabled_phys) {
425 asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
426 return -ENODEV;
427 }
428
429 /* Download the CSEQ */
430 ASD_DPRINTK("downloading CSEQ...\n");
431 err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
432 if (err) {
433 asd_printk("CSEQ download failed:%d\n", err);
434 return err;
435 }
436
437 /* Download the Link Sequencers code. All of the Link Sequencers
438 * microcode can be downloaded at the same time.
439 */
440 ASD_DPRINTK("downloading LSEQs...\n");
441 err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
442 asd_ha->hw_prof.enabled_phys);
443 if (err) {
444 /* Try it one at a time */
445 u8 lseq;
446 u8 lseq_mask = asd_ha->hw_prof.enabled_phys;
447
448 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
449 err = asd_download_seq(asd_ha, lseq_code,
450 lseq_code_size, 1<<lseq);
451 if (err)
452 break;
453 }
454 }
455 if (err)
456 asd_printk("LSEQs download failed:%d\n", err);
457
458 return err;
459}
460
461/* ---------- Initializing the chip, chip memory, etc. ---------- */
462
/**
 * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
 * @asd_ha: pointer to host adapter structure
 *
 * Writes the power-on defaults into the CSEQ mode-independent scratch
 * pages.  0xFFFF appears to be the empty/invalid sentinel for queue
 * head/tail pointers -- NOTE(review): confirm against the firmware spec.
 */
static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
{
	/* CSEQ Mode Independent, page 4 setup. */
	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
	asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
	asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
	asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
	{
		/* Derive the max CSEQ mode from the number of existing
		 * connections (same count in both nibbles). */
		u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
		u8 val = hweight8(con);
		asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
	}
	asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);

	/* CSEQ Mode independent, page 5 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);

	/* CSEQ Mode independent, page 6 setup. */
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
	asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
	asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
	/* Calculate the free scb mask. */
	{
		u16 cmdctx = asd_get_cmdctx_size(asd_ha);
		cmdctx = (~((cmdctx/128)-1)) >> 8;
		asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
	}
	/* The built-in free SCB list spans the SCB sites discovered
	 * earlier (file-scope first/last_scb_site_no). */
	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
			   first_scb_site_no);
	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
			   last_scb_site_no);
	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);

	/* CSEQ Mode independent, page 7 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
	asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
	asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
}
540
/**
 * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
 * @asd_ha: pointer to host adapter structure
 *
 * Writes the defaults into the per-mode CSEQ scratch pages (modes 0-7
 * and mode 8) and tells the sequencer where the first SCB and the Done
 * List live in host memory.
 */
static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
{
	int i;
	int moffs;

	/* Each mode's scratch area is two CSEQ pages apart. */
	moffs = CSEQ_PAGE_SIZE * 2;

	/* CSEQ Mode dependent, modes 0-7, page 0 setup. */
	for (i = 0; i < 8; i++) {
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
		asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
	}

	/* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */

	/* CSEQ Mode dependent, mode 8, page 0 setup. */
	asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
	asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
	asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
	asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
	asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
	asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
	/* First site/DDB numbers past the valid range. */
	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
			   (u16)last_scb_site_no+1);
	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
			   (u16)asd_ha->hw_prof.max_ddbs);

	/* CSEQ Mode dependent, mode 8, page 1 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);

	/* CSEQ Mode dependent, mode 8, page 2 setup. */
	/* Tell the sequencer the bus address of the first SCB. */
	asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
			   asd_ha->seq.next_scb.dma_handle);
	ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
		    (unsigned long long)asd_ha->seq.next_scb.dma_handle);

	/* Tell the sequencer the first Done List entry address. */
	asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
			   asd_ha->seq.actual_dl->dma_handle);

	/* Initialize the Q_DONE_POINTER with the least significant
	 * 4 bytes of the first Done List address. */
	asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
			    ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));

	asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);

	/* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
}
607
/**
 * asd_init_cseq_scratch -- setup and init CSEQ
 * @asd_ha: pointer to host adapter structure
 *
 * Setup and initialize Central sequencers. Initialiaze the mode
 * independent and dependent scratch page to the default settings.
 * Thin wrapper: writes the mode-independent pages first, then the
 * mode-dependent pages.
 */
static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
{
	asd_init_cseq_mip(asd_ha);
	asd_init_cseq_mdp(asd_ha);
}
620
/**
 * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer (phy) number whose scratch pages to initialize
 *
 * Writes the power-on defaults and the timeout constants into the
 * mode-independent scratch pages of one link sequencer.
 */
static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
{
	int i;

	/* LSEQ Mode independent page 0 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
	asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
			   ASD_NOTIFY_ENABLE_SPINUP);
	asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
	asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);

	/* LSEQ Mode independent page 1 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);

	/* LSEQ Mode Independent page 2 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
	/* Clear the three 4-byte ATA SCR registers. */
	for (i = 0; i < 12; i += 4)
		asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);

	/* LSEQ Mode Independent page 3 setup. */

	/* Device present timer timeout */
	asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
			    ASD_DEV_PRESENT_TIMEOUT);

	/* SATA interlock timer disabled */
	asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
			    ASD_SATA_INTERLOCK_TIMEOUT);

	/* STP shutdown timer timeout constant, IGNORED by the sequencer,
	 * always 0. */
	asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
			    ASD_STP_SHUTDOWN_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
			    ASD_SRST_ASSERT_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
			    ASD_RCV_FIS_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
			    ASD_ONE_MILLISEC_TIMEOUT);

	/* COM_INIT timer */
	asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
			    ASD_TEN_MILLISEC_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
			    ASD_SMP_RCV_TIMEOUT);
}
707
708/**
709 * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
710 * @asd_ha: pointer to host adapter structure
711 */
712static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
713{
714 int i;
715 u32 moffs;
716 u16 ret_addr[] = {
717 0xFFFF, /* mode 0 */
718 0xFFFF, /* mode 1 */
719 mode2_task, /* mode 2 */
720 0,
721 0xFFFF, /* mode 4/5 */
722 0xFFFF, /* mode 4/5 */
723 };
724
725 /*
726 * Mode 0,1,2 and 4/5 have common field on page 0 for the first
727 * 14 bytes.
728 */
729 for (i = 0; i < 3; i++) {
730 moffs = i * LSEQ_MODE_SCRATCH_SIZE;
731 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
732 ret_addr[i]);
733 asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
734 asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
735 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
736 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
737 asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
738 asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
739 }
740 /*
741 * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
742 */
743 asd_write_reg_word(asd_ha,
744 LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
745 ret_addr[5]);
746 asd_write_reg_word(asd_ha,
747 LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
748 asd_write_reg_word(asd_ha,
749 LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
750 asd_write_reg_word(asd_ha,
751 LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
752 asd_write_reg_word(asd_ha,
753 LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
754 asd_write_reg_byte(asd_ha,
755 LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
756 asd_write_reg_word(asd_ha,
757 LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
758
759 /* LSEQ Mode dependent 0, page 0 setup. */
760 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
761 (u16)asd_ha->hw_prof.max_ddbs);
762 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
763 asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
764 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
765 (u16)last_scb_site_no+1);
766 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
767 (u16) LmM0INTEN_MASK & 0xFFFF0000 >> 16);
768 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
769 (u16) LmM0INTEN_MASK & 0xFFFF);
770 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
771 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
772 asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
773 asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
774 asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);
775
776 /* LSEQ mode dependent, mode 1, page 0 setup. */
777 asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
778 asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
779 asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
780 asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
781 asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
782 asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
783 asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
784 asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);
785
786 /* LSEQ Mode dependent mode 2, page 0 setup */
787 asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
788 asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
789 asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
790 asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
791 asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
792 asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);
793
794 /* LSEQ Mode dependent, mode 4/5, page 0 setup. */
795 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
796 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
797 asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
798 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
799 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
800 asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
801 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
802 asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
803 asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
804 /*
805 * Set the desired interval between transmissions of the NOTIFY
806 * (ENABLE SPINUP) primitive. Must be initilized to val - 1.
807 */
808 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
809 ASD_NOTIFY_TIMEOUT - 1);
810 /* No delay for the first NOTIFY to be sent to the attached target. */
811 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
812 ASD_NOTIFY_DOWN_COUNT);
813
814 /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
815 for (i = 0; i < 2; i++) {
816 int j;
817 /* Start from Page 1 of Mode 0 and 1. */
818 moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
819 /* All the fields of page 1 can be intialized to 0. */
820 for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
821 asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
822 }
823
824 /* LSEQ Mode dependent, mode 2, page 1 setup. */
825 asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
826 asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
827 asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);
828
829 /* LSEQ Mode dependent, mode 4/5, page 1. */
830 for (i = 0; i < LSEQ_PAGE_SIZE; i+=4)
831 asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
832 asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
833 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
834 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
835 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
836 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
837 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
838 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
839 asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);
840
841 /* LSEQ Mode dependent, mode 0, page 2 setup. */
842 asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
843 asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
844 asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
845 asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
846 asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);
847
848 /* LSEQ Mode Dependent 1, page 2 setup. */
849 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
850 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
851 asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
852 asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
853 asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);
854
855 /* LSEQ Mode Dependent 2, page 2 setup. */
856 /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
857 * i.e. always 0. */
858 asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
859 asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
860 asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
861 asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
862 asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
863 asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);
864
865 /* LSEQ Mode Dependent 4/5, page 2 setup. */
866 asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
867 asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
868 asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
869 asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0);
870}
871
872/**
873 * asd_init_lseq_scratch -- setup and init link sequencers
874 * @asd_ha: pointer to host adapter struct
875 */
876static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
877{
878 u8 lseq;
879 u8 lseq_mask;
880
881 lseq_mask = asd_ha->hw_prof.enabled_phys;
882 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
883 asd_init_lseq_mip(asd_ha, lseq);
884 asd_init_lseq_mdp(asd_ha, lseq);
885 }
886}
887
/**
 * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
 * @asd_ha: pointer to host adapter structure
 *
 * This should be done before initializing common CSEQ and LSEQ
 * scratch since those areas depend on some computed values here,
 * last_scb_site_no, etc.
 */
static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
{
	u16 site_no;
	u16 max_scbs = 0;

	/* Walk the sites from highest to lowest; at the end
	 * first_scb_site_no holds the lowest valid site number. */
	for (site_no = asd_ha->hw_prof.max_scbs-1;
	     site_no != (u16) -1;
	     site_no--) {
		u16 i;

		/* Initialize all fields in the SCB site to 0. */
		for (i = 0; i < ASD_SCB_SIZE; i += 4)
			asd_scbsite_write_dword(asd_ha, site_no, i, 0);

		/* Workaround needed by SEQ to fix a SATA issue is to exclude
		 * certain SCB sites from the free list. */
		if (!SCB_SITE_VALID(site_no))
			continue;

		/* Record the highest valid site number; set only once,
		 * on the first valid iteration. */
		if (last_scb_site_no == 0)
			last_scb_site_no = site_no;

		/* For every SCB site, we need to initialize the
		 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
		 * and SG Element Flag. */

		/* Q_NEXT field of the last SCB is invalidated.
		 * Each valid site's Q_NEXT is written with the previously
		 * processed (next higher) valid site number, chaining the
		 * free list.  NOTE(review): on the very first valid
		 * iteration this stores the initial value of the
		 * file-scope first_scb_site_no -- confirm it starts out
		 * as the intended sentinel. */
		asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);

		/* Initialize SCB Site Opcode field to invalid. */
		asd_scbsite_write_byte(asd_ha, site_no,
				       offsetof(struct scb_header, opcode),
				       0xFF);

		/* Initialize SCB Site Flags field to mean a response
		 * frame has been received.  This means inadvertent
		 * frames received to be dropped. */
		asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);

		first_scb_site_no = site_no;
		max_scbs++;
	}
	/* Only the valid (free-listed) sites count toward the usable total. */
	asd_ha->hw_prof.max_scbs = max_scbs;
	ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
	ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
	ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
}
943
/**
 * asd_init_cseq_cio - initialize CSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
{
	int i;

	/* Mask CSEQ completion interrupts and program the DL size. */
	asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
	asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
	asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
	asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
	/* Keep the driver's cached SCB producer index in sync with HW. */
	asd_ha->seq.scbpro = 0;
	asd_write_reg_dword(asd_ha, SCBPRO, 0);
	asd_write_reg_dword(asd_ha, CSEQCON, 0);

	/* Initialize CSEQ Mode 11 Interrupt Vectors.
	 * The addresses are 16 bit wide and in dword units.
	 * NOTE(review): an earlier comment said the vector values are in
	 * byte units and must be divided by 4, but no division happens
	 * here -- presumably cseq_vecs[] (read from the firmware vector
	 * table) is already in dword units; confirm against the firmware
	 * file format. */
	asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
	asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
	asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);

	/* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
	asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);

	/* Initialize CSEQ Scratch Page to 0x04. */
	asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);

	/* Initialize CSEQ Mode[0-8] Dependent registers. */
	/* Initialize Scratch Page to 0. */
	for (i = 0; i < 9; i++)
		asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);

	/* Reset the ARP2 Program Count: point it at the idle loop. */
	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);

	for (i = 0; i < 8; i++) {
		/* Initialize Mode n Link m Interrupt Enable. */
		asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
		/* Initialize Mode n Request Mailbox. */
		asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
	}
}
989
/**
 * asd_init_lseq_cio -- initialize LmSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer (phy) of interest
 *
 * Programs interrupt enables, timers, the SAS address (WWN), interrupt
 * vectors and alignment rates for one link sequencer.  The register
 * access order follows the hardware bring-up sequence and should not
 * be rearranged.
 */
static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
{
	u8  *sas_addr;
	int  i;

	/* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
	asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);

	asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);

	/* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
	for (i = 0; i < 3; i++)
		asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);

	/* Initialize Mode 5 SCRATCHPAGE to 0. */
	asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);

	asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
	/* Initialize Mode 0,1,2 and 5 Interrupt Enable and
	 * Interrupt registers (all-ones clears pending status). */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
	/* Mode 1 */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
	/* Mode 2 */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
	/* Mode 5 */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);

	/* Enable HW Timer status. */
	asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);

	/* Enable Primitive Status 0 and 1. */
	asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
	asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);

	/* Enable Frame Error. */
	asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
	asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);

	/* Initialize Mode 0 Transfer Level to 512. */
	asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
	/* Initialize Mode 1 Transfer Level to 256. */
	asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);

	/* Initialize Program Count: point ARP2 at the idle loop. */
	asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);

	/* Enable Blind SG Move. */
	asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
	asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
			   ASD_SATA_INTERLOCK_TIMEOUT);

	/* Dummy read to drain the request mailbox; value is discarded. */
	(void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));

	/* Clear Primitive Status 0 and 1. */
	asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
	asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);

	/* Clear HW Timer status. */
	asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);

	/* Clear DMA Errors for Mode 0 and 1. */
	asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
	asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);

	/* Clear SG DMA Errors for Mode 0 and 1. */
	asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
	asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);

	/* Clear Mode 0 Buffer Parity Error. */
	asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);

	/* Clear Mode 0 Frame Error register. */
	asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);

	/* Reset LSEQ external interrupt arbiter. */
	asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);

	/* Set the Phy SAS for the LmSEQ WWN, one byte at a time. */
	sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
	for (i = 0; i < SAS_ADDR_SIZE; i++)
		asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);

	/* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
	asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);

	/* Set the Bus Inactivity Time Limit Timer. */
	asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);

	/* Enable SATA Port Multiplier. */
	asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);

	/* Initialize Interrupt Vector[0-10] address in Mode 3.
	 * See the comment on CSEQ_INT_* */
	asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
	asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
	asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
	asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
	asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
	asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
	asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
	asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
	asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
	asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
	asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
	/*
	 * Program the Link LED control, applicable only for
	 * Chip Rev. B or later.
	 */
	asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
			    (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));

	/* Set the Align Rate for SAS and STP mode. */
	asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
	asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
}
1114
1115
/**
 * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
 * @asd_ha: pointer to host adapter struct
 *
 * Run after sequencer setup: acknowledges any stale per-mode interrupt
 * status, drains the response mailboxes, and resets the external
 * interrupt arbiter.
 */
static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
{
	int i;

	/* Writing all-ones presumably acknowledges any pending Mode n
	 * interrupt status (write-to-clear) -- confirm against the
	 * register spec. */
	for (i = 0; i < 8; i++)
		asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
	/* Drain the Mode n response mailboxes; read values are discarded. */
	for (i = 0; i < 8; i++)
		asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
	/* Reset the external interrupt arbiter. */
	asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
}
1131
/**
 * asd_init_ddb_0 -- initialize DDB 0
 * @asd_ha: pointer to host adapter structure
 *
 * Initialize DDB site 0 which is used internally by the sequencer.
 * It holds the free/used DDB queues and per-phy bookkeeping shared
 * between the driver and the sequencer (struct asd_ddb_seq_shared).
 */
static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
{
	int	i;

	/* Zero out the DDB explicitly */
	for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4)
		asd_ddbsite_write_dword(asd_ha, 0, i, 0);

	/* Free DDB list spans site 1 .. max_ddbs-1 (site 0 is this one). */
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
			       asd_ha->hw_prof.max_ddbs-1);
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
	/* 0xFFFF marks the used-DDB queue as empty. */
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
	/* Two establish-nexus buffers per phy. */
	asd_ddbsite_write_word(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
			       asd_ha->hw_prof.num_phys * 2);
	asd_ddbsite_write_byte(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
	asd_ddbsite_write_byte(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
	/* No phys are up yet; asd_update_port_links() sets bits later. */
	asd_ddbsite_write_byte(asd_ha, 0,
		 offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
	/* DDB 0 is reserved */
	set_bit(0, asd_ha->hw_prof.ddb_bitmap);
}
1175
1176/**
1177 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
1178 * @asd_ha: pointer to host adapter structure
1179 */
1180static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
1181{
1182 int lseq;
1183 u8 lseq_mask;
1184
1185 /* Initialize SCB sites. Done first to compute some values which
1186 * the rest of the init code depends on. */
1187 asd_init_scb_sites(asd_ha);
1188
1189 /* Initialize CSEQ Scratch RAM registers. */
1190 asd_init_cseq_scratch(asd_ha);
1191
1192 /* Initialize LmSEQ Scratch RAM registers. */
1193 asd_init_lseq_scratch(asd_ha);
1194
1195 /* Initialize CSEQ CIO registers. */
1196 asd_init_cseq_cio(asd_ha);
1197
1198 asd_init_ddb_0(asd_ha);
1199
1200 /* Initialize LmSEQ CIO registers. */
1201 lseq_mask = asd_ha->hw_prof.enabled_phys;
1202 for_each_sequencer(lseq_mask, lseq_mask, lseq)
1203 asd_init_lseq_cio(asd_ha, lseq);
1204 asd_post_init_cseq(asd_ha);
1205}
1206
1207
1208/**
1209 * asd_seq_start_cseq -- start the central sequencer, CSEQ
1210 * @asd_ha: pointer to host adapter structure
1211 */
1212static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
1213{
1214 /* Reset the ARP2 instruction to location zero. */
1215 asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
1216
1217 /* Unpause the CSEQ */
1218 return asd_unpause_cseq(asd_ha);
1219}
1220
1221/**
1222 * asd_seq_start_lseq -- start a link sequencer
1223 * @asd_ha: pointer to host adapter structure
1224 * @lseq: the link sequencer of interest
1225 */
1226static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
1227{
1228 /* Reset the ARP2 instruction to location zero. */
1229 asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1230
1231 /* Unpause the LmSEQ */
1232 return asd_seq_unpause_lseq(asd_ha, lseq);
1233}
1234
1235static int asd_request_firmware(struct asd_ha_struct *asd_ha)
1236{
1237 int err, i;
1238 struct sequencer_file_header header, *hdr_ptr;
1239 u32 csum = 0;
1240 u16 *ptr_cseq_vecs, *ptr_lseq_vecs;
1241
1242 if (sequencer_fw)
1243 /* already loaded */
1244 return 0;
1245
1246 err = request_firmware(&sequencer_fw,
1247 SAS_RAZOR_SEQUENCER_FW_FILE,
1248 &asd_ha->pcidev->dev);
1249 if (err)
1250 return err;
1251
1252 hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data;
1253
1254 header.csum = le32_to_cpu(hdr_ptr->csum);
1255 header.major = le32_to_cpu(hdr_ptr->major);
1256 header.minor = le32_to_cpu(hdr_ptr->minor);
1257 sequencer_version = hdr_ptr->version;
1258 header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
1259 header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
1260 header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
1261 header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
1262 header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
1263 header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
1264 header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
1265 header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
1266 header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
1267 header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
1268 header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);
1269
1270 for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
1271 csum += sequencer_fw->data[i];
1272
1273 if (csum != header.csum) {
1274 asd_printk("Firmware file checksum mismatch\n");
1275 return -EINVAL;
1276 }
1277
1278 if (header.cseq_table_size != CSEQ_NUM_VECS ||
1279 header.lseq_table_size != LSEQ_NUM_VECS) {
1280 asd_printk("Firmware file table size mismatch\n");
1281 return -EINVAL;
1282 }
1283
1284 ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
1285 ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
1286 mode2_task = header.mode2_task;
1287 cseq_idle_loop = header.cseq_idle_loop;
1288 lseq_idle_loop = header.lseq_idle_loop;
1289
1290 for (i = 0; i < CSEQ_NUM_VECS; i++)
1291 cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);
1292
1293 for (i = 0; i < LSEQ_NUM_VECS; i++)
1294 lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);
1295
1296 cseq_code = &sequencer_fw->data[header.cseq_code_offset];
1297 cseq_code_size = header.cseq_code_size;
1298 lseq_code = &sequencer_fw->data[header.lseq_code_offset];
1299 lseq_code_size = header.lseq_code_size;
1300
1301 return 0;
1302}
1303
1304int asd_init_seqs(struct asd_ha_struct *asd_ha)
1305{
1306 int err;
1307
1308 err = asd_request_firmware(asd_ha);
1309
1310 if (err) {
1311 asd_printk("Failed to load sequencer firmware file %s, error %d\n",
1312 SAS_RAZOR_SEQUENCER_FW_FILE, err);
1313 return err;
1314 }
1315
1316 asd_printk("using sequencer %s\n", sequencer_version);
1317 err = asd_seq_download_seqs(asd_ha);
1318 if (err) {
1319 asd_printk("couldn't download sequencers for %s\n",
1320 pci_name(asd_ha->pcidev));
1321 return err;
1322 }
1323
1324 asd_seq_setup_seqs(asd_ha);
1325
1326 return 0;
1327}
1328
1329int asd_start_seqs(struct asd_ha_struct *asd_ha)
1330{
1331 int err;
1332 u8 lseq_mask;
1333 int lseq;
1334
1335 err = asd_seq_start_cseq(asd_ha);
1336 if (err) {
1337 asd_printk("couldn't start CSEQ for %s\n",
1338 pci_name(asd_ha->pcidev));
1339 return err;
1340 }
1341
1342 lseq_mask = asd_ha->hw_prof.enabled_phys;
1343 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
1344 err = asd_seq_start_lseq(asd_ha, lseq);
1345 if (err) {
1346 asd_printk("coudln't start LSEQ %d for %s\n", lseq,
1347 pci_name(asd_ha->pcidev));
1348 return err;
1349 }
1350 }
1351
1352 return 0;
1353}
1354
/**
 * asd_update_port_links -- update port_map_by_links and phy_is_up
 * @sas_phy: pointer to the phy which has been added to a port
 *
 * 1) When a link reset has completed and we got BYTES DMAED with a
 * valid frame we call this function for that phy, to indicate that
 * the phy is up, i.e. we update the phy_is_up in DDB 0.  The
 * sequencer checks phy_is_up when pending SCBs are to be sent, and
 * when an open address frame has been received.
 *
 * 2) When we know of ports, we call this function to update the map
 * of phys participating in that port, i.e. we update the
 * port_map_by_links in DDB 0.  When a HARD_RESET primitive has been
 * received, the sequencer disables all phys in that port.
 * port_map_by_links is also used as the conn_mask byte in the
 * initiator/target port DDB.
 */
void asd_update_port_links(struct asd_sas_phy *sas_phy)
{
	struct asd_ha_struct *asd_ha = sas_phy->ha->lldd_ha;
	const u8 phy_mask = (u8) sas_phy->port->phy_mask;
	u8  phy_is_up;
	u8  mask;
	int i, err;

	/* For each phy in the port, store the full port phy mask in its
	 * port_map_by_links slot. */
	for_each_phy(phy_mask, mask, i)
		asd_ddbsite_write_byte(asd_ha, 0,
				       offsetof(struct asd_ddb_seq_shared,
						port_map_by_links)+i,phy_mask);

	/* Read-modify-write of phy_is_up, retried up to 12 times:
	 * asd_ddbsite_update_byte() presumably fails when the byte
	 * changed underneath us (sequencer also writes it) or on a
	 * parity error (-EFAULT) -- confirm against its definition. */
	for (i = 0; i < 12; i++) {
		phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
			  offsetof(struct asd_ddb_seq_shared, phy_is_up));
		err = asd_ddbsite_update_byte(asd_ha, 0,
				offsetof(struct asd_ddb_seq_shared,
					 phy_is_up),
				phy_is_up,
				phy_is_up | phy_mask);
		if (!err)
			break;
		else if (err == -EFAULT) {
			/* Parity error is not retryable. */
			asd_printk("phy_is_up: parity error in DDB 0\n");
			break;
		}
	}

	if (err)
		asd_printk("couldn't update DDB 0:error:%d\n", err);
}
1403
1404MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h
new file mode 100644
index 000000000000..42281c36153b
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.h
@@ -0,0 +1,70 @@
1/*
2 * Aic94xx SAS/SATA driver sequencer interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_SEQ_H_
28#define _AIC94XX_SEQ_H_
29
30#define CSEQ_NUM_VECS 3
31#define LSEQ_NUM_VECS 11
32
33#define SAS_RAZOR_SEQUENCER_FW_FILE "aic94xx-seq.fw"
34
/* Note: All quantities in the sequencer file are little endian */
struct sequencer_file_header {
	/* Checksum of the entire contents of the sequencer excluding
	 * these four bytes */
	u32	csum;
	/* numeric major version */
	u32	major;
	/* numeric minor version */
	u32	minor;
	/* version string printed by driver */
	char	version[16];
	u32	cseq_table_offset;	/* byte offset of CSEQ vector table */
	u32	cseq_table_size;	/* number of CSEQ vectors */
	u32	lseq_table_offset;	/* byte offset of LSEQ vector table */
	u32	lseq_table_size;	/* number of LSEQ vectors */
	u32	cseq_code_offset;	/* byte offset of CSEQ code image */
	u32	cseq_code_size;		/* CSEQ code size in bytes */
	u32	lseq_code_offset;	/* byte offset of LSEQ code image */
	u32	lseq_code_size;		/* LSEQ code size in bytes */
	u16	mode2_task;		/* mode 2 task return address */
	u16	cseq_idle_loop;		/* CSEQ idle loop entry point */
	u16	lseq_idle_loop;		/* LSEQ idle loop entry point */
} __attribute__((packed));
58
59#ifdef __KERNEL__
60int asd_pause_cseq(struct asd_ha_struct *asd_ha);
61int asd_unpause_cseq(struct asd_ha_struct *asd_ha);
62int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
63int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
64int asd_init_seqs(struct asd_ha_struct *asd_ha);
65int asd_start_seqs(struct asd_ha_struct *asd_ha);
66
67void asd_update_port_links(struct asd_sas_phy *phy);
68#endif
69
70#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
new file mode 100644
index 000000000000..285e70dae933
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -0,0 +1,642 @@
1/*
2 * Aic94xx SAS/SATA Tasks
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/spinlock.h>
28#include "aic94xx.h"
29#include "aic94xx_sas.h"
30#include "aic94xx_hwi.h"
31
32static void asd_unbuild_ata_ascb(struct asd_ascb *a);
33static void asd_unbuild_smp_ascb(struct asd_ascb *a);
34static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
35
36static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
37{
38 unsigned long flags;
39
40 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
41 asd_ha->seq.can_queue += num;
42 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
43}
44
/* PCI_DMA_... to our direction translation.
 * Indexed by the task's data_dir (a PCI DMA direction); yields the
 * ASD DATA_DIR_* flag placed in the SCB.
 */
static const u8 data_dir_flags[] = {
	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
	[PCI_DMA_TODEVICE]      = DATA_DIR_OUT, /* OUTBOUND */
	[PCI_DMA_FROMDEVICE]    = DATA_DIR_IN, /* INBOUND */
	[PCI_DMA_NONE]          = DATA_DIR_NONE, /* NO TRANSFER */
};
53
/* DMA-map the task's data buffer(s) and fill in the SCB's inline
 * scatter-gather elements @sg_arr (3 inline slots).
 *
 * Cases:
 *  - PCI_DMA_NONE: nothing to map.
 *  - num_scatter == 0: @task->scatter is a plain buffer, mapped as a
 *    single element.
 *  - up to 3 SG entries: placed directly in the inline slots.
 *  - more than 3: a coherent table holds the full list; inline slots
 *    0-1 carry the first two elements, slot 1 is marked EOS and slot 2
 *    points at the spilled table.
 *
 * Returns 0 on success, -ENOMEM on mapping/allocation failure.
 */
static inline int asd_map_scatterlist(struct sas_task *task,
				      struct sg_el *sg_arr,
				      unsigned long gfp_flags)
{
	struct asd_ascb *ascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct scatterlist *sc;
	int num_sg, res;

	if (task->data_dir == PCI_DMA_NONE)
		return 0;

	if (task->num_scatter == 0) {
		/* Single flat buffer: one inline element, marked EOL.
		 * NOTE(review): pci_map_single() result is not checked
		 * for a mapping error — confirm against DMA API rules. */
		void *p = task->scatter;
		dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
						task->total_xfer_len,
						task->data_dir);
		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
		return 0;
	}

	num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
			    task->data_dir);
	if (num_sg == 0)
		return -ENOMEM;

	if (num_sg > 3) {
		int i;

		/* Full list goes into a DMA-coherent side table. */
		ascb->sg_arr = asd_alloc_coherent(asd_ha,
						  num_sg*sizeof(struct sg_el),
						  gfp_flags);
		if (!ascb->sg_arr) {
			res = -ENOMEM;
			goto err_unmap;
		}
		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
			struct sg_el *sg =
				&((struct sg_el *)ascb->sg_arr->vaddr)[i];
			sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
			sg->size = cpu_to_le32((u32)sg_dma_len(sc));
			if (i == num_sg-1)
				sg->flags |= ASD_SG_EL_LIST_EOL;
		}

		/* First two elements also live inline in the SCB; slot 1
		 * gets EOS with the byte offset of slot 2 (which chains to
		 * the side table). */
		for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
		sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;

		memset(&sg_arr[2], 0, sizeof(*sg_arr));
		sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
	} else {
		/* 1-3 elements fit entirely in the inline slots. */
		int i;
		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
	}

	return 0;
err_unmap:
	pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
		     task->data_dir);
	return res;
}
127
/* Undo asd_map_scatterlist(): release DMA mappings and any spilled
 * SG side table for the task attached to @ascb.
 */
static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_task *task = ascb->uldd_task;

	if (task->data_dir == PCI_DMA_NONE)
		return;

	if (task->num_scatter == 0) {
		/* Single-buffer case: recover the bus address from the
		 * inline element written at map time.  NOTE(review): reads
		 * it via ssp_task.sg_element even for ATA tasks — presumably
		 * the sg_element arrays share an SCB offset; confirm. */
		dma_addr_t dma = (dma_addr_t)
		       le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
		pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
				 task->data_dir);
		return;
	}

	/* NOTE(review): sg_arr is only allocated when num_sg > 3; presumably
	 * asd_free_coherent() tolerates a NULL token — confirm. */
	asd_free_coherent(asd_ha, ascb->sg_arr);
	pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
		     task->data_dir);
}
148
149/* ---------- Task complete tasklet ---------- */
150
/* Extract the protocol response for a completed task from the empty
 * data buffer (EDB) referenced by the done-list entry @dl, and fill in
 * the task's task_status (residual, stat, sense/FIS buffer).
 *
 * The status block encodes which ESCB/EDB holds the response frame;
 * the EDB is invalidated once consumed.
 */
static void asd_get_response_tasklet(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_task *task = ascb->uldd_task;
	struct task_status_struct *ts = &task->task_status;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8 len_lsb;
		u8 flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

/*	int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
	/* EDB index within the ESCB, biased by one in the hardware field. */
	int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	void *r;

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return;
	}

	ts->buf_valid_size = 0;
	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	r = edb->vaddr;
	if (task->task_proto == SAS_PROTO_SSP) {
		/* Response IU follows a 16-byte header area and the SSP
		 * frame header; first 4 bytes of the EDB are the residual. */
		struct ssp_response_iu *iu =
			r + 16 + sizeof(struct ssp_frame_hdr);

		ts->residual = le32_to_cpu(*(__le32 *)r);
		ts->resp = SAS_TASK_COMPLETE;
		if (iu->datapres == 0)
			ts->stat = iu->status;
		else if (iu->datapres == 1)
			ts->stat = iu->resp_data[3];
		else if (iu->datapres == 2) {
			/* Sense data present: copy into the status buffer. */
			ts->stat = SAM_CHECK_COND;
			ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE,
					 be32_to_cpu(iu->sense_data_len));
			memcpy(ts->buf, iu->sense_data, ts->buf_valid_size);
			if (iu->status != SAM_CHECK_COND) {
				ASD_DPRINTK("device %llx sent sense data, but "
					    "stat(0x%x) is not CHECK_CONDITION"
					    "\n",
					    SAS_ADDR(task->dev->sas_addr),
					    ts->stat);
			}
		}
	} else {
		/* ATA/STP: hand the frame length and ending FIS back to
		 * the upper layer in ts->buf. */
		struct ata_task_resp *resp = (void *) &ts->buf[0];

		ts->residual = le32_to_cpu(*(__le32 *)r);

		if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
			resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
			memcpy(&resp->ending_fis[0], r+16, 24);
			ts->buf_valid_size = sizeof(*resp);
		}
	}

	asd_invalidate_edb(escb, edb_id);
}
220
/* Completion tasklet for regular (ATA/SMP/SSP) tasks.
 *
 * Translates the done-list opcode into libsas response/status codes,
 * unmaps the task's DMA resources, and either completes the ascb (when
 * the upper layer already aborted the task) or frees it and calls
 * task->task_done().
 */
static void asd_task_tasklet_complete(struct asd_ascb *ascb,
				      struct done_list_struct *dl)
{
	struct sas_task *task = ascb->uldd_task;
	struct task_status_struct *ts = &task->task_status;
	unsigned long flags;
	u8 opcode = dl->opcode;

	/* This ascb no longer occupies a queue slot. */
	asd_can_dequeue(ascb->ha, 1);

Again:
	switch (opcode) {
	case TC_NO_ERROR:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAM_GOOD;
		break;
	case TC_UNDERRUN:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
		break;
	case TC_OVERRUN:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		ts->residual = 0;
		break;
	case TC_SSP_RESP:
	case TC_ATA_RESP:
		/* Protocol response frame available in an EDB. */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_PROTO_RESPONSE;
		asd_get_response_tasklet(ascb, dl);
		break;
	case TF_OPEN_REJECT:
		/* Decode OPEN REJECT reason from the status block bits. */
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		if (dl->status_block[1] & 2)
			ts->open_rej_reason = 1 + dl->status_block[2];
		else if (dl->status_block[1] & 1)
			ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
		else
			ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case TF_OPEN_TO:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_TO;
		break;
	case TF_PHY_DOWN:
	case TU_PHY_DOWN:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		break;
	case TI_PHY_DOWN:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_PHY_DOWN;
		break;
	case TI_BREAK:
	case TI_PROTO_ERR:
	case TI_NAK:
	case TI_ACK_NAK_TO:
	case TF_SMP_XMIT_RCV_ERR:
	case TC_ATA_R_ERR_RECV:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_INTERRUPTED;
		break;
	case TF_BREAK:
	case TU_BREAK:
	case TU_ACK_NAK_TO:
	case TF_SMPRSP_TO:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		break;
	case TF_NAK_RECV:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case TA_I_T_NEXUS_LOSS:
		/* Real completion code is in the status block; re-decode. */
		opcode = dl->status_block[0];
		goto Again;
		break;
	case TF_INV_CONN_HANDLE:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEVICE_UNKNOWN;
		break;
	case TF_REQUESTED_N_PENDING:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PENDING;
		break;
	case TC_TASK_CLEARED:
	case TA_ON_REQ:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		break;

	case TF_NO_SMP_CONN:
	case TF_TMF_NO_CTX:
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE:
	case TF_TMF_TASK_DONE:
	case TF_TMF_NO_CONN_HANDLE:
	case TF_IRTT_TO:
	case TF_IU_SHORT:
	case TF_DATA_OFFS_ERR:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		break;

	case TC_LINK_ADM_RESP:
	case TC_CONTROL_PHY:
	case TC_RESUME:
	case TC_PARTIAL_SG_LIST:
	default:
		ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode);
		break;
	}

	/* Release protocol-specific DMA mappings. */
	switch (task->task_proto) {
	case SATA_PROTO:
	case SAS_PROTO_STP:
		asd_unbuild_ata_ascb(ascb);
		break;
	case SAS_PROTO_SMP:
		asd_unbuild_smp_ascb(ascb);
		break;
	case SAS_PROTO_SSP:
		asd_unbuild_ssp_ascb(ascb);
		/* fallthrough */
	default:
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		/* Aborting path owns the task; just signal the waiter. */
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
			    "stat 0x%x but aborted by upper layer!\n",
			    task, opcode, ts->resp, ts->stat);
		complete(&ascb->completion);
	} else {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		task->lldd_task = NULL;
		asd_ascb_free(ascb);
		/* Ensure the ascb teardown is visible before completion. */
		mb();
		task->task_done(task);
	}
}
367
368/* ---------- ATA ---------- */
369
/* Fill @ascb's SCB for an ATA/ATAPI task (or a device-control register
 * update) and map the task's data buffers.
 *
 * Returns 0 on success or a negative error from scatterlist mapping.
 */
static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
			      unsigned long gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct scb *scb;
	u8 flags;
	int res = 0;

	scb = ascb->scb;

	/* Pick the SCB opcode: control-register write, ATA, or ATAPI. */
	if (unlikely(task->ata_task.device_control_reg_update))
		scb->header.opcode = CONTROL_ATA_DEV;
	else if (dev->sata_dev.command_set == ATA_COMMAND_SET)
		scb->header.opcode = INITIATE_ATA_TASK;
	else
		scb->header.opcode = INITIATE_ATAPI_TASK;

	scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
	if (dev->port->oob_mode == SAS_OOB_MODE)
		scb->ata_task.proto_conn_rate |= dev->linkrate;

	scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
	scb->ata_task.fis = task->ata_task.fis;
	scb->ata_task.fis.fis_type = 0x27;	/* host-to-device FIS type */
	if (likely(!task->ata_task.device_control_reg_update))
		scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
		       16);
	scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->ata_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)dev->lldd_dev);

	if (likely(!task->ata_task.device_control_reg_update)) {
		/* Transfer-mode flags: DMA vs PIO, NCQ, data direction. */
		flags = 0;
		if (task->ata_task.dma_xfer)
			flags |= DATA_XFER_MODE_DMA;
		if (task->ata_task.use_ncq &&
		    dev->sata_dev.command_set != ATAPI_COMMAND_SET)
			flags |= ATA_Q_TYPE_NCQ;
		flags |= data_dir_flags[task->data_dir];
		scb->ata_task.ata_flags = flags;

		scb->ata_task.retry_count = task->ata_task.retry_count;

		/* Affiliation-policy flags. */
		flags = 0;
		if (task->ata_task.set_affil_pol)
			flags |= SET_AFFIL_POLICY;
		if (task->ata_task.stp_affil_pol)
			flags |= STP_AFFIL_POLICY;
		scb->ata_task.flags = flags;
	}
	ascb->tasklet_complete = asd_task_tasklet_complete;

	/* Register-update SCBs carry no data, so skip SG mapping. */
	if (likely(!task->ata_task.device_control_reg_update))
		res = asd_map_scatterlist(task, scb->ata_task.sg_element,
					  gfp_flags);

	return res;
}
431
/* Undo asd_build_ata_ascb(): release the task's DMA mappings. */
static void asd_unbuild_ata_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}
436
437/* ---------- SMP ---------- */
438
439static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
440 unsigned long gfp_flags)
441{
442 struct asd_ha_struct *asd_ha = ascb->ha;
443 struct domain_device *dev = task->dev;
444 struct scb *scb;
445
446 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
447 PCI_DMA_FROMDEVICE);
448 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
449 PCI_DMA_FROMDEVICE);
450
451 scb = ascb->scb;
452
453 scb->header.opcode = INITIATE_SMP_TASK;
454
455 scb->smp_task.proto_conn_rate = dev->linkrate;
456
457 scb->smp_task.smp_req.bus_addr =
458 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
459 scb->smp_task.smp_req.size =
460 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
461
462 scb->smp_task.smp_resp.bus_addr =
463 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
464 scb->smp_task.smp_resp.size =
465 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
466
467 scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
468 scb->smp_task.conn_handle = cpu_to_le16((u16)
469 (unsigned long)dev->lldd_dev);
470
471 ascb->tasklet_complete = asd_task_tasklet_complete;
472
473 return 0;
474}
475
476static void asd_unbuild_smp_ascb(struct asd_ascb *a)
477{
478 struct sas_task *task = a->uldd_task;
479
480 BUG_ON(!task);
481 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
482 PCI_DMA_FROMDEVICE);
483 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
484 PCI_DMA_FROMDEVICE);
485}
486
487/* ---------- SSP ---------- */
488
489static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
490 unsigned long gfp_flags)
491{
492 struct domain_device *dev = task->dev;
493 struct scb *scb;
494 int res = 0;
495
496 scb = ascb->scb;
497
498 scb->header.opcode = INITIATE_SSP_TASK;
499
500 scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */
501 scb->ssp_task.proto_conn_rate |= dev->linkrate;
502 scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
503 scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
504 memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
505 HASHED_SAS_ADDR_SIZE);
506 memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
507 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
508 scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
509
510 memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
511 if (task->ssp_task.enable_first_burst)
512 scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
513 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
514 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
515 memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16);
516
517 scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
518 scb->ssp_task.conn_handle = cpu_to_le16(
519 (u16)(unsigned long)dev->lldd_dev);
520 scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
521 scb->ssp_task.retry_count = scb->ssp_task.retry_count;
522
523 ascb->tasklet_complete = asd_task_tasklet_complete;
524
525 res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
526
527 return res;
528}
529
/* Undo asd_build_ssp_ascb(): release the task's DMA mappings. */
static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}
534
535/* ---------- Execute Task ---------- */
536
537static inline int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
538{
539 int res = 0;
540 unsigned long flags;
541
542 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
543 if ((asd_ha->seq.can_queue - num) < 0)
544 res = -SAS_QUEUE_FULL;
545 else
546 asd_ha->seq.can_queue -= num;
547 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
548
549 return res;
550}
551
/* libsas entry point: queue @num sas_tasks (linked via task->list
 * starting at @task) to the hardware.
 *
 * Allocates a chain of ascbs, pairs each with a task, builds the
 * protocol-specific SCB for each, and posts the whole list.  On any
 * build/post failure every already-built ascb is unwound and the
 * queue-slot reservation is returned.
 *
 * Returns 0 on success, -ENOMEM or -SAS_QUEUE_FULL on failure.
 */
int asd_execute_task(struct sas_task *task, const int num,
		     unsigned long gfp_flags)
{
	int res = 0;
	LIST_HEAD(alist);
	struct sas_task *t = task;
	struct asd_ascb *ascb = NULL, *a;
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;

	res = asd_can_queue(asd_ha, num);
	if (res)
		return res;

	res = num;
	ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
	if (res) {
		res = -ENOMEM;
		goto out_err;
	}

	/* Splice the ascb chain onto a local list head so we can walk it. */
	__list_add(&alist, ascb->list.prev, &ascb->list);
	/* Pair each ascb with its task, walking both lists in step. */
	list_for_each_entry(a, &alist, list) {
		a->uldd_task = t;
		t->lldd_task = a;
		t = list_entry(t->list.next, struct sas_task, list);
	}
	list_for_each_entry(a, &alist, list) {
		t = a->uldd_task;
		a->uldd_timer = 1;
		/* Collapse any STP-capable protocol mask to plain STP. */
		if (t->task_proto & SAS_PROTO_STP)
			t->task_proto = SAS_PROTO_STP;
		switch (t->task_proto) {
		case SATA_PROTO:
		case SAS_PROTO_STP:
			res = asd_build_ata_ascb(a, t, gfp_flags);
			break;
		case SAS_PROTO_SMP:
			res = asd_build_smp_ascb(a, t, gfp_flags);
			break;
		case SAS_PROTO_SSP:
			res = asd_build_ssp_ascb(a, t, gfp_flags);
			break;
		default:
			asd_printk("unknown sas_task proto: 0x%x\n",
				   t->task_proto);
			res = -ENOMEM;
			break;
		}
		if (res)
			goto out_err_unmap;
	}
	list_del_init(&alist);

	res = asd_post_ascb_list(asd_ha, ascb, num);
	if (unlikely(res)) {
		/* Post failed: everything was built, so unwind all of it
		 * (a == NULL means "no stop marker" below). */
		a = NULL;
		__list_add(&alist, ascb->list.prev, &ascb->list);
		goto out_err_unmap;
	}

	return 0;
out_err_unmap:
	{
		/* Unbuild every ascb built before the failing one (@a). */
		struct asd_ascb *b = a;
		list_for_each_entry(a, &alist, list) {
			if (a == b)
				break;
			t = a->uldd_task;
			switch (t->task_proto) {
			case SATA_PROTO:
			case SAS_PROTO_STP:
				asd_unbuild_ata_ascb(a);
				break;
			case SAS_PROTO_SMP:
				asd_unbuild_smp_ascb(a);
				break;
			case SAS_PROTO_SSP:
				asd_unbuild_ssp_ascb(a);
				/* fallthrough */
			default:
				break;
			}
			t->lldd_task = NULL;
		}
	}
	list_del_init(&alist);
out_err:
	if (ascb)
		asd_ascb_free_list(ascb);
	asd_can_dequeue(asd_ha, num);
	return res;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
new file mode 100644
index 000000000000..61234384503b
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -0,0 +1,636 @@
1/*
2 * Aic94xx Task Management Functions
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/spinlock.h>
28#include "aic94xx.h"
29#include "aic94xx_sas.h"
30#include "aic94xx_hwi.h"
31
32/* ---------- Internal enqueue ---------- */
33
/* Post a single driver-internal ascb with a completion handler and a
 * timeout.
 *
 * Arms @ascb's timer with AIC94XX_SCB_TIMEOUT before posting; if the
 * post fails the timer is disarmed again.  Returns the result of
 * asd_post_ascb_list() (0 on success).
 */
static int asd_enqueue_internal(struct asd_ascb *ascb,
		void (*tasklet_complete)(struct asd_ascb *,
					 struct done_list_struct *),
		void (*timed_out)(unsigned long))
{
	int res;

	ascb->tasklet_complete = tasklet_complete;
	ascb->uldd_timer = 1;

	ascb->timer.data = (unsigned long) ascb;
	ascb->timer.function = timed_out;
	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

	add_timer(&ascb->timer);

	res = asd_post_ascb_list(ascb->ha, ascb, 1);
	if (unlikely(res))
		del_timer(&ascb->timer);
	return res;
}
55
/* Common timeout bookkeeping: remove the timed-out ascb from the
 * pending queue and drop the pending count.  Runs in timer context;
 * callers then set their own result and complete the waiter.
 */
static inline void asd_timedout_common(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;
	struct asd_seq_data *seq = &ascb->ha->seq;
	unsigned long flags;

	spin_lock_irqsave(&seq->pend_q_lock, flags);
	seq->pending--;
	list_del_init(&ascb->list);
	spin_unlock_irqrestore(&seq->pend_q_lock, flags);
}
67
68/* ---------- CLEAR NEXUS ---------- */
69
/* Completion handler for CLEAR NEXUS ascbs: stash the done-list opcode
 * in uldd_task (read back by CLEAR_NEXUS_POST) and wake the waiter.
 * If the timer could not be deleted, the timeout handler has already
 * taken over and will complete instead.
 */
static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	ASD_DPRINTK("%s: here\n", __FUNCTION__);
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
	ascb->uldd_task = (void *) (unsigned long) dl->opcode;
	complete(&ascb->completion);
}
82
/* Timeout handler for CLEAR NEXUS ascbs: report failure via uldd_task
 * and wake the waiter in CLEAR_NEXUS_POST.
 */
static void asd_clear_nexus_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;

	ASD_DPRINTK("%s: here\n", __FUNCTION__);
	asd_timedout_common(data);
	ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
	complete(&ascb->completion);
}
92
/*
 * CLEAR_NEXUS_PRE/POST: shared prologue/epilogue used by the
 * asd_clear_nexus_*() functions below.  PRE allocates one ascb and sets
 * the CLEAR_NEXUS SCB opcode; POST posts it via asd_enqueue_internal(),
 * waits for completion, maps TC_NO_ERROR to TMF_RESP_FUNC_COMPLETE and
 * frees the ascb.  Both rely on locals declared by the caller
 * (asd_ha, ascb, scb, res); POST supplies the out_err label and the
 * function's return statement.
 */
#define CLEAR_NEXUS_PRE         \
	ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
        res = 1;                \
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb)              \
		return -ENOMEM; \
                                \
	scb = ascb->scb;        \
	scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST        \
	ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout);              \
	if (res)                \
		goto out_err;   \
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
	wait_for_completion(&ascb->completion); \
	res = (int) (unsigned long) ascb->uldd_task; \
	if (res == TC_NO_ERROR) \
		res = TMF_RESP_FUNC_COMPLETE; \
out_err:                        \
	asd_ascb_free(ascb);    \
	return res
117
/* Clear the nexus for the whole adapter.  Returns a TMF_RESP_* code
 * or -ENOMEM (see CLEAR_NEXUS_PRE/POST).
 */
int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
	struct asd_ascb *ascb;
	struct scb *scb;
	int res;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}
129
/* Clear the nexus for one port, selected by its phy mask.  Returns a
 * TMF_RESP_* code or -ENOMEM.
 */
int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
	struct asd_ascb *ascb;
	struct scb *scb;
	int res;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}
142
/* Currently unused: clear the I_T nexus for one device.  Kept under
 * #if 0 for reference; enable and wire up a caller before use.
 */
#if 0
static int asd_clear_nexus_I_T(struct domain_device *dev)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	struct scb *scb;
	int res;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	if (dev->tproto)
		scb->clear_nexus.flags |= SUSPEND_TX;
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
#endif
161
/* Clear the I_T_L nexus (device + LUN).  @lun is the 8-byte SAS LUN.
 * Returns a TMF_RESP_* code or -ENOMEM.
 */
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	struct scb *scb;
	int res;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	/* Suspend transmission toward target-mode-capable devices. */
	if (dev->tproto)
		scb->clear_nexus.flags |= SUSPEND_TX;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
179
/* Clear the nexus of a single tagged task, identified by the tag the
 * TMF response carried back.  Returns a TMF_RESP_* code or -ENOMEM.
 */
static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ascb *ascb;
	struct scb *scb;
	int res;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							  task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
197
/* Clear the nexus of a single task by its transaction-context index
 * (used when no valid tag is known).  Returns a TMF_RESP_* code or
 * -ENOMEM.
 */
static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ascb *ascb;
	struct scb *scb;
	int res;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							  task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}
214
215/* ---------- TMFs ---------- */
216
/* Timeout handler for TMF ascbs: record failure in uldd_task and wake
 * the waiter in asd_abort_task()/asd_initiate_ssp_tmf().
 */
static void asd_tmf_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;

	ASD_DPRINTK("tmf timed out\n");
	asd_timedout_common(data);
	ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
	complete(&ascb->completion);
}
226
/* Parse the SSP RESPONSE frame for a TMF out of the EDB referenced by
 * @dl, record the responded-to tag in @ascb, and return the TMF result
 * (the response IU status, or its response-data code when present).
 * Returns TMF_RESP_FUNC_FAILED if the owning ESCB cannot be found.
 */
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8 len_lsb;
		u8 flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

	/* EDB index within the ESCB, biased by one in the hardware field. */
	int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu *ru;
	int res = TMF_RESP_FUNC_FAILED;

	ASD_DPRINTK("tmf resp tasklet\n");

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}

	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	/* Tag of the task the TMF responded to, at byte offset 4. */
	ascb->tag = *(__be16 *)(edb->vaddr+4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == 1)	/* Response data present */
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;

	asd_invalidate_edb(escb, edb_id);
	return res;
}
272
273static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
274 struct done_list_struct *dl)
275{
276 if (!del_timer(&ascb->timer))
277 return;
278
279 ASD_DPRINTK("tmf tasklet complete\n");
280
281 if (dl->opcode == TC_SSP_RESP)
282 ascb->uldd_task = (void *) (unsigned long)
283 asd_get_tmf_resp_tasklet(ascb, dl);
284 else
285 ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode;
286
287 complete(&ascb->completion);
288}
289
/* Clear the nexus of a task that is not yet done: by tag when the TMF
 * response supplied a valid one, otherwise by TC index.  Then wait
 * (bounded) for the task to complete; returns TMF_RESP_FUNC_COMPLETE
 * once SAS_TASK_STATE_DONE is observed, else the clear-nexus result.
 */
static inline int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct asd_ascb *tascb = task->lldd_task;
	unsigned long flags;

	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	wait_for_completion_timeout(&tascb->completion,
				    AIC94XX_SCB_TIMEOUT);
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}
311
312/**
313 * asd_abort_task -- ABORT TASK TMF
314 * @task: the task to be aborted
315 *
316 * Before calling ABORT TASK the task state flags should be ORed with
317 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
318 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
319 *
320 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
321 * Returns: SAS TMF responses (see sas_task.h),
322 * -ENOMEM,
323 * -SAS_QUEUE_FULL.
324 *
325 * When ABORT TASK returns, the caller of ABORT TASK checks first the
326 * task->task_state_flags, and then the return value of ABORT TASK.
327 *
328 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
329 * task was completed successfully prior to it being aborted. The
330 * caller of ABORT TASK has responsibility to call task->task_done()
331 * xor free the task, depending on their framework. The return code
332 * is TMF_RESP_FUNC_FAILED in this case.
333 *
334 * Else the SAS_TASK_STATE_DONE bit is not set,
335 * If the return code is TMF_RESP_FUNC_COMPLETE, then
336 * the task was aborted successfully. The caller of
337 * ABORT TASK has responsibility to call task->task_done()
338 * to finish the task, xor free the task depending on their
339 * framework.
340 * else
341 * the ABORT TASK returned some kind of error. The task
342 * was _not_ cancelled. Nothing can be assumed.
343 * The caller of ABORT TASK may wish to retry.
344 */
int asd_abort_task(struct sas_task *task)
{
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = tascb->ha;
	int res = 1;
	unsigned long flags;
	struct asd_ascb *ascb = NULL;
	struct scb *scb;

	/* Task already completed before we got here: nothing to abort. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* Build an ABORT TASK SCB addressed at the same device. */
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;
	scb = ascb->scb;

	scb->header.opcode = ABORT_TASK;

	switch (task->task_proto) {
	case SATA_PROTO:
	case SAS_PROTO_STP:
		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
		break;
	case SAS_PROTO_SSP:
		scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
		break;
	case SAS_PROTO_SMP:
		break;
	default:
		break;
	}

	if (task->task_proto == SAS_PROTO_SSP) {
		/* SSP aborts carry a TASK frame with the ABORT TASK TMF. */
		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
		       task->dev->port->ha->hashed_sas_addr,
		       HASHED_SAS_ADDR_SIZE);
		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
	}

	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->abort_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)task->dev->lldd_dev);
	scb->abort_task.retry_count = 1;
	/* Index of the transaction context being aborted. */
	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out;
	wait_for_completion(&ascb->completion);
	ASD_DPRINTK("tmf came back\n");

	/* Result encoded by asd_tmf_tasklet_complete(): either a
	 * TMF_RESP_* value or 0xFF00 + firmware opcode. */
	res = (int) (unsigned long) ascb->uldd_task;
	tascb->tag = ascb->tag;
	tascb->tag_valid = ascb->tag_valid;

	/* The task may have completed while the TMF was in flight. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	switch (res) {
	/* The task to be aborted has been sent to the device.
	 * We got a Response IU for the ABORT TASK TMF. */
	case TC_NO_ERROR + 0xFF00:
	case TMF_RESP_FUNC_COMPLETE:
	case TMF_RESP_FUNC_FAILED:
		res = asd_clear_nexus(task);
		break;
	case TMF_RESP_INVALID_FRAME:
	case TMF_RESP_OVERLAPPED_TAG:
	case TMF_RESP_FUNC_ESUPP:
	case TMF_RESP_NO_LUN:
		goto out_done; break;
	}
	/* In the following we assume that the managing layer
	 * will _never_ make a mistake, when issuing ABORT TASK.
	 */
	switch (res) {
	default:
		res = asd_clear_nexus(task);
		/* fallthrough */
	case TC_NO_ERROR + 0xFF00:
	case TMF_RESP_FUNC_COMPLETE:
		break;
	/* The task hasn't been sent to the device xor we never got
	 * a (sane) Response IU for the ABORT TASK TMF.
	 */
	case TF_NAK_RECV + 0xFF00:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE + 0xFF00:	/* done but not reported yet */
		res = TMF_RESP_FUNC_FAILED;
		wait_for_completion_timeout(&tascb->completion,
					    AIC94XX_SCB_TIMEOUT);
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE)
			res = TMF_RESP_FUNC_COMPLETE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		goto out_done;
	case TF_TMF_NO_TAG + 0xFF00:
	case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		goto out_done;
	case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		goto out;
	}
out_done:
	/* Successful abort: the task's ascb is ours to free. */
	if (res == TMF_RESP_FUNC_COMPLETE) {
		task->lldd_task = NULL;
		mb();
		asd_ascb_free(tascb);
	}
out:
	asd_ascb_free(ascb);
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;
}
485
486/**
487 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
488 * @dev: pointer to struct domain_device of interest
489 * @lun: pointer to u8[8] which is the LUN
490 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
491 * @index: the transaction context of the task to be queried if QT TMF
492 *
493 * This function is used to send ABORT TASK SET, CLEAR ACA,
494 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
495 *
496 * No SCBs should be queued to the I_T_L nexus when this SCB is
497 * pending.
498 *
499 * Returns: TMF response code (see sas_task.h or the SAS spec)
500 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;

	/* TMFs are only defined for the SSP transport protocol. */
	if (!(dev->tproto & SAS_PROTO_SSP))
		return TMF_RESP_FUNC_ESUPP;

	/* Allocate a single internal ascb/SCB pair to carry the TMF. */
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;
	scb = ascb->scb;

	/* QUERY TASK has a dedicated opcode; all other TMFs share
	 * INITIATE_SSP_TMF and differ only in the ssp_task.tmf byte. */
	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
					      dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	/* Only QUERY TASK targets a specific transaction context. */
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	/* Queue the SCB and sleep until the completion tasklet or the
	 * timeout handler signals ascb->completion. */
	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&ascb->completion);
	/* Completion status is stashed in ascb->uldd_task -- presumably
	 * by asd_tmf_tasklet_complete: either a raw TMF response code or
	 * a sequencer opcode biased by 0xFF00 (TODO confirm in tasklet). */
	res = (int) (unsigned long) ascb->uldd_task;

	/* Map sequencer completion codes onto TMF response codes. */
	switch (res) {
	case TC_NO_ERROR + 0xFF00:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV + 0xFF00:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE + 0xFF00:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG + 0xFF00:
	case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n",
			    __FUNCTION__, res);
		res = TMF_RESP_FUNC_FAILED;
		break;
	}
out_err:
	asd_ascb_free(ascb);
	return res;
}
578
579int asd_abort_task_set(struct domain_device *dev, u8 *lun)
580{
581 int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
582
583 if (res == TMF_RESP_FUNC_COMPLETE)
584 asd_clear_nexus_I_T_L(dev, lun);
585 return res;
586}
587
588int asd_clear_aca(struct domain_device *dev, u8 *lun)
589{
590 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
591
592 if (res == TMF_RESP_FUNC_COMPLETE)
593 asd_clear_nexus_I_T_L(dev, lun);
594 return res;
595}
596
597int asd_clear_task_set(struct domain_device *dev, u8 *lun)
598{
599 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
600
601 if (res == TMF_RESP_FUNC_COMPLETE)
602 asd_clear_nexus_I_T_L(dev, lun);
603 return res;
604}
605
606int asd_lu_reset(struct domain_device *dev, u8 *lun)
607{
608 int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
609
610 if (res == TMF_RESP_FUNC_COMPLETE)
611 asd_clear_nexus_I_T_L(dev, lun);
612 return res;
613}
614
615/**
616 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
617 * task: pointer to sas_task struct of interest
618 *
619 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
620 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
621 *
622 * Normally the management layer sets the task to aborted state,
623 * and then calls query task and then abort task.
624 */
625int asd_query_task(struct sas_task *task)
626{
627 struct asd_ascb *ascb = task->lldd_task;
628 int index;
629
630 if (ascb) {
631 index = ascb->tc_index;
632 return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
633 TMF_QUERY_TASK, index);
634 }
635 return TMF_RESP_FUNC_COMPLETE;
636}
diff --git a/drivers/scsi/arcmsr/Makefile b/drivers/scsi/arcmsr/Makefile
new file mode 100644
index 000000000000..721aced39168
--- /dev/null
+++ b/drivers/scsi/arcmsr/Makefile
@@ -0,0 +1,6 @@
1# File: drivers/arcmsr/Makefile
2# Makefile for the ARECA PCI-X PCI-EXPRESS SATA RAID controllers SCSI driver.
3
4arcmsr-objs := arcmsr_attr.o arcmsr_hba.o
5
6obj-$(CONFIG_SCSI_ARCMSR) := arcmsr.o
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
new file mode 100644
index 000000000000..aff96db9ccf6
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -0,0 +1,472 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr.h
5** BY : Erich Chen
6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter
8*******************************************************************************
9** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved.
10**
11** Web site: www.areca.com.tw
12** E-mail: erich@areca.com.tw
13**
14** This program is free software; you can redistribute it and/or modify
15** it under the terms of the GNU General Public License version 2 as
16** published by the Free Software Foundation.
17** This program is distributed in the hope that it will be useful,
18** but WITHOUT ANY WARRANTY; without even the implied warranty of
19** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20** GNU General Public License for more details.
21*******************************************************************************
22** Redistribution and use in source and binary forms, with or without
23** modification, are permitted provided that the following conditions
24** are met:
25** 1. Redistributions of source code must retain the above copyright
26** notice, this list of conditions and the following disclaimer.
27** 2. Redistributions in binary form must reproduce the above copyright
28** notice, this list of conditions and the following disclaimer in the
29** documentation and/or other materials provided with the distribution.
30** 3. The name of the author may not be used to endorse or promote products
31** derived from this software without specific prior written permission.
32**
33** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
38** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
40** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41**(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
42** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43*******************************************************************************
44*/
45#include <linux/interrupt.h>
46
47struct class_device_attribute;
48
49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 288
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.13"
52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_TARGETID 17
55#define ARCMSR_MAX_TARGETLUN 8
56#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
57#define ARCMSR_MAX_QBUFFER 4096
58#define ARCMSR_MAX_SG_ENTRIES 38
59
60/*
61*******************************************************************************
62** split 64bits dma addressing
63*******************************************************************************
64*/
/* Split a (possibly 64-bit) DMA address into 32-bit halves.  The
 * double 16-bit shift avoids an undefined >>32 when dma_addr_t is only
 * 32 bits wide.  Arguments and expansions are fully parenthesized so
 * compound arguments (ternaries, bitwise ops) cannot rebind against
 * the shift/mask operators. */
#define dma_addr_hi32(addr) ((uint32_t)(((addr) >> 16) >> 16))
#define dma_addr_lo32(addr) ((uint32_t)((addr) & 0xffffffff))
67/*
68*******************************************************************************
69** MESSAGE CONTROL CODE
70*******************************************************************************
71*/
/* Control header of an IOP message exchanged with user space; the full
 * packet (header + payload) is struct CMD_MESSAGE_FIELD below. */
struct CMD_MESSAGE
{
	uint32_t HeaderLength;
	uint8_t Signature[8];
	uint32_t Timeout;
	uint32_t ControlCode;	/* one of the ARCMSR_MESSAGE_* codes */
	uint32_t ReturnCode;	/* ARCMSR_MESSAGE_RETURNCODE_* */
	uint32_t Length;	/* valid bytes in the payload buffer */
};
81/*
82*******************************************************************************
83** IOP Message Transfer Data for user space
84*******************************************************************************
85*/
/* Complete IOP message as seen by user space: control header plus up
 * to 1032 payload bytes (matches the sysfs mu_read/mu_write size). */
struct CMD_MESSAGE_FIELD
{
	struct CMD_MESSAGE cmdmessage;
	uint8_t messagedatabuffer[1032];
};
91/* IOP message transfer */
92#define ARCMSR_MESSAGE_FAIL 0x0001
93/* DeviceType */
94#define ARECA_SATA_RAID 0x90000000
95/* FunctionCode */
96#define FUNCTION_READ_RQBUFFER 0x0801
97#define FUNCTION_WRITE_WQBUFFER 0x0802
98#define FUNCTION_CLEAR_RQBUFFER 0x0803
99#define FUNCTION_CLEAR_WQBUFFER 0x0804
100#define FUNCTION_CLEAR_ALLQBUFFER 0x0805
101#define FUNCTION_RETURN_CODE_3F 0x0806
102#define FUNCTION_SAY_HELLO 0x0807
103#define FUNCTION_SAY_GOODBYE 0x0808
104#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
105/* ARECA IO CONTROL CODE*/
106#define ARCMSR_MESSAGE_READ_RQBUFFER \
107 ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
108#define ARCMSR_MESSAGE_WRITE_WQBUFFER \
109 ARECA_SATA_RAID | FUNCTION_WRITE_WQBUFFER
110#define ARCMSR_MESSAGE_CLEAR_RQBUFFER \
111 ARECA_SATA_RAID | FUNCTION_CLEAR_RQBUFFER
112#define ARCMSR_MESSAGE_CLEAR_WQBUFFER \
113 ARECA_SATA_RAID | FUNCTION_CLEAR_WQBUFFER
114#define ARCMSR_MESSAGE_CLEAR_ALLQBUFFER \
115 ARECA_SATA_RAID | FUNCTION_CLEAR_ALLQBUFFER
116#define ARCMSR_MESSAGE_RETURN_CODE_3F \
117 ARECA_SATA_RAID | FUNCTION_RETURN_CODE_3F
118#define ARCMSR_MESSAGE_SAY_HELLO \
119 ARECA_SATA_RAID | FUNCTION_SAY_HELLO
120#define ARCMSR_MESSAGE_SAY_GOODBYE \
121 ARECA_SATA_RAID | FUNCTION_SAY_GOODBYE
122#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \
123 ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE
124/* ARECA IOCTL ReturnCode */
125#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
126#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
127#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
128/*
129*************************************************************
130** structure for holding DMA address data
131*************************************************************
132*/
133#define IS_SG64_ADDR 0x01000000 /* bit24 */
/* Scatter/gather element holding a 32-bit DMA address. */
struct SG32ENTRY
{
	uint32_t length;	/* transfer length in bytes */
	uint32_t address;
};
/* Scatter/gather element holding a 64-bit DMA address split into low
 * (address) and high (addresshigh) 32-bit halves. */
struct SG64ENTRY
{
	uint32_t length;	/* transfer length in bytes */
	uint32_t address;
	uint32_t addresshigh;
};
/* Either SG flavor.  NOTE(review): which flavor is in use appears to
 * be flagged per command via IS_SG64_ADDR (bit 24) -- confirm against
 * the CDB-building code in arcmsr_hba.c. */
struct SGENTRY_UNION
{
	union
	{
		struct SG32ENTRY sg32entry;
		struct SG64ENTRY sg64entry;
	}u;
};
153/*
154********************************************************************
155** Q Buffer of IOP Message Transfer
156********************************************************************
157*/
/* Message-unit queue buffer: a byte count followed by the data. */
struct QBUFFER
{
	uint32_t data_len;	/* number of valid bytes in data[] */
	uint8_t data[124];
};
163/*
164*******************************************************************************
165** FIRMWARE INFO
166*******************************************************************************
167*/
/* Firmware/adapter identification block; trailing comments give the
 * dword index and byte offsets of each field.
 * NOTE(review): presumably returned by the GET_CONFIG message into
 * message_rwbuffer -- confirm against arcmsr_hba.c. */
struct FIRMWARE_INFO
{
	uint32_t signature;		/*0, 00-03*/
	uint32_t request_len;		/*1, 04-07*/
	uint32_t numbers_queue;		/*2, 08-11*/
	uint32_t sdram_size;		/*3, 12-15*/
	uint32_t ide_channels;		/*4, 16-19*/
	char vendor[40];		/*5, 20-59*/
	char model[8];			/*15, 60-67*/
	char firmware_ver[16];		/*17, 68-83*/
	char device_map[16];		/*21, 84-99*/
};
180/* signature of set and get firmware config */
181#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
182#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
183/* message code of inbound message register */
184#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
185#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
186#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
187#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
188#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
189#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
190#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
191#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
192#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
193/* doorbell interrupt generator */
194#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
195#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
196#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
197#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
198/* ccb areca cdb flag */
199#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
200#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
201#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
202#define ARCMSR_CCBREPLY_FLAG_ERROR 0x10000000
203/* outbound firmware ok */
204#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
205/*
206*******************************************************************************
207** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
208*******************************************************************************
209*/
/* Areca command descriptor block posted to the adapter (504 bytes, see
 * the banner above).  The interleaved #defines document the legal
 * values of the field immediately preceding them. */
struct ARCMSR_CDB
{
	uint8_t Bus;
	uint8_t TargetID;
	uint8_t LUN;
	uint8_t Function;

	uint8_t CdbLength;	/* valid bytes in Cdb[] */
	uint8_t sgcount;	/* entries used in u.sg32entry/sg64entry */
	uint8_t Flags;
#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
#define ARCMSR_CDB_FLAG_BIOS 0x02
#define ARCMSR_CDB_FLAG_WRITE 0x04
#define ARCMSR_CDB_FLAG_SIMPLEQ 0x00
#define ARCMSR_CDB_FLAG_HEADQ 0x08
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
	uint8_t Reserved1;

	uint32_t Context;
	uint32_t DataLength;

	uint8_t Cdb[16];	/* the SCSI CDB itself */

	uint8_t DeviceStatus;
#define ARCMSR_DEV_CHECK_CONDITION 0x02
#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
#define ARCMSR_DEV_ABORTED 0xF1
#define ARCMSR_DEV_INIT_FAIL 0xF2
	uint8_t SenseData[15];

	union
	{
		struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES];
		struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES];
	} u;
};
246/*
247*******************************************************************************
248** Messaging Unit (MU) of the Intel R 80331 I/O processor (80331)
249*******************************************************************************
250*/
/* Register and mailbox layout of the 80331 messaging unit as mapped
 * through acb->pmu; the trailing comments give the byte offsets of
 * each field from the mapped base. */
struct MessageUnit
{
	uint32_t resrved0[4];		/*0000 000F*/
	uint32_t inbound_msgaddr0;	/*0010 0013*/
	uint32_t inbound_msgaddr1;	/*0014 0017*/
	uint32_t outbound_msgaddr0;	/*0018 001B*/
	uint32_t outbound_msgaddr1;	/*001C 001F*/
	uint32_t inbound_doorbell;	/*0020 0023*/
	uint32_t inbound_intstatus;	/*0024 0027*/
	uint32_t inbound_intmask;	/*0028 002B*/
	uint32_t outbound_doorbell;	/*002C 002F*/
	uint32_t outbound_intstatus;	/*0030 0033*/
	uint32_t outbound_intmask;	/*0034 0037*/
	uint32_t reserved1[2];		/*0038 003F*/
	uint32_t inbound_queueport;	/*0040 0043*/
	uint32_t outbound_queueport;	/*0044 0047*/
	uint32_t reserved2[2];		/*0048 004F*/
	uint32_t reserved3[492];	/*0050 07FF 492*/
	uint32_t reserved4[128];	/*0800 09FF 128*/
	uint32_t message_rwbuffer[256];	/*0a00 0DFF 256*/
	uint32_t message_wbuffer[32];	/*0E00 0E7F 32*/
	uint32_t reserved5[32];		/*0E80 0EFF 32*/
	uint32_t message_rbuffer[32];	/*0F00 0F7F 32*/
	uint32_t reserved6[32];		/*0F80 0FFF 32*/
};
276/*
277*******************************************************************************
278** Adapter Control Block
279*******************************************************************************
280*/
/* Per-adapter driver state, stored in host->hostdata (see the sysfs
 * handlers in arcmsr_attr.c).  rqbuffer/wqbuffer with their
 * first/last indices form two circular byte queues (modulo
 * ARCMSR_MAX_QBUFFER) carrying IOP message data. */
struct AdapterControlBlock
{
	struct pci_dev * pdev;
	struct Scsi_Host * host;
	unsigned long vir2phy_offset;
	/* Offset is used in making arc cdb physical to virtual calculations */
	uint32_t outbound_int_enable;

	struct MessageUnit __iomem * pmu;
	/* message unit ATU inbound base address0 */

	uint32_t acb_flags;
#define ACB_F_SCSISTOPADAPTER 0x0001
#define ACB_F_MSG_STOP_BGRB 0x0002
	/* stop RAID background rebuild */
#define ACB_F_MSG_START_BGRB 0x0004
	/* start RAID background rebuild */
#define ACB_F_IOPDATA_OVERFLOW 0x0008
	/* iop message data rqbuffer overflow */
#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
	/* message clear wqbuffer */
#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
	/* message clear rqbuffer */
#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
#define ACB_F_BUS_RESET 0x0080
#define ACB_F_IOP_INITED 0x0100
	/* iop init */

	struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
	/* used for memory free */
	struct list_head ccb_free_list;
	/* head of free ccb list */
	atomic_t ccboutstandingcount;

	void * dma_coherent;
	/* dma_coherent used for memory free */
	dma_addr_t dma_coherent_handle;
	/* dma_coherent_handle used for memory free */

	uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
	/* data collection buffer for read from 80331 */
	int32_t rqbuf_firstindex;
	/* first of read buffer */
	int32_t rqbuf_lastindex;
	/* last of read buffer */
	uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
	/* data collection buffer for write to 80331 */
	int32_t wqbuf_firstindex;
	/* first of write buffer */
	int32_t wqbuf_lastindex;
	/* last of write buffer */
	uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
	/* id0 ..... id15, lun0...lun7 */
#define ARECA_RAID_GONE 0x55
#define ARECA_RAID_GOOD 0xaa
	uint32_t num_resets;
	uint32_t num_aborts;
	uint32_t firm_request_len;
	uint32_t firm_numbers_queue;
	uint32_t firm_sdram_size;
	uint32_t firm_hd_channels;
	char firm_model[12];
	char firm_version[20];
};/* HW_DEVICE_EXTENSION */
345/*
346*******************************************************************************
347** Command Control Block
348** this CCB length must be 32 bytes boundary
349*******************************************************************************
350*/
/* Driver-side command context: the ARCMSR_CDB handed to the adapter
 * plus host bookkeeping.  The #if branches pad the bookkeeping so the
 * CCB keeps a fixed, 32-byte-aligned size on both 32- and 64-bit
 * builds (byte offsets in the trailing comments). */
struct CommandControlBlock
{
	struct ARCMSR_CDB arcmsr_cdb;
	/*
	** 0-503 (size of CDB=504):
	** arcmsr messenger scsi command descriptor size 504 bytes
	*/
	uint32_t cdb_shifted_phyaddr;
	/* 504-507 */
	uint32_t reserved1;
	/* 508-511 */
#if BITS_PER_LONG == 64
	/* ======================512+64 bytes======================== */
	struct list_head list;
	/* 512-527 16 bytes next/prev ptrs for ccb lists */
	struct scsi_cmnd * pcmd;
	/* 528-535 8 bytes pointer of linux scsi command */
	struct AdapterControlBlock * acb;
	/* 536-543 8 bytes pointer of acb */

	uint16_t ccb_flags;
	/* 544-545 */
	#define CCB_FLAG_READ 0x0000
	#define CCB_FLAG_WRITE 0x0001
	#define CCB_FLAG_ERROR 0x0002
	#define CCB_FLAG_FLUSHCACHE 0x0004
	#define CCB_FLAG_MASTER_ABORTED 0x0008
	uint16_t startdone;
	/* 546-547 */
	#define ARCMSR_CCB_DONE 0x0000
	#define ARCMSR_CCB_START 0x55AA
	#define ARCMSR_CCB_ABORTED 0xAA55
	#define ARCMSR_CCB_ILLEGAL 0xFFFF
	uint32_t reserved2[7];
	/* 548-551 552-555 556-559 560-563 564-567 568-571 572-575 */
#else
	/* ======================512+32 bytes======================== */
	struct list_head list;
	/* 512-519 8 bytes next/prev ptrs for ccb lists */
	struct scsi_cmnd * pcmd;
	/* 520-523 4 bytes pointer of linux scsi command */
	struct AdapterControlBlock * acb;
	/* 524-527 4 bytes pointer of acb */

	uint16_t ccb_flags;
	/* 528-529 */
	#define CCB_FLAG_READ 0x0000
	#define CCB_FLAG_WRITE 0x0001
	#define CCB_FLAG_ERROR 0x0002
	#define CCB_FLAG_FLUSHCACHE 0x0004
	#define CCB_FLAG_MASTER_ABORTED 0x0008
	uint16_t startdone;
	/* 530-531 */
	#define ARCMSR_CCB_DONE 0x0000
	#define ARCMSR_CCB_START 0x55AA
	#define ARCMSR_CCB_ABORTED 0xAA55
	#define ARCMSR_CCB_ILLEGAL 0xFFFF
	uint32_t reserved2[3];
	/* 532-535 536-539 540-543 */
#endif
	/* ========================================================== */
};
413/*
414*******************************************************************************
415** ARECA SCSI sense data
416*******************************************************************************
417*/
/* Fixed-format SCSI sense data, decoded with bitfields.
 * NOTE(review): bitfield layout is implementation-defined; this
 * ordering assumes the usual little-endian GCC packing -- verify
 * before reusing on other compilers/architectures. */
struct SENSE_DATA
{
	uint8_t ErrorCode:7;
#define SCSI_SENSE_CURRENT_ERRORS 0x70
#define SCSI_SENSE_DEFERRED_ERRORS 0x71
	uint8_t Valid:1;
	uint8_t SegmentNumber;
	uint8_t SenseKey:4;
	uint8_t Reserved:1;
	uint8_t IncorrectLength:1;
	uint8_t EndOfMedia:1;
	uint8_t FileMark:1;
	uint8_t Information[4];
	uint8_t AdditionalSenseLength;
	uint8_t CommandSpecificInformation[4];
	uint8_t AdditionalSenseCode;
	uint8_t AdditionalSenseCodeQualifier;
	uint8_t FieldReplaceableUnitCode;
	uint8_t SenseKeySpecific[3];
};
438/*
439*******************************************************************************
440** Outbound Interrupt Status Register - OISR
441*******************************************************************************
442*/
443#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
444#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
445#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
446#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
447#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
448#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
449#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
450 (ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
451 |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
452 |ARCMSR_MU_OUTBOUND_DOORBELL_INT \
453 |ARCMSR_MU_OUTBOUND_POSTQUEUE_INT \
454 |ARCMSR_MU_OUTBOUND_PCI_INT)
455/*
456*******************************************************************************
457** Outbound Interrupt Mask Register - OIMR
458*******************************************************************************
459*/
460#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
461#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
462#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
463#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
464#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
465#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
466#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
467
468extern void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
469extern struct class_device_attribute *arcmsr_host_attrs[];
470extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb);
471void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
472
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
new file mode 100644
index 000000000000..12497da5529d
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -0,0 +1,381 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr_attr.c
5** BY : Erich Chen
6** Description: attributes exported to sysfs and device host
7*******************************************************************************
8** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
9**
10** Web site: www.areca.com.tw
11** E-mail: erich@areca.com.tw
12**
13** This program is free software; you can redistribute it and/or modify
14** it under the terms of the GNU General Public License version 2 as
15** published by the Free Software Foundation.
16** This program is distributed in the hope that it will be useful,
17** but WITHOUT ANY WARRANTY; without even the implied warranty of
18** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19** GNU General Public License for more details.
20*******************************************************************************
21** Redistribution and use in source and binary forms, with or without
22** modification, are permitted provided that the following conditions
23** are met:
24** 1. Redistributions of source code must retain the above copyright
25** notice, this list of conditions and the following disclaimer.
26** 2. Redistributions in binary form must reproduce the above copyright
27** notice, this list of conditions and the following disclaimer in the
28** documentation and/or other materials provided with the distribution.
29** 3. The name of the author may not be used to endorse or promote products
30** derived from this software without specific prior written permission.
31**
32** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42*******************************************************************************
43** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
45*******************************************************************************
46*/
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/errno.h>
51#include <linux/delay.h>
52#include <linux/pci.h>
53
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport.h>
58#include "arcmsr.h"
59
60struct class_device_attribute *arcmsr_host_attrs[];
61
/* sysfs "mu_read" handler: drain up to 1031 bytes from the driver's
 * circular rqbuffer into @buf; if the IOP had previously overflowed
 * the buffer (ACB_F_IOPDATA_OVERFLOW), also pull the bytes still
 * sitting in the message unit and ack via the inbound doorbell.
 * NOTE(review): @off and @count are ignored -- bytes drained beyond
 * the requested count are consumed but never reach the reader.
 * NOTE(review): rqbuf indices are touched without a lock; presumably
 * serialized against the ISR elsewhere -- confirm. */
static ssize_t
arcmsr_sysfs_iop_message_read(struct kobject *kobj, char *buf, loff_t off,
		size_t count)
{
	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
	struct Scsi_Host *host = class_to_shost(cdev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct MessageUnit __iomem *reg = acb->pmu;
	uint8_t *pQbuffer,*ptmpQbuffer;
	int32_t allxfer_len = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* do message unit read. */
	ptmpQbuffer = (uint8_t *)buf;
	/* Copy byte-by-byte from the circular queue until it is empty or
	 * 1031 bytes (one CMD_MESSAGE_FIELD payload) have been produced. */
	while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
		&& (allxfer_len < 1031)) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
		memcpy(ptmpQbuffer, pQbuffer, 1);
		acb->rqbuf_firstindex++;
		acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
		ptmpQbuffer++;
		allxfer_len++;
	}
	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
					&reg->message_rbuffer;
		uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
		int32_t iop_len;

		/* Overflowed data is still in the MU rbuffer: append it to
		 * the circular queue, then tell the IOP we have read it. */
		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
		iop_len = readl(&prbuffer->data_len);
		while (iop_len > 0) {
			acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
			acb->rqbuf_lastindex++;
			acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
				&reg->inbound_doorbell);
	}
	return (allxfer_len);
}
107
/* sysfs "mu_write" handler: copy up to 1032 user bytes into the
 * driver's circular wqbuffer and push the queue toward the IOP.
 * Returns 0 to signal "queue busy/full, retry", @count on success.
 * NOTE(review): wqbuf indices are sampled and updated without a lock;
 * presumably serialized against the ISR elsewhere -- confirm. */
static ssize_t
arcmsr_sysfs_iop_message_write(struct kobject *kobj, char *buf, loff_t off,
		size_t count)
{
	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
	struct Scsi_Host *host = class_to_shost(cdev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
	uint8_t *pQbuffer, *ptmpuserbuffer;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	/* A single message payload never exceeds 1032 bytes. */
	if (count > 1032)
		return -EINVAL;
	/* do message unit write. */
	ptmpuserbuffer = (uint8_t *)buf;
	user_len = (int32_t)count;
	wqbuf_lastindex = acb->wqbuf_lastindex;
	wqbuf_firstindex = acb->wqbuf_firstindex;
	if (wqbuf_lastindex != wqbuf_firstindex) {
		/* Previous data still pending: kick it out first. */
		arcmsr_post_Qbuffer(acb);
		return 0;	/*need retry*/
	} else {
		/* Free space in the circular queue (power-of-two mask). */
		my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
			&(ARCMSR_MAX_QBUFFER - 1);
		if (my_empty_len >= user_len) {
			while (user_len > 0) {
				pQbuffer =
				&acb->wqbuffer[acb->wqbuf_lastindex];
				memcpy(pQbuffer, ptmpuserbuffer, 1);
				acb->wqbuf_lastindex++;
				acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				ptmpuserbuffer++;
				user_len--;
			}
			/* First write since a clear: start posting. */
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_post_Qbuffer(acb);
			}
			return count;
		} else {
			return 0;	/*need retry*/
		}
	}
}
154
/* sysfs "mu_clear" handler: reset both circular message queues, ack
 * any pending overflow doorbell, and mark the queues cleared.
 * Always reports 1 byte consumed. */
static ssize_t
arcmsr_sysfs_iop_message_clear(struct kobject *kobj, char *buf, loff_t off,
		size_t count)
{
	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
	struct Scsi_Host *host = class_to_shost(cdev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct MessageUnit __iomem *reg = acb->pmu;
	uint8_t *pQbuffer;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Discard any overflowed IOP data and tell the IOP it was read. */
	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
			, &reg->inbound_doorbell);
	}
	acb->acb_flags |=
		(ACB_F_MESSAGE_WQBUFFER_CLEARED
		| ACB_F_MESSAGE_RQBUFFER_CLEARED
		| ACB_F_MESSAGE_WQBUFFER_READED);
	acb->rqbuf_firstindex = 0;
	acb->rqbuf_lastindex = 0;
	acb->wqbuf_firstindex = 0;
	acb->wqbuf_lastindex = 0;
	pQbuffer = acb->rqbuffer;
	memset(pQbuffer, 0, sizeof (struct QBUFFER));
	pQbuffer = acb->wqbuffer;
	memset(pQbuffer, 0, sizeof (struct QBUFFER));
	return 1;
}
187
/* Root-readable binary file "mu_read": drains the IOP->driver queue. */
static struct bin_attribute arcmsr_sysfs_message_read_attr = {
	.attr = {
		.name = "mu_read",
		.mode = S_IRUSR ,
		.owner = THIS_MODULE,
	},
	.size = 1032,	/* one CMD_MESSAGE_FIELD payload */
	.read = arcmsr_sysfs_iop_message_read,
};
197
/*
** Binary sysfs file "mu_write": queues user-space message data into the
** driver's write ring buffer for delivery to the IOP.
*/
static struct bin_attribute arcmsr_sysfs_message_write_attr = {
	.attr = {
		.name = "mu_write",
		.mode = S_IWUSR,
		.owner = THIS_MODULE,
	},
	.size = 1032,
	.write = arcmsr_sysfs_iop_message_write,
};
207
/*
** Binary sysfs file "mu_clear": any write resets both message ring
** buffers (see arcmsr_sysfs_iop_message_clear).
*/
static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
	.attr = {
		.name = "mu_clear",
		.mode = S_IWUSR,
		.owner = THIS_MODULE,
	},
	.size = 1,
	.write = arcmsr_sysfs_iop_message_clear,
};
217
218int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
219{
220 struct Scsi_Host *host = acb->host;
221 int error;
222
223 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
224 &arcmsr_sysfs_message_read_attr);
225 if (error) {
226 printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n");
227 goto error_bin_file_message_read;
228 }
229 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
230 &arcmsr_sysfs_message_write_attr);
231 if (error) {
232 printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n");
233 goto error_bin_file_message_write;
234 }
235 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
236 &arcmsr_sysfs_message_clear_attr);
237 if (error) {
238 printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n");
239 goto error_bin_file_message_clear;
240 }
241 return 0;
242error_bin_file_message_clear:
243 sysfs_remove_bin_file(&host->shost_classdev.kobj,
244 &arcmsr_sysfs_message_write_attr);
245error_bin_file_message_write:
246 sysfs_remove_bin_file(&host->shost_classdev.kobj,
247 &arcmsr_sysfs_message_read_attr);
248error_bin_file_message_read:
249 return error;
250}
251
252void
253arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) {
254 struct Scsi_Host *host = acb->host;
255
256 sysfs_remove_bin_file(&host->shost_classdev.kobj,
257 &arcmsr_sysfs_message_clear_attr);
258 sysfs_remove_bin_file(&host->shost_classdev.kobj,
259 &arcmsr_sysfs_message_write_attr);
260 sysfs_remove_bin_file(&host->shost_classdev.kobj,
261 &arcmsr_sysfs_message_read_attr);
262}
263
264
265static ssize_t
266arcmsr_attr_host_driver_version(struct class_device *cdev, char *buf) {
267 return snprintf(buf, PAGE_SIZE,
268 "%s\n",
269 ARCMSR_DRIVER_VERSION);
270}
271
272static ssize_t
273arcmsr_attr_host_driver_posted_cmd(struct class_device *cdev, char *buf) {
274 struct Scsi_Host *host = class_to_shost(cdev);
275 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
276 return snprintf(buf, PAGE_SIZE,
277 "%4d\n",
278 atomic_read(&acb->ccboutstandingcount));
279}
280
281static ssize_t
282arcmsr_attr_host_driver_reset(struct class_device *cdev, char *buf) {
283 struct Scsi_Host *host = class_to_shost(cdev);
284 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
285 return snprintf(buf, PAGE_SIZE,
286 "%4d\n",
287 acb->num_resets);
288}
289
290static ssize_t
291arcmsr_attr_host_driver_abort(struct class_device *cdev, char *buf) {
292 struct Scsi_Host *host = class_to_shost(cdev);
293 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
294 return snprintf(buf, PAGE_SIZE,
295 "%4d\n",
296 acb->num_aborts);
297}
298
299static ssize_t
300arcmsr_attr_host_fw_model(struct class_device *cdev, char *buf) {
301 struct Scsi_Host *host = class_to_shost(cdev);
302 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
303 return snprintf(buf, PAGE_SIZE,
304 "%s\n",
305 acb->firm_model);
306}
307
308static ssize_t
309arcmsr_attr_host_fw_version(struct class_device *cdev, char *buf) {
310 struct Scsi_Host *host = class_to_shost(cdev);
311 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
312
313 return snprintf(buf, PAGE_SIZE,
314 "%s\n",
315 acb->firm_version);
316}
317
318static ssize_t
319arcmsr_attr_host_fw_request_len(struct class_device *cdev, char *buf) {
320 struct Scsi_Host *host = class_to_shost(cdev);
321 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
322
323 return snprintf(buf, PAGE_SIZE,
324 "%4d\n",
325 acb->firm_request_len);
326}
327
328static ssize_t
329arcmsr_attr_host_fw_numbers_queue(struct class_device *cdev, char *buf) {
330 struct Scsi_Host *host = class_to_shost(cdev);
331 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
332
333 return snprintf(buf, PAGE_SIZE,
334 "%4d\n",
335 acb->firm_numbers_queue);
336}
337
338static ssize_t
339arcmsr_attr_host_fw_sdram_size(struct class_device *cdev, char *buf) {
340 struct Scsi_Host *host = class_to_shost(cdev);
341 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
342
343 return snprintf(buf, PAGE_SIZE,
344 "%4d\n",
345 acb->firm_sdram_size);
346}
347
348static ssize_t
349arcmsr_attr_host_fw_hd_channels(struct class_device *cdev, char *buf) {
350 struct Scsi_Host *host = class_to_shost(cdev);
351 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
352
353 return snprintf(buf, PAGE_SIZE,
354 "%4d\n",
355 acb->firm_hd_channels);
356}
357
/*
** Read-only class-device attributes; each maps a sysfs file under the
** host's class device to the matching show function above.
*/
static CLASS_DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL);
static CLASS_DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL);
static CLASS_DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL);
static CLASS_DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL);
static CLASS_DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL);
static CLASS_DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL);
static CLASS_DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL);
static CLASS_DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL);
static CLASS_DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
static CLASS_DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
368
/*
** NULL-terminated attribute list wired into the host template's
** .shost_attrs so the SCSI midlayer creates the files at host add time.
*/
struct class_device_attribute *arcmsr_host_attrs[] = {
	&class_device_attr_host_driver_version,
	&class_device_attr_host_driver_posted_cmd,
	&class_device_attr_host_driver_reset,
	&class_device_attr_host_driver_abort,
	&class_device_attr_host_fw_model,
	&class_device_attr_host_fw_version,
	&class_device_attr_host_fw_request_len,
	&class_device_attr_host_fw_numbers_queue,
	&class_device_attr_host_fw_sdram_size,
	&class_device_attr_host_fw_hd_channels,
	NULL,
};
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
new file mode 100644
index 000000000000..475f978ff8f0
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -0,0 +1,1496 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr_hba.c
5** BY : Erich Chen
6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter
8*******************************************************************************
9** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
10**
11** Web site: www.areca.com.tw
12** E-mail: erich@areca.com.tw
13**
14** This program is free software; you can redistribute it and/or modify
15** it under the terms of the GNU General Public License version 2 as
16** published by the Free Software Foundation.
17** This program is distributed in the hope that it will be useful,
18** but WITHOUT ANY WARRANTY; without even the implied warranty of
19** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20** GNU General Public License for more details.
21*******************************************************************************
22** Redistribution and use in source and binary forms, with or without
23** modification, are permitted provided that the following conditions
24** are met:
25** 1. Redistributions of source code must retain the above copyright
26** notice, this list of conditions and the following disclaimer.
27** 2. Redistributions in binary form must reproduce the above copyright
28** notice, this list of conditions and the following disclaimer in the
29** documentation and/or other materials provided with the distribution.
30** 3. The name of the author may not be used to endorse or promote products
31** derived from this software without specific prior written permission.
32**
33** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
38** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
40** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
42** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43*******************************************************************************
44** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
45** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
46*******************************************************************************
47*/
48#include <linux/module.h>
49#include <linux/reboot.h>
50#include <linux/spinlock.h>
51#include <linux/pci_ids.h>
52#include <linux/interrupt.h>
53#include <linux/moduleparam.h>
54#include <linux/errno.h>
55#include <linux/types.h>
56#include <linux/delay.h>
57#include <linux/dma-mapping.h>
58#include <linux/timer.h>
59#include <linux/pci.h>
60#include <asm/dma.h>
61#include <asm/io.h>
62#include <asm/system.h>
63#include <asm/uaccess.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi.h>
66#include <scsi/scsi_cmnd.h>
67#include <scsi/scsi_tcq.h>
68#include <scsi/scsi_device.h>
69#include <scsi/scsi_transport.h>
70#include <scsi/scsicam.h>
71#include "arcmsr.h"
72
MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

/* Forward declarations for the driver entry points defined below. */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd * cmd,
				void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
				const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
96
97static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
98{
99 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
100 queue_depth = ARCMSR_MAX_CMD_PERLUN;
101 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
102 return queue_depth;
103}
104
/*
** SCSI midlayer host template.  Note: .can_queue is set to
** ARCMSR_MAX_OUTSTANDING_CMD here, but arcmsr_probe() later overrides
** host->can_queue with ARCMSR_MAX_FREECCB_NUM.
*/
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module = THIS_MODULE,
	.name = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION,
	.info = arcmsr_info,
	.queuecommand = arcmsr_queue_command,
	.eh_abort_handler = arcmsr_abort,
	.eh_bus_reset_handler = arcmsr_bus_reset,
	.bios_param = arcmsr_bios_param,
	.change_queue_depth = arcmsr_adjust_disk_queue_depth,
	.can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id = ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize = ARCMSR_MAX_SG_ENTRIES,
	.max_sectors = ARCMSR_MAX_XFER_SECTORS,
	.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = arcmsr_host_attrs,
};
122
/* PCI IDs of all supported Areca ARC-11xx/12xx/13xx/16xx adapters. */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver glue: binds the id table to probe/remove/shutdown. */
static struct pci_driver arcmsr_pci_driver = {
	.name = "arcmsr",
	.id_table = arcmsr_device_id_table,
	.probe = arcmsr_probe,
	.remove = arcmsr_remove,
	.shutdown = arcmsr_shutdown
};
149
150static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id,
151 struct pt_regs *regs)
152{
153 irqreturn_t handle_state;
154 struct AdapterControlBlock *acb;
155 unsigned long flags;
156
157 acb = (struct AdapterControlBlock *)dev_id;
158
159 spin_lock_irqsave(acb->host->host_lock, flags);
160 handle_state = arcmsr_interrupt(acb);
161 spin_unlock_irqrestore(acb->host->host_lock, flags);
162 return handle_state;
163}
164
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	/*
	** Report disk geometry to the midlayer.  First try the geometry
	** implied by the on-disk partition table; otherwise synthesize a
	** 64-head/32-sector translation, or 255/63 once that would exceed
	** 1024 cylinders.  geom[0]=heads, geom[1]=sectors, geom[2]=cylinders.
	**
	** NOTE(review): capacity (sector_t, possibly 64-bit) is truncated
	** into 'int total_capacity'; geometry will be wrong for very large
	** devices — confirm whether supported LUN sizes make this moot.
	*/
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer;/* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		/* scsi_partsize() returns -1 when no usable table was found */
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
192
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	/*
	** Allocate one coherent DMA region holding all CCBs, align it to a
	** 32-byte boundary (the IOP receives CCB addresses shifted right by
	** 5, so the low 5 bits must be zero), thread every CCB onto the
	** free list, and tell the IOP the high 32 bits of the pool address
	** when running above 4GB.  Returns 0 or -ENOMEM.
	*/
	struct pci_dev *pdev = acb->pdev;
	struct MessageUnit __iomem *reg = acb->pmu;
	u32 ccb_phyaddr_hi32;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle, dma_addr;
	struct CommandControlBlock *ccb_tmp;
	int i, j;

	/* +0x20 slack allows realigning the start without losing a CCB */
	dma_coherent = dma_alloc_coherent(&pdev->dev,
			ARCMSR_MAX_FREECCB_NUM *
			sizeof (struct CommandControlBlock) + 0x20,
			&dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent)
		return -ENOMEM;

	/* keep the original (unaligned) pointers for dma_free_coherent() */
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;

	if (((unsigned long)dma_coherent & 0x1F)) {
		dma_coherent = dma_coherent +
			(0x20 - ((unsigned long)dma_coherent & 0x1F));
		dma_coherent_handle = dma_coherent_handle +
			(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
	}

	dma_addr = dma_coherent_handle;
	ccb_tmp = (struct CommandControlBlock *)dma_coherent;
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
		ccb_tmp->acb = acb;
		acb->pccb_pool[i] = ccb_tmp;
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		dma_addr = dma_addr + sizeof (struct CommandControlBlock);
		ccb_tmp++;
	}

	/* both pointers advanced in lockstep, so this is the pool's
	   virtual-to-physical offset, used by the ISR to recover the CCB
	   virtual address from the posted physical address */
	acb->vir2phy_offset = (unsigned long)ccb_tmp -
			(unsigned long)dma_addr;
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GOOD;

	/*
	** here we need to tell iop 331 our ccb_tmp.HighPart
	** if ccb_tmp.HighPart is not zero
	*/
	ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16);
	if (ccb_phyaddr_hi32 != 0) {
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->message_rwbuffer[0]);
		writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		if (arcmsr_wait_msgint_ready(acb))
			printk(KERN_NOTICE "arcmsr%d: "
			"'set ccb high part physical address' timeout\n",
			acb->host->host_no);
	}

	/* mask all outbound interrupts until the adapter is initialized */
	writel(readl(&reg->outbound_intmask) |
			ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
			&reg->outbound_intmask);
	return 0;
}
257
258static int arcmsr_probe(struct pci_dev *pdev,
259 const struct pci_device_id *id)
260{
261 struct Scsi_Host *host;
262 struct AdapterControlBlock *acb;
263 uint8_t bus, dev_fun;
264 int error;
265
266 error = pci_enable_device(pdev);
267 if (error)
268 goto out;
269 pci_set_master(pdev);
270
271 host = scsi_host_alloc(&arcmsr_scsi_host_template,
272 sizeof(struct AdapterControlBlock));
273 if (!host) {
274 error = -ENOMEM;
275 goto out_disable_device;
276 }
277 acb = (struct AdapterControlBlock *)host->hostdata;
278 memset(acb, 0, sizeof (struct AdapterControlBlock));
279
280 error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
281 if (error) {
282 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
283 if (error) {
284 printk(KERN_WARNING
285 "scsi%d: No suitable DMA mask available\n",
286 host->host_no);
287 goto out_host_put;
288 }
289 }
290 bus = pdev->bus->number;
291 dev_fun = pdev->devfn;
292 acb->host = host;
293 acb->pdev = pdev;
294 host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
295 host->max_lun = ARCMSR_MAX_TARGETLUN;
296 host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
297 host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
298 host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
299 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
300 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
301 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
302 host->unique_id = (bus << 8) | dev_fun;
303 host->irq = pdev->irq;
304 error = pci_request_regions(pdev, "arcmsr");
305 if (error)
306 goto out_host_put;
307
308 acb->pmu = ioremap(pci_resource_start(pdev, 0),
309 pci_resource_len(pdev, 0));
310 if (!acb->pmu) {
311 printk(KERN_NOTICE "arcmsr%d: memory"
312 " mapping region fail \n", acb->host->host_no);
313 goto out_release_regions;
314 }
315 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
316 ACB_F_MESSAGE_RQBUFFER_CLEARED |
317 ACB_F_MESSAGE_WQBUFFER_READED);
318 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
319 INIT_LIST_HEAD(&acb->ccb_free_list);
320
321 error = arcmsr_alloc_ccb_pool(acb);
322 if (error)
323 goto out_iounmap;
324
325 error = request_irq(pdev->irq, arcmsr_do_interrupt,
326 SA_INTERRUPT | SA_SHIRQ, "arcmsr", acb);
327 if (error)
328 goto out_free_ccb_pool;
329
330 arcmsr_iop_init(acb);
331 pci_set_drvdata(pdev, host);
332
333 error = scsi_add_host(host, &pdev->dev);
334 if (error)
335 goto out_free_irq;
336
337 error = arcmsr_alloc_sysfs_attr(acb);
338 if (error)
339 goto out_free_sysfs;
340
341 scsi_scan_host(host);
342 return 0;
343 out_free_sysfs:
344 out_free_irq:
345 free_irq(pdev->irq, acb);
346 out_free_ccb_pool:
347 arcmsr_free_ccb_pool(acb);
348 out_iounmap:
349 iounmap(acb->pmu);
350 out_release_regions:
351 pci_release_regions(pdev);
352 out_host_put:
353 scsi_host_put(host);
354 out_disable_device:
355 pci_disable_device(pdev);
356 out:
357 return error;
358}
359
360static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
361{
362 struct MessageUnit __iomem *reg = acb->pmu;
363
364 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
365 if (arcmsr_wait_msgint_ready(acb))
366 printk(KERN_NOTICE
367 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
368 , acb->host->host_no);
369}
370
371static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
372{
373 struct AdapterControlBlock *acb = ccb->acb;
374 struct scsi_cmnd *pcmd = ccb->pcmd;
375
376 if (pcmd->use_sg != 0) {
377 struct scatterlist *sl;
378
379 sl = (struct scatterlist *)pcmd->request_buffer;
380 pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
381 }
382 else if (pcmd->request_bufflen != 0)
383 pci_unmap_single(acb->pdev,
384 pcmd->SCp.dma_handle,
385 pcmd->request_bufflen, pcmd->sc_data_direction);
386}
387
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
{
	/*
	** Finish a command: unmap its DMA, return the CCB to the free
	** list, and hand the scsi_cmnd back to the midlayer.
	** stand_flag==1 means the command was counted as outstanding, so
	** the outstanding counter is decremented.
	** NOTE(review): the CCB goes back on the free list before
	** scsi_done() runs — presumably safe because callers hold the
	** host lock; confirm against the interrupt path.
	*/
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;

	arcmsr_pci_unmap_dma(ccb);
	if (stand_flag == 1)
		atomic_dec(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_DONE;
	ccb->ccb_flags = 0;
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	pcmd->scsi_done(pcmd);
}
401
static void arcmsr_remove(struct pci_dev *pdev)
{
	/*
	** PCI remove: unregister sysfs files and the SCSI host, quiesce
	** the IOP (stop rebuild, flush cache, mask interrupts), drain or
	** forcibly abort any outstanding commands, then release IRQ,
	** mappings, CCB pool, and PCI resources — the reverse of probe.
	*/
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	struct MessageUnit __iomem *reg = acb->pmu;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	/* mask all outbound interrupts and mark the adapter stopping */
	writel(readl(&reg->outbound_intmask) |
		ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
		&reg->outbound_intmask);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* poll the ISR by hand for up to 256*25ms to drain completions */
	for (poll_count = 0; poll_count < 256; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);
		msleep(25);
	}

	/* anything still outstanding is aborted and completed DID_ABORT */
	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
			readl(&reg->outbound_queueport);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
	}

	free_irq(pdev->irq, acb);
	iounmap(acb->pmu);
	arcmsr_free_ccb_pool(acb);
	pci_release_regions(pdev);

	scsi_host_put(host);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
453
454static void arcmsr_shutdown(struct pci_dev *pdev)
455{
456 struct Scsi_Host *host = pci_get_drvdata(pdev);
457 struct AdapterControlBlock *acb =
458 (struct AdapterControlBlock *)host->hostdata;
459
460 arcmsr_stop_adapter_bgrb(acb);
461 arcmsr_flush_adapter_cache(acb);
462}
463
464static int arcmsr_module_init(void)
465{
466 int error = 0;
467
468 error = pci_register_driver(&arcmsr_pci_driver);
469 return error;
470}
471
static void arcmsr_module_exit(void)
{
	/* Module exit: unregister the PCI driver; remove() runs per device. */
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
478
479static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
480{
481 struct MessageUnit __iomem *reg = acb->pmu;
482 u32 orig_mask = readl(&reg->outbound_intmask);
483
484 writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
485 &reg->outbound_intmask);
486 return orig_mask;
487}
488
489static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
490 u32 orig_mask)
491{
492 struct MessageUnit __iomem *reg = acb->pmu;
493 u32 mask;
494
495 mask = orig_mask & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
496 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
497 writel(mask, &reg->outbound_intmask);
498}
499
500static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
501{
502 struct MessageUnit __iomem *reg=acb->pmu;
503
504 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
505 if (arcmsr_wait_msgint_ready(acb))
506 printk(KERN_NOTICE
507 "arcmsr%d: wait 'flush adapter cache' timeout \n"
508 , acb->host->host_no);
509}
510
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	/*
	** Copy the firmware's sense data from the CCB into the midlayer's
	** sense buffer (clipped to the smaller of the two sizes) and mark
	** it as current, valid sense.
	** NOTE(review): pcmd->result is set to DID_OK only — no
	** CHECK_CONDITION SCSI status accompanies the sense data; confirm
	** the midlayer actually consumes sense in this case.
	*/
	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;

	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		int sense_data_length =
			sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer)
			? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer);
		memset(sensebuffer, 0, sizeof (pcmd->sense_buffer));
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}
527
528static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
529{
530 struct MessageUnit __iomem *reg = acb->pmu;
531 uint32_t Index;
532 uint8_t Retries = 0x00;
533
534 do {
535 for (Index = 0; Index < 100; Index++) {
536 if (readl(&reg->outbound_intstatus)
537 & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
538 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT
539 , &reg->outbound_intstatus);
540 return 0x00;
541 }
542 msleep_interruptible(10);
543 }/*max 1 seconds*/
544 } while (Retries++ < 20);/*max 20 sec*/
545 return 0xff;
546}
547
static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	/*
	** Translate a midlayer scsi_cmnd into the adapter's ARCMSR_CDB:
	** fill in addressing and the raw CDB, DMA-map the data buffer
	** (scatter-gather or single), and emit 32- or 64-bit SG entries
	** depending on whether each segment's address fits in 32 bits.
	** Sets the SGL_BSIZE flag once the CDB grows past 256 bytes.
	*/
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	uint32_t address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed CDB header size before SG entries */

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
	arcmsr_cdb->Bus = 0;
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
	arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
	if (pcmd->use_sg) {
		int length, sgcount, i, cdb_sgcount = 0;
		struct scatterlist *sl;

		/* Get Scatter Gather List from scsiport. */
		sl = (struct scatterlist *) pcmd->request_buffer;
		sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg,
				pcmd->sc_data_direction);
		/* map stor port SG list to our iop SG List. */
		for (i = 0; i < sgcount; i++) {
			/* Get the physical address of the current data pointer */
			length = cpu_to_le32(sg_dma_len(sl));
			address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl)));
			address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl)));
			if (address_hi == 0) {
				/* 32-bit segment: compact SG entry */
				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

				pdma_sg->address = address_lo;
				pdma_sg->length = length;
				psge += sizeof (struct SG32ENTRY);
				arccdbsize += sizeof (struct SG32ENTRY);
			} else {
				/* 64-bit segment: wide entry, flagged in length */
				struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

				pdma_sg->addresshigh = address_hi;
				pdma_sg->address = address_lo;
				pdma_sg->length = length|IS_SG64_ADDR;
				psge += sizeof (struct SG64ENTRY);
				arccdbsize += sizeof (struct SG64ENTRY);
			}
			sl++;
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = pcmd->request_bufflen;
		if ( arccdbsize > 256)
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	} else if (pcmd->request_bufflen) {
		/* single contiguous buffer: one SG entry */
		dma_addr_t dma_addr;
		dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
				pcmd->request_bufflen, pcmd->sc_data_direction);
		pcmd->SCp.dma_handle = dma_addr;	/* saved for unmap */
		address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
		address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
		if (address_hi == 0) {
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
			pdma_sg->address = address_lo;
			pdma_sg->length = pcmd->request_bufflen;
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
		}
		arcmsr_cdb->sgcount = 1;
		arcmsr_cdb->DataLength = pcmd->request_bufflen;
	}
	if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
		ccb->ccb_flags |= CCB_FLAG_WRITE;
	}
}
627
628static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
629{
630 struct MessageUnit __iomem *reg = acb->pmu;
631 uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
632 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
633
634 atomic_inc(&acb->ccboutstandingcount);
635 ccb->startdone = ARCMSR_CCB_START;
636 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
637 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
638 &reg->inbound_queueport);
639 else
640 writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
641}
642
void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
{
	/*
	** Drain the driver's write ring buffer into the IOP's message
	** write window (up to 124 bytes per pass) and ring the inbound
	** doorbell.  Only runs when the IOP has read the previous chunk
	** (WQBUFFER_READED set); the flag is cleared until the IOP acks
	** again via the ISR doorbell path.
	*/
	struct MessageUnit __iomem *reg = acb->pmu;
	struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
	uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
	int32_t allxfer_len = 0;

	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
			&& (allxfer_len < 124)) {
			writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		writel(allxfer_len, &pwbuffer->data_len);
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK
			, &reg->inbound_doorbell);
	}
}
665
666static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
667{
668 struct MessageUnit __iomem *reg = acb->pmu;
669
670 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
671 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
672 if (arcmsr_wait_msgint_ready(acb))
673 printk(KERN_NOTICE
674 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
675 , acb->host->host_no);
676}
677
678static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
679{
680 dma_free_coherent(&acb->pdev->dev,
681 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
682 acb->dma_coherent,
683 acb->dma_coherent_handle);
684}
685
686static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
687{
688 struct MessageUnit __iomem *reg = acb->pmu;
689 struct CommandControlBlock *ccb;
690 uint32_t flag_ccb, outbound_intstatus, outbound_doorbell;
691
692 outbound_intstatus = readl(&reg->outbound_intstatus)
693 & acb->outbound_int_enable;
694 writel(outbound_intstatus, &reg->outbound_intstatus);
695 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
696 outbound_doorbell = readl(&reg->outbound_doorbell);
697 writel(outbound_doorbell, &reg->outbound_doorbell);
698 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
699 struct QBUFFER __iomem * prbuffer =
700 (struct QBUFFER __iomem *) &reg->message_rbuffer;
701 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
702 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
703
704 rqbuf_lastindex = acb->rqbuf_lastindex;
705 rqbuf_firstindex = acb->rqbuf_firstindex;
706 iop_len = readl(&prbuffer->data_len);
707 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1)
708 &(ARCMSR_MAX_QBUFFER - 1);
709 if (my_empty_len >= iop_len) {
710 while (iop_len > 0) {
711 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
712 acb->rqbuf_lastindex++;
713 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
714 iop_data++;
715 iop_len--;
716 }
717 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
718 &reg->inbound_doorbell);
719 } else
720 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
721 }
722 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
723 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
724 if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
725 struct QBUFFER __iomem * pwbuffer =
726 (struct QBUFFER __iomem *) &reg->message_wbuffer;
727 uint8_t __iomem * iop_data = (uint8_t __iomem *) pwbuffer->data;
728 int32_t allxfer_len = 0;
729
730 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
731 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
732 && (allxfer_len < 124)) {
733 writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
734 acb->wqbuf_firstindex++;
735 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
736 iop_data++;
737 allxfer_len++;
738 }
739 writel(allxfer_len, &pwbuffer->data_len);
740 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK,
741 &reg->inbound_doorbell);
742 }
743 if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
744 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
745 }
746 }
747 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
748 int id, lun;
749 /*
750 ****************************************************************
751 ** areca cdb command done
752 ****************************************************************
753 */
754 while (1) {
755 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF)
756 break;/*chip FIFO no ccb for completion already*/
757 /* check if command done with no error*/
758 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
759 (flag_ccb << 5));
760 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
761 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
762 struct scsi_cmnd *abortcmd=ccb->pcmd;
763 if (abortcmd) {
764 abortcmd->result |= DID_ABORT >> 16;
765 arcmsr_ccb_complete(ccb, 1);
766 printk(KERN_NOTICE
767 "arcmsr%d: ccb='0x%p' isr got aborted command \n"
768 , acb->host->host_no, ccb);
769 }
770 continue;
771 }
772 printk(KERN_NOTICE
773 "arcmsr%d: isr get an illegal ccb command done acb='0x%p'"
774 "ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
775 " ccboutstandingcount=%d \n"
776 , acb->host->host_no
777 , acb
778 , ccb
779 , ccb->acb
780 , ccb->startdone
781 , atomic_read(&acb->ccboutstandingcount));
782 continue;
783 }
784 id = ccb->pcmd->device->id;
785 lun = ccb->pcmd->device->lun;
786 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
787 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
788 acb->devstate[id][lun] = ARECA_RAID_GOOD;
789 ccb->pcmd->result = DID_OK << 16;
790 arcmsr_ccb_complete(ccb, 1);
791 } else {
792 switch(ccb->arcmsr_cdb.DeviceStatus) {
793 case ARCMSR_DEV_SELECT_TIMEOUT: {
794 acb->devstate[id][lun] = ARECA_RAID_GONE;
795 ccb->pcmd->result = DID_TIME_OUT << 16;
796 arcmsr_ccb_complete(ccb, 1);
797 }
798 break;
799 case ARCMSR_DEV_ABORTED:
800 case ARCMSR_DEV_INIT_FAIL: {
801 acb->devstate[id][lun] = ARECA_RAID_GONE;
802 ccb->pcmd->result = DID_BAD_TARGET << 16;
803 arcmsr_ccb_complete(ccb, 1);
804 }
805 break;
806 case ARCMSR_DEV_CHECK_CONDITION: {
807 acb->devstate[id][lun] = ARECA_RAID_GOOD;
808 arcmsr_report_sense_info(ccb);
809 arcmsr_ccb_complete(ccb, 1);
810 }
811 break;
812 default:
813 printk(KERN_NOTICE
814 "arcmsr%d: scsi id=%d lun=%d"
815 " isr get command error done,"
816 "but got unknown DeviceStatus = 0x%x \n"
817 , acb->host->host_no
818 , id
819 , lun
820 , ccb->arcmsr_cdb.DeviceStatus);
821 acb->devstate[id][lun] = ARECA_RAID_GONE;
822 ccb->pcmd->result = DID_NO_CONNECT << 16;
823 arcmsr_ccb_complete(ccb, 1);
824 break;
825 }
826 }
827 }/*drain reply FIFO*/
828 }
829 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
830 return IRQ_NONE;
831 return IRQ_HANDLED;
832}
833
834static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
835{
836 if (acb) {
837 /* stop adapter background rebuild */
838 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
839 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
840 arcmsr_stop_adapter_bgrb(acb);
841 arcmsr_flush_adapter_cache(acb);
842 }
843 }
844}
845
846static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd)
847{
848 struct MessageUnit __iomem *reg = acb->pmu;
849 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
850 int retvalue = 0, transfer_len = 0;
851 char *buffer;
852 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
853 (uint32_t ) cmd->cmnd[6] << 16 |
854 (uint32_t ) cmd->cmnd[7] << 8 |
855 (uint32_t ) cmd->cmnd[8];
856 /* 4 bytes: Areca io control code */
857 if (cmd->use_sg) {
858 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
859
860 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
861 if (cmd->use_sg > 1) {
862 retvalue = ARCMSR_MESSAGE_FAIL;
863 goto message_out;
864 }
865 transfer_len += sg->length;
866 } else {
867 buffer = cmd->request_buffer;
868 transfer_len = cmd->request_bufflen;
869 }
870 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
871 retvalue = ARCMSR_MESSAGE_FAIL;
872 goto message_out;
873 }
874 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
875 switch(controlcode) {
876 case ARCMSR_MESSAGE_READ_RQBUFFER: {
877 unsigned long *ver_addr;
878 dma_addr_t buf_handle;
879 uint8_t *pQbuffer, *ptmpQbuffer;
880 int32_t allxfer_len = 0;
881
882 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
883 if (!ver_addr) {
884 retvalue = ARCMSR_MESSAGE_FAIL;
885 goto message_out;
886 }
887 ptmpQbuffer = (uint8_t *) ver_addr;
888 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
889 && (allxfer_len < 1031)) {
890 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
891 memcpy(ptmpQbuffer, pQbuffer, 1);
892 acb->rqbuf_firstindex++;
893 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
894 ptmpQbuffer++;
895 allxfer_len++;
896 }
897 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
898 struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
899 &reg->message_rbuffer;
900 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
901 int32_t iop_len;
902
903 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
904 iop_len = readl(&prbuffer->data_len);
905 while (iop_len > 0) {
906 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
907 acb->rqbuf_lastindex++;
908 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
909 iop_data++;
910 iop_len--;
911 }
912 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
913 &reg->inbound_doorbell);
914 }
915 memcpy(pcmdmessagefld->messagedatabuffer,
916 (uint8_t *)ver_addr, allxfer_len);
917 pcmdmessagefld->cmdmessage.Length = allxfer_len;
918 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
919 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
920 }
921 break;
922 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
923 unsigned long *ver_addr;
924 dma_addr_t buf_handle;
925 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
926 uint8_t *pQbuffer, *ptmpuserbuffer;
927
928 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
929 if (!ver_addr) {
930 retvalue = ARCMSR_MESSAGE_FAIL;
931 goto message_out;
932 }
933 ptmpuserbuffer = (uint8_t *)ver_addr;
934 user_len = pcmdmessagefld->cmdmessage.Length;
935 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
936 wqbuf_lastindex = acb->wqbuf_lastindex;
937 wqbuf_firstindex = acb->wqbuf_firstindex;
938 if (wqbuf_lastindex != wqbuf_firstindex) {
939 struct SENSE_DATA *sensebuffer =
940 (struct SENSE_DATA *)cmd->sense_buffer;
941 arcmsr_post_Qbuffer(acb);
942 /* has error report sensedata */
943 sensebuffer->ErrorCode = 0x70;
944 sensebuffer->SenseKey = ILLEGAL_REQUEST;
945 sensebuffer->AdditionalSenseLength = 0x0A;
946 sensebuffer->AdditionalSenseCode = 0x20;
947 sensebuffer->Valid = 1;
948 retvalue = ARCMSR_MESSAGE_FAIL;
949 } else {
950 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
951 &(ARCMSR_MAX_QBUFFER - 1);
952 if (my_empty_len >= user_len) {
953 while (user_len > 0) {
954 pQbuffer =
955 &acb->wqbuffer[acb->wqbuf_lastindex];
956 memcpy(pQbuffer, ptmpuserbuffer, 1);
957 acb->wqbuf_lastindex++;
958 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
959 ptmpuserbuffer++;
960 user_len--;
961 }
962 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
963 acb->acb_flags &=
964 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
965 arcmsr_post_Qbuffer(acb);
966 }
967 } else {
968 /* has error report sensedata */
969 struct SENSE_DATA *sensebuffer =
970 (struct SENSE_DATA *)cmd->sense_buffer;
971 sensebuffer->ErrorCode = 0x70;
972 sensebuffer->SenseKey = ILLEGAL_REQUEST;
973 sensebuffer->AdditionalSenseLength = 0x0A;
974 sensebuffer->AdditionalSenseCode = 0x20;
975 sensebuffer->Valid = 1;
976 retvalue = ARCMSR_MESSAGE_FAIL;
977 }
978 }
979 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
980 }
981 break;
982 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
983 uint8_t *pQbuffer = acb->rqbuffer;
984
985 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
986 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
987 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
988 &reg->inbound_doorbell);
989 }
990 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
991 acb->rqbuf_firstindex = 0;
992 acb->rqbuf_lastindex = 0;
993 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
994 pcmdmessagefld->cmdmessage.ReturnCode =
995 ARCMSR_MESSAGE_RETURNCODE_OK;
996 }
997 break;
998 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
999 uint8_t *pQbuffer = acb->wqbuffer;
1000
1001 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1002 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1003 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1004 , &reg->inbound_doorbell);
1005 }
1006 acb->acb_flags |=
1007 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1008 ACB_F_MESSAGE_WQBUFFER_READED);
1009 acb->wqbuf_firstindex = 0;
1010 acb->wqbuf_lastindex = 0;
1011 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1012 pcmdmessagefld->cmdmessage.ReturnCode =
1013 ARCMSR_MESSAGE_RETURNCODE_OK;
1014 }
1015 break;
1016 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1017 uint8_t *pQbuffer;
1018
1019 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1020 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1021 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1022 , &reg->inbound_doorbell);
1023 }
1024 acb->acb_flags |=
1025 (ACB_F_MESSAGE_WQBUFFER_CLEARED
1026 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1027 | ACB_F_MESSAGE_WQBUFFER_READED);
1028 acb->rqbuf_firstindex = 0;
1029 acb->rqbuf_lastindex = 0;
1030 acb->wqbuf_firstindex = 0;
1031 acb->wqbuf_lastindex = 0;
1032 pQbuffer = acb->rqbuffer;
1033 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1034 pQbuffer = acb->wqbuffer;
1035 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1036 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1037 }
1038 break;
1039 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1040 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1041 }
1042 break;
1043 case ARCMSR_MESSAGE_SAY_HELLO: {
1044 int8_t * hello_string = "Hello! I am ARCMSR";
1045
1046 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1047 , (int16_t)strlen(hello_string));
1048 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1049 }
1050 break;
1051 case ARCMSR_MESSAGE_SAY_GOODBYE:
1052 arcmsr_iop_parking(acb);
1053 break;
1054 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1055 arcmsr_flush_adapter_cache(acb);
1056 break;
1057 default:
1058 retvalue = ARCMSR_MESSAGE_FAIL;
1059 }
1060 message_out:
1061 if (cmd->use_sg) {
1062 struct scatterlist *sg;
1063
1064 sg = (struct scatterlist *) cmd->request_buffer;
1065 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1066 }
1067 return retvalue;
1068}
1069
1070static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1071{
1072 struct list_head *head = &acb->ccb_free_list;
1073 struct CommandControlBlock *ccb = NULL;
1074
1075 if (!list_empty(head)) {
1076 ccb = list_entry(head->next, struct CommandControlBlock, list);
1077 list_del(head->next);
1078 }
1079 return ccb;
1080}
1081
1082static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1083 struct scsi_cmnd *cmd)
1084{
1085 switch (cmd->cmnd[0]) {
1086 case INQUIRY: {
1087 unsigned char inqdata[36];
1088 char *buffer;
1089
1090 if (cmd->device->lun) {
1091 cmd->result = (DID_TIME_OUT << 16);
1092 cmd->scsi_done(cmd);
1093 return;
1094 }
1095 inqdata[0] = TYPE_PROCESSOR;
1096 /* Periph Qualifier & Periph Dev Type */
1097 inqdata[1] = 0;
1098 /* rem media bit & Dev Type Modifier */
1099 inqdata[2] = 0;
1100 /* ISO,ECMA,& ANSI versions */
1101 inqdata[4] = 31;
1102 /* length of additional data */
1103 strncpy(&inqdata[8], "Areca ", 8);
1104 /* Vendor Identification */
1105 strncpy(&inqdata[16], "RAID controller ", 16);
1106 /* Product Identification */
1107 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1108 if (cmd->use_sg) {
1109 struct scatterlist *sg;
1110
1111 sg = (struct scatterlist *) cmd->request_buffer;
1112 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1113 } else {
1114 buffer = cmd->request_buffer;
1115 }
1116 memcpy(buffer, inqdata, sizeof(inqdata));
1117 if (cmd->use_sg) {
1118 struct scatterlist *sg;
1119
1120 sg = (struct scatterlist *) cmd->request_buffer;
1121 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1122 }
1123 cmd->scsi_done(cmd);
1124 }
1125 break;
1126 case WRITE_BUFFER:
1127 case READ_BUFFER: {
1128 if (arcmsr_iop_message_xfer(acb, cmd))
1129 cmd->result = (DID_ERROR << 16);
1130 cmd->scsi_done(cmd);
1131 }
1132 break;
1133 default:
1134 cmd->scsi_done(cmd);
1135 }
1136}
1137
1138static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1139 void (* done)(struct scsi_cmnd *))
1140{
1141 struct Scsi_Host *host = cmd->device->host;
1142 struct AdapterControlBlock *acb =
1143 (struct AdapterControlBlock *) host->hostdata;
1144 struct CommandControlBlock *ccb;
1145 int target = cmd->device->id;
1146 int lun = cmd->device->lun;
1147
1148 cmd->scsi_done = done;
1149 cmd->host_scribble = NULL;
1150 cmd->result = 0;
1151 if (acb->acb_flags & ACB_F_BUS_RESET) {
1152 printk(KERN_NOTICE "arcmsr%d: bus reset"
1153 " and return busy \n"
1154 , acb->host->host_no);
1155 return SCSI_MLQUEUE_HOST_BUSY;
1156 }
1157 if(target == 16) {
1158 /* virtual device for iop message transfer */
1159 arcmsr_handle_virtual_command(acb, cmd);
1160 return 0;
1161 }
1162 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1163 uint8_t block_cmd;
1164
1165 block_cmd = cmd->cmnd[0] & 0x0f;
1166 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1167 printk(KERN_NOTICE
1168 "arcmsr%d: block 'read/write'"
1169 "command with gone raid volume"
1170 " Cmd=%2x, TargetId=%d, Lun=%d \n"
1171 , acb->host->host_no
1172 , cmd->cmnd[0]
1173 , target, lun);
1174 cmd->result = (DID_NO_CONNECT << 16);
1175 cmd->scsi_done(cmd);
1176 return 0;
1177 }
1178 }
1179 if (atomic_read(&acb->ccboutstandingcount) >=
1180 ARCMSR_MAX_OUTSTANDING_CMD)
1181 return SCSI_MLQUEUE_HOST_BUSY;
1182
1183 ccb = arcmsr_get_freeccb(acb);
1184 if (!ccb)
1185 return SCSI_MLQUEUE_HOST_BUSY;
1186 arcmsr_build_ccb(acb, ccb, cmd);
1187 arcmsr_post_ccb(acb, ccb);
1188 return 0;
1189}
1190
1191static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1192{
1193 struct MessageUnit __iomem *reg = acb->pmu;
1194 char *acb_firm_model = acb->firm_model;
1195 char *acb_firm_version = acb->firm_version;
1196 char __iomem *iop_firm_model = (char __iomem *) &reg->message_rwbuffer[15];
1197 char __iomem *iop_firm_version = (char __iomem *) &reg->message_rwbuffer[17];
1198 int count;
1199
1200 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1201 if (arcmsr_wait_msgint_ready(acb))
1202 printk(KERN_NOTICE
1203 "arcmsr%d: wait "
1204 "'get adapter firmware miscellaneous data' timeout \n"
1205 , acb->host->host_no);
1206 count = 8;
1207 while (count) {
1208 *acb_firm_model = readb(iop_firm_model);
1209 acb_firm_model++;
1210 iop_firm_model++;
1211 count--;
1212 }
1213 count = 16;
1214 while (count) {
1215 *acb_firm_version = readb(iop_firm_version);
1216 acb_firm_version++;
1217 iop_firm_version++;
1218 count--;
1219 }
1220 printk(KERN_INFO
1221 "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1222 , acb->host->host_no
1223 , acb->firm_version);
1224 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1225 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1226 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1227 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1228}
1229
/*
 * arcmsr_polling_ccbdone - drain the reply FIFO by polling, used on the
 * abort path with interrupts masked.
 *
 * Repeatedly reads the outbound queue port; each non-0xFFFFFFFF value
 * is a flag_ccb that is translated back to a CCB (vir2phy_offset +
 * (flag_ccb << 5)) and completed exactly as the ISR would. When the
 * FIFO is empty and poll_ccb has already completed, the loop exits;
 * otherwise it sleeps 25 ms and retries, giving up after 100 retries
 * (roughly 2.5 s plus register-read time).
 */
static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int id, lun;

	polling_ccb_retry:
	poll_count++;
	/* ack any pending enabled interrupt sources before polling */
	outbound_intstatus = readl(&reg->outbound_intstatus)
		& acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			/* FIFO empty: done if the target CCB completed,
			 * otherwise back off and retry (bounded) */
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				if (poll_count > 100)
					break;
				goto polling_ccb_retry;
			}
		}
		ccb = (struct CommandControlBlock *)
			(acb->vir2phy_offset + (flag_ccb << 5));
		/* sanity-check ownership and state before completing */
		if ((ccb->acb != acb) ||
			(ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
				(ccb == poll_ccb)) {
				printk(KERN_NOTICE
					"arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				poll_ccb_done = 1;
				continue;
			}
			printk(KERN_NOTICE
				"arcmsr%d: polling get an illegal ccb"
				" command done ccb='0x%p'"
				"ccboutstandingcount=%d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		id = ccb->pcmd->device->id;
		lun = ccb->pcmd->device->lun;
		if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
			if (acb->devstate[id][lun] == ARECA_RAID_GONE)
				acb->devstate[id][lun] = ARECA_RAID_GOOD;
			ccb->pcmd->result = DID_OK << 16;
			arcmsr_ccb_complete(ccb, 1);
		} else {
			/* error completion: map firmware DeviceStatus to a
			 * SCSI host-byte result, same table as the ISR */
			switch(ccb->arcmsr_cdb.DeviceStatus) {
			case ARCMSR_DEV_SELECT_TIMEOUT: {
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				ccb->pcmd->result = DID_TIME_OUT << 16;
				arcmsr_ccb_complete(ccb, 1);
				}
				break;
			case ARCMSR_DEV_ABORTED:
			case ARCMSR_DEV_INIT_FAIL: {
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				ccb->pcmd->result = DID_BAD_TARGET << 16;
				arcmsr_ccb_complete(ccb, 1);
				}
				break;
			case ARCMSR_DEV_CHECK_CONDITION: {
				acb->devstate[id][lun] = ARECA_RAID_GOOD;
				arcmsr_report_sense_info(ccb);
				arcmsr_ccb_complete(ccb, 1);
				}
				break;
			default:
				printk(KERN_NOTICE
					"arcmsr%d: scsi id=%d lun=%d"
					" polling and getting command error done"
					"but got unknown DeviceStatus = 0x%x \n"
					, acb->host->host_no
					, id
					, lun
					, ccb->arcmsr_cdb.DeviceStatus);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				ccb->pcmd->result = DID_BAD_TARGET << 16;
				arcmsr_ccb_complete(ccb, 1);
				break;
			}
		}
	}
}
1326
1327static void arcmsr_iop_init(struct AdapterControlBlock *acb)
1328{
1329 struct MessageUnit __iomem *reg = acb->pmu;
1330 uint32_t intmask_org, mask, outbound_doorbell, firmware_state = 0;
1331
1332 do {
1333 firmware_state = readl(&reg->outbound_msgaddr1);
1334 } while (!(firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK));
1335 intmask_org = readl(&reg->outbound_intmask)
1336 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
1337 arcmsr_get_firmware_spec(acb);
1338
1339 acb->acb_flags |= ACB_F_MSG_START_BGRB;
1340 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
1341 if (arcmsr_wait_msgint_ready(acb)) {
1342 printk(KERN_NOTICE "arcmsr%d: "
1343 "wait 'start adapter background rebulid' timeout\n",
1344 acb->host->host_no);
1345 }
1346
1347 outbound_doorbell = readl(&reg->outbound_doorbell);
1348 writel(outbound_doorbell, &reg->outbound_doorbell);
1349 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1350 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
1351 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
1352 writel(intmask_org & mask, &reg->outbound_intmask);
1353 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1354 acb->acb_flags |= ACB_F_IOP_INITED;
1355}
1356
1357static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
1358{
1359 struct MessageUnit __iomem *reg = acb->pmu;
1360 struct CommandControlBlock *ccb;
1361 uint32_t intmask_org;
1362 int i = 0;
1363
1364 if (atomic_read(&acb->ccboutstandingcount) != 0) {
1365 /* talk to iop 331 outstanding command aborted */
1366 arcmsr_abort_allcmd(acb);
1367 /* wait for 3 sec for all command aborted*/
1368 msleep_interruptible(3000);
1369 /* disable all outbound interrupt */
1370 intmask_org = arcmsr_disable_outbound_ints(acb);
1371 /* clear all outbound posted Q */
1372 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
1373 readl(&reg->outbound_queueport);
1374 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1375 ccb = acb->pccb_pool[i];
1376 if ((ccb->startdone == ARCMSR_CCB_START) ||
1377 (ccb->startdone == ARCMSR_CCB_ABORTED)) {
1378 ccb->startdone = ARCMSR_CCB_ABORTED;
1379 ccb->pcmd->result = DID_ABORT << 16;
1380 arcmsr_ccb_complete(ccb, 1);
1381 }
1382 }
1383 /* enable all outbound interrupt */
1384 arcmsr_enable_outbound_ints(acb, intmask_org);
1385 }
1386 atomic_set(&acb->ccboutstandingcount, 0);
1387}
1388
1389static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
1390{
1391 struct AdapterControlBlock *acb =
1392 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1393 int i;
1394
1395 acb->num_resets++;
1396 acb->acb_flags |= ACB_F_BUS_RESET;
1397 for (i = 0; i < 400; i++) {
1398 if (!atomic_read(&acb->ccboutstandingcount))
1399 break;
1400 arcmsr_interrupt(acb);
1401 msleep(25);
1402 }
1403 arcmsr_iop_reset(acb);
1404 acb->acb_flags &= ~ACB_F_BUS_RESET;
1405 return SUCCESS;
1406}
1407
1408static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
1409 struct CommandControlBlock *ccb)
1410{
1411 u32 intmask;
1412
1413 ccb->startdone = ARCMSR_CCB_ABORTED;
1414
1415 /*
1416 ** Wait for 3 sec for all command done.
1417 */
1418 msleep_interruptible(3000);
1419
1420 intmask = arcmsr_disable_outbound_ints(acb);
1421 arcmsr_polling_ccbdone(acb, ccb);
1422 arcmsr_enable_outbound_ints(acb, intmask);
1423}
1424
1425static int arcmsr_abort(struct scsi_cmnd *cmd)
1426{
1427 struct AdapterControlBlock *acb =
1428 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1429 int i = 0;
1430
1431 printk(KERN_NOTICE
1432 "arcmsr%d: abort device command of scsi id=%d lun=%d \n",
1433 acb->host->host_no, cmd->device->id, cmd->device->lun);
1434 acb->num_aborts++;
1435
1436 /*
1437 ************************************************
1438 ** the all interrupt service routine is locked
1439 ** we need to handle it as soon as possible and exit
1440 ************************************************
1441 */
1442 if (!atomic_read(&acb->ccboutstandingcount))
1443 return SUCCESS;
1444
1445 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1446 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1447 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
1448 arcmsr_abort_one_cmd(acb, ccb);
1449 break;
1450 }
1451 }
1452
1453 return SUCCESS;
1454}
1455
1456static const char *arcmsr_info(struct Scsi_Host *host)
1457{
1458 struct AdapterControlBlock *acb =
1459 (struct AdapterControlBlock *) host->hostdata;
1460 static char buf[256];
1461 char *type;
1462 int raid6 = 1;
1463
1464 switch (acb->pdev->device) {
1465 case PCI_DEVICE_ID_ARECA_1110:
1466 case PCI_DEVICE_ID_ARECA_1210:
1467 raid6 = 0;
1468 /*FALLTHRU*/
1469 case PCI_DEVICE_ID_ARECA_1120:
1470 case PCI_DEVICE_ID_ARECA_1130:
1471 case PCI_DEVICE_ID_ARECA_1160:
1472 case PCI_DEVICE_ID_ARECA_1170:
1473 case PCI_DEVICE_ID_ARECA_1220:
1474 case PCI_DEVICE_ID_ARECA_1230:
1475 case PCI_DEVICE_ID_ARECA_1260:
1476 case PCI_DEVICE_ID_ARECA_1270:
1477 case PCI_DEVICE_ID_ARECA_1280:
1478 type = "SATA";
1479 break;
1480 case PCI_DEVICE_ID_ARECA_1380:
1481 case PCI_DEVICE_ID_ARECA_1381:
1482 case PCI_DEVICE_ID_ARECA_1680:
1483 case PCI_DEVICE_ID_ARECA_1681:
1484 type = "SAS";
1485 break;
1486 default:
1487 type = "X-TYPE";
1488 break;
1489 }
1490 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
1491 type, raid6 ? "( RAID6 capable)" : "",
1492 ARCMSR_DRIVER_VERSION);
1493 return buf;
1494}
1495
1496
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index e1337339cacc..7b3bd34faf47 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -46,7 +46,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
46 46
47#include <linux/stat.h> 47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */ 48#include <linux/slab.h> /* for kmalloc() */
49#include <linux/config.h> /* for CONFIG_PCI */
50#include <linux/pci.h> /* for PCI support */ 49#include <linux/pci.h> /* for PCI support */
51#include <linux/proc_fs.h> 50#include <linux/proc_fs.h>
52#include <linux/blkdev.h> 51#include <linux/blkdev.h>
@@ -185,7 +184,7 @@ static int adpt_detect(struct scsi_host_template* sht)
185 PINFO("Detecting Adaptec I2O RAID controllers...\n"); 184 PINFO("Detecting Adaptec I2O RAID controllers...\n");
186 185
187 /* search for all Adatpec I2O RAID cards */ 186 /* search for all Adatpec I2O RAID cards */
188 while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) { 187 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
189 if(pDev->device == PCI_DPT_DEVICE_ID || 188 if(pDev->device == PCI_DPT_DEVICE_ID ||
190 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){ 189 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
191 if(adpt_install_hba(sht, pDev) ){ 190 if(adpt_install_hba(sht, pDev) ){
@@ -193,8 +192,11 @@ static int adpt_detect(struct scsi_host_template* sht)
193 PERROR("Will not try to detect others.\n"); 192 PERROR("Will not try to detect others.\n");
194 return hba_count-1; 193 return hba_count-1;
195 } 194 }
195 pci_dev_get(pDev);
196 } 196 }
197 } 197 }
198 if (pDev)
199 pci_dev_put(pDev);
198 200
199 /* In INIT state, Activate IOPs */ 201 /* In INIT state, Activate IOPs */
200 for (pHba = hba_chain; pHba; pHba = pHba->next) { 202 for (pHba = hba_chain; pHba; pHba = pHba->next) {
@@ -1076,6 +1078,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1076 } 1078 }
1077 } 1079 }
1078 } 1080 }
1081 pci_dev_put(pHba->pDev);
1079 kfree(pHba); 1082 kfree(pHba);
1080 1083
1081 if(hba_count <= 0){ 1084 if(hba_count <= 0){
diff --git a/drivers/scsi/eata_generic.h b/drivers/scsi/eata_generic.h
index 34bce2c9e92e..635c14861f86 100644
--- a/drivers/scsi/eata_generic.h
+++ b/drivers/scsi/eata_generic.h
@@ -364,6 +364,7 @@ typedef struct hstd {
364 __u8 moresupport; /* HBA supports MORE flag */ 364 __u8 moresupport; /* HBA supports MORE flag */
365 struct Scsi_Host *next; 365 struct Scsi_Host *next;
366 struct Scsi_Host *prev; 366 struct Scsi_Host *prev;
367 struct pci_dev *pdev; /* PCI device or NULL for non PCI */
367 struct eata_sp sp; /* status packet */ 368 struct eata_sp sp; /* status packet */
368 struct eata_ccb ccb[0]; /* ccb array begins here */ 369 struct eata_ccb ccb[0]; /* ccb array begins here */
369}hostdata; 370}hostdata;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 771b01984cbc..d312633db92b 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -71,11 +71,11 @@
71#include "eata_pio.h" 71#include "eata_pio.h"
72 72
73 73
74static uint ISAbases[MAXISA] = { 74static unsigned int ISAbases[MAXISA] = {
75 0x1F0, 0x170, 0x330, 0x230 75 0x1F0, 0x170, 0x330, 0x230
76}; 76};
77 77
78static uint ISAirqs[MAXISA] = { 78static unsigned int ISAirqs[MAXISA] = {
79 14, 12, 15, 11 79 14, 12, 15, 11
80}; 80};
81 81
@@ -84,7 +84,7 @@ static unsigned char EISAbases[] = {
84 1, 1, 1, 1, 1, 1, 1, 1 84 1, 1, 1, 1, 1, 1, 1, 1
85}; 85};
86 86
87static uint registered_HBAs; 87static unsigned int registered_HBAs;
88static struct Scsi_Host *last_HBA; 88static struct Scsi_Host *last_HBA;
89static struct Scsi_Host *first_HBA; 89static struct Scsi_Host *first_HBA;
90static unsigned char reg_IRQ[16]; 90static unsigned char reg_IRQ[16];
@@ -165,6 +165,7 @@ static int eata_pio_proc_info(struct Scsi_Host *shost, char *buffer, char **star
165 165
166static int eata_pio_release(struct Scsi_Host *sh) 166static int eata_pio_release(struct Scsi_Host *sh)
167{ 167{
168 hostdata *hd = SD(sh);
168 if (sh->irq && reg_IRQ[sh->irq] == 1) 169 if (sh->irq && reg_IRQ[sh->irq] == 1)
169 free_irq(sh->irq, NULL); 170 free_irq(sh->irq, NULL);
170 else 171 else
@@ -173,10 +174,13 @@ static int eata_pio_release(struct Scsi_Host *sh)
173 if (sh->io_port && sh->n_io_port) 174 if (sh->io_port && sh->n_io_port)
174 release_region(sh->io_port, sh->n_io_port); 175 release_region(sh->io_port, sh->n_io_port);
175 } 176 }
177 /* At this point the PCI reference can go */
178 if (hd->pdev)
179 pci_dev_put(hd->pdev);
176 return 1; 180 return 1;
177} 181}
178 182
179static void IncStat(struct scsi_pointer *SCp, uint Increment) 183static void IncStat(struct scsi_pointer *SCp, unsigned int Increment)
180{ 184{
181 SCp->ptr += Increment; 185 SCp->ptr += Increment;
182 if ((SCp->this_residual -= Increment) == 0) { 186 if ((SCp->this_residual -= Increment) == 0) {
@@ -190,46 +194,49 @@ static void IncStat(struct scsi_pointer *SCp, uint Increment)
190 } 194 }
191} 195}
192 196
193static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs); 197static irqreturn_t eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs);
194 198
195static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id, 199static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id,
196 struct pt_regs *regs) 200 struct pt_regs *regs)
197{ 201{
198 unsigned long flags; 202 unsigned long flags;
199 struct Scsi_Host *dev = dev_id; 203 struct Scsi_Host *dev = dev_id;
204 irqreturn_t ret;
200 205
201 spin_lock_irqsave(dev->host_lock, flags); 206 spin_lock_irqsave(dev->host_lock, flags);
202 eata_pio_int_handler(irq, dev_id, regs); 207 ret = eata_pio_int_handler(irq, dev_id, regs);
203 spin_unlock_irqrestore(dev->host_lock, flags); 208 spin_unlock_irqrestore(dev->host_lock, flags);
204 return IRQ_HANDLED; 209 return ret;
205} 210}
206 211
207static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs) 212static irqreturn_t eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
208{ 213{
209 uint eata_stat = 0xfffff; 214 unsigned int eata_stat = 0xfffff;
210 struct scsi_cmnd *cmd; 215 struct scsi_cmnd *cmd;
211 hostdata *hd; 216 hostdata *hd;
212 struct eata_ccb *cp; 217 struct eata_ccb *cp;
213 uint base; 218 unsigned long base;
214 uint x, z; 219 unsigned int x, z;
215 struct Scsi_Host *sh; 220 struct Scsi_Host *sh;
216 unsigned short zwickel = 0; 221 unsigned short zwickel = 0;
217 unsigned char stat, odd; 222 unsigned char stat, odd;
223 irqreturn_t ret = IRQ_NONE;
218 224
219 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) 225 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev)
220 { 226 {
221 if (sh->irq != irq) 227 if (sh->irq != irq)
222 continue; 228 continue;
223 if (inb((uint) sh->base + HA_RSTATUS) & HA_SBUSY) 229 if (inb(sh->base + HA_RSTATUS) & HA_SBUSY)
224 continue; 230 continue;
225 231
226 int_counter++; 232 int_counter++;
233 ret = IRQ_HANDLED;
227 234
228 hd = SD(sh); 235 hd = SD(sh);
229 236
230 cp = &hd->ccb[0]; 237 cp = &hd->ccb[0];
231 cmd = cp->cmd; 238 cmd = cp->cmd;
232 base = (uint) cmd->device->host->base; 239 base = cmd->device->host->base;
233 240
234 do { 241 do {
235 stat = inb(base + HA_RSTATUS); 242 stat = inb(base + HA_RSTATUS);
@@ -304,7 +311,7 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
304 if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { 311 if (!(inb(base + HA_RSTATUS) & HA_SERROR)) {
305 cmd->result = (DID_OK << 16); 312 cmd->result = (DID_OK << 16);
306 hd->devflags |= (1 << cp->cp_id); 313 hd->devflags |= (1 << cp->cp_id);
307 } else if (hd->devflags & 1 << cp->cp_id) 314 } else if (hd->devflags & (1 << cp->cp_id))
308 cmd->result = (DID_OK << 16) + 0x02; 315 cmd->result = (DID_OK << 16) + 0x02;
309 else 316 else
310 cmd->result = (DID_NO_CONNECT << 16); 317 cmd->result = (DID_NO_CONNECT << 16);
@@ -313,7 +320,7 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
313 cp->status = FREE; 320 cp->status = FREE;
314 eata_stat = inb(base + HA_RSTATUS); 321 eata_stat = inb(base + HA_RSTATUS);
315 printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n"); 322 printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n");
316 return; 323 return ret;
317 } 324 }
318#if DBG_INTR2 325#if DBG_INTR2
319 if (stat != 0x50) 326 if (stat != 0x50)
@@ -325,12 +332,12 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
325 cmd->scsi_done(cmd); 332 cmd->scsi_done(cmd);
326 } 333 }
327 334
328 return; 335 return ret;
329} 336}
330 337
331static inline uint eata_pio_send_command(uint base, unsigned char command) 338static inline unsigned int eata_pio_send_command(unsigned long base, unsigned char command)
332{ 339{
333 uint loop = HZ / 2; 340 unsigned int loop = 50;
334 341
335 while (inb(base + HA_RSTATUS) & HA_SBUSY) 342 while (inb(base + HA_RSTATUS) & HA_SBUSY)
336 if (--loop == 0) 343 if (--loop == 0)
@@ -349,8 +356,8 @@ static inline uint eata_pio_send_command(uint base, unsigned char command)
349static int eata_pio_queue(struct scsi_cmnd *cmd, 356static int eata_pio_queue(struct scsi_cmnd *cmd,
350 void (*done)(struct scsi_cmnd *)) 357 void (*done)(struct scsi_cmnd *))
351{ 358{
352 uint x, y; 359 unsigned int x, y;
353 uint base; 360 unsigned long base;
354 361
355 hostdata *hd; 362 hostdata *hd;
356 struct Scsi_Host *sh; 363 struct Scsi_Host *sh;
@@ -360,7 +367,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
360 367
361 hd = HD(cmd); 368 hd = HD(cmd);
362 sh = cmd->device->host; 369 sh = cmd->device->host;
363 base = (uint) sh->base; 370 base = sh->base;
364 371
365 /* use only slot 0, as 2001 can handle only one cmd at a time */ 372 /* use only slot 0, as 2001 can handle only one cmd at a time */
366 373
@@ -395,9 +402,9 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
395 cp->DataIn = 0; /* Input mode */ 402 cp->DataIn = 0; /* Input mode */
396 403
397 cp->Interpret = (cmd->device->id == hd->hostid); 404 cp->Interpret = (cmd->device->id == hd->hostid);
398 cp->cp_datalen = htonl((unsigned long) cmd->request_bufflen); 405 cp->cp_datalen = cpu_to_be32(cmd->request_bufflen);
399 cp->Auto_Req_Sen = 0; 406 cp->Auto_Req_Sen = 0;
400 cp->cp_reqDMA = htonl(0); 407 cp->cp_reqDMA = 0;
401 cp->reqlen = 0; 408 cp->reqlen = 0;
402 409
403 cp->cp_id = cmd->device->id; 410 cp->cp_id = cmd->device->id;
@@ -406,7 +413,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
406 cp->cp_identify = 1; 413 cp->cp_identify = 1;
407 memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); 414 memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
408 415
409 cp->cp_statDMA = htonl(0); 416 cp->cp_statDMA = 0;
410 417
411 cp->cp_viraddr = cp; 418 cp->cp_viraddr = cp;
412 cp->cmd = cmd; 419 cp->cmd = cmd;
@@ -445,14 +452,14 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
445 452
446 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 453 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
447 "Queued base %#.4lx pid: %ld " 454 "Queued base %#.4lx pid: %ld "
448 "slot %d irq %d\n", (long) sh->base, cmd->pid, y, sh->irq)); 455 "slot %d irq %d\n", sh->base, cmd->pid, y, sh->irq));
449 456
450 return (0); 457 return (0);
451} 458}
452 459
453static int eata_pio_abort(struct scsi_cmnd *cmd) 460static int eata_pio_abort(struct scsi_cmnd *cmd)
454{ 461{
455 uint loop = HZ; 462 unsigned int loop = 100;
456 463
457 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, 464 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
458 "eata_pio_abort called pid: %ld\n", 465 "eata_pio_abort called pid: %ld\n",
@@ -485,7 +492,7 @@ static int eata_pio_abort(struct scsi_cmnd *cmd)
485 492
486static int eata_pio_host_reset(struct scsi_cmnd *cmd) 493static int eata_pio_host_reset(struct scsi_cmnd *cmd)
487{ 494{
488 uint x, limit = 0; 495 unsigned int x, limit = 0;
489 unsigned char success = 0; 496 unsigned char success = 0;
490 struct scsi_cmnd *sp; 497 struct scsi_cmnd *sp;
491 struct Scsi_Host *host = cmd->device->host; 498 struct Scsi_Host *host = cmd->device->host;
@@ -518,7 +525,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
518 } 525 }
519 526
520 /* hard reset the HBA */ 527 /* hard reset the HBA */
521 outb(EATA_CMD_RESET, (uint) cmd->device->host->base + HA_WCOMMAND); 528 outb(EATA_CMD_RESET, cmd->device->host->base + HA_WCOMMAND);
522 529
523 DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n")); 530 DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n"));
524 HD(cmd)->state = RESET; 531 HD(cmd)->state = RESET;
@@ -558,7 +565,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
558 } 565 }
559} 566}
560 567
561static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned long cplen, unsigned short cppadlen) 568static char *get_pio_board_data(unsigned long base, unsigned int irq, unsigned int id, unsigned long cplen, unsigned short cppadlen)
562{ 569{
563 struct eata_ccb cp; 570 struct eata_ccb cp;
564 static char buff[256]; 571 static char buff[256];
@@ -570,8 +577,8 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
570 cp.DataIn = 1; 577 cp.DataIn = 1;
571 cp.Interpret = 1; /* Interpret command */ 578 cp.Interpret = 1; /* Interpret command */
572 579
573 cp.cp_datalen = htonl(254); 580 cp.cp_datalen = cpu_to_be32(254);
574 cp.cp_dataDMA = htonl(0); 581 cp.cp_dataDMA = cpu_to_be32(0);
575 582
576 cp.cp_id = id; 583 cp.cp_id = id;
577 cp.cp_lun = 0; 584 cp.cp_lun = 0;
@@ -583,7 +590,7 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
583 cp.cp_cdb[4] = 254; 590 cp.cp_cdb[4] = 254;
584 cp.cp_cdb[5] = 0; 591 cp.cp_cdb[5] = 0;
585 592
586 if (eata_pio_send_command((uint) base, EATA_CMD_PIO_SEND_CP)) 593 if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP))
587 return (NULL); 594 return (NULL);
588 while (!(inb(base + HA_RSTATUS) & HA_SDRQ)); 595 while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
589 outsw(base + HA_RDATA, &cp, cplen); 596 outsw(base + HA_RDATA, &cp, cplen);
@@ -604,7 +611,7 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
604 } 611 }
605} 612}
606 613
607static int get_pio_conf_PIO(u32 base, struct get_conf *buf) 614static int get_pio_conf_PIO(unsigned long base, struct get_conf *buf)
608{ 615{
609 unsigned long loop = HZ / 2; 616 unsigned long loop = HZ / 2;
610 int z; 617 int z;
@@ -619,30 +626,30 @@ static int get_pio_conf_PIO(u32 base, struct get_conf *buf)
619 if (--loop == 0) 626 if (--loop == 0)
620 goto fail; 627 goto fail;
621 628
622 DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#x\n", base)); 629 DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#lx\n", base));
623 eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG); 630 eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG);
624 631
625 loop = HZ / 2; 632 loop = 50;
626 for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) { 633 for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) {
627 while (!(inb(base + HA_RSTATUS) & HA_SDRQ)) 634 while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
628 if (--loop == 0) 635 if (--loop == 0)
629 goto fail; 636 goto fail;
630 637
631 loop = HZ / 2; 638 loop = 50;
632 *p = inw(base + HA_RDATA); 639 *p = inw(base + HA_RDATA);
633 } 640 }
634 if (inb(base + HA_RSTATUS) & HA_SERROR) { 641 if (inb(base + HA_RSTATUS) & HA_SERROR) {
635 DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during " 642 DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during "
636 "transfer for HBA at %x\n", base)); 643 "transfer for HBA at %lx\n", base));
637 goto fail; 644 goto fail;
638 } 645 }
639 646
640 if (htonl(EATA_SIGNATURE) != buf->signature) 647 if (cpu_to_be32(EATA_SIGNATURE) != buf->signature)
641 goto fail; 648 goto fail;
642 649
643 DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found " 650 DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found "
644 "at %#4x EATA Level: %x\n", 651 "at %#4lx EATA Level: %x\n",
645 base, (uint) (buf->version))); 652 base, (unsigned int) (buf->version)));
646 653
647 while (inb(base + HA_RSTATUS) & HA_SDRQ) 654 while (inb(base + HA_RSTATUS) & HA_SDRQ)
648 inw(base + HA_RDATA); 655 inw(base + HA_RDATA);
@@ -665,12 +672,12 @@ static int get_pio_conf_PIO(u32 base, struct get_conf *buf)
665static void print_pio_config(struct get_conf *gc) 672static void print_pio_config(struct get_conf *gc)
666{ 673{
667 printk("Please check values: (read config data)\n"); 674 printk("Please check values: (read config data)\n");
668 printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", (uint) ntohl(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support); 675 printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", be32_to_cpu(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support);
669 printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND); 676 printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], be16_to_cpu(gc->queuesiz), be16_to_cpu(gc->SGsiz), gc->SECOND);
670 printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest); 677 printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest);
671} 678}
672 679
673static uint print_selftest(uint base) 680static unsigned int print_selftest(unsigned int base)
674{ 681{
675 unsigned char buffer[512]; 682 unsigned char buffer[512];
676#ifdef VERBOSE_SETUP 683#ifdef VERBOSE_SETUP
@@ -697,7 +704,7 @@ static uint print_selftest(uint base)
697 return (!(inb(base + HA_RSTATUS) & HA_SERROR)); 704 return (!(inb(base + HA_RSTATUS) & HA_SERROR));
698} 705}
699 706
700static int register_pio_HBA(long base, struct get_conf *gc) 707static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev)
701{ 708{
702 unsigned long size = 0; 709 unsigned long size = 0;
703 char *buff; 710 char *buff;
@@ -714,17 +721,17 @@ static int register_pio_HBA(long base, struct get_conf *gc)
714 return 0; 721 return 0;
715 } 722 }
716 723
717 if ((buff = get_pio_board_data((uint) base, gc->IRQ, gc->scsi_id[3], cplen = (htonl(gc->cplen) + 1) / 2, cppadlen = (htons(gc->cppadlen) + 1) / 2)) == NULL) { 724 if ((buff = get_pio_board_data(base, gc->IRQ, gc->scsi_id[3], cplen = (cpu_to_be32(gc->cplen) + 1) / 2, cppadlen = (cpu_to_be16(gc->cppadlen) + 1) / 2)) == NULL) {
718 printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", (unsigned long) base); 725 printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", base);
719 return 0; 726 return 0;
720 } 727 }
721 728
722 if (!print_selftest(base) && !ALLOW_DMA_BOARDS) { 729 if (!print_selftest(base) && !ALLOW_DMA_BOARDS) {
723 printk("HBA at %#lx failed while performing self test & setup.\n", (unsigned long) base); 730 printk("HBA at %#lx failed while performing self test & setup.\n", base);
724 return 0; 731 return 0;
725 } 732 }
726 733
727 size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)); 734 size = sizeof(hostdata) + (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz));
728 735
729 sh = scsi_register(&driver_template, size); 736 sh = scsi_register(&driver_template, size);
730 if (sh == NULL) 737 if (sh == NULL)
@@ -749,8 +756,8 @@ static int register_pio_HBA(long base, struct get_conf *gc)
749 756
750 hd = SD(sh); 757 hd = SD(sh);
751 758
752 memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz))); 759 memset(hd->ccb, 0, (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz)));
753 memset(hd->reads, 0, sizeof(unsigned long) * 26); 760 memset(hd->reads, 0, sizeof(hd->reads));
754 761
755 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor)); 762 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor));
756 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name)); 763 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name));
@@ -761,7 +768,7 @@ static int register_pio_HBA(long base, struct get_conf *gc)
761 SD(sh)->revision[4] = buff[35]; 768 SD(sh)->revision[4] = buff[35];
762 SD(sh)->revision[5] = 0; 769 SD(sh)->revision[5] = 0;
763 770
764 switch (ntohl(gc->len)) { 771 switch (be32_to_cpu(gc->len)) {
765 case 0x1c: 772 case 0x1c:
766 SD(sh)->EATA_revision = 'a'; 773 SD(sh)->EATA_revision = 'a';
767 break; 774 break;
@@ -777,7 +784,7 @@ static int register_pio_HBA(long base, struct get_conf *gc)
777 SD(sh)->EATA_revision = '?'; 784 SD(sh)->EATA_revision = '?';
778 } 785 }
779 786
780 if (ntohl(gc->len) >= 0x22) { 787 if (be32_to_cpu(gc->len) >= 0x22) {
781 if (gc->is_PCI) 788 if (gc->is_PCI)
782 hd->bustype = IS_PCI; 789 hd->bustype = IS_PCI;
783 else if (gc->is_EISA) 790 else if (gc->is_EISA)
@@ -811,6 +818,8 @@ static int register_pio_HBA(long base, struct get_conf *gc)
811 818
812 hd->channel = 0; 819 hd->channel = 0;
813 820
821 hd->pdev = pci_dev_get(pdev); /* Keep a PCI reference */
822
814 sh->max_id = 8; 823 sh->max_id = 8;
815 sh->max_lun = 8; 824 sh->max_lun = 8;
816 825
@@ -841,7 +850,7 @@ static void find_pio_ISA(struct get_conf *buf)
841 continue; 850 continue;
842 if (!get_pio_conf_PIO(ISAbases[i], buf)) 851 if (!get_pio_conf_PIO(ISAbases[i], buf))
843 continue; 852 continue;
844 if (!register_pio_HBA(ISAbases[i], buf)) 853 if (!register_pio_HBA(ISAbases[i], buf, NULL))
845 release_region(ISAbases[i], 9); 854 release_region(ISAbases[i], 9);
846 else 855 else
847 ISAbases[i] = 0; 856 ISAbases[i] = 0;
@@ -873,7 +882,7 @@ static void find_pio_EISA(struct get_conf *buf)
873 if (get_pio_conf_PIO(base, buf)) { 882 if (get_pio_conf_PIO(base, buf)) {
874 DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf)); 883 DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf));
875 if (buf->IRQ) { 884 if (buf->IRQ) {
876 if (!register_pio_HBA(base, buf)) 885 if (!register_pio_HBA(base, buf, NULL))
877 release_region(base, 9); 886 release_region(base, 9);
878 } else { 887 } else {
879 printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n"); 888 printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n");
@@ -896,9 +905,9 @@ static void find_pio_PCI(struct get_conf *buf)
896 printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n"); 905 printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
897#else 906#else
898 struct pci_dev *dev = NULL; 907 struct pci_dev *dev = NULL;
899 u32 base, x; 908 unsigned long base, x;
900 909
901 while ((dev = pci_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) { 910 while ((dev = pci_get_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) {
902 DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev))); 911 DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev)));
903 if (pci_enable_device(dev)) 912 if (pci_enable_device(dev))
904 continue; 913 continue;
@@ -926,7 +935,7 @@ static void find_pio_PCI(struct get_conf *buf)
926 * eventually remove it from the EISA and ISA list 935 * eventually remove it from the EISA and ISA list
927 */ 936 */
928 937
929 if (!register_pio_HBA(base, buf)) { 938 if (!register_pio_HBA(base, buf, dev)) {
930 release_region(base, 9); 939 release_region(base, 9);
931 continue; 940 continue;
932 } 941 }
@@ -976,12 +985,12 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
976 printk("Registered HBAs:\n"); 985 printk("Registered HBAs:\n");
977 printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n"); 986 printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n");
978 for (i = 1; i <= registered_HBAs; i++) { 987 for (i = 1; i <= registered_HBAs; i++) {
979 printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4x %2d %d %d %c" 988 printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4lx %2d %d %d %c"
980 " %2d %2d %2d\n", 989 " %2d %2d %2d\n",
981 HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision, 990 HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
982 SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ? 991 SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ?
983 "PCI " : (SD(HBA_ptr)->bustype == 'E') ? "EISA" : "ISA ", 992 "PCI " : (SD(HBA_ptr)->bustype == 'E') ? "EISA" : "ISA ",
984 (uint) HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id, 993 HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id,
985 SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue, 994 SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue,
986 HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun); 995 HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun);
987 HBA_ptr = SD(HBA_ptr)->next; 996 HBA_ptr = SD(HBA_ptr)->next;
diff --git a/drivers/scsi/fcal.c b/drivers/scsi/fcal.c
index 7f891023aa15..c4e16c0775de 100644
--- a/drivers/scsi/fcal.c
+++ b/drivers/scsi/fcal.c
@@ -248,8 +248,7 @@ int fcal_proc_info (struct Scsi_Host *host, char *buffer, char **start, off_t of
248 if (scd->id == target) { 248 if (scd->id == target) {
249 SPRINTF (" [AL-PA: %02x, Id: %02d, Port WWN: %08x%08x, Node WWN: %08x%08x] ", 249 SPRINTF (" [AL-PA: %02x, Id: %02d, Port WWN: %08x%08x, Node WWN: %08x%08x] ",
250 alpa, target, u1[0], u1[1], u2[0], u2[1]); 250 alpa, target, u1[0], u1[1], u2[0], u2[1]);
251 SPRINTF ("%s ", (scd->type < MAX_SCSI_DEVICE_CODE) ? 251 SPRINTF ("%s ", scsi_device_type(scd->type));
252 scsi_device_types[(short) scd->type] : "Unknown device");
253 252
254 for (j = 0; (j < 8) && (scd->vendor[j] >= 0x20); j++) 253 for (j = 0; (j < 8) && (scd->vendor[j] >= 0x20); j++)
255 SPRINTF ("%c", scd->vendor[j]); 254 SPRINTF ("%c", scd->vendor[j]);
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 67f1100f3103..cdd893bb4e28 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -811,7 +811,6 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
811 struct NCR5380_hostdata *hostdata; 811 struct NCR5380_hostdata *hostdata;
812#ifdef NCR5380_STATS 812#ifdef NCR5380_STATS
813 struct scsi_device *dev; 813 struct scsi_device *dev;
814 extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
815#endif 814#endif
816 815
817 NCR5380_setup(scsi_ptr); 816 NCR5380_setup(scsi_ptr);
@@ -851,7 +850,7 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
851 long tr = hostdata->time_read[dev->id] / HZ; 850 long tr = hostdata->time_read[dev->id] / HZ;
852 long tw = hostdata->time_write[dev->id] / HZ; 851 long tw = hostdata->time_write[dev->id] / HZ;
853 852
854 PRINTP(" T:%d %s " ANDP dev->id ANDP(dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int) dev->type] : "Unknown"); 853 PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type));
855 for (i = 0; i < 8; i++) 854 for (i = 0; i < 8; i++)
856 if (dev->vendor[i] >= 0x20) 855 if (dev->vendor[i] >= 0x20)
857 *(buffer + (len++)) = dev->vendor[i]; 856 *(buffer + (len++)) = dev->vendor[i];
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index a0d831b1bada..18dbe5c27dac 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -47,7 +47,7 @@ void gvp11_setup (char *str, int *ints)
47 gvp11_xfer_mask = ints[1]; 47 gvp11_xfer_mask = ints[1];
48} 48}
49 49
50static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 50static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
51{ 51{
52 unsigned short cntr = GVP11_DMAC_INT_ENABLE; 52 unsigned short cntr = GVP11_DMAC_INT_ENABLE;
53 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 53 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -142,8 +142,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
142 return 0; 142 return 0;
143} 143}
144 144
145static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 145static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
146 int status) 146 int status)
147{ 147{
148 /* stop DMA */ 148 /* stop DMA */
149 DMA(instance)->SP_DMA = 1; 149 DMA(instance)->SP_DMA = 1;
@@ -341,7 +341,7 @@ release:
341 return num_gvp11; 341 return num_gvp11;
342} 342}
343 343
344static int gvp11_bus_reset(Scsi_Cmnd *cmd) 344static int gvp11_bus_reset(struct scsi_cmnd *cmd)
345{ 345{
346 /* FIXME perform bus-specific reset */ 346 /* FIXME perform bus-specific reset */
347 347
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index 575d219d14ba..bf22859a5035 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -13,10 +13,6 @@
13 13
14int gvp11_detect(struct scsi_host_template *); 14int gvp11_detect(struct scsi_host_template *);
15int gvp11_release(struct Scsi_Host *); 15int gvp11_release(struct Scsi_Host *);
16const char *wd33c93_info(void);
17int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
18int wd33c93_abort(Scsi_Cmnd *);
19int wd33c93_reset(Scsi_Cmnd *, unsigned int);
20 16
21#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
22#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index dfcb96f3e60c..68ef1636678d 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -265,6 +265,9 @@ static void scsi_host_dev_release(struct device *dev)
265 destroy_workqueue(shost->work_q); 265 destroy_workqueue(shost->work_q);
266 266
267 scsi_destroy_command_freelist(shost); 267 scsi_destroy_command_freelist(shost);
268 if (shost->bqt)
269 blk_free_tags(shost->bqt);
270
268 kfree(shost->shost_data); 271 kfree(shost->shost_data);
269 272
270 if (parent) 273 if (parent)
@@ -487,7 +490,9 @@ EXPORT_SYMBOL(scsi_is_host_device);
487 * @work: Work to queue for execution. 490 * @work: Work to queue for execution.
488 * 491 *
489 * Return value: 492 * Return value:
490 * 0 on success / != 0 for error 493 * 1 - work queued for execution
494 * 0 - work is already queued
495 * -EINVAL - work queue doesn't exist
491 **/ 496 **/
492int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) 497int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
493{ 498{
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index bcb3444f1dcf..28bfb8f9f81d 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -15,7 +15,6 @@
15 * 15 *
16 * For more information, visit http://www.highpoint-tech.com 16 * For more information, visit http://www.highpoint-tech.com
17 */ 17 */
18#include <linux/config.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/types.h> 19#include <linux/types.h>
21#include <linux/string.h> 20#include <linux/string.h>
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 01080b3acf5e..7ed4eef8347b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -175,6 +175,8 @@ struct ipr_error_table_t ipr_error_table[] = {
175 "Qualified success"}, 175 "Qualified success"},
176 {0x01080000, 1, 1, 176 {0x01080000, 1, 1,
177 "FFFE: Soft device bus error recovered by the IOA"}, 177 "FFFE: Soft device bus error recovered by the IOA"},
178 {0x01088100, 0, 1,
179 "4101: Soft device bus fabric error"},
178 {0x01170600, 0, 1, 180 {0x01170600, 0, 1,
179 "FFF9: Device sector reassign successful"}, 181 "FFF9: Device sector reassign successful"},
180 {0x01170900, 0, 1, 182 {0x01170900, 0, 1,
@@ -225,6 +227,8 @@ struct ipr_error_table_t ipr_error_table[] = {
225 "3109: IOA timed out a device command"}, 227 "3109: IOA timed out a device command"},
226 {0x04088000, 0, 0, 228 {0x04088000, 0, 0,
227 "3120: SCSI bus is not operational"}, 229 "3120: SCSI bus is not operational"},
230 {0x04088100, 0, 1,
231 "4100: Hard device bus fabric error"},
228 {0x04118000, 0, 1, 232 {0x04118000, 0, 1,
229 "9000: IOA reserved area data check"}, 233 "9000: IOA reserved area data check"},
230 {0x04118100, 0, 1, 234 {0x04118100, 0, 1,
@@ -273,6 +277,14 @@ struct ipr_error_table_t ipr_error_table[] = {
273 "9091: Incorrect hardware configuration change has been detected"}, 277 "9091: Incorrect hardware configuration change has been detected"},
274 {0x04678000, 0, 1, 278 {0x04678000, 0, 1,
275 "9073: Invalid multi-adapter configuration"}, 279 "9073: Invalid multi-adapter configuration"},
280 {0x04678100, 0, 1,
281 "4010: Incorrect connection between cascaded expanders"},
282 {0x04678200, 0, 1,
283 "4020: Connections exceed IOA design limits"},
284 {0x04678300, 0, 1,
285 "4030: Incorrect multipath connection"},
286 {0x04679000, 0, 1,
287 "4110: Unsupported enclosure function"},
276 {0x046E0000, 0, 1, 288 {0x046E0000, 0, 1,
277 "FFF4: Command to logical unit failed"}, 289 "FFF4: Command to logical unit failed"},
278 {0x05240000, 1, 0, 290 {0x05240000, 1, 0,
@@ -297,6 +309,8 @@ struct ipr_error_table_t ipr_error_table[] = {
297 "9031: Array protection temporarily suspended, protection resuming"}, 309 "9031: Array protection temporarily suspended, protection resuming"},
298 {0x06040600, 0, 1, 310 {0x06040600, 0, 1,
299 "9040: Array protection temporarily suspended, protection resuming"}, 311 "9040: Array protection temporarily suspended, protection resuming"},
312 {0x06288000, 0, 1,
313 "3140: Device bus not ready to ready transition"},
300 {0x06290000, 0, 1, 314 {0x06290000, 0, 1,
301 "FFFB: SCSI bus was reset"}, 315 "FFFB: SCSI bus was reset"},
302 {0x06290500, 0, 0, 316 {0x06290500, 0, 0,
@@ -319,6 +333,16 @@ struct ipr_error_table_t ipr_error_table[] = {
319 "3150: SCSI bus configuration error"}, 333 "3150: SCSI bus configuration error"},
320 {0x06678100, 0, 1, 334 {0x06678100, 0, 1,
321 "9074: Asymmetric advanced function disk configuration"}, 335 "9074: Asymmetric advanced function disk configuration"},
336 {0x06678300, 0, 1,
337 "4040: Incomplete multipath connection between IOA and enclosure"},
338 {0x06678400, 0, 1,
339 "4041: Incomplete multipath connection between enclosure and device"},
340 {0x06678500, 0, 1,
341 "9075: Incomplete multipath connection between IOA and remote IOA"},
342 {0x06678600, 0, 1,
343 "9076: Configuration error, missing remote IOA"},
344 {0x06679100, 0, 1,
345 "4050: Enclosure does not support a required multipath function"},
322 {0x06690200, 0, 1, 346 {0x06690200, 0, 1,
323 "9041: Array protection temporarily suspended"}, 347 "9041: Array protection temporarily suspended"},
324 {0x06698200, 0, 1, 348 {0x06698200, 0, 1,
@@ -331,6 +355,10 @@ struct ipr_error_table_t ipr_error_table[] = {
331 "9072: Link not operational transition"}, 355 "9072: Link not operational transition"},
332 {0x066B8200, 0, 1, 356 {0x066B8200, 0, 1,
333 "9032: Array exposed but still protected"}, 357 "9032: Array exposed but still protected"},
358 {0x066B9100, 0, 1,
359 "4061: Multipath redundancy level got better"},
360 {0x066B9200, 0, 1,
361 "4060: Multipath redundancy level got worse"},
334 {0x07270000, 0, 0, 362 {0x07270000, 0, 0,
335 "Failure due to other device"}, 363 "Failure due to other device"},
336 {0x07278000, 0, 1, 364 {0x07278000, 0, 1,
@@ -4099,8 +4127,7 @@ static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4099{ 4127{
4100 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 4128 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4101 4129
4102 if ((be32_to_cpu(ioasa->ioasc_specific) & 4130 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4103 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4104 return 0; 4131 return 0;
4105 4132
4106 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 4133 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
@@ -4190,7 +4217,8 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4190 case IPR_IOASC_NR_INIT_CMD_REQUIRED: 4217 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4191 break; 4218 break;
4192 default: 4219 default:
4193 scsi_cmd->result |= (DID_ERROR << 16); 4220 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4221 scsi_cmd->result |= (DID_ERROR << 16);
4194 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) 4222 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4195 res->needs_sync_complete = 1; 4223 res->needs_sync_complete = 1;
4196 break; 4224 break;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 1ad24df69d70..11eaff524327 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -36,8 +36,8 @@
36/* 36/*
37 * Literals 37 * Literals
38 */ 38 */
39#define IPR_DRIVER_VERSION "2.1.3" 39#define IPR_DRIVER_VERSION "2.1.4"
40#define IPR_DRIVER_DATE "(March 29, 2006)" 40#define IPR_DRIVER_DATE "(August 2, 2006)"
41 41
42/* 42/*
43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -45,6 +45,7 @@
45 * This can be adjusted at runtime through sysfs device attributes. 45 * This can be adjusted at runtime through sysfs device attributes.
46 */ 46 */
47#define IPR_MAX_CMD_PER_LUN 6 47#define IPR_MAX_CMD_PER_LUN 6
48#define IPR_MAX_CMD_PER_ATA_LUN 1
48 49
49/* 50/*
50 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of 51 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
@@ -106,7 +107,7 @@
106#define IPR_IOA_BUS 0xff 107#define IPR_IOA_BUS 0xff
107#define IPR_IOA_TARGET 0xff 108#define IPR_IOA_TARGET 0xff
108#define IPR_IOA_LUN 0xff 109#define IPR_IOA_LUN 0xff
109#define IPR_MAX_NUM_BUSES 8 110#define IPR_MAX_NUM_BUSES 16
110#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES 111#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES
111 112
112#define IPR_NUM_RESET_RELOAD_RETRIES 3 113#define IPR_NUM_RESET_RELOAD_RETRIES 3
@@ -145,6 +146,7 @@
145#define IPR_LUN_RESET 0x40 146#define IPR_LUN_RESET 0x40
146#define IPR_TARGET_RESET 0x20 147#define IPR_TARGET_RESET 0x20
147#define IPR_BUS_RESET 0x10 148#define IPR_BUS_RESET 0x10
149#define IPR_ATA_PHY_RESET 0x80
148#define IPR_ID_HOST_RR_Q 0xC4 150#define IPR_ID_HOST_RR_Q 0xC4
149#define IPR_QUERY_IOA_CONFIG 0xC5 151#define IPR_QUERY_IOA_CONFIG 0xC5
150#define IPR_CANCEL_ALL_REQUESTS 0xCE 152#define IPR_CANCEL_ALL_REQUESTS 0xCE
@@ -295,7 +297,11 @@ struct ipr_std_inq_data {
295}__attribute__ ((packed)); 297}__attribute__ ((packed));
296 298
297struct ipr_config_table_entry { 299struct ipr_config_table_entry {
298 u8 service_level; 300 u8 proto;
301#define IPR_PROTO_SATA 0x02
302#define IPR_PROTO_SATA_ATAPI 0x03
303#define IPR_PROTO_SAS_STP 0x06
304#define IPR_PROTO_SAS_STP_ATAPI 0x07
299 u8 array_id; 305 u8 array_id;
300 u8 flags; 306 u8 flags;
301#define IPR_IS_IOA_RESOURCE 0x80 307#define IPR_IS_IOA_RESOURCE 0x80
@@ -307,6 +313,7 @@ struct ipr_config_table_entry {
307#define IPR_SUBTYPE_AF_DASD 0 313#define IPR_SUBTYPE_AF_DASD 0
308#define IPR_SUBTYPE_GENERIC_SCSI 1 314#define IPR_SUBTYPE_GENERIC_SCSI 1
309#define IPR_SUBTYPE_VOLUME_SET 2 315#define IPR_SUBTYPE_VOLUME_SET 2
316#define IPR_SUBTYPE_GENERIC_ATA 4
310 317
311#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4) 318#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4)
312#define IPR_QUEUE_FROZEN_MODEL 0 319#define IPR_QUEUE_FROZEN_MODEL 0
@@ -350,6 +357,7 @@ struct ipr_cmd_pkt {
350#define IPR_RQTYPE_SCSICDB 0x00 357#define IPR_RQTYPE_SCSICDB 0x00
351#define IPR_RQTYPE_IOACMD 0x01 358#define IPR_RQTYPE_IOACMD 0x01
352#define IPR_RQTYPE_HCAM 0x02 359#define IPR_RQTYPE_HCAM 0x02
360#define IPR_RQTYPE_ATA_PASSTHRU 0x04
353 361
354 u8 luntar_luntrn; 362 u8 luntar_luntrn;
355 363
@@ -373,6 +381,37 @@ struct ipr_cmd_pkt {
373 __be16 timeout; 381 __be16 timeout;
374}__attribute__ ((packed, aligned(4))); 382}__attribute__ ((packed, aligned(4)));
375 383
384struct ipr_ioarcb_ata_regs {
385 u8 flags;
386#define IPR_ATA_FLAG_PACKET_CMD 0x80
387#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
388#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
389 u8 reserved[3];
390
391 __be16 data;
392 u8 feature;
393 u8 nsect;
394 u8 lbal;
395 u8 lbam;
396 u8 lbah;
397 u8 device;
398 u8 command;
399 u8 reserved2[3];
400 u8 hob_feature;
401 u8 hob_nsect;
402 u8 hob_lbal;
403 u8 hob_lbam;
404 u8 hob_lbah;
405 u8 ctl;
406}__attribute__ ((packed, aligned(4)));
407
408struct ipr_ioarcb_add_data {
409 union {
410 struct ipr_ioarcb_ata_regs regs;
411 __be32 add_cmd_parms[10];
412 }u;
413}__attribute__ ((packed, aligned(4)));
414
376/* IOA Request Control Block 128 bytes */ 415/* IOA Request Control Block 128 bytes */
377struct ipr_ioarcb { 416struct ipr_ioarcb {
378 __be32 ioarcb_host_pci_addr; 417 __be32 ioarcb_host_pci_addr;
@@ -397,7 +436,7 @@ struct ipr_ioarcb {
397 struct ipr_cmd_pkt cmd_pkt; 436 struct ipr_cmd_pkt cmd_pkt;
398 437
399 __be32 add_cmd_parms_len; 438 __be32 add_cmd_parms_len;
400 __be32 add_cmd_parms[10]; 439 struct ipr_ioarcb_add_data add_data;
401}__attribute__((packed, aligned (4))); 440}__attribute__((packed, aligned (4)));
402 441
403struct ipr_ioadl_desc { 442struct ipr_ioadl_desc {
@@ -433,6 +472,21 @@ struct ipr_ioasa_gpdd {
433 __be32 ioa_data[2]; 472 __be32 ioa_data[2];
434}__attribute__((packed, aligned (4))); 473}__attribute__((packed, aligned (4)));
435 474
475struct ipr_ioasa_gata {
476 u8 error;
477 u8 nsect; /* Interrupt reason */
478 u8 lbal;
479 u8 lbam;
480 u8 lbah;
481 u8 device;
482 u8 status;
483 u8 alt_status; /* ATA CTL */
484 u8 hob_nsect;
485 u8 hob_lbal;
486 u8 hob_lbam;
487 u8 hob_lbah;
488}__attribute__((packed, aligned (4)));
489
436struct ipr_auto_sense { 490struct ipr_auto_sense {
437 __be16 auto_sense_len; 491 __be16 auto_sense_len;
438 __be16 ioa_data_len; 492 __be16 ioa_data_len;
@@ -466,6 +520,7 @@ struct ipr_ioasa {
466 __be32 ioasc_specific; /* status code specific field */ 520 __be32 ioasc_specific; /* status code specific field */
467#define IPR_ADDITIONAL_STATUS_FMT 0x80000000 521#define IPR_ADDITIONAL_STATUS_FMT 0x80000000
468#define IPR_AUTOSENSE_VALID 0x40000000 522#define IPR_AUTOSENSE_VALID 0x40000000
523#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
469#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff 524#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
470#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) 525#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
471#define IPR_FIELD_POINTER_MASK 0x0000ffff 526#define IPR_FIELD_POINTER_MASK 0x0000ffff
@@ -474,6 +529,7 @@ struct ipr_ioasa {
474 struct ipr_ioasa_vset vset; 529 struct ipr_ioasa_vset vset;
475 struct ipr_ioasa_af_dasd dasd; 530 struct ipr_ioasa_af_dasd dasd;
476 struct ipr_ioasa_gpdd gpdd; 531 struct ipr_ioasa_gpdd gpdd;
532 struct ipr_ioasa_gata gata;
477 } u; 533 } u;
478 534
479 struct ipr_auto_sense auto_sense; 535 struct ipr_auto_sense auto_sense;
@@ -1308,6 +1364,22 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
1308} 1364}
1309 1365
1310/** 1366/**
1367 * ipr_is_gata - Determine if a resource is a generic ATA resource
1368 * @res: resource entry struct
1369 *
1370 * Return value:
1371 * 1 if GATA / 0 if not GATA
1372 **/
1373static inline int ipr_is_gata(struct ipr_resource_entry *res)
1374{
1375 if (!ipr_is_ioa_resource(res) &&
1376 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA)
1377 return 1;
1378 else
1379 return 0;
1380}
1381
1382/**
1311 * ipr_is_naca_model - Determine if a resource is using NACA queueing model 1383 * ipr_is_naca_model - Determine if a resource is using NACA queueing model
1312 * @res: resource entry struct 1384 * @res: resource entry struct
1313 * 1385 *
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 66a1ae1d6982..0a9dbc59663f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -26,7 +26,6 @@
26 * Zhenyu Wang 26 * Zhenyu Wang
27 */ 27 */
28 28
29#include <linux/err.h>
30#include <linux/types.h> 29#include <linux/types.h>
31#include <linux/list.h> 30#include <linux/list.h>
32#include <linux/inet.h> 31#include <linux/inet.h>
@@ -108,12 +107,9 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
108 u8* crc) 107 u8* crc)
109{ 108{
110 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 109 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
111 struct hash_desc desc;
112 110
113 desc.tfm = tcp_conn->tx_tfm; 111 crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
114 desc.flags = 0; 112 buf->sg.length = tcp_conn->hdr_size;
115 crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc);
116 buf->sg.length += sizeof(uint32_t);
117} 113}
118 114
119static inline int 115static inline int
@@ -285,7 +281,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
285{ 281{
286 struct iscsi_data *hdr; 282 struct iscsi_data *hdr;
287 struct scsi_cmnd *sc = ctask->sc; 283 struct scsi_cmnd *sc = ctask->sc;
288 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
289 284
290 hdr = &r2t->dtask.hdr; 285 hdr = &r2t->dtask.hdr;
291 memset(hdr, 0, sizeof(struct iscsi_data)); 286 memset(hdr, 0, sizeof(struct iscsi_data));
@@ -340,10 +335,12 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
340 sg_count += sg->length; 335 sg_count += sg->length;
341 } 336 }
342 BUG_ON(r2t->sg == NULL); 337 BUG_ON(r2t->sg == NULL);
343 } else 338 } else {
344 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 339 iscsi_buf_init_iov(&r2t->sendbuf,
345 (char*)sc->request_buffer + r2t->data_offset, 340 (char*)sc->request_buffer + r2t->data_offset,
346 r2t->data_count); 341 r2t->data_count);
342 r2t->sg = NULL;
343 }
347} 344}
348 345
349/** 346/**
@@ -362,8 +359,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
362 int r2tsn = be32_to_cpu(rhdr->r2tsn); 359 int r2tsn = be32_to_cpu(rhdr->r2tsn);
363 int rc; 360 int rc;
364 361
365 if (tcp_conn->in.datalen) 362 if (tcp_conn->in.datalen) {
363 printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
364 tcp_conn->in.datalen);
366 return ISCSI_ERR_DATALEN; 365 return ISCSI_ERR_DATALEN;
366 }
367 367
368 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn) 368 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
369 return ISCSI_ERR_R2TSN; 369 return ISCSI_ERR_R2TSN;
@@ -389,15 +389,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
389 389
390 r2t->exp_statsn = rhdr->statsn; 390 r2t->exp_statsn = rhdr->statsn;
391 r2t->data_length = be32_to_cpu(rhdr->data_length); 391 r2t->data_length = be32_to_cpu(rhdr->data_length);
392 if (r2t->data_length == 0 || 392 if (r2t->data_length == 0) {
393 r2t->data_length > session->max_burst) { 393 printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
394 spin_unlock(&session->lock); 394 spin_unlock(&session->lock);
395 return ISCSI_ERR_DATALEN; 395 return ISCSI_ERR_DATALEN;
396 } 396 }
397 397
398 if (r2t->data_length > session->max_burst)
399 debug_scsi("invalid R2T with data len %u and max burst %u."
400 "Attempting to execute request.\n",
401 r2t->data_length, session->max_burst);
402
398 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 403 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
399 if (r2t->data_offset + r2t->data_length > ctask->total_length) { 404 if (r2t->data_offset + r2t->data_length > ctask->total_length) {
400 spin_unlock(&session->lock); 405 spin_unlock(&session->lock);
406 printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
407 "offset %u and total length %d\n", r2t->data_length,
408 r2t->data_offset, ctask->total_length);
401 return ISCSI_ERR_DATALEN; 409 return ISCSI_ERR_DATALEN;
402 } 410 }
403 411
@@ -456,14 +464,12 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
456 } 464 }
457 465
458 if (conn->hdrdgst_en) { 466 if (conn->hdrdgst_en) {
459 struct hash_desc desc;
460 struct scatterlist sg; 467 struct scatterlist sg;
461 468
462 sg_init_one(&sg, (u8 *)hdr, 469 sg_init_one(&sg, (u8 *)hdr,
463 sizeof(struct iscsi_hdr) + ahslen); 470 sizeof(struct iscsi_hdr) + ahslen);
464 desc.tfm = tcp_conn->rx_tfm; 471 crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
465 desc.flags = 0; 472 (u8 *)&cdgst);
466 crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst);
467 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + 473 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
468 ahslen); 474 ahslen);
469 if (cdgst != rdgst) { 475 if (cdgst != rdgst) {
@@ -499,7 +505,6 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
499 goto copy_hdr; 505 goto copy_hdr;
500 506
501 spin_lock(&session->lock); 507 spin_lock(&session->lock);
502 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
503 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); 508 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
504 spin_unlock(&session->lock); 509 spin_unlock(&session->lock);
505 break; 510 break;
@@ -644,10 +649,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
644 * byte counters. 649 * byte counters.
645 **/ 650 **/
646static inline int 651static inline int
647iscsi_tcp_copy(struct iscsi_conn *conn) 652iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
648{ 653{
649 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 654 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
650 int buf_size = tcp_conn->in.datalen;
651 int buf_left = buf_size - tcp_conn->data_copied; 655 int buf_left = buf_size - tcp_conn->data_copied;
652 int size = min(tcp_conn->in.copy, buf_left); 656 int size = min(tcp_conn->in.copy, buf_left);
653 int rc; 657 int rc;
@@ -672,15 +676,15 @@ iscsi_tcp_copy(struct iscsi_conn *conn)
672} 676}
673 677
674static inline void 678static inline void
675partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn, 679partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
676 struct scatterlist *sg, int offset, int length) 680 int offset, int length)
677{ 681{
678 struct scatterlist temp; 682 struct scatterlist temp;
679 683
680 memcpy(&temp, sg, sizeof(struct scatterlist)); 684 memcpy(&temp, sg, sizeof(struct scatterlist));
681 temp.offset = offset; 685 temp.offset = offset;
682 temp.length = length; 686 temp.length = length;
683 crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length); 687 crypto_hash_update(desc, &temp, length);
684} 688}
685 689
686static void 690static void
@@ -689,7 +693,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
689 struct scatterlist tmp; 693 struct scatterlist tmp;
690 694
691 sg_init_one(&tmp, buf, len); 695 sg_init_one(&tmp, buf, len);
692 crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len); 696 crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
693} 697}
694 698
695static int iscsi_scsi_data_in(struct iscsi_conn *conn) 699static int iscsi_scsi_data_in(struct iscsi_conn *conn)
@@ -744,10 +748,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
744 if (conn->datadgst_en) { 748 if (conn->datadgst_en) {
745 if (!offset) 749 if (!offset)
746 crypto_hash_update( 750 crypto_hash_update(
747 &tcp_conn->data_rx_hash, 751 &tcp_conn->rx_hash,
748 &sg[i], sg[i].length); 752 &sg[i], 1);
749 else 753 else
750 partial_sg_digest_update(tcp_conn, 754 partial_sg_digest_update(
755 &tcp_conn->rx_hash,
751 &sg[i], 756 &sg[i],
752 sg[i].offset + offset, 757 sg[i].offset + offset,
753 sg[i].length - offset); 758 sg[i].length - offset);
@@ -761,8 +766,10 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
761 /* 766 /*
762 * data-in is complete, but buffer not... 767 * data-in is complete, but buffer not...
763 */ 768 */
764 partial_sg_digest_update(tcp_conn, &sg[i], 769 partial_sg_digest_update(&tcp_conn->rx_hash,
765 sg[i].offset, sg[i].length-rc); 770 &sg[i],
771 sg[i].offset,
772 sg[i].length-rc);
766 rc = 0; 773 rc = 0;
767 break; 774 break;
768 } 775 }
@@ -779,7 +786,6 @@ done:
779 (long)sc, sc->result, ctask->itt, 786 (long)sc, sc->result, ctask->itt,
780 tcp_conn->in.hdr->flags); 787 tcp_conn->in.hdr->flags);
781 spin_lock(&conn->session->lock); 788 spin_lock(&conn->session->lock);
782 iscsi_tcp_cleanup_ctask(conn, ctask);
783 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); 789 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
784 spin_unlock(&conn->session->lock); 790 spin_unlock(&conn->session->lock);
785 } 791 }
@@ -799,9 +805,6 @@ iscsi_data_recv(struct iscsi_conn *conn)
799 rc = iscsi_scsi_data_in(conn); 805 rc = iscsi_scsi_data_in(conn);
800 break; 806 break;
801 case ISCSI_OP_SCSI_CMD_RSP: 807 case ISCSI_OP_SCSI_CMD_RSP:
802 spin_lock(&conn->session->lock);
803 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
804 spin_unlock(&conn->session->lock);
805 case ISCSI_OP_TEXT_RSP: 808 case ISCSI_OP_TEXT_RSP:
806 case ISCSI_OP_LOGIN_RSP: 809 case ISCSI_OP_LOGIN_RSP:
807 case ISCSI_OP_ASYNC_EVENT: 810 case ISCSI_OP_ASYNC_EVENT:
@@ -810,7 +813,7 @@ iscsi_data_recv(struct iscsi_conn *conn)
810 * Collect data segment to the connection's data 813 * Collect data segment to the connection's data
811 * placeholder 814 * placeholder
812 */ 815 */
813 if (iscsi_tcp_copy(conn)) { 816 if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
814 rc = -EAGAIN; 817 rc = -EAGAIN;
815 goto exit; 818 goto exit;
816 } 819 }
@@ -883,9 +886,8 @@ more:
883 */ 886 */
884 rc = iscsi_tcp_hdr_recv(conn); 887 rc = iscsi_tcp_hdr_recv(conn);
885 if (!rc && tcp_conn->in.datalen) { 888 if (!rc && tcp_conn->in.datalen) {
886 if (conn->datadgst_en) { 889 if (conn->datadgst_en)
887 crypto_hash_init(&tcp_conn->data_rx_hash); 890 crypto_hash_init(&tcp_conn->rx_hash);
888 }
889 tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; 891 tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
890 } else if (rc) { 892 } else if (rc) {
891 iscsi_conn_failure(conn, rc); 893 iscsi_conn_failure(conn, rc);
@@ -898,10 +900,15 @@ more:
898 900
899 debug_tcp("extra data_recv offset %d copy %d\n", 901 debug_tcp("extra data_recv offset %d copy %d\n",
900 tcp_conn->in.offset, tcp_conn->in.copy); 902 tcp_conn->in.offset, tcp_conn->in.copy);
901 skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 903 rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
902 &recv_digest, 4); 904 if (rc) {
903 tcp_conn->in.offset += 4; 905 if (rc == -EAGAIN)
904 tcp_conn->in.copy -= 4; 906 goto again;
907 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
908 return 0;
909 }
910
911 memcpy(&recv_digest, conn->data, sizeof(uint32_t));
905 if (recv_digest != tcp_conn->in.datadgst) { 912 if (recv_digest != tcp_conn->in.datadgst) {
906 debug_tcp("iscsi_tcp: data digest error!" 913 debug_tcp("iscsi_tcp: data digest error!"
907 "0x%x != 0x%x\n", recv_digest, 914 "0x%x != 0x%x\n", recv_digest,
@@ -937,13 +944,14 @@ more:
937 tcp_conn->in.padding); 944 tcp_conn->in.padding);
938 memset(pad, 0, tcp_conn->in.padding); 945 memset(pad, 0, tcp_conn->in.padding);
939 sg_init_one(&sg, pad, tcp_conn->in.padding); 946 sg_init_one(&sg, pad, tcp_conn->in.padding);
940 crypto_hash_update(&tcp_conn->data_rx_hash, 947 crypto_hash_update(&tcp_conn->rx_hash,
941 &sg, sg.length); 948 &sg, sg.length);
942 } 949 }
943 crypto_hash_final(&tcp_conn->data_rx_hash, 950 crypto_hash_final(&tcp_conn->rx_hash,
944 (u8 *)&tcp_conn->in.datadgst); 951 (u8 *) &tcp_conn->in.datadgst);
945 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); 952 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
946 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; 953 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
954 tcp_conn->data_copied = 0;
947 } else 955 } else
948 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 956 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
949 } 957 }
@@ -1183,36 +1191,12 @@ iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1183 1191
1184static inline void 1192static inline void
1185iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, 1193iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
1186 struct iscsi_cmd_task *ctask) 1194 struct iscsi_tcp_cmd_task *tcp_ctask)
1187{ 1195{
1188 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1196 crypto_hash_init(&tcp_conn->tx_hash);
1189
1190 crypto_hash_init(&tcp_conn->data_tx_hash);
1191 tcp_ctask->digest_count = 4; 1197 tcp_ctask->digest_count = 4;
1192} 1198}
1193 1199
1194static int
1195iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1196 struct iscsi_buf *buf, uint32_t *digest, int final)
1197{
1198 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1199 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1200 int rc = 0;
1201 int sent = 0;
1202
1203 if (final)
1204 crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest);
1205
1206 iscsi_buf_init_iov(buf, (char*)digest, 4);
1207 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
1208 if (rc) {
1209 tcp_ctask->datadigest = *digest;
1210 tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST;
1211 } else
1212 tcp_ctask->digest_count = 4;
1213 return rc;
1214}
1215
1216/** 1200/**
1217 * iscsi_solicit_data_cont - initialize next Data-Out 1201 * iscsi_solicit_data_cont - initialize next Data-Out
1218 * @conn: iscsi connection 1202 * @conn: iscsi connection
@@ -1230,7 +1214,6 @@ static void
1230iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1214iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1231 struct iscsi_r2t_info *r2t, int left) 1215 struct iscsi_r2t_info *r2t, int left)
1232{ 1216{
1233 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1234 struct iscsi_data *hdr; 1217 struct iscsi_data *hdr;
1235 struct scsi_cmnd *sc = ctask->sc; 1218 struct scsi_cmnd *sc = ctask->sc;
1236 int new_offset; 1219 int new_offset;
@@ -1259,27 +1242,30 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1259 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, 1242 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
1260 sizeof(struct iscsi_hdr)); 1243 sizeof(struct iscsi_hdr));
1261 1244
1262 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) { 1245 if (iscsi_buf_left(&r2t->sendbuf))
1263 BUG_ON(tcp_ctask->bad_sg == r2t->sg); 1246 return;
1247
1248 if (sc->use_sg) {
1264 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); 1249 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1265 r2t->sg += 1; 1250 r2t->sg += 1;
1266 } else 1251 } else {
1267 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1252 iscsi_buf_init_iov(&r2t->sendbuf,
1268 (char*)sc->request_buffer + new_offset, 1253 (char*)sc->request_buffer + new_offset,
1269 r2t->data_count); 1254 r2t->data_count);
1255 r2t->sg = NULL;
1256 }
1270} 1257}
1271 1258
1272static void 1259static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
1273iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1260 unsigned long len)
1274{ 1261{
1275 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1262 tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
1276 struct iscsi_data_task *dtask; 1263 if (!tcp_ctask->pad_count)
1264 return;
1277 1265
1278 dtask = tcp_ctask->dtask = &tcp_ctask->unsol_dtask; 1266 tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
1279 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr, 1267 debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
1280 tcp_ctask->r2t_data_count); 1268 tcp_ctask->xmstate |= XMSTATE_W_PAD;
1281 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
1282 sizeof(struct iscsi_hdr));
1283} 1269}
1284 1270
1285/** 1271/**
@@ -1307,38 +1293,20 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1307 if (sc->use_sg) { 1293 if (sc->use_sg) {
1308 struct scatterlist *sg = sc->request_buffer; 1294 struct scatterlist *sg = sc->request_buffer;
1309 1295
1310 iscsi_buf_init_sg(&tcp_ctask->sendbuf, 1296 iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
1311 &sg[tcp_ctask->sg_count++]); 1297 tcp_ctask->sg = sg + 1;
1312 tcp_ctask->sg = sg;
1313 tcp_ctask->bad_sg = sg + sc->use_sg; 1298 tcp_ctask->bad_sg = sg + sc->use_sg;
1314 } else 1299 } else {
1315 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1300 iscsi_buf_init_iov(&tcp_ctask->sendbuf,
1316 sc->request_buffer, 1301 sc->request_buffer,
1317 sc->request_bufflen); 1302 sc->request_bufflen);
1318 1303 tcp_ctask->sg = NULL;
1319 if (ctask->imm_count) 1304 tcp_ctask->bad_sg = NULL;
1320 tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
1321
1322 tcp_ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1323 if (tcp_ctask->pad_count) {
1324 tcp_ctask->pad_count = ISCSI_PAD_LEN -
1325 tcp_ctask->pad_count;
1326 debug_scsi("write padding %d bytes\n",
1327 tcp_ctask->pad_count);
1328 tcp_ctask->xmstate |= XMSTATE_W_PAD;
1329 } 1305 }
1330 1306 debug_scsi("cmd [itt 0x%x total %d imm_data %d "
1331 if (ctask->unsol_count) 1307 "unsol count %d, unsol offset %d]\n",
1332 tcp_ctask->xmstate |= XMSTATE_UNS_HDR |
1333 XMSTATE_UNS_INIT;
1334 tcp_ctask->r2t_data_count = ctask->total_length -
1335 ctask->imm_count -
1336 ctask->unsol_count;
1337
1338 debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d "
1339 "r2t_data %d]\n",
1340 ctask->itt, ctask->total_length, ctask->imm_count, 1308 ctask->itt, ctask->total_length, ctask->imm_count,
1341 ctask->unsol_count, tcp_ctask->r2t_data_count); 1309 ctask->unsol_count, ctask->unsol_offset);
1342 } else 1310 } else
1343 tcp_ctask->xmstate = XMSTATE_R_HDR; 1311 tcp_ctask->xmstate = XMSTATE_R_HDR;
1344 1312
@@ -1420,8 +1388,8 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1420} 1388}
1421 1389
1422static inline int 1390static inline int
1423handle_xmstate_r_hdr(struct iscsi_conn *conn, 1391iscsi_send_read_hdr(struct iscsi_conn *conn,
1424 struct iscsi_tcp_cmd_task *tcp_ctask) 1392 struct iscsi_tcp_cmd_task *tcp_ctask)
1425{ 1393{
1426 int rc; 1394 int rc;
1427 1395
@@ -1439,7 +1407,7 @@ handle_xmstate_r_hdr(struct iscsi_conn *conn,
1439} 1407}
1440 1408
1441static inline int 1409static inline int
1442handle_xmstate_w_hdr(struct iscsi_conn *conn, 1410iscsi_send_write_hdr(struct iscsi_conn *conn,
1443 struct iscsi_cmd_task *ctask) 1411 struct iscsi_cmd_task *ctask)
1444{ 1412{
1445 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1413 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
@@ -1450,86 +1418,126 @@ handle_xmstate_w_hdr(struct iscsi_conn *conn,
1450 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1418 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1451 (u8*)tcp_ctask->hdrext); 1419 (u8*)tcp_ctask->hdrext);
1452 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); 1420 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
1453 if (rc) 1421 if (rc) {
1454 tcp_ctask->xmstate |= XMSTATE_W_HDR; 1422 tcp_ctask->xmstate |= XMSTATE_W_HDR;
1455 return rc; 1423 return rc;
1424 }
1425
1426 if (ctask->imm_count) {
1427 tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
1428 iscsi_set_padding(tcp_ctask, ctask->imm_count);
1429
1430 if (ctask->conn->datadgst_en) {
1431 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
1432 tcp_ctask->immdigest = 0;
1433 }
1434 }
1435
1436 if (ctask->unsol_count)
1437 tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1438 return 0;
1456} 1439}
1457 1440
1458static inline int 1441static int
1459handle_xmstate_data_digest(struct iscsi_conn *conn, 1442iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1460 struct iscsi_cmd_task *ctask)
1461{ 1443{
1462 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1444 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1463 int rc; 1445 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1446 int sent = 0, rc;
1464 1447
1465 tcp_ctask->xmstate &= ~XMSTATE_DATA_DIGEST; 1448 if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
1466 debug_tcp("resent data digest 0x%x\n", tcp_ctask->datadigest); 1449 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
1467 rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, 1450 tcp_ctask->pad_count);
1468 &tcp_ctask->datadigest, 0); 1451 if (conn->datadgst_en)
1452 crypto_hash_update(&tcp_conn->tx_hash,
1453 &tcp_ctask->sendbuf.sg,
1454 tcp_ctask->sendbuf.sg.length);
1455 } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
1456 return 0;
1457
1458 tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
1459 tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
1460 debug_scsi("sending %d pad bytes for itt 0x%x\n",
1461 tcp_ctask->pad_count, ctask->itt);
1462 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
1463 &sent);
1469 if (rc) { 1464 if (rc) {
1470 tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST; 1465 debug_scsi("padding send failed %d\n", rc);
1471 debug_tcp("resent data digest 0x%x fail!\n", 1466 tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
1472 tcp_ctask->datadigest);
1473 } 1467 }
1474
1475 return rc; 1468 return rc;
1476} 1469}
1477 1470
1478static inline int 1471static int
1479handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1472iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1473 struct iscsi_buf *buf, uint32_t *digest)
1480{ 1474{
1481 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1475 struct iscsi_tcp_cmd_task *tcp_ctask;
1482 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1476 struct iscsi_tcp_conn *tcp_conn;
1483 int rc; 1477 int rc, sent = 0;
1484 1478
1485 BUG_ON(!ctask->imm_count); 1479 if (!conn->datadgst_en)
1486 tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; 1480 return 0;
1487 1481
1488 if (conn->datadgst_en) { 1482 tcp_ctask = ctask->dd_data;
1489 iscsi_data_digest_init(tcp_conn, ctask); 1483 tcp_conn = conn->dd_data;
1490 tcp_ctask->immdigest = 0;
1491 }
1492 1484
1493 for (;;) { 1485 if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
1494 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, 1486 crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
1495 &ctask->imm_count, &tcp_ctask->sent); 1487 iscsi_buf_init_iov(buf, (char*)digest, 4);
1496 if (rc) { 1488 }
1497 tcp_ctask->xmstate |= XMSTATE_IMM_DATA; 1489 tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
1498 if (conn->datadgst_en) {
1499 crypto_hash_final(&tcp_conn->data_tx_hash,
1500 (u8 *)&tcp_ctask->immdigest);
1501 debug_tcp("tx imm sendpage fail 0x%x\n",
1502 tcp_ctask->datadigest);
1503 }
1504 return rc;
1505 }
1506 if (conn->datadgst_en)
1507 crypto_hash_update(&tcp_conn->data_tx_hash,
1508 &tcp_ctask->sendbuf.sg,
1509 tcp_ctask->sendbuf.sg.length);
1510 1490
1511 if (!ctask->imm_count) 1491 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
1512 break; 1492 if (!rc)
1513 iscsi_buf_init_sg(&tcp_ctask->sendbuf, 1493 debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
1514 &tcp_ctask->sg[tcp_ctask->sg_count++]); 1494 ctask->itt);
1495 else {
1496 debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
1497 *digest, ctask->itt);
1498 tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
1515 } 1499 }
1500 return rc;
1501}
1516 1502
1517 if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) { 1503static int
1518 rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, 1504iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
1519 &tcp_ctask->immdigest, 1); 1505 struct scatterlist **sg, int *sent, int *count,
1520 if (rc) { 1506 struct iscsi_buf *digestbuf, uint32_t *digest)
1521 debug_tcp("sending imm digest 0x%x fail!\n", 1507{
1522 tcp_ctask->immdigest); 1508 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1523 return rc; 1509 struct iscsi_conn *conn = ctask->conn;
1510 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1511 int rc, buf_sent, offset;
1512
1513 while (*count) {
1514 buf_sent = 0;
1515 offset = sendbuf->sent;
1516
1517 rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
1518 *sent = *sent + buf_sent;
1519 if (buf_sent && conn->datadgst_en)
1520 partial_sg_digest_update(&tcp_conn->tx_hash,
1521 &sendbuf->sg, sendbuf->sg.offset + offset,
1522 buf_sent);
1523 if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
1524 iscsi_buf_init_sg(sendbuf, *sg);
1525 *sg = *sg + 1;
1524 } 1526 }
1525 debug_tcp("sending imm digest 0x%x\n", tcp_ctask->immdigest); 1527
1528 if (rc)
1529 return rc;
1526 } 1530 }
1527 1531
1528 return 0; 1532 rc = iscsi_send_padding(conn, ctask);
1533 if (rc)
1534 return rc;
1535
1536 return iscsi_send_digest(conn, ctask, digestbuf, digest);
1529} 1537}
1530 1538
1531static inline int 1539static int
1532handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1540iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1533{ 1541{
1534 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1542 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1535 struct iscsi_data_task *dtask; 1543 struct iscsi_data_task *dtask;
@@ -1537,12 +1545,17 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1537 1545
1538 tcp_ctask->xmstate |= XMSTATE_UNS_DATA; 1546 tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
1539 if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { 1547 if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
1540 iscsi_unsolicit_data_init(conn, ctask); 1548 dtask = &tcp_ctask->unsol_dtask;
1541 dtask = tcp_ctask->dtask; 1549
1550 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
1551 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
1552 sizeof(struct iscsi_hdr));
1542 if (conn->hdrdgst_en) 1553 if (conn->hdrdgst_en)
1543 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1554 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1544 (u8*)dtask->hdrext); 1555 (u8*)dtask->hdrext);
1556
1545 tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; 1557 tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
1558 iscsi_set_padding(tcp_ctask, ctask->data_count);
1546 } 1559 }
1547 1560
1548 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); 1561 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
@@ -1552,256 +1565,138 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1552 return rc; 1565 return rc;
1553 } 1566 }
1554 1567
1568 if (conn->datadgst_en) {
1569 dtask = &tcp_ctask->unsol_dtask;
1570 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
1571 dtask->digest = 0;
1572 }
1573
1555 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", 1574 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1556 ctask->itt, ctask->unsol_count, tcp_ctask->sent); 1575 ctask->itt, ctask->unsol_count, tcp_ctask->sent);
1557 return 0; 1576 return 0;
1558} 1577}
1559 1578
1560static inline int 1579static int
1561handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1580iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1562{ 1581{
1563 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1582 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1564 struct iscsi_data_task *dtask = tcp_ctask->dtask;
1565 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1566 int rc; 1583 int rc;
1567 1584
1568 BUG_ON(!ctask->data_count); 1585 if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
1569 tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; 1586 BUG_ON(!ctask->unsol_count);
1570 1587 tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
1571 if (conn->datadgst_en) { 1588send_hdr:
1572 iscsi_data_digest_init(tcp_conn, ctask); 1589 rc = iscsi_send_unsol_hdr(conn, ctask);
1573 dtask->digest = 0; 1590 if (rc)
1591 return rc;
1574 } 1592 }
1575 1593
1576 for (;;) { 1594 if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
1595 struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
1577 int start = tcp_ctask->sent; 1596 int start = tcp_ctask->sent;
1578 1597
1579 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, 1598 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
1580 &ctask->data_count, &tcp_ctask->sent); 1599 &tcp_ctask->sent, &ctask->data_count,
1581 if (rc) { 1600 &dtask->digestbuf, &dtask->digest);
1582 ctask->unsol_count -= tcp_ctask->sent - start;
1583 tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
1584 /* will continue with this ctask later.. */
1585 if (conn->datadgst_en) {
1586 crypto_hash_final(&tcp_conn->data_tx_hash,
1587 (u8 *)&dtask->digest);
1588 debug_tcp("tx uns data fail 0x%x\n",
1589 dtask->digest);
1590 }
1591 return rc;
1592 }
1593
1594 BUG_ON(tcp_ctask->sent > ctask->total_length);
1595 ctask->unsol_count -= tcp_ctask->sent - start; 1601 ctask->unsol_count -= tcp_ctask->sent - start;
1596 1602 if (rc)
1603 return rc;
1604 tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
1597 /* 1605 /*
1598 * XXX:we may run here with un-initial sendbuf. 1606 * Done with the Data-Out. Next, check if we need
1599 * so pass it 1607 * to send another unsolicited Data-Out.
1600 */ 1608 */
1601 if (conn->datadgst_en && tcp_ctask->sent - start > 0) 1609 if (ctask->unsol_count) {
1602 crypto_hash_update(&tcp_conn->data_tx_hash, 1610 debug_scsi("sending more uns\n");
1603 &tcp_ctask->sendbuf.sg, 1611 tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
1604 tcp_ctask->sendbuf.sg.length); 1612 goto send_hdr;
1605
1606 if (!ctask->data_count)
1607 break;
1608 iscsi_buf_init_sg(&tcp_ctask->sendbuf,
1609 &tcp_ctask->sg[tcp_ctask->sg_count++]);
1610 }
1611 BUG_ON(ctask->unsol_count < 0);
1612
1613 /*
1614 * Done with the Data-Out. Next, check if we need
1615 * to send another unsolicited Data-Out.
1616 */
1617 if (ctask->unsol_count) {
1618 if (conn->datadgst_en) {
1619 rc = iscsi_digest_final_send(conn, ctask,
1620 &dtask->digestbuf,
1621 &dtask->digest, 1);
1622 if (rc) {
1623 debug_tcp("send uns digest 0x%x fail\n",
1624 dtask->digest);
1625 return rc;
1626 }
1627 debug_tcp("sending uns digest 0x%x, more uns\n",
1628 dtask->digest);
1629 } 1613 }
1630 tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
1631 return 1;
1632 } 1614 }
1633
1634 if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) {
1635 rc = iscsi_digest_final_send(conn, ctask,
1636 &dtask->digestbuf,
1637 &dtask->digest, 1);
1638 if (rc) {
1639 debug_tcp("send last uns digest 0x%x fail\n",
1640 dtask->digest);
1641 return rc;
1642 }
1643 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
1644 }
1645
1646 return 0; 1615 return 0;
1647} 1616}
1648 1617
1649static inline int 1618static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
1650handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1619 struct iscsi_cmd_task *ctask)
1651{ 1620{
1652 struct iscsi_session *session = conn->session;
1653 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1654 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1621 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1655 struct iscsi_r2t_info *r2t = tcp_ctask->r2t; 1622 struct iscsi_session *session = conn->session;
1656 struct iscsi_data_task *dtask = &r2t->dtask; 1623 struct iscsi_r2t_info *r2t;
1624 struct iscsi_data_task *dtask;
1657 int left, rc; 1625 int left, rc;
1658 1626
1659 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; 1627 if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
1660 tcp_ctask->dtask = dtask; 1628 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1661
1662 if (conn->datadgst_en) {
1663 iscsi_data_digest_init(tcp_conn, ctask);
1664 dtask->digest = 0;
1665 }
1666solicit_again:
1667 /*
1668 * send Data-Out within this R2T sequence.
1669 */
1670 if (!r2t->data_count)
1671 goto data_out_done;
1672
1673 rc = iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent);
1674 if (rc) {
1675 tcp_ctask->xmstate |= XMSTATE_SOL_DATA; 1629 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1676 /* will continue with this ctask later.. */ 1630 if (!tcp_ctask->r2t)
1677 if (conn->datadgst_en) { 1631 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
1678 crypto_hash_final(&tcp_conn->data_tx_hash, 1632 sizeof(void*));
1679 (u8 *)&dtask->digest); 1633send_hdr:
1680 debug_tcp("r2t data send fail 0x%x\n", dtask->digest); 1634 r2t = tcp_ctask->r2t;
1681 } 1635 dtask = &r2t->dtask;
1682 return rc;
1683 }
1684 1636
1685 BUG_ON(r2t->data_count < 0); 1637 if (conn->hdrdgst_en)
1686 if (conn->datadgst_en) 1638 iscsi_hdr_digest(conn, &r2t->headbuf,
1687 crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg, 1639 (u8*)dtask->hdrext);
1688 r2t->sendbuf.sg.length); 1640 rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
1689 1641 if (rc) {
1690 if (r2t->data_count) { 1642 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
1691 BUG_ON(ctask->sc->use_sg == 0); 1643 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
1692 if (!iscsi_buf_left(&r2t->sendbuf)) { 1644 return rc;
1693 BUG_ON(tcp_ctask->bad_sg == r2t->sg);
1694 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1695 r2t->sg += 1;
1696 } 1645 }
1697 goto solicit_again;
1698 }
1699 1646
1700data_out_done:
1701 /*
1702 * Done with this Data-Out. Next, check if we have
1703 * to send another Data-Out for this R2T.
1704 */
1705 BUG_ON(r2t->data_length - r2t->sent < 0);
1706 left = r2t->data_length - r2t->sent;
1707 if (left) {
1708 if (conn->datadgst_en) { 1647 if (conn->datadgst_en) {
1709 rc = iscsi_digest_final_send(conn, ctask, 1648 iscsi_data_digest_init(conn->dd_data, tcp_ctask);
1710 &dtask->digestbuf, 1649 dtask->digest = 0;
1711 &dtask->digest, 1);
1712 if (rc) {
1713 debug_tcp("send r2t data digest 0x%x"
1714 "fail\n", dtask->digest);
1715 return rc;
1716 }
1717 debug_tcp("r2t data send digest 0x%x\n",
1718 dtask->digest);
1719 }
1720 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1721 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1722 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1723 return 1;
1724 }
1725
1726 /*
1727 * Done with this R2T. Check if there are more
1728 * outstanding R2Ts ready to be processed.
1729 */
1730 BUG_ON(tcp_ctask->r2t_data_count - r2t->data_length < 0);
1731 if (conn->datadgst_en) {
1732 rc = iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
1733 &dtask->digest, 1);
1734 if (rc) {
1735 debug_tcp("send last r2t data digest 0x%x"
1736 "fail\n", dtask->digest);
1737 return rc;
1738 } 1650 }
1739 debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
1740 }
1741 1651
1742 tcp_ctask->r2t_data_count -= r2t->data_length; 1652 iscsi_set_padding(tcp_ctask, r2t->data_count);
1743 tcp_ctask->r2t = NULL; 1653 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
1744 spin_lock_bh(&session->lock); 1654 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
1745 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 1655 r2t->sent);
1746 spin_unlock_bh(&session->lock);
1747 if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
1748 tcp_ctask->r2t = r2t;
1749 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1750 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1751 return 1;
1752 } 1656 }
1753 1657
1754 return 0; 1658 if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
1755} 1659 r2t = tcp_ctask->r2t;
1660 dtask = &r2t->dtask;
1756 1661
1757static inline int 1662 rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
1758handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1663 &r2t->sent, &r2t->data_count,
1759{ 1664 &dtask->digestbuf, &dtask->digest);
1760 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1665 if (rc)
1761 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1666 return rc;
1762 struct iscsi_data_task *dtask = tcp_ctask->dtask; 1667 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
1763 int sent = 0, rc;
1764 1668
1765 tcp_ctask->xmstate &= ~XMSTATE_W_PAD; 1669 /*
1766 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, 1670 * Done with this Data-Out. Next, check if we have
1767 tcp_ctask->pad_count); 1671 * to send another Data-Out for this R2T.
1768 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, 1672 */
1769 &sent); 1673 BUG_ON(r2t->data_length - r2t->sent < 0);
1770 if (rc) { 1674 left = r2t->data_length - r2t->sent;
1771 tcp_ctask->xmstate |= XMSTATE_W_PAD; 1675 if (left) {
1772 return rc; 1676 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1773 } 1677 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1678 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1679 goto send_hdr;
1680 }
1774 1681
1775 if (conn->datadgst_en) { 1682 /*
1776 crypto_hash_update(&tcp_conn->data_tx_hash, 1683 * Done with this R2T. Check if there are more
1777 &tcp_ctask->sendbuf.sg, 1684 * outstanding R2Ts ready to be processed.
1778 tcp_ctask->sendbuf.sg.length); 1685 */
1779 /* imm data? */ 1686 spin_lock_bh(&session->lock);
1780 if (!dtask) { 1687 tcp_ctask->r2t = NULL;
1781 rc = iscsi_digest_final_send(conn, ctask, 1688 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
1782 &tcp_ctask->immbuf, 1689 sizeof(void*));
1783 &tcp_ctask->immdigest, 1); 1690 if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
1784 if (rc) { 1691 sizeof(void*))) {
1785 debug_tcp("send padding digest 0x%x" 1692 tcp_ctask->r2t = r2t;
1786 "fail!\n", tcp_ctask->immdigest); 1693 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1787 return rc; 1694 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1788 } 1695 spin_unlock_bh(&session->lock);
1789 debug_tcp("done with padding, digest 0x%x\n", 1696 goto send_hdr;
1790 tcp_ctask->datadigest);
1791 } else {
1792 rc = iscsi_digest_final_send(conn, ctask,
1793 &dtask->digestbuf,
1794 &dtask->digest, 1);
1795 if (rc) {
1796 debug_tcp("send padding digest 0x%x"
1797 "fail\n", dtask->digest);
1798 return rc;
1799 }
1800 debug_tcp("done with padding, digest 0x%x\n",
1801 dtask->digest);
1802 } 1697 }
1698 spin_unlock_bh(&session->lock);
1803 } 1699 }
1804
1805 return 0; 1700 return 0;
1806} 1701}
1807 1702
@@ -1821,85 +1716,30 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1821 return rc; 1716 return rc;
1822 1717
1823 if (tcp_ctask->xmstate & XMSTATE_R_HDR) 1718 if (tcp_ctask->xmstate & XMSTATE_R_HDR)
1824 return handle_xmstate_r_hdr(conn, tcp_ctask); 1719 return iscsi_send_read_hdr(conn, tcp_ctask);
1825 1720
1826 if (tcp_ctask->xmstate & XMSTATE_W_HDR) { 1721 if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
1827 rc = handle_xmstate_w_hdr(conn, ctask); 1722 rc = iscsi_send_write_hdr(conn, ctask);
1828 if (rc)
1829 return rc;
1830 }
1831
1832 /* XXX: for data digest xmit recover */
1833 if (tcp_ctask->xmstate & XMSTATE_DATA_DIGEST) {
1834 rc = handle_xmstate_data_digest(conn, ctask);
1835 if (rc) 1723 if (rc)
1836 return rc; 1724 return rc;
1837 } 1725 }
1838 1726
1839 if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { 1727 if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
1840 rc = handle_xmstate_imm_data(conn, ctask); 1728 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
1729 &tcp_ctask->sent, &ctask->imm_count,
1730 &tcp_ctask->immbuf, &tcp_ctask->immdigest);
1841 if (rc) 1731 if (rc)
1842 return rc; 1732 return rc;
1733 tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
1843 } 1734 }
1844 1735
1845 if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { 1736 rc = iscsi_send_unsol_pdu(conn, ctask);
1846 BUG_ON(!ctask->unsol_count); 1737 if (rc)
1847 tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR; 1738 return rc;
1848unsolicit_head_again:
1849 rc = handle_xmstate_uns_hdr(conn, ctask);
1850 if (rc)
1851 return rc;
1852 }
1853
1854 if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
1855 rc = handle_xmstate_uns_data(conn, ctask);
1856 if (rc == 1)
1857 goto unsolicit_head_again;
1858 else if (rc)
1859 return rc;
1860 goto done;
1861 }
1862
1863 if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
1864 struct iscsi_r2t_info *r2t;
1865
1866 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1867 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1868 if (!tcp_ctask->r2t)
1869 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
1870 sizeof(void*));
1871solicit_head_again:
1872 r2t = tcp_ctask->r2t;
1873 if (conn->hdrdgst_en)
1874 iscsi_hdr_digest(conn, &r2t->headbuf,
1875 (u8*)r2t->dtask.hdrext);
1876 rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
1877 if (rc) {
1878 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
1879 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
1880 return rc;
1881 }
1882
1883 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
1884 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
1885 r2t->sent);
1886 }
1887
1888 if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
1889 rc = handle_xmstate_sol_data(conn, ctask);
1890 if (rc == 1)
1891 goto solicit_head_again;
1892 if (rc)
1893 return rc;
1894 }
1895 1739
1896done: 1740 rc = iscsi_send_sol_pdu(conn, ctask);
1897 /* 1741 if (rc)
1898 * Last thing to check is whether we need to send write 1742 return rc;
1899 * padding. Note that we check for xmstate equality, not just the bit.
1900 */
1901 if (tcp_ctask->xmstate == XMSTATE_W_PAD)
1902 rc = handle_xmstate_w_pad(conn, ctask);
1903 1743
1904 return rc; 1744 return rc;
1905} 1745}
@@ -1931,8 +1771,24 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1931 /* initial operational parameters */ 1771 /* initial operational parameters */
1932 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1772 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1933 1773
1774 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1775 CRYPTO_ALG_ASYNC);
1776 tcp_conn->tx_hash.flags = 0;
1777 if (!tcp_conn->tx_hash.tfm)
1778 goto free_tcp_conn;
1779
1780 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1781 CRYPTO_ALG_ASYNC);
1782 tcp_conn->rx_hash.flags = 0;
1783 if (!tcp_conn->rx_hash.tfm)
1784 goto free_tx_tfm;
1785
1934 return cls_conn; 1786 return cls_conn;
1935 1787
1788free_tx_tfm:
1789 crypto_free_hash(tcp_conn->tx_hash.tfm);
1790free_tcp_conn:
1791 kfree(tcp_conn);
1936tcp_conn_alloc_fail: 1792tcp_conn_alloc_fail:
1937 iscsi_conn_teardown(cls_conn); 1793 iscsi_conn_teardown(cls_conn);
1938 return NULL; 1794 return NULL;
@@ -1970,14 +1826,10 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1970 1826
1971 /* now free tcp_conn */ 1827 /* now free tcp_conn */
1972 if (digest) { 1828 if (digest) {
1973 if (tcp_conn->tx_tfm) 1829 if (tcp_conn->tx_hash.tfm)
1974 crypto_free_hash(tcp_conn->tx_tfm); 1830 crypto_free_hash(tcp_conn->tx_hash.tfm);
1975 if (tcp_conn->rx_tfm) 1831 if (tcp_conn->rx_hash.tfm)
1976 crypto_free_hash(tcp_conn->rx_tfm); 1832 crypto_free_hash(tcp_conn->rx_hash.tfm);
1977 if (tcp_conn->data_tx_hash.tfm)
1978 crypto_free_hash(tcp_conn->data_tx_hash.tfm);
1979 if (tcp_conn->data_rx_hash.tfm)
1980 crypto_free_hash(tcp_conn->data_rx_hash.tfm);
1981 } 1833 }
1982 1834
1983 kfree(tcp_conn); 1835 kfree(tcp_conn);
@@ -1987,9 +1839,11 @@ static void
1987iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 1839iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1988{ 1840{
1989 struct iscsi_conn *conn = cls_conn->dd_data; 1841 struct iscsi_conn *conn = cls_conn->dd_data;
1842 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1990 1843
1991 iscsi_conn_stop(cls_conn, flag); 1844 iscsi_conn_stop(cls_conn, flag);
1992 iscsi_tcp_release_conn(conn); 1845 iscsi_tcp_release_conn(conn);
1846 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1993} 1847}
1994 1848
1995static int 1849static int
@@ -2135,52 +1989,11 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2135 case ISCSI_PARAM_HDRDGST_EN: 1989 case ISCSI_PARAM_HDRDGST_EN:
2136 iscsi_set_param(cls_conn, param, buf, buflen); 1990 iscsi_set_param(cls_conn, param, buf, buflen);
2137 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1991 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
2138 if (conn->hdrdgst_en) { 1992 if (conn->hdrdgst_en)
2139 tcp_conn->hdr_size += sizeof(__u32); 1993 tcp_conn->hdr_size += sizeof(__u32);
2140 if (!tcp_conn->tx_tfm)
2141 tcp_conn->tx_tfm =
2142 crypto_alloc_hash("crc32c", 0,
2143 CRYPTO_ALG_ASYNC);
2144 if (IS_ERR(tcp_conn->tx_tfm))
2145 return PTR_ERR(tcp_conn->tx_tfm);
2146 if (!tcp_conn->rx_tfm)
2147 tcp_conn->rx_tfm =
2148 crypto_alloc_hash("crc32c", 0,
2149 CRYPTO_ALG_ASYNC);
2150 if (IS_ERR(tcp_conn->rx_tfm)) {
2151 crypto_free_hash(tcp_conn->tx_tfm);
2152 return PTR_ERR(tcp_conn->rx_tfm);
2153 }
2154 } else {
2155 if (tcp_conn->tx_tfm)
2156 crypto_free_hash(tcp_conn->tx_tfm);
2157 if (tcp_conn->rx_tfm)
2158 crypto_free_hash(tcp_conn->rx_tfm);
2159 }
2160 break; 1994 break;
2161 case ISCSI_PARAM_DATADGST_EN: 1995 case ISCSI_PARAM_DATADGST_EN:
2162 iscsi_set_param(cls_conn, param, buf, buflen); 1996 iscsi_set_param(cls_conn, param, buf, buflen);
2163 if (conn->datadgst_en) {
2164 if (!tcp_conn->data_tx_hash.tfm)
2165 tcp_conn->data_tx_hash.tfm =
2166 crypto_alloc_hash("crc32c", 0,
2167 CRYPTO_ALG_ASYNC);
2168 if (IS_ERR(tcp_conn->data_tx_hash.tfm))
2169 return PTR_ERR(tcp_conn->data_tx_hash.tfm);
2170 if (!tcp_conn->data_rx_hash.tfm)
2171 tcp_conn->data_rx_hash.tfm =
2172 crypto_alloc_hash("crc32c", 0,
2173 CRYPTO_ALG_ASYNC);
2174 if (IS_ERR(tcp_conn->data_rx_hash.tfm)) {
2175 crypto_free_hash(tcp_conn->data_tx_hash.tfm);
2176 return PTR_ERR(tcp_conn->data_rx_hash.tfm);
2177 }
2178 } else {
2179 if (tcp_conn->data_tx_hash.tfm)
2180 crypto_free_hash(tcp_conn->data_tx_hash.tfm);
2181 if (tcp_conn->data_rx_hash.tfm)
2182 crypto_free_hash(tcp_conn->data_rx_hash.tfm);
2183 }
2184 tcp_conn->sendpage = conn->datadgst_en ? 1997 tcp_conn->sendpage = conn->datadgst_en ?
2185 sock_no_sendpage : tcp_conn->sock->ops->sendpage; 1998 sock_no_sendpage : tcp_conn->sock->ops->sendpage;
2186 break; 1999 break;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index e35701305fc9..32736831790e 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -31,23 +31,21 @@
31#define IN_PROGRESS_DDIGEST_RECV 0x3 31#define IN_PROGRESS_DDIGEST_RECV 0x3
32 32
33/* xmit state machine */ 33/* xmit state machine */
34#define XMSTATE_IDLE 0x0 34#define XMSTATE_IDLE 0x0
35#define XMSTATE_R_HDR 0x1 35#define XMSTATE_R_HDR 0x1
36#define XMSTATE_W_HDR 0x2 36#define XMSTATE_W_HDR 0x2
37#define XMSTATE_IMM_HDR 0x4 37#define XMSTATE_IMM_HDR 0x4
38#define XMSTATE_IMM_DATA 0x8 38#define XMSTATE_IMM_DATA 0x8
39#define XMSTATE_UNS_INIT 0x10 39#define XMSTATE_UNS_INIT 0x10
40#define XMSTATE_UNS_HDR 0x20 40#define XMSTATE_UNS_HDR 0x20
41#define XMSTATE_UNS_DATA 0x40 41#define XMSTATE_UNS_DATA 0x40
42#define XMSTATE_SOL_HDR 0x80 42#define XMSTATE_SOL_HDR 0x80
43#define XMSTATE_SOL_DATA 0x100 43#define XMSTATE_SOL_DATA 0x100
44#define XMSTATE_W_PAD 0x200 44#define XMSTATE_W_PAD 0x200
45#define XMSTATE_DATA_DIGEST 0x400 45#define XMSTATE_W_RESEND_PAD 0x400
46 46#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
47#define ISCSI_CONN_RCVBUF_MIN 262144 47
48#define ISCSI_CONN_SNDBUF_MIN 262144
49#define ISCSI_PAD_LEN 4 48#define ISCSI_PAD_LEN 4
50#define ISCSI_R2T_MAX 16
51#define ISCSI_SG_TABLESIZE SG_ALL 49#define ISCSI_SG_TABLESIZE SG_ALL
52#define ISCSI_TCP_MAX_CMD_LEN 16 50#define ISCSI_TCP_MAX_CMD_LEN 16
53 51
@@ -85,9 +83,6 @@ struct iscsi_tcp_conn {
85 /* iSCSI connection-wide sequencing */ 83 /* iSCSI connection-wide sequencing */
86 int hdr_size; /* PDU header size */ 84 int hdr_size; /* PDU header size */
87 85
88 struct crypto_hash *rx_tfm; /* CRC32C (Rx) */
89 struct hash_desc data_rx_hash; /* CRC32C (Rx) for data */
90
91 /* control data */ 86 /* control data */
92 struct iscsi_tcp_recv in; /* TCP receive context */ 87 struct iscsi_tcp_recv in; /* TCP receive context */
93 int in_progress; /* connection state machine */ 88 int in_progress; /* connection state machine */
@@ -97,9 +92,9 @@ struct iscsi_tcp_conn {
97 void (*old_state_change)(struct sock *); 92 void (*old_state_change)(struct sock *);
98 void (*old_write_space)(struct sock *); 93 void (*old_write_space)(struct sock *);
99 94
100 /* xmit */ 95 /* data and header digests */
101 struct crypto_hash *tx_tfm; /* CRC32C (Tx) */ 96 struct hash_desc tx_hash; /* CRC32C (Tx) */
102 struct hash_desc data_tx_hash; /* CRC32C (Tx) for data */ 97 struct hash_desc rx_hash; /* CRC32C (Rx) */
103 98
104 /* MIB custom statistics */ 99 /* MIB custom statistics */
105 uint32_t sendpage_failures_cnt; 100 uint32_t sendpage_failures_cnt;
@@ -158,19 +153,15 @@ struct iscsi_tcp_cmd_task {
158 struct scatterlist *bad_sg; /* assert statement */ 153 struct scatterlist *bad_sg; /* assert statement */
159 int sg_count; /* SG's to process */ 154 int sg_count; /* SG's to process */
160 uint32_t exp_r2tsn; 155 uint32_t exp_r2tsn;
161 int r2t_data_count; /* R2T Data-Out bytes */
162 int data_offset; 156 int data_offset;
163 struct iscsi_r2t_info *r2t; /* in progress R2T */ 157 struct iscsi_r2t_info *r2t; /* in progress R2T */
164 struct iscsi_queue r2tpool; 158 struct iscsi_queue r2tpool;
165 struct kfifo *r2tqueue; 159 struct kfifo *r2tqueue;
166 struct iscsi_r2t_info **r2ts; 160 struct iscsi_r2t_info **r2ts;
167 uint32_t datadigest; /* for recover digest */
168 int digest_count; 161 int digest_count;
169 uint32_t immdigest; /* for imm data */ 162 uint32_t immdigest; /* for imm data */
170 struct iscsi_buf immbuf; /* for imm data digest */ 163 struct iscsi_buf immbuf; /* for imm data digest */
171 struct iscsi_data_task *dtask; /* data task in progress*/
172 struct iscsi_data_task unsol_dtask; /* unsol data task */ 164 struct iscsi_data_task unsol_dtask; /* unsol data task */
173 int digest_offset; /* for partial buff digest */
174}; 165};
175 166
176#endif /* ISCSI_H */ 167#endif /* ISCSI_H */
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 29f59345305d..2c34af99627d 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -32,7 +32,6 @@
32 * 32 *
33 */ 33 */
34 34
35#include <linux/config.h>
36#include <linux/kernel.h> 35#include <linux/kernel.h>
37#include <scsi/scsi.h> 36#include <scsi/scsi.h>
38#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5884cd26d53a..c542d0e95e68 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -68,8 +68,7 @@ iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
68EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn); 68EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
69 69
70void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask, 70void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
71 struct iscsi_data *hdr, 71 struct iscsi_data *hdr)
72 int transport_data_cnt)
73{ 72{
74 struct iscsi_conn *conn = ctask->conn; 73 struct iscsi_conn *conn = ctask->conn;
75 74
@@ -82,14 +81,12 @@ void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
82 81
83 hdr->itt = ctask->hdr->itt; 82 hdr->itt = ctask->hdr->itt;
84 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 83 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
85 84 hdr->offset = cpu_to_be32(ctask->unsol_offset);
86 hdr->offset = cpu_to_be32(ctask->total_length -
87 transport_data_cnt -
88 ctask->unsol_count);
89 85
90 if (ctask->unsol_count > conn->max_xmit_dlength) { 86 if (ctask->unsol_count > conn->max_xmit_dlength) {
91 hton24(hdr->dlength, conn->max_xmit_dlength); 87 hton24(hdr->dlength, conn->max_xmit_dlength);
92 ctask->data_count = conn->max_xmit_dlength; 88 ctask->data_count = conn->max_xmit_dlength;
89 ctask->unsol_offset += ctask->data_count;
93 hdr->flags = 0; 90 hdr->flags = 0;
94 } else { 91 } else {
95 hton24(hdr->dlength, ctask->unsol_count); 92 hton24(hdr->dlength, ctask->unsol_count);
@@ -125,6 +122,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
125 memcpy(hdr->cdb, sc->cmnd, sc->cmd_len); 122 memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
126 memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len); 123 memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
127 124
125 ctask->data_count = 0;
128 if (sc->sc_data_direction == DMA_TO_DEVICE) { 126 if (sc->sc_data_direction == DMA_TO_DEVICE) {
129 hdr->flags |= ISCSI_FLAG_CMD_WRITE; 127 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
130 /* 128 /*
@@ -143,6 +141,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
143 */ 141 */
144 ctask->imm_count = 0; 142 ctask->imm_count = 0;
145 ctask->unsol_count = 0; 143 ctask->unsol_count = 0;
144 ctask->unsol_offset = 0;
146 ctask->unsol_datasn = 0; 145 ctask->unsol_datasn = 0;
147 146
148 if (session->imm_data_en) { 147 if (session->imm_data_en) {
@@ -156,9 +155,12 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
156 } else 155 } else
157 zero_data(ctask->hdr->dlength); 156 zero_data(ctask->hdr->dlength);
158 157
159 if (!session->initial_r2t_en) 158 if (!session->initial_r2t_en) {
160 ctask->unsol_count = min(session->first_burst, 159 ctask->unsol_count = min(session->first_burst,
161 ctask->total_length) - ctask->imm_count; 160 ctask->total_length) - ctask->imm_count;
161 ctask->unsol_offset = ctask->imm_count;
162 }
163
162 if (!ctask->unsol_count) 164 if (!ctask->unsol_count)
163 /* No unsolicit Data-Out's */ 165 /* No unsolicit Data-Out's */
164 ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL; 166 ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -177,25 +179,51 @@ EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
177 179
178/** 180/**
179 * iscsi_complete_command - return command back to scsi-ml 181 * iscsi_complete_command - return command back to scsi-ml
180 * @session: iscsi session
181 * @ctask: iscsi cmd task 182 * @ctask: iscsi cmd task
182 * 183 *
183 * Must be called with session lock. 184 * Must be called with session lock.
184 * This function returns the scsi command to scsi-ml and returns 185 * This function returns the scsi command to scsi-ml and returns
185 * the cmd task to the pool of available cmd tasks. 186 * the cmd task to the pool of available cmd tasks.
186 */ 187 */
187static void iscsi_complete_command(struct iscsi_session *session, 188static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
188 struct iscsi_cmd_task *ctask)
189{ 189{
190 struct iscsi_session *session = ctask->conn->session;
190 struct scsi_cmnd *sc = ctask->sc; 191 struct scsi_cmnd *sc = ctask->sc;
191 192
192 ctask->state = ISCSI_TASK_COMPLETED; 193 ctask->state = ISCSI_TASK_COMPLETED;
193 ctask->sc = NULL; 194 ctask->sc = NULL;
195 /* SCSI eh reuses commands to verify us */
196 sc->SCp.ptr = NULL;
194 list_del_init(&ctask->running); 197 list_del_init(&ctask->running);
195 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); 198 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
196 sc->scsi_done(sc); 199 sc->scsi_done(sc);
197} 200}
198 201
202static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
203{
204 atomic_inc(&ctask->refcount);
205}
206
207static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
208{
209 spin_lock_bh(&ctask->conn->session->lock);
210 __iscsi_get_ctask(ctask);
211 spin_unlock_bh(&ctask->conn->session->lock);
212}
213
214static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
215{
216 if (atomic_dec_and_test(&ctask->refcount))
217 iscsi_complete_command(ctask);
218}
219
220static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
221{
222 spin_lock_bh(&ctask->conn->session->lock);
223 __iscsi_put_ctask(ctask);
224 spin_unlock_bh(&ctask->conn->session->lock);
225}
226
199/** 227/**
200 * iscsi_cmd_rsp - SCSI Command Response processing 228 * iscsi_cmd_rsp - SCSI Command Response processing
201 * @conn: iscsi connection 229 * @conn: iscsi connection
@@ -272,7 +300,7 @@ out:
272 (long)sc, sc->result, ctask->itt); 300 (long)sc, sc->result, ctask->itt);
273 conn->scsirsp_pdus_cnt++; 301 conn->scsirsp_pdus_cnt++;
274 302
275 iscsi_complete_command(conn->session, ctask); 303 __iscsi_put_ctask(ctask);
276 return rc; 304 return rc;
277} 305}
278 306
@@ -295,6 +323,30 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
295 wake_up(&conn->ehwait); 323 wake_up(&conn->ehwait);
296} 324}
297 325
326static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
327 char *data, int datalen)
328{
329 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
330 struct iscsi_hdr rejected_pdu;
331 uint32_t itt;
332
333 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
334
335 if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
336 if (ntoh24(reject->dlength) > datalen)
337 return ISCSI_ERR_PROTO;
338
339 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
340 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
341 itt = rejected_pdu.itt & ISCSI_ITT_MASK;
342 printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
343 "due to DataDigest error.\n", itt,
344 rejected_pdu.opcode);
345 }
346 }
347 return 0;
348}
349
298/** 350/**
299 * __iscsi_complete_pdu - complete pdu 351 * __iscsi_complete_pdu - complete pdu
300 * @conn: iscsi conn 352 * @conn: iscsi conn
@@ -336,7 +388,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
336 BUG_ON((void*)ctask != ctask->sc->SCp.ptr); 388 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
337 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 389 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
338 conn->scsirsp_pdus_cnt++; 390 conn->scsirsp_pdus_cnt++;
339 iscsi_complete_command(session, ctask); 391 __iscsi_put_ctask(ctask);
340 } 392 }
341 break; 393 break;
342 case ISCSI_OP_R2T: 394 case ISCSI_OP_R2T:
@@ -406,6 +458,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
406 break; 458 break;
407 } 459 }
408 } else if (itt == ISCSI_RESERVED_TAG) { 460 } else if (itt == ISCSI_RESERVED_TAG) {
461 rc = iscsi_check_assign_cmdsn(session,
462 (struct iscsi_nopin*)hdr);
463 if (rc)
464 goto done;
465
409 switch(opcode) { 466 switch(opcode) {
410 case ISCSI_OP_NOOP_IN: 467 case ISCSI_OP_NOOP_IN:
411 if (datalen) { 468 if (datalen) {
@@ -413,11 +470,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
413 break; 470 break;
414 } 471 }
415 472
416 rc = iscsi_check_assign_cmdsn(session,
417 (struct iscsi_nopin*)hdr);
418 if (rc)
419 break;
420
421 if (hdr->ttt == ISCSI_RESERVED_TAG) 473 if (hdr->ttt == ISCSI_RESERVED_TAG)
422 break; 474 break;
423 475
@@ -425,7 +477,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
425 rc = ISCSI_ERR_CONN_FAILED; 477 rc = ISCSI_ERR_CONN_FAILED;
426 break; 478 break;
427 case ISCSI_OP_REJECT: 479 case ISCSI_OP_REJECT:
428 /* we need sth like iscsi_reject_rsp()*/ 480 rc = iscsi_handle_reject(conn, hdr, data, datalen);
481 break;
429 case ISCSI_OP_ASYNC_EVENT: 482 case ISCSI_OP_ASYNC_EVENT:
430 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 483 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
431 /* we need sth like iscsi_async_event_rsp() */ 484 /* we need sth like iscsi_async_event_rsp() */
@@ -561,7 +614,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
561 BUG_ON(conn->ctask && conn->mtask); 614 BUG_ON(conn->ctask && conn->mtask);
562 615
563 if (conn->ctask) { 616 if (conn->ctask) {
617 iscsi_get_ctask(conn->ctask);
564 rc = tt->xmit_cmd_task(conn, conn->ctask); 618 rc = tt->xmit_cmd_task(conn, conn->ctask);
619 iscsi_put_ctask(conn->ctask);
565 if (rc) 620 if (rc)
566 goto again; 621 goto again;
567 /* done with this in-progress ctask */ 622 /* done with this in-progress ctask */
@@ -602,12 +657,19 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
602 struct iscsi_cmd_task, running); 657 struct iscsi_cmd_task, running);
603 conn->ctask->state = ISCSI_TASK_RUNNING; 658 conn->ctask->state = ISCSI_TASK_RUNNING;
604 list_move_tail(conn->xmitqueue.next, &conn->run_list); 659 list_move_tail(conn->xmitqueue.next, &conn->run_list);
660 __iscsi_get_ctask(conn->ctask);
605 spin_unlock_bh(&conn->session->lock); 661 spin_unlock_bh(&conn->session->lock);
606 662
607 rc = tt->xmit_cmd_task(conn, conn->ctask); 663 rc = tt->xmit_cmd_task(conn, conn->ctask);
608 if (rc) 664 if (rc)
609 goto again; 665 goto again;
666
610 spin_lock_bh(&conn->session->lock); 667 spin_lock_bh(&conn->session->lock);
668 __iscsi_put_ctask(conn->ctask);
669 if (rc) {
670 spin_unlock_bh(&conn->session->lock);
671 goto again;
672 }
611 } 673 }
612 spin_unlock_bh(&conn->session->lock); 674 spin_unlock_bh(&conn->session->lock);
613 /* done with this ctask */ 675 /* done with this ctask */
@@ -657,6 +719,7 @@ enum {
657 FAILURE_SESSION_FAILED, 719 FAILURE_SESSION_FAILED,
658 FAILURE_SESSION_FREED, 720 FAILURE_SESSION_FREED,
659 FAILURE_WINDOW_CLOSED, 721 FAILURE_WINDOW_CLOSED,
722 FAILURE_OOM,
660 FAILURE_SESSION_TERMINATE, 723 FAILURE_SESSION_TERMINATE,
661 FAILURE_SESSION_IN_RECOVERY, 724 FAILURE_SESSION_IN_RECOVERY,
662 FAILURE_SESSION_RECOVERY_TIMEOUT, 725 FAILURE_SESSION_RECOVERY_TIMEOUT,
@@ -672,6 +735,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
672 735
673 sc->scsi_done = done; 736 sc->scsi_done = done;
674 sc->result = 0; 737 sc->result = 0;
738 sc->SCp.ptr = NULL;
675 739
676 host = sc->device->host; 740 host = sc->device->host;
677 session = iscsi_hostdata(host->hostdata); 741 session = iscsi_hostdata(host->hostdata);
@@ -715,10 +779,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
715 779
716 conn = session->leadconn; 780 conn = session->leadconn;
717 781
718 __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); 782 if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
783 sizeof(void*))) {
784 reason = FAILURE_OOM;
785 goto reject;
786 }
719 sc->SCp.phase = session->age; 787 sc->SCp.phase = session->age;
720 sc->SCp.ptr = (char *)ctask; 788 sc->SCp.ptr = (char *)ctask;
721 789
790 atomic_set(&ctask->refcount, 1);
722 ctask->state = ISCSI_TASK_PENDING; 791 ctask->state = ISCSI_TASK_PENDING;
723 ctask->mtask = NULL; 792 ctask->mtask = NULL;
724 ctask->conn = conn; 793 ctask->conn = conn;
@@ -731,9 +800,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
731 800
732 list_add_tail(&ctask->running, &conn->xmitqueue); 801 list_add_tail(&ctask->running, &conn->xmitqueue);
733 debug_scsi( 802 debug_scsi(
734 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", 803 "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
804 "win %d]\n",
735 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", 805 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
736 conn->id, (long)sc, ctask->itt, sc->request_bufflen, 806 conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
737 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); 807 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
738 spin_unlock(&session->lock); 808 spin_unlock(&session->lock);
739 809
@@ -1061,16 +1131,30 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1061 1131
1062 sc->result = err; 1132 sc->result = err;
1063 sc->resid = sc->request_bufflen; 1133 sc->resid = sc->request_bufflen;
1064 iscsi_complete_command(conn->session, ctask); 1134 /* release ref from queuecommand */
1135 __iscsi_put_ctask(ctask);
1065} 1136}
1066 1137
1067int iscsi_eh_abort(struct scsi_cmnd *sc) 1138int iscsi_eh_abort(struct scsi_cmnd *sc)
1068{ 1139{
1069 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; 1140 struct iscsi_cmd_task *ctask;
1070 struct iscsi_conn *conn = ctask->conn; 1141 struct iscsi_conn *conn;
1071 struct iscsi_session *session = conn->session; 1142 struct iscsi_session *session;
1072 int rc; 1143 int rc;
1073 1144
1145 /*
1146 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
1147 * got the command.
1148 */
1149 if (!sc->SCp.ptr) {
1150 debug_scsi("sc never reached iscsi layer or it completed.\n");
1151 return SUCCESS;
1152 }
1153
1154 ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
1155 conn = ctask->conn;
1156 session = conn->session;
1157
1074 conn->eh_abort_cnt++; 1158 conn->eh_abort_cnt++;
1075 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt); 1159 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
1076 1160
@@ -1520,11 +1604,19 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
1520 struct iscsi_conn *conn = cls_conn->dd_data; 1604 struct iscsi_conn *conn = cls_conn->dd_data;
1521 struct iscsi_session *session = conn->session; 1605 struct iscsi_session *session = conn->session;
1522 1606
1523 if (session == NULL) { 1607 if (!session) {
1524 printk(KERN_ERR "iscsi: can't start unbound connection\n"); 1608 printk(KERN_ERR "iscsi: can't start unbound connection\n");
1525 return -EPERM; 1609 return -EPERM;
1526 } 1610 }
1527 1611
1612 if ((session->imm_data_en || !session->initial_r2t_en) &&
1613 session->first_burst > session->max_burst) {
1614 printk("iscsi: invalid burst lengths: "
1615 "first_burst %d max_burst %d\n",
1616 session->first_burst, session->max_burst);
1617 return -EINVAL;
1618 }
1619
1528 spin_lock_bh(&session->lock); 1620 spin_lock_bh(&session->lock);
1529 conn->c_stage = ISCSI_CONN_STARTED; 1621 conn->c_stage = ISCSI_CONN_STARTED;
1530 session->state = ISCSI_STATE_LOGGED_IN; 1622 session->state = ISCSI_STATE_LOGGED_IN;
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
new file mode 100644
index 000000000000..aafdc92f8312
--- /dev/null
+++ b/drivers/scsi/libsas/Kconfig
@@ -0,0 +1,39 @@
1#
2# Kernel configuration file for the SAS Class
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23#
24
25config SCSI_SAS_LIBSAS
26 tristate "SAS Domain Transport Attributes"
27 depends on SCSI
28 select SCSI_SAS_ATTRS
29 help
30 This provides transport specific helpers for SAS drivers which
31 use the domain device construct (like the aic94xxx).
32
33config SCSI_SAS_LIBSAS_DEBUG
34 bool "Compile the SAS Domain Transport Attributes in debug mode"
35 default y
36 depends on SCSI_SAS_LIBSAS
37 help
38 Compiles the SAS Layer in debug mode. In debug mode, the
39 SAS Layer prints diagnostic and debug messages.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
new file mode 100644
index 000000000000..44d972a3b4bd
--- /dev/null
+++ b/drivers/scsi/libsas/Makefile
@@ -0,0 +1,36 @@
1#
2# Kernel Makefile for the libsas helpers
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DSAS_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
29libsas-y += sas_init.o \
30 sas_phy.o \
31 sas_port.o \
32 sas_event.o \
33 sas_dump.o \
34 sas_discover.o \
35 sas_expander.o \
36 sas_scsi_host.o
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
new file mode 100644
index 000000000000..d977bd492d8d
--- /dev/null
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -0,0 +1,749 @@
1/*
2 * Serial Attached SCSI (SAS) Discover process
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <linux/pci.h>
26#include <linux/scatterlist.h>
27#include <scsi/scsi_host.h>
28#include <scsi/scsi_eh.h>
29#include "sas_internal.h"
30
31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_transport_sas.h>
33#include "../scsi_sas_internal.h"
34
35/* ---------- Basic task processing for discovery purposes ---------- */
36
37void sas_init_dev(struct domain_device *dev)
38{
39 INIT_LIST_HEAD(&dev->siblings);
40 INIT_LIST_HEAD(&dev->dev_list_node);
41 switch (dev->dev_type) {
42 case SAS_END_DEV:
43 break;
44 case EDGE_DEV:
45 case FANOUT_DEV:
46 INIT_LIST_HEAD(&dev->ex_dev.children);
47 break;
48 case SATA_DEV:
49 case SATA_PM:
50 case SATA_PM_PORT:
51 INIT_LIST_HEAD(&dev->sata_dev.children);
52 break;
53 default:
54 break;
55 }
56}
57
58static void sas_task_timedout(unsigned long _task)
59{
60 struct sas_task *task = (void *) _task;
61 unsigned long flags;
62
63 spin_lock_irqsave(&task->task_state_lock, flags);
64 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
65 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
66 spin_unlock_irqrestore(&task->task_state_lock, flags);
67
68 complete(&task->completion);
69}
70
71static void sas_disc_task_done(struct sas_task *task)
72{
73 if (!del_timer(&task->timer))
74 return;
75 complete(&task->completion);
76}
77
78#define SAS_DEV_TIMEOUT 10
79
80/**
81 * sas_execute_task -- Basic task processing for discovery
82 * @task: the task to be executed
83 * @buffer: pointer to buffer to do I/O
84 * @size: size of @buffer
85 * @pci_dma_dir: PCI_DMA_...
86 */
87static int sas_execute_task(struct sas_task *task, void *buffer, int size,
88 int pci_dma_dir)
89{
90 int res = 0;
91 struct scatterlist *scatter = NULL;
92 struct task_status_struct *ts = &task->task_status;
93 int num_scatter = 0;
94 int retries = 0;
95 struct sas_internal *i =
96 to_sas_internal(task->dev->port->ha->core.shost->transportt);
97
98 if (pci_dma_dir != PCI_DMA_NONE) {
99 scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
100 if (!scatter)
101 goto out;
102
103 sg_init_one(scatter, buffer, size);
104 num_scatter = 1;
105 }
106
107 task->task_proto = task->dev->tproto;
108 task->scatter = scatter;
109 task->num_scatter = num_scatter;
110 task->total_xfer_len = size;
111 task->data_dir = pci_dma_dir;
112 task->task_done = sas_disc_task_done;
113
114 for (retries = 0; retries < 5; retries++) {
115 task->task_state_flags = SAS_TASK_STATE_PENDING;
116 init_completion(&task->completion);
117
118 task->timer.data = (unsigned long) task;
119 task->timer.function = sas_task_timedout;
120 task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
121 add_timer(&task->timer);
122
123 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
124 if (res) {
125 del_timer(&task->timer);
126 SAS_DPRINTK("executing SAS discovery task failed:%d\n",
127 res);
128 goto ex_err;
129 }
130 wait_for_completion(&task->completion);
131 res = -ETASK;
132 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
133 int res2;
134 SAS_DPRINTK("task aborted, flags:0x%x\n",
135 task->task_state_flags);
136 res2 = i->dft->lldd_abort_task(task);
137 SAS_DPRINTK("came back from abort task\n");
138 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
139 if (res2 == TMF_RESP_FUNC_COMPLETE)
140 continue; /* Retry the task */
141 else
142 goto ex_err;
143 }
144 }
145 if (task->task_status.stat == SAM_BUSY ||
146 task->task_status.stat == SAM_TASK_SET_FULL ||
147 task->task_status.stat == SAS_QUEUE_FULL) {
148 SAS_DPRINTK("task: q busy, sleeping...\n");
149 schedule_timeout_interruptible(HZ);
150 } else if (task->task_status.stat == SAM_CHECK_COND) {
151 struct scsi_sense_hdr shdr;
152
153 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
154 &shdr)) {
155 SAS_DPRINTK("couldn't normalize sense\n");
156 continue;
157 }
158 if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
159 (shdr.sense_key == 2 && shdr.asc == 4 &&
160 shdr.ascq == 1)) {
161 SAS_DPRINTK("device %016llx LUN: %016llx "
162 "powering up or not ready yet, "
163 "sleeping...\n",
164 SAS_ADDR(task->dev->sas_addr),
165 SAS_ADDR(task->ssp_task.LUN));
166
167 schedule_timeout_interruptible(5*HZ);
168 } else if (shdr.sense_key == 1) {
169 res = 0;
170 break;
171 } else if (shdr.sense_key == 5) {
172 break;
173 } else {
174 SAS_DPRINTK("dev %016llx LUN: %016llx "
175 "sense key:0x%x ASC:0x%x ASCQ:0x%x"
176 "\n",
177 SAS_ADDR(task->dev->sas_addr),
178 SAS_ADDR(task->ssp_task.LUN),
179 shdr.sense_key,
180 shdr.asc, shdr.ascq);
181 }
182 } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
183 task->task_status.stat != SAM_GOOD) {
184 SAS_DPRINTK("task finished with resp:0x%x, "
185 "stat:0x%x\n",
186 task->task_status.resp,
187 task->task_status.stat);
188 goto ex_err;
189 } else {
190 res = 0;
191 break;
192 }
193 }
194ex_err:
195 if (pci_dma_dir != PCI_DMA_NONE)
196 kfree(scatter);
197out:
198 return res;
199}
200
201/* ---------- Domain device discovery ---------- */
202
203/**
204 * sas_get_port_device -- Discover devices which caused port creation
205 * @port: pointer to struct sas_port of interest
206 *
207 * Devices directly attached to a HA port, have no parent. This is
208 * how we know they are (domain) "root" devices. All other devices
209 * do, and should have their "parent" pointer set appropriately as
210 * soon as a child device is discovered.
211 */
212static int sas_get_port_device(struct asd_sas_port *port)
213{
214 unsigned long flags;
215 struct asd_sas_phy *phy;
216 struct sas_rphy *rphy;
217 struct domain_device *dev;
218
219 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
220 if (!dev)
221 return -ENOMEM;
222
223 spin_lock_irqsave(&port->phy_list_lock, flags);
224 if (list_empty(&port->phy_list)) {
225 spin_unlock_irqrestore(&port->phy_list_lock, flags);
226 kfree(dev);
227 return -ENODEV;
228 }
229 phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
230 spin_lock(&phy->frame_rcvd_lock);
231 memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
232 (size_t)phy->frame_rcvd_size));
233 spin_unlock(&phy->frame_rcvd_lock);
234 spin_unlock_irqrestore(&port->phy_list_lock, flags);
235
236 if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
237 struct dev_to_host_fis *fis =
238 (struct dev_to_host_fis *) dev->frame_rcvd;
239 if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
240 fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
241 && (fis->device & ~0x10) == 0)
242 dev->dev_type = SATA_PM;
243 else
244 dev->dev_type = SATA_DEV;
245 dev->tproto = SATA_PROTO;
246 } else {
247 struct sas_identify_frame *id =
248 (struct sas_identify_frame *) dev->frame_rcvd;
249 dev->dev_type = id->dev_type;
250 dev->iproto = id->initiator_bits;
251 dev->tproto = id->target_bits;
252 }
253
254 sas_init_dev(dev);
255
256 switch (dev->dev_type) {
257 case SAS_END_DEV:
258 rphy = sas_end_device_alloc(port->port);
259 break;
260 case EDGE_DEV:
261 rphy = sas_expander_alloc(port->port,
262 SAS_EDGE_EXPANDER_DEVICE);
263 break;
264 case FANOUT_DEV:
265 rphy = sas_expander_alloc(port->port,
266 SAS_FANOUT_EXPANDER_DEVICE);
267 break;
268 case SATA_DEV:
269 default:
270 printk("ERROR: Unidentified device type %d\n", dev->dev_type);
271 rphy = NULL;
272 break;
273 }
274
275 if (!rphy) {
276 kfree(dev);
277 return -ENODEV;
278 }
279 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
280 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
281 sas_fill_in_rphy(dev, rphy);
282 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
283 port->port_dev = dev;
284 dev->port = port;
285 dev->linkrate = port->linkrate;
286 dev->min_linkrate = port->linkrate;
287 dev->max_linkrate = port->linkrate;
288 dev->pathways = port->num_phys;
289 memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE);
290 memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
291 memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
292 port->disc.max_level = 0;
293
294 dev->rphy = rphy;
295 spin_lock(&port->dev_list_lock);
296 list_add_tail(&dev->dev_list_node, &port->dev_list);
297 spin_unlock(&port->dev_list_lock);
298
299 return 0;
300}
301
302/* ---------- Discover and Revalidate ---------- */
303
304/* ---------- SATA ---------- */
305
306static void sas_get_ata_command_set(struct domain_device *dev)
307{
308 struct dev_to_host_fis *fis =
309 (struct dev_to_host_fis *) dev->frame_rcvd;
310
311 if ((fis->sector_count == 1 && /* ATA */
312 fis->lbal == 1 &&
313 fis->lbam == 0 &&
314 fis->lbah == 0 &&
315 fis->device == 0)
316 ||
317 (fis->sector_count == 0 && /* CE-ATA (mATA) */
318 fis->lbal == 0 &&
319 fis->lbam == 0xCE &&
320 fis->lbah == 0xAA &&
321 (fis->device & ~0x10) == 0))
322
323 dev->sata_dev.command_set = ATA_COMMAND_SET;
324
325 else if ((fis->interrupt_reason == 1 && /* ATAPI */
326 fis->lbal == 1 &&
327 fis->byte_count_low == 0x14 &&
328 fis->byte_count_high == 0xEB &&
329 (fis->device & ~0x10) == 0))
330
331 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
332
333 else if ((fis->sector_count == 1 && /* SEMB */
334 fis->lbal == 1 &&
335 fis->lbam == 0x3C &&
336 fis->lbah == 0xC3 &&
337 fis->device == 0)
338 ||
339 (fis->interrupt_reason == 1 && /* SATA PM */
340 fis->lbal == 1 &&
341 fis->byte_count_low == 0x69 &&
342 fis->byte_count_high == 0x96 &&
343 (fis->device & ~0x10) == 0))
344
345 /* Treat it as a superset? */
346 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
347}
348
349/**
350 * sas_issue_ata_cmd -- Basic SATA command processing for discovery
351 * @dev: the device to send the command to
352 * @command: the command register
353 * @features: the features register
354 * @buffer: pointer to buffer to do I/O
355 * @size: size of @buffer
356 * @pci_dma_dir: PCI_DMA_...
357 */
358static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
359 u8 features, void *buffer, int size,
360 int pci_dma_dir)
361{
362 int res = 0;
363 struct sas_task *task;
364 struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
365 &dev->frame_rcvd[0];
366
367 res = -ENOMEM;
368 task = sas_alloc_task(GFP_KERNEL);
369 if (!task)
370 goto out;
371
372 task->dev = dev;
373
374 task->ata_task.fis.command = command;
375 task->ata_task.fis.features = features;
376 task->ata_task.fis.device = d2h_fis->device;
377 task->ata_task.retry_count = 1;
378
379 res = sas_execute_task(task, buffer, size, pci_dma_dir);
380
381 sas_free_task(task);
382out:
383 return res;
384}
385
386static void sas_sata_propagate_sas_addr(struct domain_device *dev)
387{
388 unsigned long flags;
389 struct asd_sas_port *port = dev->port;
390 struct asd_sas_phy *phy;
391
392 BUG_ON(dev->parent);
393
394 memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
395 spin_lock_irqsave(&port->phy_list_lock, flags);
396 list_for_each_entry(phy, &port->phy_list, port_phy_el)
397 memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
398 spin_unlock_irqrestore(&port->phy_list_lock, flags);
399}
400
401#define ATA_IDENTIFY_DEV 0xEC
402#define ATA_IDENTIFY_PACKET_DEV 0xA1
403#define ATA_SET_FEATURES 0xEF
404#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
405
406/**
407 * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
408 * @dev: STP/SATA device of interest (ATA/ATAPI)
409 *
410 * The LLDD has already been notified of this device, so that we can
411 * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
412 * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
413 * performance for this device.
414 */
415static int sas_discover_sata_dev(struct domain_device *dev)
416{
417 int res;
418 __le16 *identify_x;
419 u8 command;
420
421 identify_x = kzalloc(512, GFP_KERNEL);
422 if (!identify_x)
423 return -ENOMEM;
424
425 if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
426 dev->sata_dev.identify_device = identify_x;
427 command = ATA_IDENTIFY_DEV;
428 } else {
429 dev->sata_dev.identify_packet_device = identify_x;
430 command = ATA_IDENTIFY_PACKET_DEV;
431 }
432
433 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
434 PCI_DMA_FROMDEVICE);
435 if (res)
436 goto out_err;
437
438 /* lives on the media? */
439 if (le16_to_cpu(identify_x[0]) & 4) {
440 /* incomplete response */
441 SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
442 "dev %llx\n", SAS_ADDR(dev->sas_addr));
443 if (!le16_to_cpu(identify_x[83] & (1<<6)))
444 goto cont1;
445 res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
446 ATA_FEATURE_PUP_STBY_SPIN_UP,
447 NULL, 0, PCI_DMA_NONE);
448 if (res)
449 goto cont1;
450
451 schedule_timeout_interruptible(5*HZ); /* More time? */
452 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
453 PCI_DMA_FROMDEVICE);
454 if (res)
455 goto out_err;
456 }
457cont1:
458 /* Get WWN */
459 if (dev->port->oob_mode != SATA_OOB_MODE) {
460 memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
461 SAS_ADDR_SIZE);
462 } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
463 (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
464 == 0x5000) {
465 int i;
466
467 for (i = 0; i < 4; i++) {
468 dev->sas_addr[2*i] =
469 (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
470 dev->sas_addr[2*i+1] =
471 le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
472 }
473 }
474 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
475 if (!dev->parent)
476 sas_sata_propagate_sas_addr(dev);
477
478 /* XXX Hint: register this SATA device with SATL.
479 When this returns, dev->sata_dev->lu is alive and
480 present.
481 sas_satl_register_dev(dev);
482 */
483 return 0;
484out_err:
485 dev->sata_dev.identify_packet_device = NULL;
486 dev->sata_dev.identify_device = NULL;
487 kfree(identify_x);
488 return res;
489}
490
491static int sas_discover_sata_pm(struct domain_device *dev)
492{
493 return -ENODEV;
494}
495
496int sas_notify_lldd_dev_found(struct domain_device *dev)
497{
498 int res = 0;
499 struct sas_ha_struct *sas_ha = dev->port->ha;
500 struct Scsi_Host *shost = sas_ha->core.shost;
501 struct sas_internal *i = to_sas_internal(shost->transportt);
502
503 if (i->dft->lldd_dev_found) {
504 res = i->dft->lldd_dev_found(dev);
505 if (res) {
506 printk("sas: driver on pcidev %s cannot handle "
507 "device %llx, error:%d\n",
508 pci_name(sas_ha->pcidev),
509 SAS_ADDR(dev->sas_addr), res);
510 }
511 }
512 return res;
513}
514
515
516void sas_notify_lldd_dev_gone(struct domain_device *dev)
517{
518 struct sas_ha_struct *sas_ha = dev->port->ha;
519 struct Scsi_Host *shost = sas_ha->core.shost;
520 struct sas_internal *i = to_sas_internal(shost->transportt);
521
522 if (i->dft->lldd_dev_gone)
523 i->dft->lldd_dev_gone(dev);
524}
525
526/* ---------- Common/dispatchers ---------- */
527
528/**
529 * sas_discover_sata -- discover an STP/SATA domain device
530 * @dev: pointer to struct domain_device of interest
531 *
532 * First we notify the LLDD of this device, so we can send frames to
533 * it. Then depending on the type of device we call the appropriate
534 * discover functions. Once device discover is done, we notify the
535 * LLDD so that it can fine-tune its parameters for the device, by
536 * removing it and then adding it. That is, the second time around,
537 * the driver would have certain fields, that it is looking at, set.
538 * Finally we initialize the kobj so that the device can be added to
539 * the system at registration time. Devices directly attached to a HA
540 * port, have no parents. All other devices do, and should have their
541 * "parent" pointer set appropriately before calling this function.
542 */
543int sas_discover_sata(struct domain_device *dev)
544{
545 int res;
546
547 sas_get_ata_command_set(dev);
548
549 res = sas_notify_lldd_dev_found(dev);
550 if (res)
551 return res;
552
553 switch (dev->dev_type) {
554 case SATA_DEV:
555 res = sas_discover_sata_dev(dev);
556 break;
557 case SATA_PM:
558 res = sas_discover_sata_pm(dev);
559 break;
560 default:
561 break;
562 }
563
564 sas_notify_lldd_dev_gone(dev);
565 if (!res) {
566 sas_notify_lldd_dev_found(dev);
567 }
568 return res;
569}
570
571/**
572 * sas_discover_end_dev -- discover an end device (SSP, etc)
573 * @end: pointer to domain device of interest
574 *
575 * See comment in sas_discover_sata().
576 */
577int sas_discover_end_dev(struct domain_device *dev)
578{
579 int res;
580
581 res = sas_notify_lldd_dev_found(dev);
582 if (res)
583 return res;
584
585 res = sas_rphy_add(dev->rphy);
586 if (res)
587 goto out_err;
588
589 /* do this to get the end device port attributes which will have
590 * been scanned in sas_rphy_add */
591 sas_notify_lldd_dev_gone(dev);
592 sas_notify_lldd_dev_found(dev);
593
594 return 0;
595
596out_err:
597 sas_notify_lldd_dev_gone(dev);
598 return res;
599}
600
601/* ---------- Device registration and unregistration ---------- */
602
603static inline void sas_unregister_common_dev(struct domain_device *dev)
604{
605 sas_notify_lldd_dev_gone(dev);
606 if (!dev->parent)
607 dev->port->port_dev = NULL;
608 else
609 list_del_init(&dev->siblings);
610 list_del_init(&dev->dev_list_node);
611}
612
613void sas_unregister_dev(struct domain_device *dev)
614{
615 if (dev->rphy) {
616 sas_remove_children(&dev->rphy->dev);
617 sas_rphy_delete(dev->rphy);
618 dev->rphy = NULL;
619 }
620 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
621 /* remove the phys and ports, everything else should be gone */
622 kfree(dev->ex_dev.ex_phy);
623 dev->ex_dev.ex_phy = NULL;
624 }
625 sas_unregister_common_dev(dev);
626}
627
628void sas_unregister_domain_devices(struct asd_sas_port *port)
629{
630 struct domain_device *dev, *n;
631
632 list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node)
633 sas_unregister_dev(dev);
634
635 port->port->rphy = NULL;
636
637}
638
639/* ---------- Discovery and Revalidation ---------- */
640
641/**
642 * sas_discover_domain -- discover the domain
643 * @port: port to the domain of interest
644 *
645 * NOTE: this process _must_ quit (return) as soon as any connection
646 * errors are encountered. Connection recovery is done elsewhere.
647 * Discover process only interrogates devices in order to discover the
648 * domain.
649 */
650static void sas_discover_domain(void *data)
651{
652 int error = 0;
653 struct asd_sas_port *port = data;
654
655 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
656 &port->disc.pending);
657
658 if (port->port_dev)
659 return ;
660 else {
661 error = sas_get_port_device(port);
662 if (error)
663 return;
664 }
665
666 SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
667 current->pid);
668
669 switch (port->port_dev->dev_type) {
670 case SAS_END_DEV:
671 error = sas_discover_end_dev(port->port_dev);
672 break;
673 case EDGE_DEV:
674 case FANOUT_DEV:
675 error = sas_discover_root_expander(port->port_dev);
676 break;
677 case SATA_DEV:
678 case SATA_PM:
679 error = sas_discover_sata(port->port_dev);
680 break;
681 default:
682 SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type);
683 break;
684 }
685
686 if (error) {
687 kfree(port->port_dev); /* not kobject_register-ed yet */
688 port->port_dev = NULL;
689 }
690
691 SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
692 current->pid, error);
693}
694
695static void sas_revalidate_domain(void *data)
696{
697 int res = 0;
698 struct asd_sas_port *port = data;
699
700 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
701 &port->disc.pending);
702
703 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
704 current->pid);
705 if (port->port_dev)
706 res = sas_ex_revalidate_domain(port->port_dev);
707
708 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
709 port->id, current->pid, res);
710}
711
712/* ---------- Events ---------- */
713
714int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
715{
716 struct sas_discovery *disc;
717
718 if (!port)
719 return 0;
720 disc = &port->disc;
721
722 BUG_ON(ev >= DISC_NUM_EVENTS);
723
724 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
725 &disc->disc_work[ev], port->ha->core.shost);
726
727 return 0;
728}
729
730/**
731 * sas_init_disc -- initialize the discovery struct in the port
732 * @port: pointer to struct port
733 *
734 * Called when the ports are being initialized.
735 */
736void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
737{
738 int i;
739
740 static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
741 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
742 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
743 };
744
745 spin_lock_init(&disc->disc_event_lock);
746 disc->pending = 0;
747 for (i = 0; i < DISC_NUM_EVENTS; i++)
748 INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
749}
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
new file mode 100644
index 000000000000..f1246d2c9bef
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -0,0 +1,76 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_dump.h"
26
27#ifdef SAS_DEBUG
28
/* Event-number -> name tables used by the dump helpers below; the
 * indices are assumed to match the ha/port/phy event enums declared in
 * the libsas headers — confirm when those enums change. */
29static const char *sas_hae_str[] = {
30	[0] = "HAE_RESET",
31};
32
33static const char *sas_porte_str[] = {
34	[0] = "PORTE_BYTES_DMAED",
35	[1] = "PORTE_BROADCAST_RCVD",
36	[2] = "PORTE_LINK_RESET_ERR",
37	[3] = "PORTE_TIMER_EVENT",
38	[4] = "PORTE_HARD_RESET",
39};
40
41static const char *sas_phye_str[] = {
42	[0] = "PHYE_LOSS_OF_SIGNAL",
43	[1] = "PHYE_OOB_DONE",
44	[2] = "PHYE_OOB_ERROR",
45	[3] = "PHYE_SPINUP_HOLD",
46};
47
/* Log a port event by name for the given phy id. */
48void sas_dprint_porte(int phyid, enum port_event pe)
49{
50	SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]);
51}
/* Log a phy event by name for the given phy id. */
52void sas_dprint_phye(int phyid, enum phy_event pe)
53{
54	SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
55}
56
/* Log a host-adapter event by name, tagged with the PCI device name. */
57void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
58{
59	SAS_DPRINTK("ha %s: %s event\n", pci_name(sas_ha->pcidev),
60		    sas_hae_str[he]);
61}
62
/* Dump the identifying fields of a port, one line per field. */
63void sas_dump_port(struct asd_sas_port *port)
64{
65	SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
66	SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id,
67		    SAS_ADDR(port->sas_addr));
68	SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id,
69		    SAS_ADDR(port->attached_sas_addr));
70	SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto);
71	SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto);
72	SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
73	SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
74}
75
76#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
new file mode 100644
index 000000000000..47b45d4f5258
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -0,0 +1,42 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines header file
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26
27#ifdef SAS_DEBUG
28
/* Real implementations live in sas_dump.c when SAS_DEBUG is defined. */
29void sas_dprint_porte(int phyid, enum port_event pe);
30void sas_dprint_phye(int phyid, enum phy_event pe);
31void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
32void sas_dump_port(struct asd_sas_port *port);
33
34#else /* SAS_DEBUG */
35
/* Debugging disabled: compile the dump helpers away as empty inlines. */
36static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
37static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
38static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
39				  enum ha_event he) { }
40static inline void sas_dump_port(struct asd_sas_port *port) { }
41
42#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
new file mode 100644
index 000000000000..19110ed1c89c
--- /dev/null
+++ b/drivers/scsi/libsas/sas_event.c
@@ -0,0 +1,75 @@
1/*
2 * Serial Attached SCSI (SAS) Event processing
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <scsi/scsi_host.h>
26#include "sas_internal.h"
27#include "sas_dump.h"
28
/* Queue a host-adapter event; installed as sas_ha->notify_ha_event. */
29static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
30{
31	BUG_ON(event >= HA_NUM_EVENTS);
32
33	sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
34			&sas_ha->ha_events[event], sas_ha->core.shost);
35}
36
/* Queue a port event on @phy; installed as sas_ha->notify_port_event.
 * Note the pending mask is per-phy but the lock is the HA-wide
 * event_lock. */
37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
38{
39	struct sas_ha_struct *ha = phy->ha;
40
41	BUG_ON(event >= PORT_NUM_EVENTS);
42
43	sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
44			&phy->port_events[event], ha->core.shost);
45}
46
/* Queue a phy event on @phy; installed as sas_ha->notify_phy_event. */
47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
48{
49	struct sas_ha_struct *ha = phy->ha;
50
51	BUG_ON(event >= PHY_NUM_EVENTS);
52
53	sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
54			&phy->phy_events[event], ha->core.shost);
55}
56
/*
 * Initialize the HA-level event machinery: set up the event lock and
 * work items, and publish the notify_* callbacks on the sas_ha so LLDDs
 * can raise events.  Always returns 0.
 */
57int sas_init_events(struct sas_ha_struct *sas_ha)
58{
59	static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
60		[HAE_RESET] = sas_hae_reset,
61	};
62
63	int i;
64
65	spin_lock_init(&sas_ha->event_lock);
66
67	for (i = 0; i < HA_NUM_EVENTS; i++)
68		INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
69
70	sas_ha->notify_ha_event = notify_ha_event;
71	sas_ha->notify_port_event = notify_port_event;
72	sas_ha->notify_phy_event = notify_phy_event;
73
74	return 0;
75}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
new file mode 100644
index 000000000000..30b8014bcc7a
--- /dev/null
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -0,0 +1,1855 @@
1/*
2 * Serial Attached SCSI (SAS) Expander discovery and configuration
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <linux/pci.h>
26#include <linux/scatterlist.h>
27
28#include "sas_internal.h"
29
30#include <scsi/scsi_transport.h>
31#include <scsi/scsi_transport_sas.h>
32#include "../scsi_sas_internal.h"
33
34static int sas_discover_expander(struct domain_device *dev);
35static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
36static int sas_configure_phy(struct domain_device *dev, int phy_id,
37 u8 *sas_addr, int include);
38static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
39
40#if 0
41/* FIXME: smp needs to migrate into the sas class */
42static ssize_t smp_portal_read(struct kobject *, char *, loff_t, size_t);
43static ssize_t smp_portal_write(struct kobject *, char *, loff_t, size_t);
44#endif
45
46/* ---------- SMP task management ---------- */
47
/*
 * Timer callback for an SMP task: mark the task aborted (unless it
 * already completed) and wake the waiter in smp_execute_task().
 */
48static void smp_task_timedout(unsigned long _task)
49{
50	struct sas_task *task = (void *) _task;
51	unsigned long flags;
52
53	spin_lock_irqsave(&task->task_state_lock, flags);
54	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
55		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
56	spin_unlock_irqrestore(&task->task_state_lock, flags);
57
58	complete(&task->completion);
59}
60
/*
 * LLDD completion callback for an SMP task.  del_timer() returning 0
 * means the timeout already fired (and completed the task), so only
 * complete() here when we beat the timer.
 */
61static void smp_task_done(struct sas_task *task)
62{
63	if (!del_timer(&task->timer))
64		return;
65	complete(&task->completion);
66}
67
68/* Give it some long enough timeout. In seconds. */
69#define SMP_TIMEOUT 10
70
/*
 * Synchronously execute one SMP request/response exchange against the
 * expander @dev, guarded by a SMP_TIMEOUT-second timer.
 *
 * Returns 0 when the task completed with SAM_GOOD, -ENOMEM on
 * allocation failure, -ETASK on timeout/abort or bad task status, or
 * the LLDD's error code if the task could not even be queued.
 */
71static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
72			    void *resp, int resp_size)
73{
74	int res;
75	struct sas_task *task = sas_alloc_task(GFP_KERNEL);
76	struct sas_internal *i =
77		to_sas_internal(dev->port->ha->core.shost->transportt);
78
79	if (!task)
80		return -ENOMEM;
81
82	task->dev = dev;
83	task->task_proto = dev->tproto;
84	sg_init_one(&task->smp_task.smp_req, req, req_size);
85	sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
86
87	task->task_done = smp_task_done;
88
	/* Arm the timeout before handing the task to the LLDD; either
	 * smp_task_done() or smp_task_timedout() will complete() it. */
89	task->timer.data = (unsigned long) task;
90	task->timer.function = smp_task_timedout;
91	task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
92	add_timer(&task->timer);
93
94	res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
95
96	if (res) {
97	 	del_timer(&task->timer);
98	 	SAS_DPRINTK("executing SMP task failed:%d\n", res);
99		goto ex_err;
100	}
101
102	wait_for_completion(&task->completion);
103	res = -ETASK;
104	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
105		SAS_DPRINTK("smp task timed out or aborted\n");
106		i->dft->lldd_abort_task(task);
107		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
108			SAS_DPRINTK("SMP task aborted and not done\n");
109			goto ex_err;
110		}
111	}
112	if (task->task_status.resp == SAS_TASK_COMPLETE &&
113	    task->task_status.stat == SAM_GOOD)
114		res = 0;
115	else
116		SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
117			    "status 0x%x\n", __FUNCTION__,
118			    SAS_ADDR(dev->sas_addr),
119			    task->task_status.resp,
120			    task->task_status.stat);
121ex_err:
122	sas_free_task(task);
123	return res;
124}
125
126/* ---------- Allocations ---------- */
127
128static inline void *alloc_smp_req(int size)
129{
130 u8 *p = kzalloc(size, GFP_KERNEL);
131 if (p)
132 p[0] = SMP_REQUEST;
133 return p;
134}
135
/* Allocate a zeroed SMP response buffer; NULL on failure. */
136static inline void *alloc_smp_resp(int size)
137{
138	return kzalloc(size, GFP_KERNEL);
139}
140
141/* ---------- Expander configuration ---------- */
142
/*
 * Record the contents of one SMP DISCOVER response into the expander's
 * ex_phy[phy_id] slot and mirror it into the transport-class sas_phy.
 * On first discovery (phy->phy == NULL) a sas_phy is allocated and
 * registered; on rediscovery the existing one is updated in place.
 */
143static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
144			   void *disc_resp)
145{
146	struct expander_device *ex = &dev->ex_dev;
147	struct ex_phy *phy = &ex->ex_phy[phy_id];
148	struct smp_resp *resp = disc_resp;
149	struct discover_resp *dr = &resp->disc;
150	struct sas_rphy *rphy = dev->rphy;
151	int rediscover = (phy->phy != NULL);
152
153	if (!rediscover) {
154		phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
155
156		/* FIXME: error_handling */
157		BUG_ON(!phy->phy);
158	}
159
	/* Only a FUNC_ACC response carries valid phy data; vacant or
	 * otherwise-failed phys are just marked and left alone. */
160	switch (resp->result) {
161	case SMP_RESP_PHY_VACANT:
162		phy->phy_state = PHY_VACANT;
163		return;
164	default:
165		phy->phy_state = PHY_NOT_PRESENT;
166		return;
167	case SMP_RESP_FUNC_ACC:
168		phy->phy_state = PHY_EMPTY; /* do not know yet */
169		break;
170	}
171
172	phy->phy_id = phy_id;
173	phy->attached_dev_type = dr->attached_dev_type;
174	phy->linkrate = dr->linkrate;
175	phy->attached_sata_host = dr->attached_sata_host;
176	phy->attached_sata_dev  = dr->attached_sata_dev;
177	phy->attached_sata_ps   = dr->attached_sata_ps;
	/* protocol bitfields in the DISCOVER response are shifted by one
	 * relative to the SAS_PROTO_* values used elsewhere */
178	phy->attached_iproto = dr->iproto << 1;
179	phy->attached_tproto = dr->tproto << 1;
180	memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
181	phy->attached_phy_id = dr->attached_phy_id;
182	phy->phy_change_count = dr->change_count;
183	phy->routing_attr = dr->routing_attr;
184	phy->virtual = dr->virtual;
185	phy->last_da_index = -1;
186
187	phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
188	phy->phy->identify.target_port_protocols = phy->attached_tproto;
189	phy->phy->identify.phy_identifier = phy_id;
190	phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
191	phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
192	phy->phy->minimum_linkrate = dr->pmin_linkrate;
193	phy->phy->maximum_linkrate = dr->pmax_linkrate;
194	phy->phy->negotiated_linkrate = phy->linkrate;
195
196	if (!rediscover)
197		sas_phy_add(phy->phy);
198
199	SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
200		    SAS_ADDR(dev->sas_addr), phy->phy_id,
201		    phy->routing_attr == TABLE_ROUTING ? 'T' :
202		    phy->routing_attr == DIRECT_ROUTING ? 'D' :
203		    phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?',
204		    SAS_ADDR(phy->attached_sas_addr));
205
206	return;
207}
208
209#define DISCOVER_REQ_SIZE 16
210#define DISCOVER_RESP_SIZE 56
211
212static int sas_ex_phy_discover(struct domain_device *dev, int single)
213{
214 struct expander_device *ex = &dev->ex_dev;
215 int res = 0;
216 u8 *disc_req;
217 u8 *disc_resp;
218
219 disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
220 if (!disc_req)
221 return -ENOMEM;
222
223 disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE);
224 if (!disc_resp) {
225 kfree(disc_req);
226 return -ENOMEM;
227 }
228
229 disc_req[1] = SMP_DISCOVER;
230
231 if (0 <= single && single < ex->num_phys) {
232 disc_req[9] = single;
233 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
234 disc_resp, DISCOVER_RESP_SIZE);
235 if (res)
236 goto out_err;
237 sas_set_ex_phy(dev, single, disc_resp);
238 } else {
239 int i;
240
241 for (i = 0; i < ex->num_phys; i++) {
242 disc_req[9] = i;
243 res = smp_execute_task(dev, disc_req,
244 DISCOVER_REQ_SIZE, disc_resp,
245 DISCOVER_RESP_SIZE);
246 if (res)
247 goto out_err;
248 sas_set_ex_phy(dev, i, disc_resp);
249 }
250 }
251out_err:
252 kfree(disc_resp);
253 kfree(disc_req);
254 return res;
255}
256
/*
 * Allocate the per-phy array for this expander (ex->num_phys must
 * already be set from REPORT GENERAL) and discover all of its phys.
 * On failure the array is freed and reset to NULL.
 */
257static int sas_expander_discover(struct domain_device *dev)
258{
259	struct expander_device *ex = &dev->ex_dev;
260	int res = -ENOMEM;
261
262	ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL);
263	if (!ex->ex_phy)
264		return -ENOMEM;
265
266	res = sas_ex_phy_discover(dev, -1);
267	if (res)
268		goto out_err;
269
270	return 0;
271 out_err:
272	kfree(ex->ex_phy);
273	ex->ex_phy = NULL;
274	return res;
275}
276
277#define MAX_EXPANDER_PHYS 128
278
/*
 * Copy the interesting fields of an SMP REPORT GENERAL response into
 * the expander device, clamping the phy count to MAX_EXPANDER_PHYS.
 */
279static void ex_assign_report_general(struct domain_device *dev,
280					struct smp_resp *resp)
281{
282	struct report_general_resp *rg = &resp->rg;
283
284	dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
285	dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
286	dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
287	dev->ex_dev.conf_route_table = rg->conf_route_table;
288	dev->ex_dev.configuring = rg->configuring;
289	memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
290}
291
292#define RG_REQ_SIZE 8
293#define RG_RESP_SIZE 32
294
/*
 * Issue SMP REPORT GENERAL to @dev and store the result.  If the
 * expander reports it is still self-configuring, sleep 5s and retry,
 * up to 5 attempts.  Returns 0 on success or an error/SMP result code.
 */
295static int sas_ex_general(struct domain_device *dev)
296{
297	u8 *rg_req;
298	struct smp_resp *rg_resp;
299	int res;
300	int i;
301
302	rg_req = alloc_smp_req(RG_REQ_SIZE);
303	if (!rg_req)
304		return -ENOMEM;
305
306	rg_resp = alloc_smp_resp(RG_RESP_SIZE);
307	if (!rg_resp) {
308		kfree(rg_req);
309		return -ENOMEM;
310	}
311
312	rg_req[1] = SMP_REPORT_GENERAL;
313
314	for (i = 0; i < 5; i++) {
315		res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
316				       RG_RESP_SIZE);
317
318		if (res) {
319			SAS_DPRINTK("RG to ex %016llx failed:0x%x\n",
320				    SAS_ADDR(dev->sas_addr), res);
321			goto out;
322		} else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
323			SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n",
324				    SAS_ADDR(dev->sas_addr), rg_resp->result);
			/* note: the SMP result code is returned as-is,
			 * a positive value distinct from -errno */
325			res = rg_resp->result;
326			goto out;
327		}
328
329		ex_assign_report_general(dev, rg_resp);
330
331		if (dev->ex_dev.configuring) {
332			SAS_DPRINTK("RG: ex %llx self-configuring...\n",
333				    SAS_ADDR(dev->sas_addr));
334			schedule_timeout_interruptible(5*HZ);
335		} else
336			break;
337	}
338out:
339	kfree(rg_req);
340	kfree(rg_resp);
341	return res;
342}
343
/*
 * Copy vendor/product/revision strings from a REPORT MANUFACTURER
 * INFORMATION response into the transport-class expander device.
 * Byte 8 bit 0 flags the presence of the component fields.
 */
344static void ex_assign_manuf_info(struct domain_device *dev, void
345					*_mi_resp)
346{
347	u8 *mi_resp = _mi_resp;
348	struct sas_rphy *rphy = dev->rphy;
349	struct sas_expander_device *edev = rphy_to_expander_device(rphy);
350
351	memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN);
352	memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN);
353	memcpy(edev->product_rev, mi_resp + 36,
354	       SAS_EXPANDER_PRODUCT_REV_LEN);
355
356	if (mi_resp[8] & 1) {
357		memcpy(edev->component_vendor_id, mi_resp + 40,
358		       SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
359		edev->component_id = mi_resp[48] << 8 | mi_resp[49];
360		edev->component_revision_id = mi_resp[50];
361	}
362}
363
364#define MI_REQ_SIZE 8
365#define MI_RESP_SIZE 64
366
/*
 * Issue SMP REPORT MANUFACTURER INFORMATION and record the strings.
 * Returns -ENOMEM or the smp_execute_task() error.
 * NOTE(review): when the expander answers with a non-FUNC_ACC SMP
 * result, res is still 0 here, so the failure is logged but reported
 * as success to the caller — confirm this is intentional.
 */
367static int sas_ex_manuf_info(struct domain_device *dev)
368{
369	u8 *mi_req;
370	u8 *mi_resp;
371	int res;
372
373	mi_req = alloc_smp_req(MI_REQ_SIZE);
374	if (!mi_req)
375		return -ENOMEM;
376
377	mi_resp = alloc_smp_resp(MI_RESP_SIZE);
378	if (!mi_resp) {
379		kfree(mi_req);
380		return -ENOMEM;
381	}
382
383	mi_req[1] = SMP_REPORT_MANUF_INFO;
384
385	res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
386	if (res) {
387		SAS_DPRINTK("MI: ex %016llx failed:0x%x\n",
388			    SAS_ADDR(dev->sas_addr), res);
389		goto out;
390	} else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
391		SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n",
392			    SAS_ADDR(dev->sas_addr), mi_resp[2]);
393		goto out;
394	}
395
396	ex_assign_manuf_info(dev, mi_resp);
397out:
398	kfree(mi_req);
399	kfree(mi_resp);
400	return res;
401}
402
403#define PC_REQ_SIZE 44
404#define PC_RESP_SIZE 8
405
/*
 * Issue an SMP PHY CONTROL function (@phy_func, e.g. link reset or
 * disable) to phy @phy_id of expander @dev.  Optional @rates programs
 * the min/max programmed link rates (response result is not checked).
 */
406int sas_smp_phy_control(struct domain_device *dev, int phy_id,
407			enum phy_func phy_func,
408			struct sas_phy_linkrates *rates)
409{
410	u8 *pc_req;
411	u8 *pc_resp;
412	int res;
413
414	pc_req = alloc_smp_req(PC_REQ_SIZE);
415	if (!pc_req)
416		return -ENOMEM;
417
418	pc_resp = alloc_smp_resp(PC_RESP_SIZE);
419	if (!pc_resp) {
420		kfree(pc_req);
421		return -ENOMEM;
422	}
423
424	pc_req[1] = SMP_PHY_CONTROL;
425	pc_req[9] = phy_id;
426	pc_req[10]= phy_func;
427	if (rates) {
		/* link rates live in the high nibble of bytes 32/33 */
428		pc_req[32] = rates->minimum_linkrate << 4;
429		pc_req[33] = rates->maximum_linkrate << 4;
430	}
431
432	res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
433
434	kfree(pc_resp);
435	kfree(pc_req);
436	return res;
437}
438
/* Disable one expander phy via PHY CONTROL and mark it disabled in the
 * cached phy state (the SMP result is best-effort, not checked). */
439static void sas_ex_disable_phy(struct domain_device *dev, int phy_id)
440{
441	struct expander_device *ex = &dev->ex_dev;
442	struct ex_phy *phy = &ex->ex_phy[phy_id];
443
444	sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL);
445	phy->linkrate = SAS_PHY_DISABLED;
446}
447
448static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr)
449{
450 struct expander_device *ex = &dev->ex_dev;
451 int i;
452
453 for (i = 0; i < ex->num_phys; i++) {
454 struct ex_phy *phy = &ex->ex_phy[i];
455
456 if (phy->phy_state == PHY_VACANT ||
457 phy->phy_state == PHY_NOT_PRESENT)
458 continue;
459
460 if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr))
461 sas_ex_disable_phy(dev, i);
462 }
463}
464
465static int sas_dev_present_in_domain(struct asd_sas_port *port,
466 u8 *sas_addr)
467{
468 struct domain_device *dev;
469
470 if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr))
471 return 1;
472 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
473 if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr))
474 return 1;
475 }
476 return 0;
477}
478
479#define RPEL_REQ_SIZE 16
480#define RPEL_RESP_SIZE 32
481int sas_smp_get_phy_events(struct sas_phy *phy)
482{
483 int res;
484 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
485 struct domain_device *dev = sas_find_dev_by_rphy(rphy);
486 u8 *req = alloc_smp_req(RPEL_REQ_SIZE);
487 u8 *resp = kzalloc(RPEL_RESP_SIZE, GFP_KERNEL);
488
489 if (!resp)
490 return -ENOMEM;
491
492 req[1] = SMP_REPORT_PHY_ERR_LOG;
493 req[9] = phy->number;
494
495 res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
496 resp, RPEL_RESP_SIZE);
497
498 if (!res)
499 goto out;
500
501 phy->invalid_dword_count = scsi_to_u32(&resp[12]);
502 phy->running_disparity_error_count = scsi_to_u32(&resp[16]);
503 phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]);
504 phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
505
506 out:
507 kfree(resp);
508 return res;
509
510}
511
512#define RPS_REQ_SIZE 16
513#define RPS_RESP_SIZE 60
514
515static int sas_get_report_phy_sata(struct domain_device *dev,
516 int phy_id,
517 struct smp_resp *rps_resp)
518{
519 int res;
520 u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
521
522 if (!rps_req)
523 return -ENOMEM;
524
525 rps_req[1] = SMP_REPORT_PHY_SATA;
526 rps_req[9] = phy_id;
527
528 res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
529 rps_resp, RPS_RESP_SIZE);
530
531 kfree(rps_req);
532 return 0;
533}
534
/*
 * Compute @child's link rates and pathway count from all parent phys
 * attached to it, and attach those phys to the parent-side sas_port.
 */
535static void sas_ex_get_linkrate(struct domain_device *parent,
536				       struct domain_device *child,
537				       struct ex_phy *parent_phy)
538{
539	struct expander_device *parent_ex = &parent->ex_dev;
540	struct sas_port *port;
541	int i;
542
543	child->pathways = 0;
544
545	port = parent_phy->port;
546
547	for (i = 0; i < parent_ex->num_phys; i++) {
548		struct ex_phy *phy = &parent_ex->ex_phy[i];
549
550		if (phy->phy_state == PHY_VACANT ||
551		    phy->phy_state == PHY_NOT_PRESENT)
552			continue;
553
554		if (SAS_ADDR(phy->attached_sas_addr) ==
555		    SAS_ADDR(child->sas_addr)) {
556
			/* NOTE(review): min_linkrate narrows but
			 * max_linkrate *widens* with each phy
			 * (max(parent->max, phy->linkrate)) — verify
			 * this asymmetry is intended. */
557			child->min_linkrate = min(parent->min_linkrate,
558						  phy->linkrate);
559			child->max_linkrate = max(parent->max_linkrate,
560						  phy->linkrate);
561			child->pathways++;
562			sas_port_add_phy(port, phy->phy);
563		}
564	}
565	child->linkrate = min(parent_phy->linkrate, child->max_linkrate);
566	child->pathways = min(child->pathways, parent->pathways);
567}
568
/*
 * Discover and register an end device (SATA/STP or SSP) attached to
 * expander phy @phy_id of @parent.  Allocates the child domain_device,
 * a parent-side sas_port, and runs protocol-specific discovery.
 * Returns the child on success, NULL on failure or for SATA
 * hosts/port-selectors which are not handled here.
 */
569static struct domain_device *sas_ex_discover_end_dev(
570	struct domain_device *parent, int phy_id)
571{
572	struct expander_device *parent_ex = &parent->ex_dev;
573	struct ex_phy *phy = &parent_ex->ex_phy[phy_id];
574	struct domain_device *child = NULL;
575	struct sas_rphy *rphy;
576	int res;
577
578	if (phy->attached_sata_host || phy->attached_sata_ps)
579		return NULL;
580
581	child = kzalloc(sizeof(*child), GFP_KERNEL);
582	if (!child)
583		return NULL;
584
585	child->parent = parent;
586	child->port   = parent->port;
587	child->iproto = phy->attached_iproto;
588	memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
589	sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
590	phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
591	BUG_ON(!phy->port);
592	/* FIXME: better error handling*/
593	BUG_ON(sas_port_add(phy->port) != 0);
594	sas_ex_get_linkrate(parent, child, phy);
595
596	if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
		/* SATA/STP leaf: fetch the signature FIS via REPORT PHY
		 * SATA, then run SATA discovery. */
597		child->dev_type = SATA_DEV;
598		if (phy->attached_tproto & SAS_PROTO_STP)
599			child->tproto = phy->attached_tproto;
600		if (phy->attached_sata_dev)
601			child->tproto |= SATA_DEV;
602		res = sas_get_report_phy_sata(parent, phy_id,
603					      &child->sata_dev.rps_resp);
604		if (res) {
605			SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
606				    "0x%x\n", SAS_ADDR(parent->sas_addr),
607				    phy_id, res);
			/* NOTE(review): phy->port allocated above is
			 * not torn down on this path — confirm. */
608			kfree(child);
609			return NULL;
610		}
611		memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
612		       sizeof(struct dev_to_host_fis));
613		sas_init_dev(child);
614		res = sas_discover_sata(child);
615		if (res) {
616			SAS_DPRINTK("sas_discover_sata() for device %16llx at "
617				    "%016llx:0x%x returned 0x%x\n",
618				    SAS_ADDR(child->sas_addr),
619				    SAS_ADDR(parent->sas_addr), phy_id, res);
620			kfree(child);
621			return NULL;
622		}
623	} else if (phy->attached_tproto & SAS_PROTO_SSP) {
		/* SSP leaf: allocate the end-device rphy, link the child
		 * into the port's device list and run SSP discovery. */
624		child->dev_type = SAS_END_DEV;
625		rphy = sas_end_device_alloc(phy->port);
626		/* FIXME: error handling */
627		BUG_ON(!rphy);
628		child->tproto = phy->attached_tproto;
629		sas_init_dev(child);
630
631		child->rphy = rphy;
632		sas_fill_in_rphy(child, rphy);
633
634		spin_lock(&parent->port->dev_list_lock);
635		list_add_tail(&child->dev_list_node, &parent->port->dev_list);
636		spin_unlock(&parent->port->dev_list_lock);
637
638		res = sas_discover_end_dev(child);
639		if (res) {
640			SAS_DPRINTK("sas_discover_end_dev() for device %16llx "
641				    "at %016llx:0x%x returned 0x%x\n",
642				    SAS_ADDR(child->sas_addr),
643				    SAS_ADDR(parent->sas_addr), phy_id, res);
644			/* FIXME: this kfrees list elements without removing them */
645			//kfree(child);
			/* NOTE(review): child is leaked here (and stays
			 * on dev_list) rather than freed — the disabled
			 * kfree would be a use-after-free. */
646			return NULL;
647		}
648	} else {
649		SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
650			    phy->attached_tproto, SAS_ADDR(parent->sas_addr),
651			    phy_id);
652	}
653
654	list_add_tail(&child->siblings, &parent_ex->children);
655	return child;
656}
657
/*
 * Discover a child expander (edge or fanout) attached to phy @phy_id of
 * @parent: allocate and register its rphy/port, link it into the domain
 * and recurse into sas_discover_expander().  Returns the child, or NULL
 * on failure or an illegal direct-routed expander-to-expander link.
 */
658static struct domain_device *sas_ex_discover_expander(
659	struct domain_device *parent, int phy_id)
660{
661	struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy);
662	struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
663	struct domain_device *child = NULL;
664	struct sas_rphy *rphy;
665	struct sas_expander_device *edev;
666	struct asd_sas_port *port;
667	int res;
668
669	if (phy->routing_attr == DIRECT_ROUTING) {
670		SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not "
671			    "allowed\n",
672			    SAS_ADDR(parent->sas_addr), phy_id,
673			    SAS_ADDR(phy->attached_sas_addr),
674			    phy->attached_phy_id);
675		return NULL;
676	}
677	child = kzalloc(sizeof(*child), GFP_KERNEL);
678	if (!child)
679		return NULL;
680
681	phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
682	/* FIXME: better error handling */
683	BUG_ON(sas_port_add(phy->port) != 0);
684
685
686	switch (phy->attached_dev_type) {
687	case EDGE_DEV:
688		rphy = sas_expander_alloc(phy->port,
689					  SAS_EDGE_EXPANDER_DEVICE);
690		break;
691	case FANOUT_DEV:
692		rphy = sas_expander_alloc(phy->port,
693					  SAS_FANOUT_EXPANDER_DEVICE);
694		break;
695	default:
696		rphy = NULL;	 /* shut gcc up */
697		BUG();
698	}
699	port = parent->port;
700	child->rphy = rphy;
701	edev = rphy_to_expander_device(rphy);
702	child->dev_type = phy->attached_dev_type;
703	child->parent = parent;
704	child->port = port;
705	child->iproto = phy->attached_iproto;
706	child->tproto = phy->attached_tproto;
707	memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
708	sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
709	sas_ex_get_linkrate(parent, child, phy);
	/* track topology depth so discovery can walk level by level */
710	edev->level = parent_ex->level + 1;
711	parent->port->disc.max_level = max(parent->port->disc.max_level,
712					   edev->level);
713	sas_init_dev(child);
714	sas_fill_in_rphy(child, rphy);
715	sas_rphy_add(rphy);
716
717	spin_lock(&parent->port->dev_list_lock);
718	list_add_tail(&child->dev_list_node, &parent->port->dev_list);
719	spin_unlock(&parent->port->dev_list_lock);
720
721	res = sas_discover_expander(child);
722	if (res) {
		/* NOTE(review): child is freed while still linked on
		 * dev_list and with its rphy registered — dangling
		 * references; confirm against later upstream fixes. */
723		kfree(child);
724		return NULL;
725	}
726	list_add_tail(&child->siblings, &parent->ex_dev.children);
727	return child;
728}
729
/*
 * Discover whatever is attached to expander phy @phy_id of @dev:
 * resolve parent/domain back-links, configure routing, then dispatch to
 * end-device or expander discovery by attached device type.  Marks all
 * sibling phys attached to the same child as discovered.
 */
730static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
731{
732	struct expander_device *ex = &dev->ex_dev;
733	struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
734	struct domain_device *child = NULL;
735	int res = 0;
736
737	/* Phy state */
738	if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) {
		/* kick the phy out of spinup hold, then re-DISCOVER it */
739		if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL))
740			res = sas_ex_phy_discover(dev, phy_id);
741		if (res)
742			return res;
743	}
744
745	/* Parent and domain coherency */
	/* a phy pointing back at the HA or at our own parent is an
	 * up-link, not a new device */
746	if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
747			     SAS_ADDR(dev->port->sas_addr))) {
748		sas_add_parent_port(dev, phy_id);
749		return 0;
750	}
751	if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
752			    SAS_ADDR(dev->parent->sas_addr))) {
753		sas_add_parent_port(dev, phy_id);
754		if (ex_phy->routing_attr == TABLE_ROUTING)
755			sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1);
756		return 0;
757	}
758
	/* duplicate address elsewhere in the domain: shut this port down */
759	if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
760		sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
761
762	if (ex_phy->attached_dev_type == NO_DEVICE) {
763		if (ex_phy->routing_attr == DIRECT_ROUTING) {
764			memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
765			sas_configure_routing(dev, ex_phy->attached_sas_addr);
766		}
767		return 0;
768	} else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
769		return 0;
770
771	if (ex_phy->attached_dev_type != SAS_END_DEV &&
772	    ex_phy->attached_dev_type != FANOUT_DEV &&
773	    ex_phy->attached_dev_type != EDGE_DEV) {
774		SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
775			    "phy 0x%x\n", ex_phy->attached_dev_type,
776			    SAS_ADDR(dev->sas_addr),
777			    phy_id);
778		return 0;
779	}
780
781	res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
782	if (res) {
783		SAS_DPRINTK("configure routing for dev %016llx "
784			    "reported 0x%x. Forgotten\n",
785			    SAS_ADDR(ex_phy->attached_sas_addr), res);
786		sas_disable_routing(dev, ex_phy->attached_sas_addr);
787		return res;
788	}
789
790	switch (ex_phy->attached_dev_type) {
791	case SAS_END_DEV:
792		child = sas_ex_discover_end_dev(dev, phy_id);
793		break;
794	case FANOUT_DEV:
		/* only one fanout expander is allowed per domain */
795		if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
796			SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
797				    "attached to ex %016llx phy 0x%x\n",
798				    SAS_ADDR(ex_phy->attached_sas_addr),
799				    ex_phy->attached_phy_id,
800				    SAS_ADDR(dev->sas_addr),
801				    phy_id);
802			sas_ex_disable_phy(dev, phy_id);
803			break;
804		} else
805			memcpy(dev->port->disc.fanout_sas_addr,
806			       ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
807		/* fallthrough */
808	case EDGE_DEV:
809		child = sas_ex_discover_expander(dev, phy_id);
810		break;
811	default:
812		break;
813	}
814
815	if (child) {
816		int i;
817
		/* all phys forming a wide port to this child are done */
818		for (i = 0; i < ex->num_phys; i++) {
819			if (ex->ex_phy[i].phy_state == PHY_VACANT ||
820			    ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
821				continue;
822
823			if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
824			    SAS_ADDR(child->sas_addr))
825				ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
826		}
827	}
828
829	return res;
830}
831
832static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
833{
834 struct expander_device *ex = &dev->ex_dev;
835 int i;
836
837 for (i = 0; i < ex->num_phys; i++) {
838 struct ex_phy *phy = &ex->ex_phy[i];
839
840 if (phy->phy_state == PHY_VACANT ||
841 phy->phy_state == PHY_NOT_PRESENT)
842 continue;
843
844 if ((phy->attached_dev_type == EDGE_DEV ||
845 phy->attached_dev_type == FANOUT_DEV) &&
846 phy->routing_attr == SUBTRACTIVE_ROUTING) {
847
848 memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
849
850 return 1;
851 }
852 }
853 return 0;
854}
855
/*
 * Verify that all child expanders of @dev share the same subtractive
 * routing boundary.  The first child's subtractive address becomes the
 * reference; any child diverging from it has its offending port
 * disabled.  Always returns 0.
 */
856static int sas_check_level_subtractive_boundary(struct domain_device *dev)
857{
858	struct expander_device *ex = &dev->ex_dev;
859	struct domain_device *child;
860	u8 sub_addr[8] = {0, };
861
862	list_for_each_entry(child, &ex->children, siblings) {
863		if (child->dev_type != EDGE_DEV &&
864		    child->dev_type != FANOUT_DEV)
865			continue;
866		if (sub_addr[0] == 0) {
			/* first expander child: record the reference */
867			sas_find_sub_addr(child, sub_addr);
868			continue;
869		} else {
870			u8 s2[8];
871
872			if (sas_find_sub_addr(child, s2) &&
873			    (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
874
875				SAS_DPRINTK("ex %016llx->%016llx-?->%016llx "
876					    "diverges from subtractive "
877					    "boundary %016llx\n",
878					    SAS_ADDR(dev->sas_addr),
879					    SAS_ADDR(child->sas_addr),
880					    SAS_ADDR(s2),
881					    SAS_ADDR(sub_addr));
882
883				sas_ex_disable_port(child, s2);
884			}
885		}
886	}
887	return 0;
888}
889/**
890 * sas_ex_discover_devices -- discover devices attached to this expander
891 * dev: pointer to the expander domain device
892 * single: if you want to do a single phy, else set to -1;
893 *
894 * Configure this expander for use with its devices and register the
895 * devices of this expander.
896 */
897static int sas_ex_discover_devices(struct domain_device *dev, int single)
898{
899 struct expander_device *ex = &dev->ex_dev;
900 int i = 0, end = ex->num_phys;
901 int res = 0;
902
903 if (0 <= single && single < end) {
904 i = single;
905 end = i+1;
906 }
907
908 for ( ; i < end; i++) {
909 struct ex_phy *ex_phy = &ex->ex_phy[i];
910
911 if (ex_phy->phy_state == PHY_VACANT ||
912 ex_phy->phy_state == PHY_NOT_PRESENT ||
913 ex_phy->phy_state == PHY_DEVICE_DISCOVERED)
914 continue;
915
916 switch (ex_phy->linkrate) {
917 case SAS_PHY_DISABLED:
918 case SAS_PHY_RESET_PROBLEM:
919 case SAS_SATA_PORT_SELECTOR:
920 continue;
921 default:
922 res = sas_ex_discover_dev(dev, i);
923 if (res)
924 break;
925 continue;
926 }
927 }
928
929 if (!res)
930 sas_check_level_subtractive_boundary(dev);
931
932 return res;
933}
934
935static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
936{
937 struct expander_device *ex = &dev->ex_dev;
938 int i;
939 u8 *sub_sas_addr = NULL;
940
941 if (dev->dev_type != EDGE_DEV)
942 return 0;
943
944 for (i = 0; i < ex->num_phys; i++) {
945 struct ex_phy *phy = &ex->ex_phy[i];
946
947 if (phy->phy_state == PHY_VACANT ||
948 phy->phy_state == PHY_NOT_PRESENT)
949 continue;
950
951 if ((phy->attached_dev_type == FANOUT_DEV ||
952 phy->attached_dev_type == EDGE_DEV) &&
953 phy->routing_attr == SUBTRACTIVE_ROUTING) {
954
955 if (!sub_sas_addr)
956 sub_sas_addr = &phy->attached_sas_addr[0];
957 else if (SAS_ADDR(sub_sas_addr) !=
958 SAS_ADDR(phy->attached_sas_addr)) {
959
960 SAS_DPRINTK("ex %016llx phy 0x%x "
961 "diverges(%016llx) on subtractive "
962 "boundary(%016llx). Disabled\n",
963 SAS_ADDR(dev->sas_addr), i,
964 SAS_ADDR(phy->attached_sas_addr),
965 SAS_ADDR(sub_sas_addr));
966 sas_ex_disable_phy(dev, i);
967 }
968 }
969 }
970 return 0;
971}
972
/* Log a routing-attribute violation on the link between @child and
 * its parent expander, naming both endpoints and the two routing
 * attributes (D/S/T) of the offending phys.
 */
static void sas_print_parent_topology_bug(struct domain_device *child,
						 struct ex_phy *parent_phy,
						 struct ex_phy *child_phy)
{
	/* Lookup tables indexed directly by routing attribute and by
	 * expander device type. */
	static const char ra_char[] = {
		[DIRECT_ROUTING] = 'D',
		[SUBTRACTIVE_ROUTING] = 'S',
		[TABLE_ROUTING] = 'T',
	};
	static const char *ex_type[] = {
		[EDGE_DEV] = "edge",
		[FANOUT_DEV] = "fanout",
	};
	struct domain_device *parent = child->parent;

	sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x "
		   "has %c:%c routing link!\n",

		   ex_type[parent->dev_type],
		   SAS_ADDR(parent->sas_addr),
		   parent_phy->phy_id,

		   ex_type[child->dev_type],
		   SAS_ADDR(child->sas_addr),
		   child_phy->phy_id,

		   ra_char[parent_phy->routing_attr],
		   ra_char[child_phy->routing_attr]);
}
1002
/* Validate an edge-to-edge subtractive-to-subtractive (EEDS) link
 * between @child and its parent.  A domain may contain at most one
 * EEDS pair, and an EEDS link is illegal while a fanout expander is
 * present.  The first pair seen is recorded in the port's disc area.
 * Returns 0 when consistent, -ENODEV otherwise.
 */
static int sas_check_eeds(struct domain_device *child,
				 struct ex_phy *parent_phy,
				 struct ex_phy *child_phy)
{
	int res = 0;
	struct domain_device *parent = child->parent;

	if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
		/* An EEDS link cannot coexist with a fanout expander. */
		res = -ENODEV;
		SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx "
			    "phy S:0x%x, while there is a fanout ex %016llx\n",
			    SAS_ADDR(parent->sas_addr),
			    parent_phy->phy_id,
			    SAS_ADDR(child->sas_addr),
			    child_phy->phy_id,
			    SAS_ADDR(parent->port->disc.fanout_sas_addr));
	} else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
		/* First EEDS link in the domain: record both ends. */
		memcpy(parent->port->disc.eeds_a, parent->sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(parent->port->disc.eeds_b, child->sas_addr,
		       SAS_ADDR_SIZE);
	} else if (((SAS_ADDR(parent->port->disc.eeds_a) ==
		    SAS_ADDR(parent->sas_addr)) ||
		   (SAS_ADDR(parent->port->disc.eeds_a) ==
		    SAS_ADDR(child->sas_addr)))
		   &&
		   ((SAS_ADDR(parent->port->disc.eeds_b) ==
		     SAS_ADDR(parent->sas_addr)) ||
		    (SAS_ADDR(parent->port->disc.eeds_b) ==
		     SAS_ADDR(child->sas_addr))))
		/* Same EEDS pair seen again -- legal, nothing to do. */
		;
	else {
		/* A second, distinct EEDS pair is a topology error. */
		res = -ENODEV;
		SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx "
			    "phy 0x%x link forms a third EEDS!\n",
			    SAS_ADDR(parent->sas_addr),
			    parent_phy->phy_id,
			    SAS_ADDR(child->sas_addr),
			    child_phy->phy_id);
	}

	return res;
}
1046
1047/* Here we spill over 80 columns. It is intentional.
1048 */
/* Validate the routing attributes on every link between @child and
 * its parent expander:
 *   edge parent -> fanout child : parent S, child T
 *   edge parent -> edge child   : S:S only as the single EEDS pair
 *                                 (checked by sas_check_eeds()),
 *                                 otherwise S pairs with T and T with S
 *   fanout parent -> edge child : parent T, child S
 * Returns 0 when consistent, -ENODEV after logging each violation.
 */
static int sas_check_parent_topology(struct domain_device *child)
{
	struct expander_device *child_ex = &child->ex_dev;
	struct expander_device *parent_ex;
	int i;
	int res = 0;

	if (!child->parent)
		return 0;

	/* Only expander parents carry routing attributes to check. */
	if (child->parent->dev_type != EDGE_DEV &&
	    child->parent->dev_type != FANOUT_DEV)
		return 0;

	parent_ex = &child->parent->ex_dev;

	for (i = 0; i < parent_ex->num_phys; i++) {
		struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
		struct ex_phy *child_phy;

		if (parent_phy->phy_state == PHY_VACANT ||
		    parent_phy->phy_state == PHY_NOT_PRESENT)
			continue;

		/* Only parent phys attached to @child are of interest. */
		if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr))
			continue;

		child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];

		switch (child->parent->dev_type) {
		case EDGE_DEV:
			if (child->dev_type == FANOUT_DEV) {
				/* edge->fanout must be S (parent) : T (child). */
				if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
				    child_phy->routing_attr != TABLE_ROUTING) {
					sas_print_parent_topology_bug(child, parent_phy, child_phy);
					res = -ENODEV;
				}
			} else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
				if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) {
					/* S:S between edges is legal only as
					 * the domain's single EEDS pair. */
					res = sas_check_eeds(child, parent_phy, child_phy);
				} else if (child_phy->routing_attr != TABLE_ROUTING) {
					sas_print_parent_topology_bug(child, parent_phy, child_phy);
					res = -ENODEV;
				}
			} else if (parent_phy->routing_attr == TABLE_ROUTING &&
				   child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
				/* T on the parent must face S on the child. */
				sas_print_parent_topology_bug(child, parent_phy, child_phy);
				res = -ENODEV;
			}
			break;
		case FANOUT_DEV:
			/* fanout->edge must be T (parent) : S (child). */
			if (parent_phy->routing_attr != TABLE_ROUTING ||
			    child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
				sas_print_parent_topology_bug(child, parent_phy, child_phy);
				res = -ENODEV;
			}
			break;
		default:
			break;
		}
	}

	return res;
}
1113
1114#define RRI_REQ_SIZE 16
1115#define RRI_RESP_SIZE 44
1116
1117static int sas_configure_present(struct domain_device *dev, int phy_id,
1118 u8 *sas_addr, int *index, int *present)
1119{
1120 int i, res = 0;
1121 struct expander_device *ex = &dev->ex_dev;
1122 struct ex_phy *phy = &ex->ex_phy[phy_id];
1123 u8 *rri_req;
1124 u8 *rri_resp;
1125
1126 *present = 0;
1127 *index = 0;
1128
1129 rri_req = alloc_smp_req(RRI_REQ_SIZE);
1130 if (!rri_req)
1131 return -ENOMEM;
1132
1133 rri_resp = alloc_smp_resp(RRI_RESP_SIZE);
1134 if (!rri_resp) {
1135 kfree(rri_req);
1136 return -ENOMEM;
1137 }
1138
1139 rri_req[1] = SMP_REPORT_ROUTE_INFO;
1140 rri_req[9] = phy_id;
1141
1142 for (i = 0; i < ex->max_route_indexes ; i++) {
1143 *(__be16 *)(rri_req+6) = cpu_to_be16(i);
1144 res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp,
1145 RRI_RESP_SIZE);
1146 if (res)
1147 goto out;
1148 res = rri_resp[2];
1149 if (res == SMP_RESP_NO_INDEX) {
1150 SAS_DPRINTK("overflow of indexes: dev %016llx "
1151 "phy 0x%x index 0x%x\n",
1152 SAS_ADDR(dev->sas_addr), phy_id, i);
1153 goto out;
1154 } else if (res != SMP_RESP_FUNC_ACC) {
1155 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
1156 "result 0x%x\n", __FUNCTION__,
1157 SAS_ADDR(dev->sas_addr), phy_id, i, res);
1158 goto out;
1159 }
1160 if (SAS_ADDR(sas_addr) != 0) {
1161 if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) {
1162 *index = i;
1163 if ((rri_resp[12] & 0x80) == 0x80)
1164 *present = 0;
1165 else
1166 *present = 1;
1167 goto out;
1168 } else if (SAS_ADDR(rri_resp+16) == 0) {
1169 *index = i;
1170 *present = 0;
1171 goto out;
1172 }
1173 } else if (SAS_ADDR(rri_resp+16) == 0 &&
1174 phy->last_da_index < i) {
1175 phy->last_da_index = i;
1176 *index = i;
1177 *present = 0;
1178 goto out;
1179 }
1180 }
1181 res = -1;
1182out:
1183 kfree(rri_req);
1184 kfree(rri_resp);
1185 return res;
1186}
1187
1188#define CRI_REQ_SIZE 44
1189#define CRI_RESP_SIZE 8
1190
1191static int sas_configure_set(struct domain_device *dev, int phy_id,
1192 u8 *sas_addr, int index, int include)
1193{
1194 int res;
1195 u8 *cri_req;
1196 u8 *cri_resp;
1197
1198 cri_req = alloc_smp_req(CRI_REQ_SIZE);
1199 if (!cri_req)
1200 return -ENOMEM;
1201
1202 cri_resp = alloc_smp_resp(CRI_RESP_SIZE);
1203 if (!cri_resp) {
1204 kfree(cri_req);
1205 return -ENOMEM;
1206 }
1207
1208 cri_req[1] = SMP_CONF_ROUTE_INFO;
1209 *(__be16 *)(cri_req+6) = cpu_to_be16(index);
1210 cri_req[9] = phy_id;
1211 if (SAS_ADDR(sas_addr) == 0 || !include)
1212 cri_req[12] |= 0x80;
1213 memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE);
1214
1215 res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp,
1216 CRI_RESP_SIZE);
1217 if (res)
1218 goto out;
1219 res = cri_resp[2];
1220 if (res == SMP_RESP_NO_INDEX) {
1221 SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x "
1222 "index 0x%x\n",
1223 SAS_ADDR(dev->sas_addr), phy_id, index);
1224 }
1225out:
1226 kfree(cri_req);
1227 kfree(cri_resp);
1228 return res;
1229}
1230
1231static int sas_configure_phy(struct domain_device *dev, int phy_id,
1232 u8 *sas_addr, int include)
1233{
1234 int index;
1235 int present;
1236 int res;
1237
1238 res = sas_configure_present(dev, phy_id, sas_addr, &index, &present);
1239 if (res)
1240 return res;
1241 if (include ^ present)
1242 return sas_configure_set(dev, phy_id, sas_addr, index,include);
1243
1244 return res;
1245}
1246
1247/**
1248 * sas_configure_parent -- configure routing table of parent
1249 * parent: parent expander
1250 * child: child expander
1251 * sas_addr: SAS port identifier of device directly attached to child
1252 */
1253static int sas_configure_parent(struct domain_device *parent,
1254 struct domain_device *child,
1255 u8 *sas_addr, int include)
1256{
1257 struct expander_device *ex_parent = &parent->ex_dev;
1258 int res = 0;
1259 int i;
1260
1261 if (parent->parent) {
1262 res = sas_configure_parent(parent->parent, parent, sas_addr,
1263 include);
1264 if (res)
1265 return res;
1266 }
1267
1268 if (ex_parent->conf_route_table == 0) {
1269 SAS_DPRINTK("ex %016llx has self-configuring routing table\n",
1270 SAS_ADDR(parent->sas_addr));
1271 return 0;
1272 }
1273
1274 for (i = 0; i < ex_parent->num_phys; i++) {
1275 struct ex_phy *phy = &ex_parent->ex_phy[i];
1276
1277 if ((phy->routing_attr == TABLE_ROUTING) &&
1278 (SAS_ADDR(phy->attached_sas_addr) ==
1279 SAS_ADDR(child->sas_addr))) {
1280 res = sas_configure_phy(parent, i, sas_addr, include);
1281 if (res)
1282 return res;
1283 }
1284 }
1285
1286 return res;
1287}
1288
1289/**
1290 * sas_configure_routing -- configure routing
1291 * dev: expander device
1292 * sas_addr: port identifier of device directly attached to the expander device
1293 */
1294static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr)
1295{
1296 if (dev->parent)
1297 return sas_configure_parent(dev->parent, dev, sas_addr, 1);
1298 return 0;
1299}
1300
1301static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
1302{
1303 if (dev->parent)
1304 return sas_configure_parent(dev->parent, dev, sas_addr, 0);
1305 return 0;
1306}
1307
1308#if 0
1309#define SMP_BIN_ATTR_NAME "smp_portal"
1310
/* (Currently compiled out by the surrounding #if 0.)  Populate the
 * expander's sysfs binary attribute used as a raw SMP pass-through
 * portal, and initialize the portal bookkeeping (owner pid, semaphore).
 */
static void sas_ex_smp_hook(struct domain_device *dev)
{
	struct expander_device *ex_dev = &dev->ex_dev;
	struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr;

	memset(bin_attr, 0, sizeof(*bin_attr));

	bin_attr->attr.name = SMP_BIN_ATTR_NAME;
	bin_attr->attr.owner = THIS_MODULE;
	bin_attr->attr.mode = 0600;

	bin_attr->size = 0;
	bin_attr->private = NULL;
	bin_attr->read = smp_portal_read;
	bin_attr->write= smp_portal_write;
	bin_attr->mmap = NULL;

	/* -1 marks the portal as unowned; see smp_portal_read(). */
	ex_dev->smp_portal_pid = -1;
	init_MUTEX(&ex_dev->smp_sema);
}
1331#endif
1332
1333/**
1334 * sas_discover_expander -- expander discovery
1335 * @ex: pointer to expander domain device
1336 *
1337 * See comment in sas_discover_sata().
1338 */
static int sas_discover_expander(struct domain_device *dev)
{
	int res;

	/* Let the LLDD know about the device before interrogating it;
	 * the dev-gone notification below undoes this on any failure. */
	res = sas_notify_lldd_dev_found(dev);
	if (res)
		return res;

	/* REPORT GENERAL, then MANUFACTURER INFO, then phy discovery. */
	res = sas_ex_general(dev);
	if (res)
		goto out_err;
	res = sas_ex_manuf_info(dev);
	if (res)
		goto out_err;

	res = sas_expander_discover(dev);
	if (res) {
		SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n",
			    SAS_ADDR(dev->sas_addr), res);
		goto out_err;
	}

	/* Topology sanity checks; the subtractive-boundary check only
	 * disables diverging phys, so its result is ignored. */
	sas_check_ex_subtractive_boundary(dev);
	res = sas_check_parent_topology(dev);
	if (res)
		goto out_err;
	return 0;
out_err:
	sas_notify_lldd_dev_gone(dev);
	return res;
}
1370
/* Run device discovery on every expander in @port's domain that sits
 * at BFS depth @level.  Returns the last discovery result.
 */
static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
{
	int res = 0;
	struct domain_device *dev;

	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		if (dev->dev_type == EDGE_DEV ||
		    dev->dev_type == FANOUT_DEV) {
			struct sas_expander_device *ex =
				rphy_to_expander_device(dev->rphy);

			if (level == ex->level)
				res = sas_ex_discover_devices(dev, -1);
			else if (level > 0)
				/* NOTE(review): expanders not at @level
				 * trigger a re-discovery from the root
				 * port device instead -- presumably to
				 * pick up phys that changed during the
				 * deeper pass; confirm intent. */
				res = sas_ex_discover_devices(port->port_dev, -1);

		}
	}

	return res;
}
1392
/* Breadth-first expander discovery: repeatedly discover the deepest
 * known level while discovery itself keeps raising disc.max_level.
 */
static int sas_ex_bfs_disc(struct asd_sas_port *port)
{
	int res;
	int level;

	do {
		level = port->disc.max_level;
		res = sas_ex_level_discovery(port, level);
		/* max_level may have been raised by the pass above;
		 * mb() presumably forces a fresh read in the loop
		 * condition -- TODO confirm the intended ordering. */
		mb();
	} while (level < port->disc.max_level);

	return res;
}
1406
1407int sas_discover_root_expander(struct domain_device *dev)
1408{
1409 int res;
1410 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
1411
1412 sas_rphy_add(dev->rphy);
1413
1414 ex->level = dev->port->disc.max_level; /* 0 */
1415 res = sas_discover_expander(dev);
1416 if (!res)
1417 sas_ex_bfs_disc(dev->port);
1418
1419 return res;
1420}
1421
1422/* ---------- Domain revalidation ---------- */
1423
1424static int sas_get_phy_discover(struct domain_device *dev,
1425 int phy_id, struct smp_resp *disc_resp)
1426{
1427 int res;
1428 u8 *disc_req;
1429
1430 disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
1431 if (!disc_req)
1432 return -ENOMEM;
1433
1434 disc_req[1] = SMP_DISCOVER;
1435 disc_req[9] = phy_id;
1436
1437 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
1438 disc_resp, DISCOVER_RESP_SIZE);
1439 if (res)
1440 goto out;
1441 else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
1442 res = disc_resp->result;
1443 goto out;
1444 }
1445out:
1446 kfree(disc_req);
1447 return res;
1448}
1449
1450static int sas_get_phy_change_count(struct domain_device *dev,
1451 int phy_id, int *pcc)
1452{
1453 int res;
1454 struct smp_resp *disc_resp;
1455
1456 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
1457 if (!disc_resp)
1458 return -ENOMEM;
1459
1460 res = sas_get_phy_discover(dev, phy_id, disc_resp);
1461 if (!res)
1462 *pcc = disc_resp->disc.change_count;
1463
1464 kfree(disc_resp);
1465 return res;
1466}
1467
1468static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
1469 int phy_id, u8 *attached_sas_addr)
1470{
1471 int res;
1472 struct smp_resp *disc_resp;
1473 struct discover_resp *dr;
1474
1475 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
1476 if (!disc_resp)
1477 return -ENOMEM;
1478 dr = &disc_resp->disc;
1479
1480 res = sas_get_phy_discover(dev, phy_id, disc_resp);
1481 if (!res) {
1482 memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8);
1483 if (dr->attached_dev_type == 0)
1484 memset(attached_sas_addr, 0, 8);
1485 }
1486 kfree(disc_resp);
1487 return res;
1488}
1489
1490static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1491 int from_phy)
1492{
1493 struct expander_device *ex = &dev->ex_dev;
1494 int res = 0;
1495 int i;
1496
1497 for (i = from_phy; i < ex->num_phys; i++) {
1498 int phy_change_count = 0;
1499
1500 res = sas_get_phy_change_count(dev, i, &phy_change_count);
1501 if (res)
1502 goto out;
1503 else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1504 ex->ex_phy[i].phy_change_count = phy_change_count;
1505 *phy_id = i;
1506 return 0;
1507 }
1508 }
1509out:
1510 return res;
1511}
1512
1513static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
1514{
1515 int res;
1516 u8 *rg_req;
1517 struct smp_resp *rg_resp;
1518
1519 rg_req = alloc_smp_req(RG_REQ_SIZE);
1520 if (!rg_req)
1521 return -ENOMEM;
1522
1523 rg_resp = alloc_smp_resp(RG_RESP_SIZE);
1524 if (!rg_resp) {
1525 kfree(rg_req);
1526 return -ENOMEM;
1527 }
1528
1529 rg_req[1] = SMP_REPORT_GENERAL;
1530
1531 res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
1532 RG_RESP_SIZE);
1533 if (res)
1534 goto out;
1535 if (rg_resp->result != SMP_RESP_FUNC_ACC) {
1536 res = rg_resp->result;
1537 goto out;
1538 }
1539
1540 *ecc = be16_to_cpu(rg_resp->rg.change_count);
1541out:
1542 kfree(rg_resp);
1543 kfree(rg_req);
1544 return res;
1545}
1546
1547static int sas_find_bcast_dev(struct domain_device *dev,
1548 struct domain_device **src_dev)
1549{
1550 struct expander_device *ex = &dev->ex_dev;
1551 int ex_change_count = -1;
1552 int res;
1553
1554 res = sas_get_ex_change_count(dev, &ex_change_count);
1555 if (res)
1556 goto out;
1557 if (ex_change_count != -1 &&
1558 ex_change_count != ex->ex_change_count) {
1559 *src_dev = dev;
1560 ex->ex_change_count = ex_change_count;
1561 } else {
1562 struct domain_device *ch;
1563
1564 list_for_each_entry(ch, &ex->children, siblings) {
1565 if (ch->dev_type == EDGE_DEV ||
1566 ch->dev_type == FANOUT_DEV) {
1567 res = sas_find_bcast_dev(ch, src_dev);
1568 if (src_dev)
1569 return res;
1570 }
1571 }
1572 }
1573out:
1574 return res;
1575}
1576
1577static void sas_unregister_ex_tree(struct domain_device *dev)
1578{
1579 struct expander_device *ex = &dev->ex_dev;
1580 struct domain_device *child, *n;
1581
1582 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1583 if (child->dev_type == EDGE_DEV ||
1584 child->dev_type == FANOUT_DEV)
1585 sas_unregister_ex_tree(child);
1586 else
1587 sas_unregister_dev(child);
1588 }
1589 sas_unregister_dev(dev);
1590}
1591
/* Tear down whatever is attached at @phy_id of expander @parent:
 * unregister the child (recursively for expanders), remove the route
 * entries from ancestors, clear the cached attached address and drop
 * the phy from its transport port, deleting the port when it empties.
 */
static void sas_unregister_devs_sas_addr(struct domain_device *parent,
					 int phy_id)
{
	struct expander_device *ex_dev = &parent->ex_dev;
	struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
	struct domain_device *child, *n;

	list_for_each_entry_safe(child, n, &ex_dev->children, siblings) {
		if (SAS_ADDR(child->sas_addr) ==
		    SAS_ADDR(phy->attached_sas_addr)) {
			if (child->dev_type == EDGE_DEV ||
			    child->dev_type == FANOUT_DEV)
				sas_unregister_ex_tree(child);
			else
				sas_unregister_dev(child);
			break;
		}
	}
	/* Routing must be torn down before the cached address is
	 * wiped, since it keys off phy->attached_sas_addr. */
	sas_disable_routing(parent, phy->attached_sas_addr);
	memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
	sas_port_delete_phy(phy->port, phy->phy);
	if (phy->port->num_phys == 0)
		sas_port_delete(phy->port);
	phy->port = NULL;
}
1617
1618static int sas_discover_bfs_by_root_level(struct domain_device *root,
1619 const int level)
1620{
1621 struct expander_device *ex_root = &root->ex_dev;
1622 struct domain_device *child;
1623 int res = 0;
1624
1625 list_for_each_entry(child, &ex_root->children, siblings) {
1626 if (child->dev_type == EDGE_DEV ||
1627 child->dev_type == FANOUT_DEV) {
1628 struct sas_expander_device *ex =
1629 rphy_to_expander_device(child->rphy);
1630
1631 if (level > ex->level)
1632 res = sas_discover_bfs_by_root_level(child,
1633 level);
1634 else if (level == ex->level)
1635 res = sas_ex_discover_devices(child, -1);
1636 }
1637 }
1638 return res;
1639}
1640
1641static int sas_discover_bfs_by_root(struct domain_device *dev)
1642{
1643 int res;
1644 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
1645 int level = ex->level+1;
1646
1647 res = sas_ex_discover_devices(dev, -1);
1648 if (res)
1649 goto out;
1650 do {
1651 res = sas_discover_bfs_by_root_level(dev, level);
1652 mb();
1653 level += 1;
1654 } while (level <= dev->port->disc.max_level);
1655out:
1656 return res;
1657}
1658
1659static int sas_discover_new(struct domain_device *dev, int phy_id)
1660{
1661 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
1662 struct domain_device *child;
1663 int res;
1664
1665 SAS_DPRINTK("ex %016llx phy%d new device attached\n",
1666 SAS_ADDR(dev->sas_addr), phy_id);
1667 res = sas_ex_phy_discover(dev, phy_id);
1668 if (res)
1669 goto out;
1670 res = sas_ex_discover_devices(dev, phy_id);
1671 if (res)
1672 goto out;
1673 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1674 if (SAS_ADDR(child->sas_addr) ==
1675 SAS_ADDR(ex_phy->attached_sas_addr)) {
1676 if (child->dev_type == EDGE_DEV ||
1677 child->dev_type == FANOUT_DEV)
1678 res = sas_discover_bfs_by_root(child);
1679 break;
1680 }
1681 }
1682out:
1683 return res;
1684}
1685
1686static int sas_rediscover_dev(struct domain_device *dev, int phy_id)
1687{
1688 struct expander_device *ex = &dev->ex_dev;
1689 struct ex_phy *phy = &ex->ex_phy[phy_id];
1690 u8 attached_sas_addr[8];
1691 int res;
1692
1693 res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr);
1694 switch (res) {
1695 case SMP_RESP_NO_PHY:
1696 phy->phy_state = PHY_NOT_PRESENT;
1697 sas_unregister_devs_sas_addr(dev, phy_id);
1698 goto out; break;
1699 case SMP_RESP_PHY_VACANT:
1700 phy->phy_state = PHY_VACANT;
1701 sas_unregister_devs_sas_addr(dev, phy_id);
1702 goto out; break;
1703 case SMP_RESP_FUNC_ACC:
1704 break;
1705 }
1706
1707 if (SAS_ADDR(attached_sas_addr) == 0) {
1708 phy->phy_state = PHY_EMPTY;
1709 sas_unregister_devs_sas_addr(dev, phy_id);
1710 } else if (SAS_ADDR(attached_sas_addr) ==
1711 SAS_ADDR(phy->attached_sas_addr)) {
1712 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
1713 SAS_ADDR(dev->sas_addr), phy_id);
1714 sas_ex_phy_discover(dev, phy_id);
1715 } else
1716 res = sas_discover_new(dev, phy_id);
1717out:
1718 return res;
1719}
1720
1721static int sas_rediscover(struct domain_device *dev, const int phy_id)
1722{
1723 struct expander_device *ex = &dev->ex_dev;
1724 struct ex_phy *changed_phy = &ex->ex_phy[phy_id];
1725 int res = 0;
1726 int i;
1727
1728 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
1729 SAS_ADDR(dev->sas_addr), phy_id);
1730
1731 if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
1732 for (i = 0; i < ex->num_phys; i++) {
1733 struct ex_phy *phy = &ex->ex_phy[i];
1734
1735 if (i == phy_id)
1736 continue;
1737 if (SAS_ADDR(phy->attached_sas_addr) ==
1738 SAS_ADDR(changed_phy->attached_sas_addr)) {
1739 SAS_DPRINTK("phy%d part of wide port with "
1740 "phy%d\n", phy_id, i);
1741 goto out;
1742 }
1743 }
1744 res = sas_rediscover_dev(dev, phy_id);
1745 } else
1746 res = sas_discover_new(dev, phy_id);
1747out:
1748 return res;
1749}
1750
1751/**
1752 * sas_revalidate_domain -- revalidate the domain
1753 * @port: port to the domain of interest
1754 *
1755 * NOTE: this process _must_ quit (return) as soon as any connection
1756 * errors are encountered. Connection recovery is done elsewhere.
1757 * Discover process only interrogates devices in order to discover the
1758 * domain.
1759 */
int sas_ex_revalidate_domain(struct domain_device *port_dev)
{
	int res;
	struct domain_device *dev = NULL;

	/* Locate the expander that originated the BROADCAST(CHANGE). */
	res = sas_find_bcast_dev(port_dev, &dev);
	if (res)
		goto out;
	if (dev) {
		struct expander_device *ex = &dev->ex_dev;
		int i = 0, phy_id;

		/* Handle every phy whose change count moved, resuming
		 * the scan just past the last phy handled. */
		do {
			phy_id = -1;
			res = sas_find_bcast_phy(dev, &phy_id, i);
			if (phy_id == -1)
				break;
			res = sas_rediscover(dev, phy_id);
			i = phy_id + 1;
		} while (i < ex->num_phys);
	}
out:
	return res;
}
1784
1785#if 0
1786/* ---------- SMP portal ---------- */
1787
/* (Compiled out by the surrounding #if 0.)  sysfs write half of the
 * SMP portal: stash the raw SMP request and record the writing
 * process, so the subsequent read by the same pid executes it.
 */
static ssize_t smp_portal_write(struct kobject *kobj, char *buf, loff_t offs,
				size_t size)
{
	struct domain_device *dev = to_dom_device(kobj);
	struct expander_device *ex = &dev->ex_dev;

	if (offs != 0)
		return -EFBIG;
	else if (size == 0)
		return 0;

	/* NOTE(review): return value of down_interruptible() is
	 * ignored -- a delivered signal would let this proceed without
	 * holding the semaphore.  Confirm before re-enabling. */
	down_interruptible(&ex->smp_sema);
	if (ex->smp_req)
		kfree(ex->smp_req);
	ex->smp_req = kzalloc(size, GFP_USER);
	if (!ex->smp_req) {
		up(&ex->smp_sema);
		return -ENOMEM;
	}
	memcpy(ex->smp_req, buf, size);
	ex->smp_req_size = size;
	ex->smp_portal_pid = current->pid;
	up(&ex->smp_sema);

	return size;
}
1814
/* (Compiled out by the surrounding #if 0.)  sysfs read half of the
 * SMP portal: execute the request stashed by smp_portal_write() for
 * this same pid and return the SMP response.  The stashed request is
 * consumed (freed) whether or not execution happened.
 */
static ssize_t smp_portal_read(struct kobject *kobj, char *buf, loff_t offs,
			       size_t size)
{
	struct domain_device *dev = to_dom_device(kobj);
	struct expander_device *ex = &dev->ex_dev;
	u8 *smp_resp;
	int res = -EINVAL;

	/* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact
	 * it should be 0.
	 */

	/* NOTE(review): down_interruptible() result ignored here too;
	 * see smp_portal_write(). */
	down_interruptible(&ex->smp_sema);
	if (!ex->smp_req || ex->smp_portal_pid != current->pid)
		goto out;

	res = 0;
	if (size == 0)
		goto out;

	res = -ENOMEM;
	smp_resp = alloc_smp_resp(size);
	if (!smp_resp)
		goto out;
	res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size,
			       smp_resp, size);
	if (!res) {
		memcpy(buf, smp_resp, size);
		res = size;
	}

	kfree(smp_resp);
out:
	/* One-shot: release the request and the portal ownership. */
	kfree(ex->smp_req);
	ex->smp_req = NULL;
	ex->smp_req_size = 0;
	ex->smp_portal_pid = -1;
	up(&ex->smp_sema);
	return res;
}
1855#endif
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
new file mode 100644
index 000000000000..c836a237fb79
--- /dev/null
+++ b/drivers/scsi/libsas/sas_init.c
@@ -0,0 +1,267 @@
1/*
2 * Serial Attached SCSI (SAS) Transport Layer initialization
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/device.h>
29#include <linux/spinlock.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34
35#include "sas_internal.h"
36
37#include "../scsi_sas_internal.h"
38
39kmem_cache_t *sas_task_cache;
40
41/*------------ SAS addr hash -----------*/
42void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
43{
44 const u32 poly = 0x00DB2777;
45 u32 r = 0;
46 int i;
47
48 for (i = 0; i < 8; i++) {
49 int b;
50 for (b = 7; b >= 0; b--) {
51 r <<= 1;
52 if ((1 << b) & sas_addr[i]) {
53 if (!(r & 0x01000000))
54 r ^= poly;
55 } else if (r & 0x01000000)
56 r ^= poly;
57 }
58 }
59
60 hashed[0] = (r >> 16) & 0xFF;
61 hashed[1] = (r >> 8) & 0xFF ;
62 hashed[2] = r & 0xFF;
63}
64
65
66/* ---------- HA events ---------- */
67
/* HA event handler: begin processing of an HAE_RESET event for the
 * HA that posted it (@data is the sas_ha_struct).
 */
void sas_hae_reset(void *data)
{
	struct sas_ha_struct *ha = data;

	sas_begin_event(HAE_RESET, &ha->event_lock,
			&ha->pending);
}
75
/* Register an HA with libsas: hash its SAS address, sanitize the
 * LLDD queue size, then bring up phys, ports, the event thread and --
 * when the LLDD batches commands -- the queue thread.
 * Returns 0 on success or the first fatal registration error.
 */
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
	int error = 0;

	spin_lock_init(&sas_ha->phy_port_lock);
	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

	/* 0 means "unset": run with depth 1; -1 means "pick one". */
	if (sas_ha->lldd_queue_size == 0)
		sas_ha->lldd_queue_size = 1;
	else if (sas_ha->lldd_queue_size == -1)
		sas_ha->lldd_queue_size = 128; /* Sanity */

	error = sas_register_phys(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
		return error;
	}

	error = sas_register_ports(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
		goto Undo_phys;
	}

	error = sas_init_events(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
		goto Undo_ports;
	}

	/* Queue-thread failure is non-fatal: fall back to direct mode. */
	if (sas_ha->lldd_max_execute_num > 1) {
		error = sas_init_queue(sas_ha);
		if (error) {
			printk(KERN_NOTICE "couldn't start queue thread:%d, "
			       "running in direct mode\n", error);
			sas_ha->lldd_max_execute_num = 1;
		}
	}

	return 0;

Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:
	/* NOTE(review): nothing undoes sas_register_phys() here --
	 * confirm whether phy teardown is deliberately deferred. */

	return error;
}
123
124int sas_unregister_ha(struct sas_ha_struct *sas_ha)
125{
126 if (sas_ha->lldd_max_execute_num > 1) {
127 sas_shutdown_queue(sas_ha);
128 }
129
130 sas_unregister_ports(sas_ha);
131
132 return 0;
133}
134
/* Fetch phy error counters.  Local (HA-attached) phys have no stats
 * gathering yet; expander-attached phys are queried via SMP.
 */
static int sas_get_linkerrors(struct sas_phy *phy)
{
	if (scsi_is_sas_phy_local(phy))
		/* FIXME: we have no local phy stats
		 * gathering at this time */
		return -EINVAL;

	return sas_smp_get_phy_events(phy);
}
144
145static int sas_phy_reset(struct sas_phy *phy, int hard_reset)
146{
147 int ret;
148 enum phy_func reset_type;
149
150 if (hard_reset)
151 reset_type = PHY_FUNC_HARD_RESET;
152 else
153 reset_type = PHY_FUNC_LINK_RESET;
154
155 if (scsi_is_sas_phy_local(phy)) {
156 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
157 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
158 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
159 struct sas_internal *i =
160 to_sas_internal(sas_ha->core.shost->transportt);
161
162 ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
163 } else {
164 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
165 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
166 ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
167 }
168 return ret;
169}
170
/* Program new minimum/maximum link rates on @phy.  A rate of 0 means
 * "leave unchanged"; a window excluding the phy's current range is
 * rejected, and requested rates are clamped to the hardware limits.
 */
static int sas_set_phy_speed(struct sas_phy *phy,
			     struct sas_phy_linkrates *rates)
{
	int ret;

	if ((rates->minimum_linkrate &&
	     rates->minimum_linkrate > phy->maximum_linkrate) ||
	    (rates->maximum_linkrate &&
	     rates->maximum_linkrate < phy->minimum_linkrate))
		return -EINVAL;

	/* Clamp the requested window to what the hardware supports. */
	if (rates->minimum_linkrate &&
	    rates->minimum_linkrate < phy->minimum_linkrate_hw)
		rates->minimum_linkrate = phy->minimum_linkrate_hw;

	if (rates->maximum_linkrate &&
	    rates->maximum_linkrate > phy->maximum_linkrate_hw)
		rates->maximum_linkrate = phy->maximum_linkrate_hw;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
					       rates);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		/* Expander phys take new rates via an SMP PHY CONTROL
		 * LINK RESET with the rates passed as argument. */
		ret = sas_smp_phy_control(ddev, phy->number,
					  PHY_FUNC_LINK_RESET, rates);

	}

	return ret;
}
209
/* Phy-level callbacks handed to the SAS transport class. */
static struct sas_function_template sft = {
	.phy_reset = sas_phy_reset,
	.set_phy_speed = sas_set_phy_speed,
	.get_linkerrors = sas_get_linkerrors,
};
215
216struct scsi_transport_template *
217sas_domain_attach_transport(struct sas_domain_function_template *dft)
218{
219 struct scsi_transport_template *stt = sas_attach_transport(&sft);
220 struct sas_internal *i;
221
222 if (!stt)
223 return stt;
224
225 i = to_sas_internal(stt);
226 i->dft = dft;
227 stt->create_work_queue = 1;
228 stt->eh_timed_out = sas_scsi_timed_out;
229 stt->eh_strategy_handler = sas_scsi_recover_host;
230
231 return stt;
232}
233EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
234
235
/* Release a template obtained from sas_domain_attach_transport(). */
void sas_domain_release_transport(struct scsi_transport_template *stt)
{
	sas_release_transport(stt);
}
240EXPORT_SYMBOL_GPL(sas_domain_release_transport);
241
242/* ---------- SAS Class register/unregister ---------- */
243
244static int __init sas_class_init(void)
245{
246 sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task),
247 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
248 if (!sas_task_cache)
249 return -ENOMEM;
250
251 return 0;
252}
253
/* Module exit: free the sas_task slab cache. */
static void __exit sas_class_exit(void)
{
	kmem_cache_destroy(sas_task_cache);
}
258
259MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
260MODULE_DESCRIPTION("SAS Transport Layer");
261MODULE_LICENSE("GPL v2");
262
263module_init(sas_class_init);
264module_exit(sas_class_exit);
265
266EXPORT_SYMBOL_GPL(sas_register_ha);
267EXPORT_SYMBOL_GPL(sas_unregister_ha);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
new file mode 100644
index 000000000000..bffcee474921
--- /dev/null
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -0,0 +1,146 @@
1/*
2 * Serial Attached SCSI (SAS) class internal header file
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
#ifndef _SAS_INTERNAL_H_
#define _SAS_INTERNAL_H_

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>

/* Unconditional notice-level logging with a "sas: " prefix. */
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)

/* Debug logging; compiles to nothing unless SAS_DEBUG is defined. */
#ifdef SAS_DEBUG
#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
#else
#define SAS_DPRINTK(fmt, ...)
#endif

/* SCSI error-handling strategy handler (sas_scsi_host.c). */
void sas_scsi_recover_host(struct Scsi_Host *shost);

/* sysfs formatting helpers: render the enum value into @buf. */
int sas_show_class(enum sas_class class, char *buf);
int sas_show_proto(enum sas_proto proto, char *buf);
int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);

/* Phy/port registration (sas_phy.c / sas_port.c). */
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);

int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);

/* eh_timed_out callback installed on the transport template. */
enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);

/* Task Collector Mode queue and event machinery (sas_init.c et al.). */
int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);

void sas_deform_port(struct asd_sas_phy *phy);

/* Port event work functions; argument is the struct asd_sas_phy. */
void sas_porte_bytes_dmaed(void *);
void sas_porte_broadcast_rcvd(void *);
void sas_porte_link_reset_err(void *);
void sas_porte_timer_event(void *);
void sas_porte_hard_reset(void *);

/* Domain device notification hooks toward the LLDD. */
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);

/* SMP helpers for expander-attached phys. */
int sas_smp_phy_control(struct domain_device *dev, int phy_id,
			enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);

struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);

/* HA event work function; argument is the struct sas_ha_struct. */
void sas_hae_reset(void *);
80static inline void sas_queue_event(int event, spinlock_t *lock,
81 unsigned long *pending,
82 struct work_struct *work,
83 struct Scsi_Host *shost)
84{
85 unsigned long flags;
86
87 spin_lock_irqsave(lock, flags);
88 if (test_bit(event, pending)) {
89 spin_unlock_irqrestore(lock, flags);
90 return;
91 }
92 __set_bit(event, pending);
93 spin_unlock_irqrestore(lock, flags);
94 scsi_queue_work(shost, work);
95}
96
/*
 * Called at the start of an event handler: clear @event's pending bit
 * under @lock so the same event may be queued again while (or after)
 * this handler runs.
 */
static inline void sas_begin_event(int event, spinlock_t *lock,
				   unsigned long *pending)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	__clear_bit(event, pending);
	spin_unlock_irqrestore(lock, flags);
}
106
107static inline void sas_fill_in_rphy(struct domain_device *dev,
108 struct sas_rphy *rphy)
109{
110 rphy->identify.sas_address = SAS_ADDR(dev->sas_addr);
111 rphy->identify.initiator_port_protocols = dev->iproto;
112 rphy->identify.target_port_protocols = dev->tproto;
113 switch (dev->dev_type) {
114 case SATA_DEV:
115 /* FIXME: need sata device type */
116 case SAS_END_DEV:
117 rphy->identify.device_type = SAS_END_DEVICE;
118 break;
119 case EDGE_DEV:
120 rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
121 break;
122 case FANOUT_DEV:
123 rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
124 break;
125 default:
126 rphy->identify.device_type = SAS_PHY_UNUSED;
127 break;
128 }
129}
130
/*
 * Attach expander phy @phy_id of @dev to the expander's parent port,
 * allocating and registering that port on first use.
 */
static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
{
	struct expander_device *ex = &dev->ex_dev;
	struct ex_phy *ex_phy = &ex->ex_phy[phy_id];

	if (!ex->parent_port) {
		ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id);
		/* FIXME: error handling */
		BUG_ON(!ex->parent_port);
		/* NOTE: sas_port_add() is evaluated inside BUG_ON() --
		 * the registration is a side effect of the assertion. */
		BUG_ON(sas_port_add(ex->parent_port));
		sas_port_mark_backlink(ex->parent_port);
	}
	sas_port_add_phy(ex->parent_port, ex_phy->phy);
}
145
146#endif /* _SAS_INTERNAL_H_ */
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
new file mode 100644
index 000000000000..9340cdbae4a3
--- /dev/null
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -0,0 +1,158 @@
1/*
2 * Serial Attached SCSI (SAS) Phy class
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport.h>
28#include <scsi/scsi_transport_sas.h>
29#include "../scsi_sas_internal.h"
30
31/* ---------- Phy events ---------- */
32
/* PHYE_LOSS_OF_SIGNAL handler (work context): the link dropped, so
 * reset the consecutive OOB error count and detach the phy from its
 * port. @data is the struct asd_sas_phy. */
static void sas_phye_loss_of_signal(void *data)
{
	struct asd_sas_phy *phy = data;

	/* Re-arm the event before acting on it. */
	sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
			&phy->phy_events_pending);
	phy->error = 0;
	sas_deform_port(phy);
}
42
/* PHYE_OOB_DONE handler: OOB negotiation completed successfully, so
 * clear the consecutive-error counter. @data is the asd_sas_phy. */
static void sas_phye_oob_done(void *data)
{
	struct asd_sas_phy *phy = data;

	sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
			&phy->phy_events_pending);
	phy->error = 0;
}
51
/* PHYE_OOB_ERROR handler: OOB negotiation failed.  Detach the phy from
 * its port, then escalate: hard-reset the phy on the first two
 * consecutive errors, disable it from the third onward. */
static void sas_phye_oob_error(void *data)
{
	struct asd_sas_phy *phy = data;
	struct sas_ha_struct *sas_ha = phy->ha;
	/* Sampled before sas_deform_port() below: non-NULL means the
	 * phy was part of a port when the error happened. */
	struct asd_sas_port *port = phy->port;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
			&phy->phy_events_pending);

	sas_deform_port(phy);

	if (!port && phy->enabled && i->dft->lldd_control_phy) {
		phy->error++;
		switch (phy->error) {
		case 1:
		case 2:
			i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET,
						 NULL);
			break;
		case 3:
		default:
			/* Too many consecutive failures: give up and
			 * disable the phy. */
			phy->error = 0;
			phy->enabled = 0;
			i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
			break;
		}
	}
}
82
/* PHYE_SPINUP_HOLD handler: a SATA device is waiting in spinup-hold;
 * clear the error counter and ask the LLDD to release it. */
static void sas_phye_spinup_hold(void *data)
{
	struct asd_sas_phy *phy = data;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock,
			&phy->phy_events_pending);

	phy->error = 0;
	i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
96
97/* ---------- Phy class registration ---------- */
98
/*
 * Initialize every phy of @sas_ha: wire up its phy/port event work
 * structs, init its locks and lists, and register a transport-class
 * sas_phy object for it.
 *
 * Returns 0 on success, -ENOMEM if a sas_phy allocation fails.
 * NOTE(review): on failure, phys registered earlier in the loop are
 * left registered -- presumably the caller unwinds; verify.
 */
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
	int i;

	/* Dispatch tables mapping event numbers to work functions. */
	static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
		[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
		[PHYE_OOB_DONE] = sas_phye_oob_done,
		[PHYE_OOB_ERROR] = sas_phye_oob_error,
		[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
	};

	static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
		[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
		[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
		[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
		[PORTE_TIMER_EVENT] = sas_porte_timer_event,
		[PORTE_HARD_RESET] = sas_porte_hard_reset,
	};

	/* Now register the phys. */
	for (i = 0; i < sas_ha->num_phys; i++) {
		int k;
		struct asd_sas_phy *phy = sas_ha->sas_phy[i];

		phy->error = 0;
		INIT_LIST_HEAD(&phy->port_phy_el);
		for (k = 0; k < PORT_NUM_EVENTS; k++)
			INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
				  phy);

		for (k = 0; k < PHY_NUM_EVENTS; k++)
			INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
				  phy);
		phy->port = NULL;
		phy->ha = sas_ha;
		spin_lock_init(&phy->frame_rcvd_lock);
		spin_lock_init(&phy->sas_prim_lock);
		phy->frame_rcvd_size = 0;

		/* Transport-class representation of this phy. */
		phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev,
					 i);
		if (!phy->phy)
			return -ENOMEM;

		phy->phy->identify.initiator_port_protocols =
			phy->iproto;
		phy->phy->identify.target_port_protocols = phy->tproto;
		phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr);
		phy->phy->identify.phy_identifier = i;
		/* Link rates are unknown until OOB completes. */
		phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
		phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
		phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
		phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
		phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

		sas_phy_add(phy->phy);
	}

	return 0;
}
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
new file mode 100644
index 000000000000..253cdcf306a2
--- /dev/null
+++ b/drivers/scsi/libsas/sas_port.c
@@ -0,0 +1,279 @@
1/*
2 * Serial Attached SCSI (SAS) Port class
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26
27#include <scsi/scsi_transport.h>
28#include <scsi/scsi_transport_sas.h>
29#include "../scsi_sas_internal.h"
30
31/**
32 * sas_form_port -- add this phy to a port
33 * @phy: the phy of interest
34 *
35 * This function adds this phy to an existing port, thus creating a wide
36 * port, or it creates a port and adds the phy to the port.
37 */
38static void sas_form_port(struct asd_sas_phy *phy)
39{
40 int i;
41 struct sas_ha_struct *sas_ha = phy->ha;
42 struct asd_sas_port *port = phy->port;
43 struct sas_internal *si =
44 to_sas_internal(sas_ha->core.shost->transportt);
45
46 if (port) {
47 if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
48 SAS_ADDR_SIZE) == 0)
49 sas_deform_port(phy);
50 else {
51 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
52 __FUNCTION__, phy->id, phy->port->id,
53 phy->port->num_phys);
54 return;
55 }
56 }
57
58 /* find a port */
59 spin_lock(&sas_ha->phy_port_lock);
60 for (i = 0; i < sas_ha->num_phys; i++) {
61 port = sas_ha->sas_port[i];
62 spin_lock(&port->phy_list_lock);
63 if (*(u64 *) port->sas_addr &&
64 memcmp(port->attached_sas_addr,
65 phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
66 port->num_phys > 0) {
67 /* wide port */
68 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
69 port->id);
70 break;
71 } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) {
72 memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
73 break;
74 }
75 spin_unlock(&port->phy_list_lock);
76 }
77
78 if (i >= sas_ha->num_phys) {
79 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
80 __FUNCTION__);
81 spin_unlock(&sas_ha->phy_port_lock);
82 return;
83 }
84
85 /* add the phy to the port */
86 list_add_tail(&phy->port_phy_el, &port->phy_list);
87 phy->port = port;
88 port->num_phys++;
89 port->phy_mask |= (1U << phy->id);
90
91 if (!port->phy)
92 port->phy = phy->phy;
93
94 SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id,
95 port->id, port->phy_mask);
96
97 if (*(u64 *)port->attached_sas_addr == 0) {
98 port->class = phy->class;
99 memcpy(port->attached_sas_addr, phy->attached_sas_addr,
100 SAS_ADDR_SIZE);
101 port->iproto = phy->iproto;
102 port->tproto = phy->tproto;
103 port->oob_mode = phy->oob_mode;
104 port->linkrate = phy->linkrate;
105 } else
106 port->linkrate = max(port->linkrate, phy->linkrate);
107 spin_unlock(&port->phy_list_lock);
108 spin_unlock(&sas_ha->phy_port_lock);
109
110 if (!port->port) {
111 port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
112 BUG_ON(!port->port);
113 sas_port_add(port->port);
114 }
115 sas_port_add_phy(port->port, phy->phy);
116
117 if (port->port_dev)
118 port->port_dev->pathways = port->num_phys;
119
120 /* Tell the LLDD about this port formation. */
121 if (si->dft->lldd_port_formed)
122 si->dft->lldd_port_formed(phy);
123
124 sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
125}
126
127/**
128 * sas_deform_port -- remove this phy from the port it belongs to
129 * @phy: the phy of interest
130 *
131 * This is called when the physical link to the other phy has been
132 * lost (on this phy), in Event thread context. We cannot delay here.
133 */
134void sas_deform_port(struct asd_sas_phy *phy)
135{
136 struct sas_ha_struct *sas_ha = phy->ha;
137 struct asd_sas_port *port = phy->port;
138 struct sas_internal *si =
139 to_sas_internal(sas_ha->core.shost->transportt);
140
141 if (!port)
142 return; /* done by a phy event */
143
144 if (port->port_dev)
145 port->port_dev->pathways--;
146
147 if (port->num_phys == 1) {
148 sas_unregister_domain_devices(port);
149 sas_port_delete(port->port);
150 port->port = NULL;
151 } else
152 sas_port_delete_phy(port->port, phy->phy);
153
154
155 if (si->dft->lldd_port_deformed)
156 si->dft->lldd_port_deformed(phy);
157
158 spin_lock(&sas_ha->phy_port_lock);
159 spin_lock(&port->phy_list_lock);
160
161 list_del_init(&phy->port_phy_el);
162 phy->port = NULL;
163 port->num_phys--;
164 port->phy_mask &= ~(1U << phy->id);
165
166 if (port->num_phys == 0) {
167 INIT_LIST_HEAD(&port->phy_list);
168 memset(port->sas_addr, 0, SAS_ADDR_SIZE);
169 memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
170 port->class = 0;
171 port->iproto = 0;
172 port->tproto = 0;
173 port->oob_mode = 0;
174 port->phy_mask = 0;
175 }
176 spin_unlock(&port->phy_list_lock);
177 spin_unlock(&sas_ha->phy_port_lock);
178
179 return;
180}
181
182/* ---------- SAS port events ---------- */
183
/* PORTE_BYTES_DMAED handler (work context): identification frame has
 * been received, so try to (re)form a port around this phy. */
void sas_porte_bytes_dmaed(void *data)
{
	struct asd_sas_phy *phy = data;

	sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
			&phy->port_events_pending);

	sas_form_port(phy);
}
193
/* PORTE_BROADCAST_RCVD handler: an expander broadcast arrived; sample
 * the primitive under its lock and kick domain revalidation. */
void sas_porte_broadcast_rcvd(void *data)
{
	unsigned long flags;
	u32 prim;
	struct asd_sas_phy *phy = data;

	sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
			&phy->port_events_pending);

	spin_lock_irqsave(&phy->sas_prim_lock, flags);
	prim = phy->sas_prim;
	spin_unlock_irqrestore(&phy->sas_prim_lock, flags);

	SAS_DPRINTK("broadcast received: %d\n", prim);
	sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
}
210
/* PORTE_LINK_RESET_ERR handler: link reset failed -- drop the phy from
 * its port. */
void sas_porte_link_reset_err(void *data)
{
	struct asd_sas_phy *phy = data;

	sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
			&phy->port_events_pending);

	sas_deform_port(phy);
}
220
/* PORTE_TIMER_EVENT handler: LLDD-reported timeout on this phy -- drop
 * the phy from its port. */
void sas_porte_timer_event(void *data)
{
	struct asd_sas_phy *phy = data;

	sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
			&phy->port_events_pending);

	sas_deform_port(phy);
}
230
/* PORTE_HARD_RESET handler: a hard reset was received on the phy --
 * drop the phy from its port. */
void sas_porte_hard_reset(void *data)
{
	struct asd_sas_phy *phy = data;

	sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
			&phy->port_events_pending);

	sas_deform_port(phy);
}
240
241/* ---------- SAS port registration ---------- */
242
243static void sas_init_port(struct asd_sas_port *port,
244 struct sas_ha_struct *sas_ha, int i)
245{
246 port->id = i;
247 INIT_LIST_HEAD(&port->dev_list);
248 spin_lock_init(&port->phy_list_lock);
249 INIT_LIST_HEAD(&port->phy_list);
250 port->num_phys = 0;
251 port->phy_mask = 0;
252 port->ha = sas_ha;
253
254 spin_lock_init(&port->dev_list_lock);
255}
256
257int sas_register_ports(struct sas_ha_struct *sas_ha)
258{
259 int i;
260
261 /* initialize the ports and discovery */
262 for (i = 0; i < sas_ha->num_phys; i++) {
263 struct asd_sas_port *port = sas_ha->sas_port[i];
264
265 sas_init_port(port, sas_ha, i);
266 sas_init_disc(&port->disc, port);
267 }
268 return 0;
269}
270
271void sas_unregister_ports(struct sas_ha_struct *sas_ha)
272{
273 int i;
274
275 for (i = 0; i < sas_ha->num_phys; i++)
276 if (sas_ha->sas_phy[i]->port)
277 sas_deform_port(sas_ha->sas_phy[i]);
278
279}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
new file mode 100644
index 000000000000..43e0e4e36934
--- /dev/null
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -0,0 +1,786 @@
1/*
2 * Serial Attached SCSI (SAS) class SCSI Host glue.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#include "sas_internal.h"
27
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34#include "../scsi_sas_internal.h"
35
36#include <linux/err.h>
37#include <linux/blkdev.h>
38#include <linux/scatterlist.h>
39
40/* ---------- SCSI Host glue ---------- */
41
42#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
43#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
44
45static void sas_scsi_task_done(struct sas_task *task)
46{
47 struct task_status_struct *ts = &task->task_status;
48 struct scsi_cmnd *sc = task->uldd_task;
49 unsigned ts_flags = task->task_state_flags;
50 int hs = 0, stat = 0;
51
52 if (unlikely(!sc)) {
53 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
54 list_del_init(&task->list);
55 sas_free_task(task);
56 return;
57 }
58
59 if (ts->resp == SAS_TASK_UNDELIVERED) {
60 /* transport error */
61 hs = DID_NO_CONNECT;
62 } else { /* ts->resp == SAS_TASK_COMPLETE */
63 /* task delivered, what happened afterwards? */
64 switch (ts->stat) {
65 case SAS_DEV_NO_RESPONSE:
66 case SAS_INTERRUPTED:
67 case SAS_PHY_DOWN:
68 case SAS_NAK_R_ERR:
69 case SAS_OPEN_TO:
70 hs = DID_NO_CONNECT;
71 break;
72 case SAS_DATA_UNDERRUN:
73 sc->resid = ts->residual;
74 if (sc->request_bufflen - sc->resid < sc->underflow)
75 hs = DID_ERROR;
76 break;
77 case SAS_DATA_OVERRUN:
78 hs = DID_ERROR;
79 break;
80 case SAS_QUEUE_FULL:
81 hs = DID_SOFT_ERROR; /* retry */
82 break;
83 case SAS_DEVICE_UNKNOWN:
84 hs = DID_BAD_TARGET;
85 break;
86 case SAS_SG_ERR:
87 hs = DID_PARITY;
88 break;
89 case SAS_OPEN_REJECT:
90 if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
91 hs = DID_SOFT_ERROR; /* retry */
92 else
93 hs = DID_ERROR;
94 break;
95 case SAS_PROTO_RESPONSE:
96 SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
97 "task; please report this\n",
98 task->dev->port->ha->sas_ha_name);
99 break;
100 case SAS_ABORTED_TASK:
101 hs = DID_ABORT;
102 break;
103 case SAM_CHECK_COND:
104 memcpy(sc->sense_buffer, ts->buf,
105 max(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
106 stat = SAM_CHECK_COND;
107 break;
108 default:
109 stat = ts->stat;
110 break;
111 }
112 }
113 ASSIGN_SAS_TASK(sc, NULL);
114 sc->result = (hs << 16) | stat;
115 list_del_init(&task->list);
116 sas_free_task(task);
117 /* This is very ugly but this is how SCSI Core works. */
118 if (ts_flags & SAS_TASK_STATE_ABORTED)
119 scsi_finish_command(sc);
120 else
121 sc->scsi_done(sc);
122}
123
124static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
125{
126 enum task_attribute ta = TASK_ATTR_SIMPLE;
127 if (cmd->request && blk_rq_tagged(cmd->request)) {
128 if (cmd->device->ordered_tags &&
129 (cmd->request->flags & REQ_HARDBARRIER))
130 ta = TASK_ATTR_HOQ;
131 }
132 return ta;
133}
134
/*
 * Allocate and fill a sas_task for SCSI command @cmd bound for @dev.
 * Links the task and command to each other (via host_scribble) and
 * installs sas_scsi_task_done() as the completion callback.
 *
 * Returns the new task, or NULL if allocation fails.
 */
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					struct domain_device *dev,
					unsigned long gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	/* Invalidate any stale sense data header. */
	*(u32 *)cmd->sense_buffer = 0;
	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
	memcpy(task->ssp_task.cdb, cmd->cmnd, 16);

	/* Mirror the command's data buffer description into the task. */
	task->scatter = cmd->request_buffer;
	task->num_scatter = cmd->use_sg;
	task->total_xfer_len = cmd->request_bufflen;
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;

	return task;
}
167
168static int sas_queue_up(struct sas_task *task)
169{
170 struct sas_ha_struct *sas_ha = task->dev->port->ha;
171 struct scsi_core *core = &sas_ha->core;
172 unsigned long flags;
173 LIST_HEAD(list);
174
175 spin_lock_irqsave(&core->task_queue_lock, flags);
176 if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
177 spin_unlock_irqrestore(&core->task_queue_lock, flags);
178 return -SAS_QUEUE_FULL;
179 }
180 list_add_tail(&task->list, &core->task_queue);
181 core->task_queue_size += 1;
182 spin_unlock_irqrestore(&core->task_queue_lock, flags);
183 up(&core->queue_thread_sema);
184
185 return 0;
186}
187
188/**
189 * sas_queuecommand -- Enqueue a command for processing
190 * @parameters: See SCSI Core documentation
191 *
192 * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
193 * call us without holding an IRQ spinlock...
194 */
195int sas_queuecommand(struct scsi_cmnd *cmd,
196 void (*scsi_done)(struct scsi_cmnd *))
197{
198 int res = 0;
199 struct domain_device *dev = cmd_to_domain_dev(cmd);
200 struct Scsi_Host *host = cmd->device->host;
201 struct sas_internal *i = to_sas_internal(host->transportt);
202
203 spin_unlock_irq(host->host_lock);
204
205 {
206 struct sas_ha_struct *sas_ha = dev->port->ha;
207 struct sas_task *task;
208
209 res = -ENOMEM;
210 task = sas_create_task(cmd, dev, GFP_ATOMIC);
211 if (!task)
212 goto out;
213
214 cmd->scsi_done = scsi_done;
215 /* Queue up, Direct Mode or Task Collector Mode. */
216 if (sas_ha->lldd_max_execute_num < 2)
217 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
218 else
219 res = sas_queue_up(task);
220
221 /* Examine */
222 if (res) {
223 SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
224 ASSIGN_SAS_TASK(cmd, NULL);
225 sas_free_task(task);
226 if (res == -SAS_QUEUE_FULL) {
227 cmd->result = DID_SOFT_ERROR << 16; /* retry */
228 res = 0;
229 scsi_done(cmd);
230 }
231 goto out;
232 }
233 }
234out:
235 spin_lock_irq(host->host_lock);
236 return res;
237}
238
239static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
240{
241 struct scsi_cmnd *cmd, *n;
242
243 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
244 if (cmd == my_cmd)
245 list_del_init(&cmd->eh_entry);
246 }
247}
248
249static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
250 struct domain_device *dev)
251{
252 struct scsi_cmnd *cmd, *n;
253
254 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
255 struct domain_device *x = cmd_to_domain_dev(cmd);
256
257 if (x == dev)
258 list_del_init(&cmd->eh_entry);
259 }
260}
261
262static void sas_scsi_clear_queue_port(struct list_head *error_q,
263 struct asd_sas_port *port)
264{
265 struct scsi_cmnd *cmd, *n;
266
267 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
268 struct domain_device *dev = cmd_to_domain_dev(cmd);
269 struct asd_sas_port *x = dev->port;
270
271 if (x == port)
272 list_del_init(&cmd->eh_entry);
273 }
274}
275
/* Result of sas_scsi_find_task(): where the timed-out task ended up. */
enum task_disposition {
	TASK_IS_DONE,		/* completed while we were looking */
	TASK_IS_ABORTED,	/* successfully aborted */
	TASK_IS_AT_LU,		/* still queued at the logical unit */
	TASK_IS_NOT_AT_LU,	/* not at the LU; escalate recovery */
};
282
/*
 * Locate a timed-out @task and try to abort it.  First checks the
 * Task-Collector queue (task never reached the LLDD); otherwise asks
 * the LLDD to abort, retrying up to 5 times and querying the task's
 * whereabouts between attempts.
 *
 * NOTE(review): if all 5 attempts fall through, this returns the raw
 * TMF response code in `res` cast to the enum -- the caller's switch
 * will not match any case; looks like a latent wart, verify before
 * changing.
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	struct sas_ha_struct *ha = task->dev->port->ha;
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->core.shost->transportt);

	/* Task Collector Mode: the task may still be sitting in our
	 * own queue, in which case removing it aborts it. */
	if (ha->lldd_max_execute_num > 1) {
		struct scsi_core *core = &ha->core;
		struct sas_task *t, *n;

		spin_lock_irqsave(&core->task_queue_lock, flags);
		list_for_each_entry_safe(t, n, &core->task_queue, list) {
			if (task == t) {
				list_del_init(&t->list);
				spin_unlock_irqrestore(&core->task_queue_lock,
						       flags);
				SAS_DPRINTK("%s: task 0x%p aborted from "
					    "task_queue\n",
					    __FUNCTION__, task);
				return TASK_IS_ABORTED;
			}
		}
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
	}

	for (i = 0; i < 5; i++) {
		SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
		res = si->dft->lldd_abort_task(task);

		/* The task may have completed concurrently with the
		 * abort -- check DONE under the state lock. */
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
				    task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
				    __FUNCTION__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			SAS_DPRINTK("%s: querying task 0x%p\n",
				    __FUNCTION__, task);
			res = si->dft->lldd_query_task(task);
			if (res == TMF_RESP_FUNC_SUCC) {
				SAS_DPRINTK("%s: task 0x%p at LU\n",
					    __FUNCTION__, task);
				return TASK_IS_AT_LU;
			} else if (res == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("%s: task 0x%p not at LU\n",
					    __FUNCTION__, task);
				return TASK_IS_NOT_AT_LU;
			}
		}
	}
	return res;
}
344
345static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
346{
347 int res = TMF_RESP_FUNC_FAILED;
348 struct scsi_lun lun;
349 struct sas_internal *i =
350 to_sas_internal(dev->port->ha->core.shost->transportt);
351
352 int_to_scsilun(cmd->device->lun, &lun);
353
354 SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
355 SAS_ADDR(dev->sas_addr),
356 cmd->device->lun);
357
358 if (i->dft->lldd_abort_task_set)
359 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
360
361 if (res == TMF_RESP_FUNC_FAILED) {
362 if (i->dft->lldd_clear_task_set)
363 res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
364 }
365
366 if (res == TMF_RESP_FUNC_FAILED) {
367 if (i->dft->lldd_lu_reset)
368 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
369 }
370
371 return res;
372}
373
/*
 * I_T-nexus-level recovery: ask the LLDD to reset the nexus to @dev.
 * Returns the TMF response code, or TMF_RESP_FUNC_FAILED if the LLDD
 * does not implement the reset.
 */
static int sas_recover_I_T(struct domain_device *dev)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
		    SAS_ADDR(dev->sas_addr));

	if (i->dft->lldd_I_T_nexus_reset)
		res = i->dft->lldd_I_T_nexus_reset(dev);

	return res;
}
388
/*
 * SCSI EH strategy handler.  Drains shost->eh_cmd_q and, for each
 * timed-out command, escalates recovery: find/abort the task, then
 * LU recovery, then I_T nexus reset, then port-wide nexus clear, and
 * finally an HA-wide nexus clear.  Each successful escalation step
 * removes the commands it recovered from the local queue and restarts
 * the scan (the Again label).
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	unsigned long flags;
	LIST_HEAD(error_q);
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp;
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* Take the whole EH queue private under host_lock. */
	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &error_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	SAS_DPRINTK("Enter %s\n", __FUNCTION__);

	/* All tasks on this list were marked SAS_TASK_STATE_ABORTED
	 * by sas_scsi_timed_out() callback.
	 */
Again:
	SAS_DPRINTK("going over list...\n");
	list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		SAS_DPRINTK("trying to find task 0x%p\n", task);
		list_del_init(&cmd->eh_entry);
		res = sas_scsi_find_task(task);

		cmd->eh_eflags = 0;
		shost->host_failed--;

		switch (res) {
		case TASK_IS_DONE:
			SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
				    task);
			task->task_done(task);
			continue;
		case TASK_IS_ABORTED:
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
				    __FUNCTION__, task);
			task->task_done(task);
			continue;
		case TASK_IS_AT_LU:
			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("dev %016llx LU %x is "
					    "recovered\n",
					    SAS_ADDR(task->dev),
					    cmd->device->lun);
				task->task_done(task);
				sas_scsi_clear_queue_lu(&error_q, cmd);
				goto Again;
			}
			/* fallthrough */
		case TASK_IS_NOT_AT_LU:
			SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
				    task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("I_T %016llx recovered\n",
					    SAS_ADDR(task->dev->sas_addr));
				task->task_done(task);
				sas_scsi_clear_queue_I_T(&error_q, task->dev);
				goto Again;
			}
			/* Hammer time :-) */
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				SAS_DPRINTK("clearing nexus for port:%d\n",
					    port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus port:%d "
						    "succeeded\n", port->id);
					task->task_done(task);
					sas_scsi_clear_queue_port(&error_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				SAS_DPRINTK("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus ha "
						    "succeeded\n");
					task->task_done(task);
					goto out;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors. Quite
			 * possibly the HA just disappeared.
			 */
			SAS_DPRINTK("error from device %llx, LUN %x "
				    "couldn't be recovered in any way\n",
				    SAS_ADDR(task->dev->sas_addr),
				    cmd->device->lun);

			task->task_done(task);
			goto clear_q;
		}
	}
out:
	SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
	return;
clear_q:
	/* Unrecoverable: complete everything still on the queue. */
	SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
	list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);
		list_del_init(&cmd->eh_entry);
		task->task_done(task);
	}
}
504
505enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
506{
507 struct sas_task *task = TO_SAS_TASK(cmd);
508 unsigned long flags;
509
510 if (!task) {
511 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
512 cmd, task);
513 return EH_HANDLED;
514 }
515
516 spin_lock_irqsave(&task->task_state_lock, flags);
517 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
518 spin_unlock_irqrestore(&task->task_state_lock, flags);
519 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
520 cmd, task);
521 return EH_HANDLED;
522 }
523 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
524 spin_unlock_irqrestore(&task->task_state_lock, flags);
525
526 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
527 cmd, task);
528
529 return EH_NOT_HANDLED;
530}
531
532struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
533{
534 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
535 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
536 struct domain_device *found_dev = NULL;
537 int i;
538
539 spin_lock(&ha->phy_port_lock);
540 for (i = 0; i < ha->num_phys; i++) {
541 struct asd_sas_port *port = ha->sas_port[i];
542 struct domain_device *dev;
543
544 spin_lock(&port->dev_list_lock);
545 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
546 if (rphy == dev->rphy) {
547 found_dev = dev;
548 spin_unlock(&port->dev_list_lock);
549 goto found;
550 }
551 }
552 spin_unlock(&port->dev_list_lock);
553 }
554 found:
555 spin_unlock(&ha->phy_port_lock);
556
557 return found_dev;
558}
559
560static inline struct domain_device *sas_find_target(struct scsi_target *starget)
561{
562 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
563
564 return sas_find_dev_by_rphy(rphy);
565}
566
567int sas_target_alloc(struct scsi_target *starget)
568{
569 struct domain_device *found_dev = sas_find_target(starget);
570
571 if (!found_dev)
572 return -ENODEV;
573
574 starget->hostdata = found_dev;
575 return 0;
576}
577
/* Default and maximum tagged-queue depth for SAS end devices. */
#define SAS_DEF_QD 32
#define SAS_MAX_QD 64
580
581int sas_slave_configure(struct scsi_device *scsi_dev)
582{
583 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
584 struct sas_ha_struct *sas_ha;
585
586 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
587
588 sas_ha = dev->port->ha;
589
590 sas_read_port_mode_page(scsi_dev);
591
592 if (scsi_dev->tagged_supported) {
593 scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
594 scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
595 } else {
596 SAS_DPRINTK("device %llx, LUN %x doesn't support "
597 "TCQ\n", SAS_ADDR(dev->sas_addr),
598 scsi_dev->lun);
599 scsi_dev->tagged_supported = 0;
600 scsi_set_tag_type(scsi_dev, 0);
601 scsi_deactivate_tcq(scsi_dev, 1);
602 }
603
604 return 0;
605}
606
/* Intentionally empty: libsas keeps no per-sdev state to tear down. */
void sas_slave_destroy(struct scsi_device *scsi_dev)
{
}
610
611int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
612{
613 int res = min(new_depth, SAS_MAX_QD);
614
615 if (scsi_dev->tagged_supported)
616 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
617 res);
618 else {
619 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
620 sas_printk("device %llx LUN %x queue depth changed to 1\n",
621 SAS_ADDR(dev->sas_addr),
622 scsi_dev->lun);
623 scsi_adjust_queue_depth(scsi_dev, 0, 1);
624 res = 1;
625 }
626
627 return res;
628}
629
630int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
631{
632 if (!scsi_dev->tagged_supported)
633 return 0;
634
635 scsi_deactivate_tcq(scsi_dev, 1);
636
637 scsi_set_tag_type(scsi_dev, qt);
638 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
639
640 return qt;
641}
642
643int sas_bios_param(struct scsi_device *scsi_dev,
644 struct block_device *bdev,
645 sector_t capacity, int *hsc)
646{
647 hsc[0] = 255;
648 hsc[1] = 63;
649 sector_div(capacity, 255*63);
650 hsc[2] = capacity;
651
652 return 0;
653}
654
655/* ---------- Task Collector Thread implementation ---------- */
656
/*
 * sas_queue -- drain the Task Collector queue into the LLDD
 * @sas_ha: the host adapter whose core task queue is drained
 *
 * Repeatedly moves a batch of queued sas_tasks onto a local list and
 * hands them to the LLDD via lldd_execute_task().  The batch is the
 * whole queue when it fits in lldd_queue_size, otherwise exactly
 * lldd_queue_size entries.  task_queue_lock is dropped around the
 * LLDD call and retaken afterwards; on failure the batch is spliced
 * back at the head of the queue and the size restored, then the loop
 * re-tests the (possibly refilled) queue.  Stops early when
 * queue_thread_kill is set.
 */
static void sas_queue(struct sas_ha_struct *sas_ha)
{
	struct scsi_core *core = &sas_ha->core;
	unsigned long flags;
	LIST_HEAD(q);
	int can_queue;
	int res;
	struct sas_internal *i = to_sas_internal(core->shost->transportt);

	/* task_queue_lock protects both the list and task_queue_size. */
	spin_lock_irqsave(&core->task_queue_lock, flags);
	while (!core->queue_thread_kill &&
	       !list_empty(&core->task_queue)) {

		/* Room for everything queued?  Then take the whole queue. */
		can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
		if (can_queue >= 0) {
			can_queue = core->task_queue_size;
			list_splice_init(&core->task_queue, &q);
		} else {
			struct list_head *a, *n;

			/* Otherwise move exactly lldd_queue_size entries. */
			can_queue = sas_ha->lldd_queue_size;
			list_for_each_safe(a, n, &core->task_queue) {
				list_move_tail(a, &q);
				if (--can_queue == 0)
					break;
			}
			can_queue = sas_ha->lldd_queue_size;
		}
		core->task_queue_size -= can_queue;
		/* Drop the lock while calling into the LLDD. */
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		{
			struct sas_task *task = list_entry(q.next,
							   struct sas_task,
							   list);
			/* Detach the local head; the tasks stay chained. */
			list_del_init(&q);
			res = i->dft->lldd_execute_task(task, can_queue,
							GFP_KERNEL);
			if (unlikely(res))
				/* Re-attach q in front of the failed batch. */
				__list_add(&q, task->list.prev, &task->list);
		}
		spin_lock_irqsave(&core->task_queue_lock, flags);
		if (res) {
			list_splice_init(&q, &core->task_queue); /*at head*/
			core->task_queue_size += can_queue;
		}
	}
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
}
705
/* Handshake with the queue thread: completed once when the thread has
 * started (sas_init_queue waits) and once when it exits
 * (sas_shutdown_queue waits). */
static DECLARE_COMPLETION(queue_th_comp);
707
708/**
709 * sas_queue_thread -- The Task Collector thread
710 * @_sas_ha: pointer to struct sas_ha
711 */
712static int sas_queue_thread(void *_sas_ha)
713{
714 struct sas_ha_struct *sas_ha = _sas_ha;
715 struct scsi_core *core = &sas_ha->core;
716
717 daemonize("sas_queue_%d", core->shost->host_no);
718 current->flags |= PF_NOFREEZE;
719
720 complete(&queue_th_comp);
721
722 while (1) {
723 down_interruptible(&core->queue_thread_sema);
724 sas_queue(sas_ha);
725 if (core->queue_thread_kill)
726 break;
727 }
728
729 complete(&queue_th_comp);
730
731 return 0;
732}
733
734int sas_init_queue(struct sas_ha_struct *sas_ha)
735{
736 int res;
737 struct scsi_core *core = &sas_ha->core;
738
739 spin_lock_init(&core->task_queue_lock);
740 core->task_queue_size = 0;
741 INIT_LIST_HEAD(&core->task_queue);
742 init_MUTEX_LOCKED(&core->queue_thread_sema);
743
744 res = kernel_thread(sas_queue_thread, sas_ha, 0);
745 if (res >= 0)
746 wait_for_completion(&queue_th_comp);
747
748 return res < 0 ? res : 0;
749}
750
751void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
752{
753 unsigned long flags;
754 struct scsi_core *core = &sas_ha->core;
755 struct sas_task *task, *n;
756
757 init_completion(&queue_th_comp);
758 core->queue_thread_kill = 1;
759 up(&core->queue_thread_sema);
760 wait_for_completion(&queue_th_comp);
761
762 if (!list_empty(&core->task_queue))
763 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
764 SAS_ADDR(sas_ha->sas_addr));
765
766 spin_lock_irqsave(&core->task_queue_lock, flags);
767 list_for_each_entry_safe(task, n, &core->task_queue, list) {
768 struct scsi_cmnd *cmd = task->uldd_task;
769
770 list_del_init(&task->list);
771
772 ASSIGN_SAS_TASK(cmd, NULL);
773 sas_free_task(task);
774 cmd->result = DID_ABORT << 16;
775 cmd->scsi_done(cmd);
776 }
777 spin_unlock_irqrestore(&core->task_queue_lock, flags);
778}
779
/* Hooks exported for SAS LLDDs to plug into their scsi_host_template. */
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index d44f9aac6b8f..3f7f5f8abd75 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -285,6 +285,7 @@ struct lpfc_hba {
285 uint32_t cfg_log_verbose; 285 uint32_t cfg_log_verbose;
286 uint32_t cfg_lun_queue_depth; 286 uint32_t cfg_lun_queue_depth;
287 uint32_t cfg_nodev_tmo; 287 uint32_t cfg_nodev_tmo;
288 uint32_t cfg_devloss_tmo;
288 uint32_t cfg_hba_queue_depth; 289 uint32_t cfg_hba_queue_depth;
289 uint32_t cfg_fcp_class; 290 uint32_t cfg_fcp_class;
290 uint32_t cfg_use_adisc; 291 uint32_t cfg_use_adisc;
@@ -302,6 +303,9 @@ struct lpfc_hba {
302 uint32_t cfg_poll_tmo; 303 uint32_t cfg_poll_tmo;
303 uint32_t cfg_sg_seg_cnt; 304 uint32_t cfg_sg_seg_cnt;
304 uint32_t cfg_sg_dma_buf_size; 305 uint32_t cfg_sg_dma_buf_size;
306 uint64_t cfg_soft_wwpn;
307
308 uint32_t dev_loss_tmo_changed;
305 309
306 lpfc_vpd_t vpd; /* vital product data */ 310 lpfc_vpd_t vpd; /* vital product data */
307 311
@@ -351,6 +355,8 @@ struct lpfc_hba {
351#define VPD_PORT 0x8 /* valid vpd port data */ 355#define VPD_PORT 0x8 /* valid vpd port data */
352#define VPD_MASK 0xf /* mask for any vpd data */ 356#define VPD_MASK 0xf /* mask for any vpd data */
353 357
358 uint8_t soft_wwpn_enable;
359
354 struct timer_list fcp_poll_timer; 360 struct timer_list fcp_poll_timer;
355 struct timer_list els_tmofunc; 361 struct timer_list els_tmofunc;
356 362
@@ -391,3 +397,5 @@ struct rnidrsp {
391 struct list_head list; 397 struct list_head list;
392 uint32_t data; 398 uint32_t data;
393}; 399};
400
401#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d384c16f4a87..9496e87c135e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,9 @@
39#include "lpfc_compat.h" 39#include "lpfc_compat.h"
40#include "lpfc_crtn.h" 40#include "lpfc_crtn.h"
41 41
42#define LPFC_DEF_DEVLOSS_TMO 30
43#define LPFC_MIN_DEVLOSS_TMO 1
44#define LPFC_MAX_DEVLOSS_TMO 255
42 45
43static void 46static void
44lpfc_jedec_to_ascii(int incr, char hdw[]) 47lpfc_jedec_to_ascii(int incr, char hdw[])
@@ -548,6 +551,119 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
548 lpfc_board_mode_show, lpfc_board_mode_store); 551 lpfc_board_mode_show, lpfc_board_mode_store);
549static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 552static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
550 553
554
555static char *lpfc_soft_wwpn_key = "C99G71SL8032A";
556
557static ssize_t
558lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
559 size_t count)
560{
561 struct Scsi_Host *host = class_to_shost(cdev);
562 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
563 unsigned int cnt = count;
564
565 /*
566 * We're doing a simple sanity check for soft_wwpn setting.
567 * We require that the user write a specific key to enable
568 * the soft_wwpn attribute to be settable. Once the attribute
569 * is written, the enable key resets. If further updates are
570 * desired, the key must be written again to re-enable the
571 * attribute.
572 *
573 * The "key" is not secret - it is a hardcoded string shown
574 * here. The intent is to protect against the random user or
575 * application that is just writing attributes.
576 */
577
578 /* count may include a LF at end of string */
579 if (buf[cnt-1] == '\n')
580 cnt--;
581
582 if ((cnt != strlen(lpfc_soft_wwpn_key)) ||
583 (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0))
584 return -EINVAL;
585
586 phba->soft_wwpn_enable = 1;
587 return count;
588}
589static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL,
590 lpfc_soft_wwpn_enable_store);
591
592static ssize_t
593lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
594{
595 struct Scsi_Host *host = class_to_shost(cdev);
596 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
597 return snprintf(buf, PAGE_SIZE, "0x%llx\n", phba->cfg_soft_wwpn);
598}
599
600
601static ssize_t
602lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
603{
604 struct Scsi_Host *host = class_to_shost(cdev);
605 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
606 struct completion online_compl;
607 int stat1=0, stat2=0;
608 unsigned int i, j, cnt=count;
609 u8 wwpn[8];
610
611 /* count may include a LF at end of string */
612 if (buf[cnt-1] == '\n')
613 cnt--;
614
615 if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) ||
616 ((cnt == 17) && (*buf++ != 'x')) ||
617 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
618 return -EINVAL;
619
620 phba->soft_wwpn_enable = 0;
621
622 memset(wwpn, 0, sizeof(wwpn));
623
624 /* Validate and store the new name */
625 for (i=0, j=0; i < 16; i++) {
626 if ((*buf >= 'a') && (*buf <= 'f'))
627 j = ((j << 4) | ((*buf++ -'a') + 10));
628 else if ((*buf >= 'A') && (*buf <= 'F'))
629 j = ((j << 4) | ((*buf++ -'A') + 10));
630 else if ((*buf >= '0') && (*buf <= '9'))
631 j = ((j << 4) | (*buf++ -'0'));
632 else
633 return -EINVAL;
634 if (i % 2) {
635 wwpn[i/2] = j & 0xff;
636 j = 0;
637 }
638 }
639 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
640 fc_host_port_name(host) = phba->cfg_soft_wwpn;
641
642 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
643 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
644
645 init_completion(&online_compl);
646 lpfc_workq_post_event(phba, &stat1, &online_compl, LPFC_EVT_OFFLINE);
647 wait_for_completion(&online_compl);
648 if (stat1)
649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
650 "%d:0463 lpfc_soft_wwpn attribute set failed to reinit "
651 "adapter - %d\n", phba->brd_no, stat1);
652
653 init_completion(&online_compl);
654 lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
655 wait_for_completion(&online_compl);
656 if (stat2)
657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
658 "%d:0464 lpfc_soft_wwpn attribute set failed to reinit "
659 "adapter - %d\n", phba->brd_no, stat2);
660
661 return (stat1 || stat2) ? -EIO : count;
662}
663static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
664 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
665
666
551static int lpfc_poll = 0; 667static int lpfc_poll = 0;
552module_param(lpfc_poll, int, 0); 668module_param(lpfc_poll, int, 0);
553MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" 669MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
@@ -559,6 +675,123 @@ static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
559 lpfc_poll_show, lpfc_poll_store); 675 lpfc_poll_show, lpfc_poll_store);
560 676
561/* 677/*
678# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
679# until the timer expires. Value range is [0,255]. Default value is 30.
680*/
681static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
682static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
683module_param(lpfc_nodev_tmo, int, 0);
684MODULE_PARM_DESC(lpfc_nodev_tmo,
685 "Seconds driver will hold I/O waiting "
686 "for a device to come back");
687static ssize_t
688lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
689{
690 struct Scsi_Host *host = class_to_shost(cdev);
691 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
692 int val = 0;
693 val = phba->cfg_devloss_tmo;
694 return snprintf(buf, PAGE_SIZE, "%d\n",
695 phba->cfg_devloss_tmo);
696}
697
698static int
699lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
700{
701 static int warned;
702 if (phba->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
703 phba->cfg_nodev_tmo = phba->cfg_devloss_tmo;
704 if (!warned && val != LPFC_DEF_DEVLOSS_TMO) {
705 warned = 1;
706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
707 "%d:0402 Ignoring nodev_tmo module "
708 "parameter because devloss_tmo is"
709 " set.\n",
710 phba->brd_no);
711 }
712 return 0;
713 }
714
715 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
716 phba->cfg_nodev_tmo = val;
717 phba->cfg_devloss_tmo = val;
718 return 0;
719 }
720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
721 "%d:0400 lpfc_nodev_tmo attribute cannot be set to %d, "
722 "allowed range is [%d, %d]\n",
723 phba->brd_no, val,
724 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
725 phba->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
726 return -EINVAL;
727}
728
729static int
730lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
731{
732 if (phba->dev_loss_tmo_changed ||
733 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
735 "%d:0401 Ignoring change to nodev_tmo "
736 "because devloss_tmo is set.\n",
737 phba->brd_no);
738 return 0;
739 }
740
741 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
742 phba->cfg_nodev_tmo = val;
743 phba->cfg_devloss_tmo = val;
744 return 0;
745 }
746
747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
748 "%d:0403 lpfc_nodev_tmo attribute cannot be set to %d, "
749 "allowed range is [%d, %d]\n",
750 phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
751 LPFC_MAX_DEVLOSS_TMO);
752 return -EINVAL;
753}
754
755lpfc_param_store(nodev_tmo)
756
757static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
758 lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
759
760/*
761# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
762# disappear until the timer expires. Value range is [0,255]. Default
763# value is 30.
764*/
765module_param(lpfc_devloss_tmo, int, 0);
766MODULE_PARM_DESC(lpfc_devloss_tmo,
767 "Seconds driver will hold I/O waiting "
768 "for a device to come back");
769lpfc_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
770 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
771lpfc_param_show(devloss_tmo)
772static int
773lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val)
774{
775 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
776 phba->cfg_nodev_tmo = val;
777 phba->cfg_devloss_tmo = val;
778 phba->dev_loss_tmo_changed = 1;
779 return 0;
780 }
781
782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
783 "%d:0404 lpfc_devloss_tmo attribute cannot be set to"
784 " %d, allowed range is [%d, %d]\n",
785 phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
786 LPFC_MAX_DEVLOSS_TMO);
787 return -EINVAL;
788}
789
790lpfc_param_store(devloss_tmo)
791static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
792 lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
793
794/*
562# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 795# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
563# deluged with LOTS of information. 796# deluged with LOTS of information.
564# You can set a bit mask to record specific types of verbose messages: 797# You can set a bit mask to record specific types of verbose messages:
@@ -617,14 +850,6 @@ LPFC_ATTR_R(scan_down, 1, 0, 1,
617 "Start scanning for devices from highest ALPA to lowest"); 850 "Start scanning for devices from highest ALPA to lowest");
618 851
619/* 852/*
620# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
621# until the timer expires. Value range is [0,255]. Default value is 30.
622# NOTE: this MUST be less then the SCSI Layer command timeout - 1.
623*/
624LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
625 "Seconds driver will hold I/O waiting for a device to come back");
626
627/*
628# lpfc_topology: link topology for init link 853# lpfc_topology: link topology for init link
629# 0x0 = attempt loop mode then point-to-point 854# 0x0 = attempt loop mode then point-to-point
630# 0x01 = internal loopback mode 855# 0x01 = internal loopback mode
@@ -720,6 +945,7 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
720LPFC_ATTR_RW(poll_tmo, 10, 1, 255, 945LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
721 "Milliseconds driver will wait between polling FCP ring"); 946 "Milliseconds driver will wait between polling FCP ring");
722 947
948
723struct class_device_attribute *lpfc_host_attrs[] = { 949struct class_device_attribute *lpfc_host_attrs[] = {
724 &class_device_attr_info, 950 &class_device_attr_info,
725 &class_device_attr_serialnum, 951 &class_device_attr_serialnum,
@@ -737,6 +963,7 @@ struct class_device_attribute *lpfc_host_attrs[] = {
737 &class_device_attr_lpfc_lun_queue_depth, 963 &class_device_attr_lpfc_lun_queue_depth,
738 &class_device_attr_lpfc_hba_queue_depth, 964 &class_device_attr_lpfc_hba_queue_depth,
739 &class_device_attr_lpfc_nodev_tmo, 965 &class_device_attr_lpfc_nodev_tmo,
966 &class_device_attr_lpfc_devloss_tmo,
740 &class_device_attr_lpfc_fcp_class, 967 &class_device_attr_lpfc_fcp_class,
741 &class_device_attr_lpfc_use_adisc, 968 &class_device_attr_lpfc_use_adisc,
742 &class_device_attr_lpfc_ack0, 969 &class_device_attr_lpfc_ack0,
@@ -754,6 +981,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
754 &class_device_attr_issue_reset, 981 &class_device_attr_issue_reset,
755 &class_device_attr_lpfc_poll, 982 &class_device_attr_lpfc_poll,
756 &class_device_attr_lpfc_poll_tmo, 983 &class_device_attr_lpfc_poll_tmo,
984 &class_device_attr_lpfc_soft_wwpn,
985 &class_device_attr_lpfc_soft_wwpn_enable,
757 NULL, 986 NULL,
758}; 987};
759 988
@@ -1204,6 +1433,15 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
1204 fc_host_fabric_name(shost) = node_name; 1433 fc_host_fabric_name(shost) = node_name;
1205} 1434}
1206 1435
1436static void
1437lpfc_get_host_symbolic_name (struct Scsi_Host *shost)
1438{
1439 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
1440
1441 spin_lock_irq(shost->host_lock);
1442 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
1443 spin_unlock_irq(shost->host_lock);
1444}
1207 1445
1208static struct fc_host_statistics * 1446static struct fc_host_statistics *
1209lpfc_get_stats(struct Scsi_Host *shost) 1447lpfc_get_stats(struct Scsi_Host *shost)
@@ -1441,27 +1679,12 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
1441} 1679}
1442 1680
1443static void 1681static void
1444lpfc_get_rport_loss_tmo(struct fc_rport *rport)
1445{
1446 /*
1447 * Return the driver's global value for device loss timeout plus
1448 * five seconds to allow the driver's nodev timer to run.
1449 */
1450 rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
1451}
1452
1453static void
1454lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 1682lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1455{ 1683{
1456 /*
1457 * The driver doesn't have a per-target timeout setting. Set
1458 * this value globally. lpfc_nodev_tmo should be greater then 0.
1459 */
1460 if (timeout) 1684 if (timeout)
1461 lpfc_nodev_tmo = timeout; 1685 rport->dev_loss_tmo = timeout;
1462 else 1686 else
1463 lpfc_nodev_tmo = 1; 1687 rport->dev_loss_tmo = 1;
1464 rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
1465} 1688}
1466 1689
1467 1690
@@ -1486,7 +1709,6 @@ struct fc_function_template lpfc_transport_functions = {
1486 .show_host_port_name = 1, 1709 .show_host_port_name = 1,
1487 .show_host_supported_classes = 1, 1710 .show_host_supported_classes = 1,
1488 .show_host_supported_fc4s = 1, 1711 .show_host_supported_fc4s = 1,
1489 .show_host_symbolic_name = 1,
1490 .show_host_supported_speeds = 1, 1712 .show_host_supported_speeds = 1,
1491 .show_host_maxframe_size = 1, 1713 .show_host_maxframe_size = 1,
1492 1714
@@ -1509,6 +1731,9 @@ struct fc_function_template lpfc_transport_functions = {
1509 .get_host_fabric_name = lpfc_get_host_fabric_name, 1731 .get_host_fabric_name = lpfc_get_host_fabric_name,
1510 .show_host_fabric_name = 1, 1732 .show_host_fabric_name = 1,
1511 1733
1734 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
1735 .show_host_symbolic_name = 1,
1736
1512 /* 1737 /*
1513 * The LPFC driver treats linkdown handling as target loss events 1738 * The LPFC driver treats linkdown handling as target loss events
1514 * so there are no sysfs handlers for link_down_tmo. 1739 * so there are no sysfs handlers for link_down_tmo.
@@ -1521,7 +1746,6 @@ struct fc_function_template lpfc_transport_functions = {
1521 .show_rport_maxframe_size = 1, 1746 .show_rport_maxframe_size = 1,
1522 .show_rport_supported_classes = 1, 1747 .show_rport_supported_classes = 1,
1523 1748
1524 .get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo,
1525 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, 1749 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
1526 .show_rport_dev_loss_tmo = 1, 1750 .show_rport_dev_loss_tmo = 1,
1527 1751
@@ -1535,6 +1759,8 @@ struct fc_function_template lpfc_transport_functions = {
1535 .show_starget_port_name = 1, 1759 .show_starget_port_name = 1,
1536 1760
1537 .issue_fc_host_lip = lpfc_issue_lip, 1761 .issue_fc_host_lip = lpfc_issue_lip,
1762 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
1763 .terminate_rport_io = lpfc_terminate_rport_io,
1538}; 1764};
1539 1765
1540void 1766void
@@ -1550,14 +1776,15 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1550 lpfc_ack0_init(phba, lpfc_ack0); 1776 lpfc_ack0_init(phba, lpfc_ack0);
1551 lpfc_topology_init(phba, lpfc_topology); 1777 lpfc_topology_init(phba, lpfc_topology);
1552 lpfc_scan_down_init(phba, lpfc_scan_down); 1778 lpfc_scan_down_init(phba, lpfc_scan_down);
1553 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
1554 lpfc_link_speed_init(phba, lpfc_link_speed); 1779 lpfc_link_speed_init(phba, lpfc_link_speed);
1555 lpfc_fdmi_on_init(phba, lpfc_fdmi_on); 1780 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
1556 lpfc_discovery_threads_init(phba, lpfc_discovery_threads); 1781 lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
1557 lpfc_max_luns_init(phba, lpfc_max_luns); 1782 lpfc_max_luns_init(phba, lpfc_max_luns);
1558 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 1783 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
1559 1784 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
1785 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
1560 phba->cfg_poll = lpfc_poll; 1786 phba->cfg_poll = lpfc_poll;
1787 phba->cfg_soft_wwpn = 0L;
1561 1788
1562 /* 1789 /*
1563 * The total number of segments is the configuration value plus 2 1790 * The total number of segments is the configuration value plus 2
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 2a176467f71b..3d684496acde 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -18,6 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21struct fc_rport;
21void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 22void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
22void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 23void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
23int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 24int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
@@ -200,6 +201,8 @@ extern struct scsi_host_template lpfc_template;
200extern struct fc_function_template lpfc_transport_functions; 201extern struct fc_function_template lpfc_transport_functions;
201 202
202void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp); 203void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
204void lpfc_terminate_rport_io(struct fc_rport *);
205void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
203 206
204#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 207#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
205#define HBA_EVENT_RSCN 5 208#define HBA_EVENT_RSCN 5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index bbb7310210b0..ae4106458991 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -324,7 +324,6 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
324 struct lpfc_sli_ct_request *Response = 324 struct lpfc_sli_ct_request *Response =
325 (struct lpfc_sli_ct_request *) mp->virt; 325 (struct lpfc_sli_ct_request *) mp->virt;
326 struct lpfc_nodelist *ndlp = NULL; 326 struct lpfc_nodelist *ndlp = NULL;
327 struct lpfc_nodelist *next_ndlp;
328 struct lpfc_dmabuf *mlast, *next_mp; 327 struct lpfc_dmabuf *mlast, *next_mp;
329 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; 328 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
330 uint32_t Did; 329 uint32_t Did;
@@ -399,30 +398,6 @@ nsout1:
399 * current driver state. 398 * current driver state.
400 */ 399 */
401 if (phba->hba_state == LPFC_HBA_READY) { 400 if (phba->hba_state == LPFC_HBA_READY) {
402
403 /*
404 * Switch ports that connect a loop of multiple targets need
405 * special consideration. The driver wants to unregister the
406 * rpi only on the target that was pulled from the loop. On
407 * RSCN, the driver wants to rediscover an NPort only if the
408 * driver flagged it as NLP_NPR_2B_DISC. Provided adisc is
409 * not enabled and the NPort is not capable of retransmissions
410 * (FC Tape) prevent timing races with the scsi error handler by
411 * unregistering the Nport's RPI. This action causes all
412 * outstanding IO to flush back to the midlayer.
413 */
414 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
415 nlp_listp) {
416 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
417 (lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) {
418 if ((phba->cfg_use_adisc == 0) &&
419 !(ndlp->nlp_fcp_info &
420 NLP_FCP_2_DEVICE)) {
421 lpfc_unreg_rpi(phba, ndlp);
422 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
423 }
424 }
425 }
426 lpfc_els_flush_rscn(phba); 401 lpfc_els_flush_rscn(phba);
427 spin_lock_irq(phba->host->host_lock); 402 spin_lock_irq(phba->host->host_lock);
428 phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */ 403 phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 41cf5d3ea6ce..9766f909c9c6 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -30,7 +30,6 @@
30 30
31/* worker thread events */ 31/* worker thread events */
32enum lpfc_work_type { 32enum lpfc_work_type {
33 LPFC_EVT_NODEV_TMO,
34 LPFC_EVT_ONLINE, 33 LPFC_EVT_ONLINE,
35 LPFC_EVT_OFFLINE, 34 LPFC_EVT_OFFLINE,
36 LPFC_EVT_WARM_START, 35 LPFC_EVT_WARM_START,
@@ -74,11 +73,9 @@ struct lpfc_nodelist {
74#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 73#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
75 74
76 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 75 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
77 struct timer_list nlp_tmofunc; /* Used for nodev tmo */
78 struct fc_rport *rport; /* Corresponding FC transport 76 struct fc_rport *rport; /* Corresponding FC transport
79 port structure */ 77 port structure */
80 struct lpfc_hba *nlp_phba; 78 struct lpfc_hba *nlp_phba;
81 struct lpfc_work_evt nodev_timeout_evt;
82 struct lpfc_work_evt els_retry_evt; 79 struct lpfc_work_evt els_retry_evt;
83 unsigned long last_ramp_up_time; /* jiffy of last ramp up */ 80 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
84 unsigned long last_q_full_time; /* jiffy of last queue full */ 81 unsigned long last_q_full_time; /* jiffy of last queue full */
@@ -102,7 +99,6 @@ struct lpfc_nodelist {
102#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ 99#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
103#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ 100#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
104#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ 101#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
105#define NLP_NODEV_TMO 0x10000 /* nodev timeout is running for node */
106#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ 102#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
107#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ 103#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
108#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ 104#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
@@ -169,7 +165,7 @@ struct lpfc_nodelist {
169 */ 165 */
170/* 166/*
171 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped 167 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
172 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers 168 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
173 * expire, all effected nodes will receive a DEVICE_RM event. 169 * expire, all effected nodes will receive a DEVICE_RM event.
174 */ 170 */
175/* 171/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3567de613162..71864cdc6c71 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2506,6 +2506,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2506 uint32_t *lp; 2506 uint32_t *lp;
2507 IOCB_t *icmd; 2507 IOCB_t *icmd;
2508 uint32_t payload_len, cmd; 2508 uint32_t payload_len, cmd;
2509 int i;
2509 2510
2510 icmd = &cmdiocb->iocb; 2511 icmd = &cmdiocb->iocb;
2511 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2512 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2524,6 +2525,10 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2524 phba->brd_no, 2525 phba->brd_no,
2525 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt); 2526 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
2526 2527
2528 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2529 fc_host_post_event(phba->host, fc_get_event_number(),
2530 FCH_EVT_RSCN, lp[i]);
2531
2527 /* If we are about to begin discovery, just ACC the RSCN. 2532 /* If we are about to begin discovery, just ACC the RSCN.
2528 * Discovery processing will satisfy it. 2533 * Discovery processing will satisfy it.
2529 */ 2534 */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index b2f1552f1848..d586c3d3b0d0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -56,28 +56,63 @@ static uint8_t lpfcAlpaArray[] = {
56 56
57static void lpfc_disc_timeout_handler(struct lpfc_hba *); 57static void lpfc_disc_timeout_handler(struct lpfc_hba *);
58 58
59static void 59void
60lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 60lpfc_terminate_rport_io(struct fc_rport *rport)
61{ 61{
62 uint8_t *name = (uint8_t *)&ndlp->nlp_portname; 62 struct lpfc_rport_data *rdata;
63 int warn_on = 0; 63 struct lpfc_nodelist * ndlp;
64 struct lpfc_hba *phba;
64 65
65 spin_lock_irq(phba->host->host_lock); 66 rdata = rport->dd_data;
66 if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) { 67 ndlp = rdata->pnode;
67 spin_unlock_irq(phba->host->host_lock); 68
69 if (!ndlp) {
70 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
71 printk(KERN_ERR "Cannot find remote node"
72 " to terminate I/O Data x%x\n",
73 rport->port_id);
68 return; 74 return;
69 } 75 }
70 76
71 /* 77 phba = ndlp->nlp_phba;
72 * If a discovery event readded nodev_timer after timer 78
73 * firing and before processing the timer, cancel the
74 * nlp_tmofunc.
75 */
76 spin_unlock_irq(phba->host->host_lock);
77 del_timer_sync(&ndlp->nlp_tmofunc);
78 spin_lock_irq(phba->host->host_lock); 79 spin_lock_irq(phba->host->host_lock);
80 if (ndlp->nlp_sid != NLP_NO_SID) {
81 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
82 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
83 }
84 spin_unlock_irq(phba->host->host_lock);
79 85
80 ndlp->nlp_flag &= ~NLP_NODEV_TMO; 86 return;
87}
88
89/*
90 * This function will be called when dev_loss_tmo fire.
91 */
92void
93lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
94{
95 struct lpfc_rport_data *rdata;
96 struct lpfc_nodelist * ndlp;
97 uint8_t *name;
98 int warn_on = 0;
99 struct lpfc_hba *phba;
100
101 rdata = rport->dd_data;
102 ndlp = rdata->pnode;
103
104 if (!ndlp) {
105 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
106 printk(KERN_ERR "Cannot find remote node"
107 " for rport in dev_loss_tmo_callbk x%x\n",
108 rport->port_id);
109 return;
110 }
111
112 name = (uint8_t *)&ndlp->nlp_portname;
113 phba = ndlp->nlp_phba;
114
115 spin_lock_irq(phba->host->host_lock);
81 116
82 if (ndlp->nlp_sid != NLP_NO_SID) { 117 if (ndlp->nlp_sid != NLP_NO_SID) {
83 warn_on = 1; 118 warn_on = 1;
@@ -85,11 +120,14 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
85 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 120 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
86 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 121 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
87 } 122 }
123 if (phba->fc_flag & FC_UNLOADING)
124 warn_on = 0;
125
88 spin_unlock_irq(phba->host->host_lock); 126 spin_unlock_irq(phba->host->host_lock);
89 127
90 if (warn_on) { 128 if (warn_on) {
91 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 129 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92 "%d:0203 Nodev timeout on " 130 "%d:0203 Devloss timeout on "
93 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 131 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
94 "NPort x%x Data: x%x x%x x%x\n", 132 "NPort x%x Data: x%x x%x x%x\n",
95 phba->brd_no, 133 phba->brd_no,
@@ -99,7 +137,7 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
99 ndlp->nlp_state, ndlp->nlp_rpi); 137 ndlp->nlp_state, ndlp->nlp_rpi);
100 } else { 138 } else {
101 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 139 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
102 "%d:0204 Nodev timeout on " 140 "%d:0204 Devloss timeout on "
103 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 141 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
104 "NPort x%x Data: x%x x%x x%x\n", 142 "NPort x%x Data: x%x x%x x%x\n",
105 phba->brd_no, 143 phba->brd_no,
@@ -109,7 +147,12 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
109 ndlp->nlp_state, ndlp->nlp_rpi); 147 ndlp->nlp_state, ndlp->nlp_rpi);
110 } 148 }
111 149
112 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); 150 ndlp->rport = NULL;
151 rdata->pnode = NULL;
152
153 if (!(phba->fc_flag & FC_UNLOADING))
154 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
155
113 return; 156 return;
114} 157}
115 158
@@ -127,11 +170,6 @@ lpfc_work_list_done(struct lpfc_hba * phba)
127 spin_unlock_irq(phba->host->host_lock); 170 spin_unlock_irq(phba->host->host_lock);
128 free_evt = 1; 171 free_evt = 1;
129 switch (evtp->evt) { 172 switch (evtp->evt) {
130 case LPFC_EVT_NODEV_TMO:
131 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
132 lpfc_process_nodev_timeout(phba, ndlp);
133 free_evt = 0;
134 break;
135 case LPFC_EVT_ELS_RETRY: 173 case LPFC_EVT_ELS_RETRY:
136 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 174 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
137 lpfc_els_retry_delay_handler(ndlp); 175 lpfc_els_retry_delay_handler(ndlp);
@@ -340,6 +378,9 @@ lpfc_linkdown(struct lpfc_hba * phba)
340 spin_unlock_irq(phba->host->host_lock); 378 spin_unlock_irq(phba->host->host_lock);
341 } 379 }
342 380
381 fc_host_post_event(phba->host, fc_get_event_number(),
382 FCH_EVT_LINKDOWN, 0);
383
343 /* Clean up any firmware default rpi's */ 384 /* Clean up any firmware default rpi's */
344 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 385 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
345 lpfc_unreg_did(phba, 0xffffffff, mb); 386 lpfc_unreg_did(phba, 0xffffffff, mb);
@@ -374,16 +415,6 @@ lpfc_linkdown(struct lpfc_hba * phba)
374 rc = lpfc_disc_state_machine(phba, ndlp, NULL, 415 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
375 NLP_EVT_DEVICE_RECOVERY); 416 NLP_EVT_DEVICE_RECOVERY);
376 417
377 /* Check config parameter use-adisc or FCP-2 */
378 if ((rc != NLP_STE_FREED_NODE) &&
379 (phba->cfg_use_adisc == 0) &&
380 !(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
381 /* We know we will have to relogin, so
382 * unreglogin the rpi right now to fail
383 * any outstanding I/Os quickly.
384 */
385 lpfc_unreg_rpi(phba, ndlp);
386 }
387 } 418 }
388 } 419 }
389 420
@@ -427,6 +458,9 @@ lpfc_linkup(struct lpfc_hba * phba)
427 struct list_head *listp, *node_list[7]; 458 struct list_head *listp, *node_list[7];
428 int i; 459 int i;
429 460
461 fc_host_post_event(phba->host, fc_get_event_number(),
462 FCH_EVT_LINKUP, 0);
463
430 spin_lock_irq(phba->host->host_lock); 464 spin_lock_irq(phba->host->host_lock);
431 phba->hba_state = LPFC_LINK_UP; 465 phba->hba_state = LPFC_LINK_UP;
432 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | 466 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
@@ -638,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
638 672
639 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, 673 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
640 sizeof (struct serv_parm)); 674 sizeof (struct serv_parm));
675 if (phba->cfg_soft_wwpn)
676 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
641 memcpy((uint8_t *) & phba->fc_nodename, 677 memcpy((uint8_t *) & phba->fc_nodename,
642 (uint8_t *) & phba->fc_sparam.nodeName, 678 (uint8_t *) & phba->fc_sparam.nodeName,
643 sizeof (struct lpfc_name)); 679 sizeof (struct lpfc_name));
@@ -1098,8 +1134,11 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
1098 struct fc_rport *rport = ndlp->rport; 1134 struct fc_rport *rport = ndlp->rport;
1099 struct lpfc_rport_data *rdata = rport->dd_data; 1135 struct lpfc_rport_data *rdata = rport->dd_data;
1100 1136
1101 ndlp->rport = NULL; 1137 if (rport->scsi_target_id == -1) {
1102 rdata->pnode = NULL; 1138 ndlp->rport = NULL;
1139 rdata->pnode = NULL;
1140 }
1141
1103 fc_remote_port_delete(rport); 1142 fc_remote_port_delete(rport);
1104 1143
1105 return; 1144 return;
@@ -1227,17 +1266,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1227 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list); 1266 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1228 phba->fc_unmap_cnt++; 1267 phba->fc_unmap_cnt++;
1229 phba->nport_event_cnt++; 1268 phba->nport_event_cnt++;
1230 /* stop nodev tmo if running */
1231 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1232 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1233 spin_unlock_irq(phba->host->host_lock);
1234 del_timer_sync(&nlp->nlp_tmofunc);
1235 spin_lock_irq(phba->host->host_lock);
1236 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1237 list_del_init(&nlp->nodev_timeout_evt.
1238 evt_listp);
1239
1240 }
1241 nlp->nlp_flag &= ~NLP_NODEV_REMOVE; 1269 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1242 nlp->nlp_type |= NLP_FC_NODE; 1270 nlp->nlp_type |= NLP_FC_NODE;
1243 break; 1271 break;
@@ -1248,17 +1276,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1248 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list); 1276 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1249 phba->fc_map_cnt++; 1277 phba->fc_map_cnt++;
1250 phba->nport_event_cnt++; 1278 phba->nport_event_cnt++;
1251 /* stop nodev tmo if running */
1252 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1253 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1254 spin_unlock_irq(phba->host->host_lock);
1255 del_timer_sync(&nlp->nlp_tmofunc);
1256 spin_lock_irq(phba->host->host_lock);
1257 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1258 list_del_init(&nlp->nodev_timeout_evt.
1259 evt_listp);
1260
1261 }
1262 nlp->nlp_flag &= ~NLP_NODEV_REMOVE; 1279 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1263 break; 1280 break;
1264 case NLP_NPR_LIST: 1281 case NLP_NPR_LIST:
@@ -1267,11 +1284,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1267 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list); 1284 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1268 phba->fc_npr_cnt++; 1285 phba->fc_npr_cnt++;
1269 1286
1270 if (!(nlp->nlp_flag & NLP_NODEV_TMO))
1271 mod_timer(&nlp->nlp_tmofunc,
1272 jiffies + HZ * phba->cfg_nodev_tmo);
1273
1274 nlp->nlp_flag |= NLP_NODEV_TMO;
1275 nlp->nlp_flag &= ~NLP_RCV_PLOGI; 1287 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1276 break; 1288 break;
1277 case NLP_JUST_DQ: 1289 case NLP_JUST_DQ:
@@ -1301,7 +1313,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1301 * already. If we have, and it's a scsi entity, be 1313 * already. If we have, and it's a scsi entity, be
1302 * sure to unblock any attached scsi devices 1314 * sure to unblock any attached scsi devices
1303 */ 1315 */
1304 if (!nlp->rport) 1316 if ((!nlp->rport) || (nlp->rport->port_state ==
1317 FC_PORTSTATE_BLOCKED))
1305 lpfc_register_remote_port(phba, nlp); 1318 lpfc_register_remote_port(phba, nlp);
1306 1319
1307 /* 1320 /*
@@ -1575,15 +1588,12 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1575 1588
1576 lpfc_els_abort(phba,ndlp,0); 1589 lpfc_els_abort(phba,ndlp,0);
1577 spin_lock_irq(phba->host->host_lock); 1590 spin_lock_irq(phba->host->host_lock);
1578 ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO); 1591 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1579 spin_unlock_irq(phba->host->host_lock); 1592 spin_unlock_irq(phba->host->host_lock);
1580 del_timer_sync(&ndlp->nlp_tmofunc);
1581 1593
1582 ndlp->nlp_last_elscmd = 0; 1594 ndlp->nlp_last_elscmd = 0;
1583 del_timer_sync(&ndlp->nlp_delayfunc); 1595 del_timer_sync(&ndlp->nlp_delayfunc);
1584 1596
1585 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1586 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1587 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 1597 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1588 list_del_init(&ndlp->els_retry_evt.evt_listp); 1598 list_del_init(&ndlp->els_retry_evt.evt_listp);
1589 1599
@@ -1600,16 +1610,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1600int 1610int
1601lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1611lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1602{ 1612{
1603 if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1604 spin_lock_irq(phba->host->host_lock);
1605 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1606 spin_unlock_irq(phba->host->host_lock);
1607 del_timer_sync(&ndlp->nlp_tmofunc);
1608 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1609 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1610
1611 }
1612
1613 1613
1614 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1614 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1615 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1615 lpfc_cancel_retry_delay_tmo(phba, ndlp);
@@ -2424,34 +2424,6 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2424 return; 2424 return;
2425} 2425}
2426 2426
2427static void
2428lpfc_nodev_timeout(unsigned long ptr)
2429{
2430 struct lpfc_hba *phba;
2431 struct lpfc_nodelist *ndlp;
2432 unsigned long iflag;
2433 struct lpfc_work_evt *evtp;
2434
2435 ndlp = (struct lpfc_nodelist *)ptr;
2436 phba = ndlp->nlp_phba;
2437 evtp = &ndlp->nodev_timeout_evt;
2438 spin_lock_irqsave(phba->host->host_lock, iflag);
2439
2440 if (!list_empty(&evtp->evt_listp)) {
2441 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2442 return;
2443 }
2444 evtp->evt_arg1 = ndlp;
2445 evtp->evt = LPFC_EVT_NODEV_TMO;
2446 list_add_tail(&evtp->evt_listp, &phba->work_list);
2447 if (phba->work_wait)
2448 wake_up(phba->work_wait);
2449
2450 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2451 return;
2452}
2453
2454
2455/* 2427/*
2456 * This routine handles processing a NameServer REG_LOGIN mailbox 2428 * This routine handles processing a NameServer REG_LOGIN mailbox
2457 * command upon completion. It is setup in the LPFC_MBOXQ 2429 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -2575,11 +2547,7 @@ lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2575 uint32_t did) 2547 uint32_t did)
2576{ 2548{
2577 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 2549 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2578 INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2579 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2550 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2580 init_timer(&ndlp->nlp_tmofunc);
2581 ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2582 ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2583 init_timer(&ndlp->nlp_delayfunc); 2551 init_timer(&ndlp->nlp_delayfunc);
2584 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 2552 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2585 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 2553 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f6948ffe689a..4cdf3464267f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
268 kfree(mp); 268 kfree(mp);
269 pmb->context1 = NULL; 269 pmb->context1 = NULL;
270 270
271 if (phba->cfg_soft_wwpn)
272 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
271 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 273 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
272 sizeof (struct lpfc_name)); 274 sizeof (struct lpfc_name));
273 memcpy(&phba->fc_portname, &phba->fc_sparam.portName, 275 memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
@@ -511,6 +513,7 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
511{ 513{
512 struct lpfc_sli *psli = &phba->sli; 514 struct lpfc_sli *psli = &phba->sli;
513 struct lpfc_sli_ring *pring; 515 struct lpfc_sli_ring *pring;
516 uint32_t event_data;
514 517
515 if (phba->work_hs & HS_FFER6) { 518 if (phba->work_hs & HS_FFER6) {
516 /* Re-establishing Link */ 519 /* Re-establishing Link */
@@ -555,6 +558,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
555 phba->brd_no, phba->work_hs, 558 phba->brd_no, phba->work_hs,
556 phba->work_status[0], phba->work_status[1]); 559 phba->work_status[0], phba->work_status[1]);
557 560
561 event_data = FC_REG_DUMP_EVENT;
562 fc_host_post_vendor_event(phba->host, fc_get_event_number(),
563 sizeof(event_data), (char *) &event_data,
564 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
565
558 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 566 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
559 lpfc_offline(phba); 567 lpfc_offline(phba);
560 phba->hba_state = LPFC_HBA_ERROR; 568 phba->hba_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 20449a8dd53d..d5f415007db2 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1813,7 +1813,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1813 */ 1813 */
1814/* 1814/*
1815 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped 1815 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1816 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers 1816 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
1817 * expire, all effected nodes will receive a DEVICE_RM event. 1817 * expire, all effected nodes will receive a DEVICE_RM event.
1818 */ 1818 */
1819/* 1819/*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a8816a8738f8..97ae98dc95d0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -935,7 +935,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
935 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ); 935 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
936 spin_lock_irq(phba->host->host_lock); 936 spin_lock_irq(phba->host->host_lock);
937 if (++loop_count 937 if (++loop_count
938 > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT) 938 > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
939 break; 939 break;
940 } 940 }
941 941
@@ -978,7 +978,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
978 spin_lock_irq(shost->host_lock); 978 spin_lock_irq(shost->host_lock);
979 /* 979 /*
980 * If target is not in a MAPPED state, delay the reset until 980 * If target is not in a MAPPED state, delay the reset until
981 * target is rediscovered or nodev timeout expires. 981 * target is rediscovered or devloss timeout expires.
982 */ 982 */
983 while ( 1 ) { 983 while ( 1 ) {
984 if (!pnode) 984 if (!pnode)
@@ -1050,7 +1050,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1050 spin_lock_irq(phba->host->host_lock); 1050 spin_lock_irq(phba->host->host_lock);
1051 1051
1052 if (++loopcnt 1052 if (++loopcnt
1053 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) 1053 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1054 break; 1054 break;
1055 1055
1056 cnt = lpfc_sli_sum_iocb(phba, 1056 cnt = lpfc_sli_sum_iocb(phba,
@@ -1151,7 +1151,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1151 spin_lock_irq(phba->host->host_lock); 1151 spin_lock_irq(phba->host->host_lock);
1152 1152
1153 if (++loopcnt 1153 if (++loopcnt
1154 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) 1154 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1155 break; 1155 break;
1156 1156
1157 cnt = lpfc_sli_sum_iocb(phba, 1157 cnt = lpfc_sli_sum_iocb(phba,
@@ -1249,7 +1249,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
1249 * target pointer is stored in the starget_data for the 1249 * target pointer is stored in the starget_data for the
1250 * driver's sysfs entry point functions. 1250 * driver's sysfs entry point functions.
1251 */ 1251 */
1252 rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5; 1252 rport->dev_loss_tmo = phba->cfg_devloss_tmo;
1253 1253
1254 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1254 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1255 lpfc_sli_poll_fcp_ring(phba); 1255 lpfc_sli_poll_fcp_ring(phba);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c7091ea29f3f..ac417908b407 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.9" 21#define LPFC_DRIVER_VERSION "8.1.10"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 76edbb639d37..b87bef69ba0f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2822,9 +2822,7 @@ mega_print_inquiry(char *page, char *scsi_inq)
2822 2822
2823 i = scsi_inq[0] & 0x1f; 2823 i = scsi_inq[0] & 0x1f;
2824 2824
2825 len += sprintf(page+len, " Type: %s ", 2825 len += sprintf(page+len, " Type: %s ", scsi_device_type(i));
2826 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
2827 "Unknown ");
2828 2826
2829 len += sprintf(page+len, 2827 len += sprintf(page+len,
2830 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 2828 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
@@ -3658,8 +3656,9 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3658 * Send the request sense data also, irrespective of 3656 * Send the request sense data also, irrespective of
3659 * whether the user has asked for it or not. 3657 * whether the user has asked for it or not.
3660 */ 3658 */
3661 copy_to_user(upthru->reqsensearea, 3659 if (copy_to_user(upthru->reqsensearea,
3662 pthru->reqsensearea, 14); 3660 pthru->reqsensearea, 14))
3661 rval = -EFAULT;
3663 3662
3664freemem_and_return: 3663freemem_and_return:
3665 if( pthru->dataxferlen ) { 3664 if( pthru->dataxferlen ) {
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index cd982c877da0..266b3910846b 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -330,6 +330,21 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
330 NULL, 330 NULL,
331}; 331};
332 332
333/**
334 * megaraid_change_queue_depth - Change the device's queue depth
335 * @sdev: scsi device struct
336 * @qdepth: depth to set
337 *
338 * Return value:
339 * actual depth set
340 **/
341static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
342{
343 if (qdepth > MBOX_MAX_SCSI_CMDS)
344 qdepth = MBOX_MAX_SCSI_CMDS;
345 scsi_adjust_queue_depth(sdev, 0, qdepth);
346 return sdev->queue_depth;
347}
333 348
334/* 349/*
335 * Scsi host template for megaraid unified driver 350 * Scsi host template for megaraid unified driver
@@ -343,6 +358,7 @@ static struct scsi_host_template megaraid_template_g = {
343 .eh_device_reset_handler = megaraid_reset_handler, 358 .eh_device_reset_handler = megaraid_reset_handler,
344 .eh_bus_reset_handler = megaraid_reset_handler, 359 .eh_bus_reset_handler = megaraid_reset_handler,
345 .eh_host_reset_handler = megaraid_reset_handler, 360 .eh_host_reset_handler = megaraid_reset_handler,
361 .change_queue_depth = megaraid_change_queue_depth,
346 .use_clustering = ENABLE_CLUSTERING, 362 .use_clustering = ENABLE_CLUSTERING,
347 .sdev_attrs = megaraid_sdev_attrs, 363 .sdev_attrs = megaraid_sdev_attrs,
348 .shost_attrs = megaraid_shost_attrs, 364 .shost_attrs = megaraid_shost_attrs,
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a8c9627a15c4..4cab5b534b25 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -53,31 +53,15 @@ MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
53 */ 53 */
54static struct pci_device_id megasas_pci_table[] = { 54static struct pci_device_id megasas_pci_table[] = {
55 55
56 { 56 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
57 PCI_VENDOR_ID_LSI_LOGIC, 57 /* xscale IOP */
58 PCI_DEVICE_ID_LSI_SAS1064R, /* xscale IOP */ 58 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
59 PCI_ANY_ID, 59 /* ppc IOP */
60 PCI_ANY_ID, 60 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
61 }, 61 /* xscale IOP, vega */
62 { 62 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
63 PCI_VENDOR_ID_LSI_LOGIC, 63 /* xscale IOP */
64 PCI_DEVICE_ID_LSI_SAS1078R, /* ppc IOP */ 64 {}
65 PCI_ANY_ID,
66 PCI_ANY_ID,
67 },
68 {
69 PCI_VENDOR_ID_LSI_LOGIC,
70 PCI_DEVICE_ID_LSI_VERDE_ZCR, /* xscale IOP, vega */
71 PCI_ANY_ID,
72 PCI_ANY_ID,
73 },
74 {
75 PCI_VENDOR_ID_DELL,
76 PCI_DEVICE_ID_DELL_PERC5, /* xscale IOP */
77 PCI_ANY_ID,
78 PCI_ANY_ID,
79 },
80 {0} /* Terminating entry */
81}; 65};
82 66
83MODULE_DEVICE_TABLE(pci, megasas_pci_table); 67MODULE_DEVICE_TABLE(pci, megasas_pci_table);
@@ -2854,7 +2838,7 @@ static int __init megasas_init(void)
2854 /* 2838 /*
2855 * Register ourselves as PCI hotplug module 2839 * Register ourselves as PCI hotplug module
2856 */ 2840 */
2857 rval = pci_module_init(&megasas_pci_driver); 2841 rval = pci_register_driver(&megasas_pci_driver);
2858 2842
2859 if (rval) { 2843 if (rval) {
2860 printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); 2844 printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n");
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index cb367c2c5c78..9b991b746d1e 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -29,7 +29,7 @@ static irqreturn_t mvme147_intr (int irq, void *dummy, struct pt_regs *fp)
29 return IRQ_HANDLED; 29 return IRQ_HANDLED;
30} 30}
31 31
32static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 32static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
33{ 33{
34 unsigned char flags = 0x01; 34 unsigned char flags = 0x01;
35 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 35 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -57,7 +57,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
57 return 0; 57 return 0;
58} 58}
59 59
60static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 60static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
61 int status) 61 int status)
62{ 62{
63 m147_pcc->dma_cntrl = 0; 63 m147_pcc->dma_cntrl = 0;
@@ -112,7 +112,7 @@ int mvme147_detect(struct scsi_host_template *tpnt)
112 return 0; 112 return 0;
113} 113}
114 114
115static int mvme147_bus_reset(Scsi_Cmnd *cmd) 115static int mvme147_bus_reset(struct scsi_cmnd *cmd)
116{ 116{
117 /* FIXME perform bus-specific reset */ 117 /* FIXME perform bus-specific reset */
118 118
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index 2f56d69bd180..32aee85434d8 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -12,10 +12,6 @@
12 12
13int mvme147_detect(struct scsi_host_template *); 13int mvme147_detect(struct scsi_host_template *);
14int mvme147_release(struct Scsi_Host *); 14int mvme147_release(struct Scsi_Host *);
15const char *wd33c93_info(void);
16int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
17int wd33c93_abort(Scsi_Cmnd *);
18int wd33c93_reset(Scsi_Cmnd *, unsigned int);
19 15
20#ifndef CMD_PER_LUN 16#ifndef CMD_PER_LUN
21#define CMD_PER_LUN 2 17#define CMD_PER_LUN 2
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b332caddd5b3..c51b5769eac8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -96,24 +96,40 @@ unsigned int scsi_logging_level;
96EXPORT_SYMBOL(scsi_logging_level); 96EXPORT_SYMBOL(scsi_logging_level);
97#endif 97#endif
98 98
99const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = { 99static const char *const scsi_device_types[] = {
100 "Direct-Access ", 100 "Direct access ",
101 "Sequential-Access", 101 "Sequential access",
102 "Printer ", 102 "Printer ",
103 "Processor ", 103 "Processor ",
104 "WORM ", 104 "WORM ",
105 "CD-ROM ", 105 "CD/DVD ",
106 "Scanner ", 106 "Scanner ",
107 "Optical Device ", 107 "Optical memory ",
108 "Medium Changer ", 108 "Media changer ",
109 "Communications ", 109 "Communications ",
110 "Unknown ", 110 "ASC IT8 ",
111 "Unknown ", 111 "ASC IT8 ",
112 "RAID ", 112 "RAID ",
113 "Enclosure ", 113 "Enclosure ",
114 "Direct-Access-RBC", 114 "Direct access RBC",
115 "Optical card ",
116 "Bridge controller",
117 "Object storage ",
118 "Automation/Drive ",
115}; 119};
116EXPORT_SYMBOL(scsi_device_types); 120
121const char * scsi_device_type(unsigned type)
122{
123 if (type == 0x1e)
124 return "Well-known LUN ";
125 if (type == 0x1f)
126 return "No Device ";
127 if (type > ARRAY_SIZE(scsi_device_types))
128 return "Unknown ";
129 return scsi_device_types[type];
130}
131
132EXPORT_SYMBOL(scsi_device_type);
117 133
118struct scsi_host_cmd_pool { 134struct scsi_host_cmd_pool {
119 kmem_cache_t *slab; 135 kmem_cache_t *slab;
@@ -835,14 +851,14 @@ EXPORT_SYMBOL(scsi_track_queue_full);
835 */ 851 */
836int scsi_device_get(struct scsi_device *sdev) 852int scsi_device_get(struct scsi_device *sdev)
837{ 853{
838 if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) 854 if (sdev->sdev_state == SDEV_DEL)
839 return -ENXIO; 855 return -ENXIO;
840 if (!get_device(&sdev->sdev_gendev)) 856 if (!get_device(&sdev->sdev_gendev))
841 return -ENXIO; 857 return -ENXIO;
842 if (!try_module_get(sdev->host->hostt->module)) { 858 /* We can fail this if we're doing SCSI operations
843 put_device(&sdev->sdev_gendev); 859 * from module exit (like cache flush) */
844 return -ENXIO; 860 try_module_get(sdev->host->hostt->module);
845 } 861
846 return 0; 862 return 0;
847} 863}
848EXPORT_SYMBOL(scsi_device_get); 864EXPORT_SYMBOL(scsi_device_get);
@@ -857,7 +873,14 @@ EXPORT_SYMBOL(scsi_device_get);
857 */ 873 */
858void scsi_device_put(struct scsi_device *sdev) 874void scsi_device_put(struct scsi_device *sdev)
859{ 875{
860 module_put(sdev->host->hostt->module); 876 struct module *module = sdev->host->hostt->module;
877
878#ifdef CONFIG_MODULE_UNLOAD
879 /* The module refcount will be zero if scsi_device_get()
880 * was called from a module removal routine */
881 if (module && module_refcount(module) != 0)
882 module_put(module);
883#endif
861 put_device(&sdev->sdev_gendev); 884 put_device(&sdev->sdev_gendev);
862} 885}
863EXPORT_SYMBOL(scsi_device_put); 886EXPORT_SYMBOL(scsi_device_put);
@@ -1099,6 +1122,8 @@ static int __init init_scsi(void)
1099 for_each_possible_cpu(i) 1122 for_each_possible_cpu(i)
1100 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); 1123 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
1101 1124
1125 scsi_netlink_init();
1126
1102 printk(KERN_NOTICE "SCSI subsystem initialized\n"); 1127 printk(KERN_NOTICE "SCSI subsystem initialized\n");
1103 return 0; 1128 return 0;
1104 1129
@@ -1119,6 +1144,7 @@ cleanup_queue:
1119 1144
1120static void __exit exit_scsi(void) 1145static void __exit exit_scsi(void)
1121{ 1146{
1147 scsi_netlink_exit();
1122 scsi_sysfs_unregister(); 1148 scsi_sysfs_unregister();
1123 scsi_exit_sysctl(); 1149 scsi_exit_sysctl();
1124 scsi_exit_hosts(); 1150 scsi_exit_hosts();
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index f51e466893e7..d5a55fae60e0 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -20,8 +20,6 @@
20#ifndef _SCSI_H 20#ifndef _SCSI_H
21#define _SCSI_H 21#define _SCSI_H
22 22
23#include <linux/config.h> /* for CONFIG_SCSI_LOGGING */
24
25#include <scsi/scsi_cmnd.h> 23#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_device.h> 24#include <scsi/scsi_device.h>
27#include <scsi/scsi_eh.h> 25#include <scsi/scsi_eh.h>
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a80303c6b3fd..9c0f35820e3e 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * linux/kernel/scsi_debug.c
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale 3 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking 4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
@@ -8,7 +7,9 @@
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * 8 *
10 * This version is more generic, simulating a variable number of disk 9 * This version is more generic, simulating a variable number of disk
11 * (or disk like devices) sharing a common amount of RAM 10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
12 * 13 *
13 * 14 *
14 * For documentation see http://www.torque.net/sg/sdebug26.html 15 * For documentation see http://www.torque.net/sg/sdebug26.html
@@ -50,8 +51,8 @@
50#include "scsi_logging.h" 51#include "scsi_logging.h"
51#include "scsi_debug.h" 52#include "scsi_debug.h"
52 53
53#define SCSI_DEBUG_VERSION "1.79" 54#define SCSI_DEBUG_VERSION "1.80"
54static const char * scsi_debug_version_date = "20060604"; 55static const char * scsi_debug_version_date = "20060914";
55 56
56/* Additional Sense Code (ASC) used */ 57/* Additional Sense Code (ASC) used */
57#define NO_ADDITIONAL_SENSE 0x0 58#define NO_ADDITIONAL_SENSE 0x0
@@ -86,6 +87,8 @@ static const char * scsi_debug_version_date = "20060604";
86#define DEF_D_SENSE 0 87#define DEF_D_SENSE 0
87#define DEF_NO_LUN_0 0 88#define DEF_NO_LUN_0 0
88#define DEF_VIRTUAL_GB 0 89#define DEF_VIRTUAL_GB 0
90#define DEF_FAKE_RW 0
91#define DEF_VPD_USE_HOSTNO 1
89 92
90/* bit mask values for scsi_debug_opts */ 93/* bit mask values for scsi_debug_opts */
91#define SCSI_DEBUG_OPT_NOISE 1 94#define SCSI_DEBUG_OPT_NOISE 1
@@ -127,6 +130,8 @@ static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
127static int scsi_debug_dsense = DEF_D_SENSE; 130static int scsi_debug_dsense = DEF_D_SENSE;
128static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; 131static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
129static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; 132static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
133static int scsi_debug_fake_rw = DEF_FAKE_RW;
134static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
130 135
131static int scsi_debug_cmnd_count = 0; 136static int scsi_debug_cmnd_count = 0;
132 137
@@ -423,6 +428,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
423 case READ_6: 428 case READ_6:
424 if ((errsts = check_readiness(SCpnt, 0, devip))) 429 if ((errsts = check_readiness(SCpnt, 0, devip)))
425 break; 430 break;
431 if (scsi_debug_fake_rw)
432 break;
426 if ((*cmd) == READ_16) { 433 if ((*cmd) == READ_16) {
427 for (lba = 0, j = 0; j < 8; ++j) { 434 for (lba = 0, j = 0; j < 8; ++j) {
428 if (j > 0) 435 if (j > 0)
@@ -465,6 +472,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
465 case WRITE_6: 472 case WRITE_6:
466 if ((errsts = check_readiness(SCpnt, 0, devip))) 473 if ((errsts = check_readiness(SCpnt, 0, devip)))
467 break; 474 break;
475 if (scsi_debug_fake_rw)
476 break;
468 if ((*cmd) == WRITE_16) { 477 if ((*cmd) == WRITE_16) {
469 for (lba = 0, j = 0; j < 8; ++j) { 478 for (lba = 0, j = 0; j < 8; ++j) {
470 if (j > 0) 479 if (j > 0)
@@ -941,6 +950,8 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
941 char lu_id_str[6]; 950 char lu_id_str[6];
942 int host_no = devip->sdbg_host->shost->host_no; 951 int host_no = devip->sdbg_host->shost->host_no;
943 952
953 if (0 == scsi_debug_vpd_use_hostno)
954 host_no = 0;
944 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) + 955 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
945 (devip->target * 1000) + devip->lun); 956 (devip->target * 1000) + devip->lun);
946 target_dev_id = ((host_no + 1) * 2000) + 957 target_dev_id = ((host_no + 1) * 2000) +
@@ -1059,19 +1070,6 @@ static int resp_requests(struct scsi_cmnd * scp,
1059 arr[12] = THRESHOLD_EXCEEDED; 1070 arr[12] = THRESHOLD_EXCEEDED;
1060 arr[13] = 0xff; /* TEST set and MRIE==6 */ 1071 arr[13] = 0xff; /* TEST set and MRIE==6 */
1061 } 1072 }
1062 } else if (devip->stopped) {
1063 if (want_dsense) {
1064 arr[0] = 0x72;
1065 arr[1] = 0x0; /* NO_SENSE in sense_key */
1066 arr[2] = LOW_POWER_COND_ON;
1067 arr[3] = 0x0; /* TEST set and MRIE==6 */
1068 } else {
1069 arr[0] = 0x70;
1070 arr[2] = 0x0; /* NO_SENSE in sense_key */
1071 arr[7] = 0xa; /* 18 byte sense buffer */
1072 arr[12] = LOW_POWER_COND_ON;
1073 arr[13] = 0x0; /* TEST set and MRIE==6 */
1074 }
1075 } else { 1073 } else {
1076 memcpy(arr, sbuff, SDEBUG_SENSE_LEN); 1074 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
1077 if ((cmd[1] & 1) && (! scsi_debug_dsense)) { 1075 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
@@ -1325,21 +1323,26 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1325static int resp_mode_sense(struct scsi_cmnd * scp, int target, 1323static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1326 struct sdebug_dev_info * devip) 1324 struct sdebug_dev_info * devip)
1327{ 1325{
1328 unsigned char dbd; 1326 unsigned char dbd, llbaa;
1329 int pcontrol, pcode, subpcode; 1327 int pcontrol, pcode, subpcode, bd_len;
1330 unsigned char dev_spec; 1328 unsigned char dev_spec;
1331 int alloc_len, msense_6, offset, len, errsts, target_dev_id; 1329 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1332 unsigned char * ap; 1330 unsigned char * ap;
1333 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1331 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1334 unsigned char *cmd = (unsigned char *)scp->cmnd; 1332 unsigned char *cmd = (unsigned char *)scp->cmnd;
1335 1333
1336 if ((errsts = check_readiness(scp, 1, devip))) 1334 if ((errsts = check_readiness(scp, 1, devip)))
1337 return errsts; 1335 return errsts;
1338 dbd = cmd[1] & 0x8; 1336 dbd = !!(cmd[1] & 0x8);
1339 pcontrol = (cmd[2] & 0xc0) >> 6; 1337 pcontrol = (cmd[2] & 0xc0) >> 6;
1340 pcode = cmd[2] & 0x3f; 1338 pcode = cmd[2] & 0x3f;
1341 subpcode = cmd[3]; 1339 subpcode = cmd[3];
1342 msense_6 = (MODE_SENSE == cmd[0]); 1340 msense_6 = (MODE_SENSE == cmd[0]);
1341 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1342 if ((0 == scsi_debug_ptype) && (0 == dbd))
1343 bd_len = llbaa ? 16 : 8;
1344 else
1345 bd_len = 0;
1343 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); 1346 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1344 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); 1347 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1345 if (0x3 == pcontrol) { /* Saving values not supported */ 1348 if (0x3 == pcontrol) { /* Saving values not supported */
@@ -1349,15 +1352,58 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1349 } 1352 }
1350 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + 1353 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1351 (devip->target * 1000) - 3; 1354 (devip->target * 1000) - 3;
1352 dev_spec = DEV_READONLY(target) ? 0x80 : 0x0; 1355 /* set DPOFUA bit for disks */
1356 if (0 == scsi_debug_ptype)
1357 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1358 else
1359 dev_spec = 0x0;
1353 if (msense_6) { 1360 if (msense_6) {
1354 arr[2] = dev_spec; 1361 arr[2] = dev_spec;
1362 arr[3] = bd_len;
1355 offset = 4; 1363 offset = 4;
1356 } else { 1364 } else {
1357 arr[3] = dev_spec; 1365 arr[3] = dev_spec;
1366 if (16 == bd_len)
1367 arr[4] = 0x1; /* set LONGLBA bit */
1368 arr[7] = bd_len; /* assume 255 or less */
1358 offset = 8; 1369 offset = 8;
1359 } 1370 }
1360 ap = arr + offset; 1371 ap = arr + offset;
1372 if ((bd_len > 0) && (0 == sdebug_capacity)) {
1373 if (scsi_debug_virtual_gb > 0) {
1374 sdebug_capacity = 2048 * 1024;
1375 sdebug_capacity *= scsi_debug_virtual_gb;
1376 } else
1377 sdebug_capacity = sdebug_store_sectors;
1378 }
1379 if (8 == bd_len) {
1380 if (sdebug_capacity > 0xfffffffe) {
1381 ap[0] = 0xff;
1382 ap[1] = 0xff;
1383 ap[2] = 0xff;
1384 ap[3] = 0xff;
1385 } else {
1386 ap[0] = (sdebug_capacity >> 24) & 0xff;
1387 ap[1] = (sdebug_capacity >> 16) & 0xff;
1388 ap[2] = (sdebug_capacity >> 8) & 0xff;
1389 ap[3] = sdebug_capacity & 0xff;
1390 }
1391 ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1392 ap[7] = SECT_SIZE_PER(target) & 0xff;
1393 offset += bd_len;
1394 ap = arr + offset;
1395 } else if (16 == bd_len) {
1396 unsigned long long capac = sdebug_capacity;
1397
1398 for (k = 0; k < 8; ++k, capac >>= 8)
1399 ap[7 - k] = capac & 0xff;
1400 ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff;
1401 ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff;
1402 ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1403 ap[15] = SECT_SIZE_PER(target) & 0xff;
1404 offset += bd_len;
1405 ap = arr + offset;
1406 }
1361 1407
1362 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { 1408 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1363 /* TODO: Control Extension page */ 1409 /* TODO: Control Extension page */
@@ -1471,7 +1517,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1471 " IO sent=%d bytes\n", param_len, res); 1517 " IO sent=%d bytes\n", param_len, res);
1472 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); 1518 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1473 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); 1519 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1474 if ((md_len > 2) || (0 != bd_len)) { 1520 if (md_len > 2) {
1475 mk_sense_buffer(devip, ILLEGAL_REQUEST, 1521 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1476 INVALID_FIELD_IN_PARAM_LIST, 0); 1522 INVALID_FIELD_IN_PARAM_LIST, 0);
1477 return check_condition_result; 1523 return check_condition_result;
@@ -1544,7 +1590,7 @@ static int resp_ie_l_pg(unsigned char * arr)
1544static int resp_log_sense(struct scsi_cmnd * scp, 1590static int resp_log_sense(struct scsi_cmnd * scp,
1545 struct sdebug_dev_info * devip) 1591 struct sdebug_dev_info * devip)
1546{ 1592{
1547 int ppc, sp, pcontrol, pcode, alloc_len, errsts, len, n; 1593 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1548 unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; 1594 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1549 unsigned char *cmd = (unsigned char *)scp->cmnd; 1595 unsigned char *cmd = (unsigned char *)scp->cmnd;
1550 1596
@@ -1560,23 +1606,63 @@ static int resp_log_sense(struct scsi_cmnd * scp,
1560 } 1606 }
1561 pcontrol = (cmd[2] & 0xc0) >> 6; 1607 pcontrol = (cmd[2] & 0xc0) >> 6;
1562 pcode = cmd[2] & 0x3f; 1608 pcode = cmd[2] & 0x3f;
1609 subpcode = cmd[3] & 0xff;
1563 alloc_len = (cmd[7] << 8) + cmd[8]; 1610 alloc_len = (cmd[7] << 8) + cmd[8];
1564 arr[0] = pcode; 1611 arr[0] = pcode;
1565 switch (pcode) { 1612 if (0 == subpcode) {
1566 case 0x0: /* Supported log pages log page */ 1613 switch (pcode) {
1567 n = 4; 1614 case 0x0: /* Supported log pages log page */
1568 arr[n++] = 0x0; /* this page */ 1615 n = 4;
1569 arr[n++] = 0xd; /* Temperature */ 1616 arr[n++] = 0x0; /* this page */
1570 arr[n++] = 0x2f; /* Informational exceptions */ 1617 arr[n++] = 0xd; /* Temperature */
1571 arr[3] = n - 4; 1618 arr[n++] = 0x2f; /* Informational exceptions */
1572 break; 1619 arr[3] = n - 4;
1573 case 0xd: /* Temperature log page */ 1620 break;
1574 arr[3] = resp_temp_l_pg(arr + 4); 1621 case 0xd: /* Temperature log page */
1575 break; 1622 arr[3] = resp_temp_l_pg(arr + 4);
1576 case 0x2f: /* Informational exceptions log page */ 1623 break;
1577 arr[3] = resp_ie_l_pg(arr + 4); 1624 case 0x2f: /* Informational exceptions log page */
1578 break; 1625 arr[3] = resp_ie_l_pg(arr + 4);
1579 default: 1626 break;
1627 default:
1628 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1629 INVALID_FIELD_IN_CDB, 0);
1630 return check_condition_result;
1631 }
1632 } else if (0xff == subpcode) {
1633 arr[0] |= 0x40;
1634 arr[1] = subpcode;
1635 switch (pcode) {
1636 case 0x0: /* Supported log pages and subpages log page */
1637 n = 4;
1638 arr[n++] = 0x0;
1639 arr[n++] = 0x0; /* 0,0 page */
1640 arr[n++] = 0x0;
1641 arr[n++] = 0xff; /* this page */
1642 arr[n++] = 0xd;
1643 arr[n++] = 0x0; /* Temperature */
1644 arr[n++] = 0x2f;
1645 arr[n++] = 0x0; /* Informational exceptions */
1646 arr[3] = n - 4;
1647 break;
1648 case 0xd: /* Temperature subpages */
1649 n = 4;
1650 arr[n++] = 0xd;
1651 arr[n++] = 0x0; /* Temperature */
1652 arr[3] = n - 4;
1653 break;
1654 case 0x2f: /* Informational exceptions subpages */
1655 n = 4;
1656 arr[n++] = 0x2f;
1657 arr[n++] = 0x0; /* Informational exceptions */
1658 arr[3] = n - 4;
1659 break;
1660 default:
1661 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1662 INVALID_FIELD_IN_CDB, 0);
1663 return check_condition_result;
1664 }
1665 } else {
1580 mk_sense_buffer(devip, ILLEGAL_REQUEST, 1666 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1581 INVALID_FIELD_IN_CDB, 0); 1667 INVALID_FIELD_IN_CDB, 0);
1582 return check_condition_result; 1668 return check_condition_result;
@@ -2151,11 +2237,18 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
2151 } 2237 }
2152} 2238}
2153 2239
2240/* Note: The following macros create attribute files in the
2241 /sys/module/scsi_debug/parameters directory. Unfortunately this
2242 driver is unaware of a change and cannot trigger auxiliary actions
2243 as it can when the corresponding attribute in the
2244 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2245 */
2154module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); 2246module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2155module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); 2247module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2156module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); 2248module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2157module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); 2249module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2158module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); 2250module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2251module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2159module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); 2252module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2160module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); 2253module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2161module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); 2254module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
@@ -2164,6 +2257,8 @@ module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2164module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); 2257module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2165module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); 2258module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2166module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); 2259module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2260module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2261 S_IRUGO | S_IWUSR);
2167 2262
2168MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2263MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2169MODULE_DESCRIPTION("SCSI debug adapter driver"); 2264MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2175,6 +2270,7 @@ MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2175MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); 2270MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2176MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); 2271MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2177MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)"); 2272MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)");
2273MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2178MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 2274MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2179MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); 2275MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2180MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); 2276MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
@@ -2183,6 +2279,7 @@ MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->... (def=0)");
2183MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 2279MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2184MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2280MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2185MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2281MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2282MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2186 2283
2187 2284
2188static char sdebug_info[256]; 2285static char sdebug_info[256];
@@ -2334,6 +2431,24 @@ static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2334DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show, 2431DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2335 sdebug_dsense_store); 2432 sdebug_dsense_store);
2336 2433
2434static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2435{
2436 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2437}
2438static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2439 const char * buf, size_t count)
2440{
2441 int n;
2442
2443 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2444 scsi_debug_fake_rw = n;
2445 return count;
2446 }
2447 return -EINVAL;
2448}
2449DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2450 sdebug_fake_rw_store);
2451
2337static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf) 2452static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2338{ 2453{
2339 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0); 2454 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
@@ -2487,6 +2602,31 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2602DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
2488 sdebug_add_host_store); 2603 sdebug_add_host_store);
2489 2604
2605static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
2606 char * buf)
2607{
2608 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
2609}
2610static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
2611 const char * buf, size_t count)
2612{
2613 int n;
2614
2615 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2616 scsi_debug_vpd_use_hostno = n;
2617 return count;
2618 }
2619 return -EINVAL;
2620}
2621DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
2622 sdebug_vpd_use_hostno_store);
2623
2624/* Note: The following function creates attribute files in the
2625 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
2626 files (over those found in the /sys/module/scsi_debug/parameters
2627 directory) is that auxiliary actions can be triggered when an attribute
2628 is changed. For example see: sdebug_add_host_store() above.
2629 */
2490static int do_create_driverfs_files(void) 2630static int do_create_driverfs_files(void)
2491{ 2631{
2492 int ret; 2632 int ret;
@@ -2496,23 +2636,31 @@ static int do_create_driverfs_files(void)
2496 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2636 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
2497 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2637 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2498 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2638 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2639 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
2499 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2640 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2500 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2641 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
2501 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2642 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2643 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2502 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2644 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2503 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2645 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
2504 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2646 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2647 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2648 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2505 return ret; 2649 return ret;
2506} 2650}
2507 2651
2508static void do_remove_driverfs_files(void) 2652static void do_remove_driverfs_files(void)
2509{ 2653{
2654 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2655 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2510 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2656 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2511 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts); 2657 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
2512 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2658 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2513 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2514 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2659 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2660 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2661 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
2515 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2662 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2663 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
2516 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2664 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2517 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2665 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2518 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2666 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 077c1c691210..d6743b959a72 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -551,7 +551,15 @@ static void scsi_run_queue(struct request_queue *q)
551 list_del_init(&sdev->starved_entry); 551 list_del_init(&sdev->starved_entry);
552 spin_unlock_irqrestore(shost->host_lock, flags); 552 spin_unlock_irqrestore(shost->host_lock, flags);
553 553
554 blk_run_queue(sdev->request_queue); 554
555 if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
556 !test_and_set_bit(QUEUE_FLAG_REENTER,
557 &sdev->request_queue->queue_flags)) {
558 blk_run_queue(sdev->request_queue);
559 clear_bit(QUEUE_FLAG_REENTER,
560 &sdev->request_queue->queue_flags);
561 } else
562 blk_run_queue(sdev->request_queue);
555 563
556 spin_lock_irqsave(shost->host_lock, flags); 564 spin_lock_irqsave(shost->host_lock, flags);
557 if (unlikely(!list_empty(&sdev->starved_entry))) 565 if (unlikely(!list_empty(&sdev->starved_entry)))
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
new file mode 100644
index 000000000000..1b59b27e887f
--- /dev/null
+++ b/drivers/scsi/scsi_netlink.c
@@ -0,0 +1,199 @@
1/*
2 * scsi_netlink.c - SCSI Transport Netlink Interface
3 *
4 * Copyright (C) 2006 James Smart, Emulex Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#include <linux/time.h>
22#include <linux/jiffies.h>
23#include <linux/security.h>
24#include <net/sock.h>
25#include <net/netlink.h>
26
27#include <scsi/scsi_netlink.h>
28#include "scsi_priv.h"
29
30struct sock *scsi_nl_sock = NULL;
31EXPORT_SYMBOL_GPL(scsi_nl_sock);
32
33
34/**
35 * scsi_nl_rcv_msg -
36 * Receive message handler. Extracts message from a receive buffer.
37 * Validates message header and calls appropriate transport message handler
38 *
39 * @skb: socket receive buffer
40 *
41 **/
42static void
43scsi_nl_rcv_msg(struct sk_buff *skb)
44{
45 struct nlmsghdr *nlh;
46 struct scsi_nl_hdr *hdr;
47 uint32_t rlen;
48 int err;
49
50 while (skb->len >= NLMSG_SPACE(0)) {
51 err = 0;
52
53 nlh = (struct nlmsghdr *) skb->data;
54 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
55 (skb->len < nlh->nlmsg_len)) {
56 printk(KERN_WARNING "%s: discarding partial skb\n",
57 __FUNCTION__);
58 return;
59 }
60
61 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
62 if (rlen > skb->len)
63 rlen = skb->len;
64
65 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
66 err = -EBADMSG;
67 goto next_msg;
68 }
69
70 hdr = NLMSG_DATA(nlh);
71 if ((hdr->version != SCSI_NL_VERSION) ||
72 (hdr->magic != SCSI_NL_MAGIC)) {
73 err = -EPROTOTYPE;
74 goto next_msg;
75 }
76
77 if (security_netlink_recv(skb, CAP_SYS_ADMIN)) {
78 err = -EPERM;
79 goto next_msg;
80 }
81
82 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
83 printk(KERN_WARNING "%s: discarding partial message\n",
84 __FUNCTION__);
85 return;
86 }
87
88 /*
89 * We currently don't support anyone sending us a message
90 */
91
92next_msg:
93 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
94 netlink_ack(skb, nlh, err);
95
96 skb_pull(skb, rlen);
97 }
98}
99
100
101/**
102 * scsi_nl_rcv_msg -
103 * Receive handler for a socket. Extracts a received message buffer from
104 * the socket, and starts message processing.
105 *
106 * @sk: socket
107 * @len: unused
108 *
109 **/
110static void
111scsi_nl_rcv(struct sock *sk, int len)
112{
113 struct sk_buff *skb;
114
115 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
116 scsi_nl_rcv_msg(skb);
117 kfree_skb(skb);
118 }
119}
120
121
122/**
123 * scsi_nl_rcv_event -
124 * Event handler for a netlink socket.
125 *
126 * @this: event notifier block
127 * @event: event type
128 * @ptr: event payload
129 *
130 **/
131static int
132scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
133{
134 struct netlink_notify *n = ptr;
135
136 if (n->protocol != NETLINK_SCSITRANSPORT)
137 return NOTIFY_DONE;
138
139 /*
140 * Currently, we are not tracking PID's, etc. There is nothing
141 * to handle.
142 */
143
144 return NOTIFY_DONE;
145}
146
147static struct notifier_block scsi_netlink_notifier = {
148 .notifier_call = scsi_nl_rcv_event,
149};
150
151
152/**
153 * scsi_netlink_init -
154 * Called by SCSI subsystem to intialize the SCSI transport netlink
155 * interface
156 *
157 **/
158void
159scsi_netlink_init(void)
160{
161 int error;
162
163 error = netlink_register_notifier(&scsi_netlink_notifier);
164 if (error) {
165 printk(KERN_ERR "%s: register of event handler failed - %d\n",
166 __FUNCTION__, error);
167 return;
168 }
169
170 scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT,
171 SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE);
172 if (!scsi_nl_sock) {
173 printk(KERN_ERR "%s: register of recieve handler failed\n",
174 __FUNCTION__);
175 netlink_unregister_notifier(&scsi_netlink_notifier);
176 }
177
178 return;
179}
180
181
182/**
183 * scsi_netlink_exit -
184 * Called by SCSI subsystem to disable the SCSI transport netlink
185 * interface
186 *
187 **/
188void
189scsi_netlink_exit(void)
190{
191 if (scsi_nl_sock) {
192 sock_release(scsi_nl_sock->sk_socket);
193 netlink_unregister_notifier(&scsi_netlink_notifier);
194 }
195
196 return;
197}
198
199
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index ae24c85aaeea..5d023d44e5e7 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -8,6 +8,7 @@ struct scsi_cmnd;
8struct scsi_device; 8struct scsi_device;
9struct scsi_host_template; 9struct scsi_host_template;
10struct Scsi_Host; 10struct Scsi_Host;
11struct scsi_nl_hdr;
11 12
12 13
13/* 14/*
@@ -110,6 +111,16 @@ extern void __scsi_remove_device(struct scsi_device *);
110 111
111extern struct bus_type scsi_bus_type; 112extern struct bus_type scsi_bus_type;
112 113
114/* scsi_netlink.c */
115#ifdef CONFIG_SCSI_NETLINK
116extern void scsi_netlink_init(void);
117extern void scsi_netlink_exit(void);
118extern struct sock *scsi_nl_sock;
119#else
120static inline void scsi_netlink_init(void) {}
121static inline void scsi_netlink_exit(void) {}
122#endif
123
113/* 124/*
114 * internal scsi timeout functions: for use by mid-layer and transport 125 * internal scsi timeout functions: for use by mid-layer and transport
115 * classes. 126 * classes.
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 55200e4fdf11..524a5f7a5193 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -178,9 +178,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
178 178
179 seq_printf(s, "\n"); 179 seq_printf(s, "\n");
180 180
181 seq_printf(s, " Type: %s ", 181 seq_printf(s, " Type: %s ", scsi_device_type(sdev->type));
182 sdev->type < MAX_SCSI_DEVICE_CODE ?
183 scsi_device_types[(int) sdev->type] : "Unknown ");
184 seq_printf(s, " ANSI" 182 seq_printf(s, " ANSI"
185 " SCSI revision: %02x", (sdev->scsi_level - 1) ? 183 " SCSI revision: %02x", (sdev->scsi_level - 1) ?
186 sdev->scsi_level - 1 : 1); 184 sdev->scsi_level - 1 : 1);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 1bd92b9b46d9..fd9e281c3bfe 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -134,59 +134,6 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
134} 134}
135 135
136/** 136/**
137 * print_inquiry - printk the inquiry information
138 * @inq_result: printk this SCSI INQUIRY
139 *
140 * Description:
141 * printk the vendor, model, and other information found in the
142 * INQUIRY data in @inq_result.
143 *
144 * Notes:
145 * Remove this, and replace with a hotplug event that logs any
146 * relevant information.
147 **/
148static void print_inquiry(unsigned char *inq_result)
149{
150 int i;
151
152 printk(KERN_NOTICE " Vendor: ");
153 for (i = 8; i < 16; i++)
154 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
155 printk("%c", inq_result[i]);
156 else
157 printk(" ");
158
159 printk(" Model: ");
160 for (i = 16; i < 32; i++)
161 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
162 printk("%c", inq_result[i]);
163 else
164 printk(" ");
165
166 printk(" Rev: ");
167 for (i = 32; i < 36; i++)
168 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
169 printk("%c", inq_result[i]);
170 else
171 printk(" ");
172
173 printk("\n");
174
175 i = inq_result[0] & 0x1f;
176
177 printk(KERN_NOTICE " Type: %s ",
178 i <
179 MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
180 "Unknown ");
181 printk(" ANSI SCSI revision: %02x",
182 inq_result[2] & 0x07);
183 if ((inq_result[2] & 0x07) == 1 && (inq_result[3] & 0x0f) == 1)
184 printk(" CCS\n");
185 else
186 printk("\n");
187}
188
189/**
190 * scsi_alloc_sdev - allocate and setup a scsi_Device 137 * scsi_alloc_sdev - allocate and setup a scsi_Device
191 * 138 *
192 * Description: 139 * Description:
@@ -319,6 +266,18 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
319 return found_starget; 266 return found_starget;
320} 267}
321 268
269/**
270 * scsi_alloc_target - allocate a new or find an existing target
271 * @parent: parent of the target (need not be a scsi host)
272 * @channel: target channel number (zero if no channels)
273 * @id: target id number
274 *
275 * Return an existing target if one exists, provided it hasn't already
276 * gone into STARGET_DEL state, otherwise allocate a new target.
277 *
278 * The target is returned with an incremented reference, so the caller
279 * is responsible for both reaping and doing a last put
280 */
322static struct scsi_target *scsi_alloc_target(struct device *parent, 281static struct scsi_target *scsi_alloc_target(struct device *parent,
323 int channel, uint id) 282 int channel, uint id)
324{ 283{
@@ -384,14 +343,15 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
384 return NULL; 343 return NULL;
385 } 344 }
386 } 345 }
346 get_device(dev);
387 347
388 return starget; 348 return starget;
389 349
390 found: 350 found:
391 found_target->reap_ref++; 351 found_target->reap_ref++;
392 spin_unlock_irqrestore(shost->host_lock, flags); 352 spin_unlock_irqrestore(shost->host_lock, flags);
393 put_device(parent);
394 if (found_target->state != STARGET_DEL) { 353 if (found_target->state != STARGET_DEL) {
354 put_device(parent);
395 kfree(starget); 355 kfree(starget);
396 return found_target; 356 return found_target;
397 } 357 }
@@ -450,6 +410,32 @@ void scsi_target_reap(struct scsi_target *starget)
450} 410}
451 411
452/** 412/**
413 * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
414 * @s: INQUIRY result string to sanitize
415 * @len: length of the string
416 *
417 * Description:
418 * The SCSI spec says that INQUIRY vendor, product, and revision
419 * strings must consist entirely of graphic ASCII characters,
420 * padded on the right with spaces. Since not all devices obey
421 * this rule, we will replace non-graphic or non-ASCII characters
422 * with spaces. Exception: a NUL character is interpreted as a
423 * string terminator, so all the following characters are set to
424 * spaces.
425 **/
426static void sanitize_inquiry_string(unsigned char *s, int len)
427{
428 int terminated = 0;
429
430 for (; len > 0; (--len, ++s)) {
431 if (*s == 0)
432 terminated = 1;
433 if (terminated || *s < 0x20 || *s > 0x7e)
434 *s = ' ';
435 }
436}
437
438/**
453 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 439 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
454 * @sdev: scsi_device to probe 440 * @sdev: scsi_device to probe
455 * @inq_result: area to store the INQUIRY result 441 * @inq_result: area to store the INQUIRY result
@@ -463,7 +449,7 @@ void scsi_target_reap(struct scsi_target *starget)
463 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 449 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
464 * are copied to the scsi_device any flags value is stored in *@bflags. 450 * are copied to the scsi_device any flags value is stored in *@bflags.
465 **/ 451 **/
466static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result, 452static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
467 int result_len, int *bflags) 453 int result_len, int *bflags)
468{ 454{
469 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 455 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -522,7 +508,11 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
522 } 508 }
523 509
524 if (result == 0) { 510 if (result == 0) {
525 response_len = (unsigned char) inq_result[4] + 5; 511 sanitize_inquiry_string(&inq_result[8], 8);
512 sanitize_inquiry_string(&inq_result[16], 16);
513 sanitize_inquiry_string(&inq_result[32], 4);
514
515 response_len = inq_result[4] + 5;
526 if (response_len > 255) 516 if (response_len > 255)
527 response_len = first_inquiry_len; /* sanity */ 517 response_len = first_inquiry_len; /* sanity */
528 518
@@ -628,7 +618,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
628 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 618 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
629 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 619 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
630 **/ 620 **/
631static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) 621static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
622 int *bflags)
632{ 623{
633 /* 624 /*
634 * XXX do not save the inquiry, since it can change underneath us, 625 * XXX do not save the inquiry, since it can change underneath us,
@@ -653,9 +644,8 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
653 if (*bflags & BLIST_ISROM) { 644 if (*bflags & BLIST_ISROM) {
654 /* 645 /*
655 * It would be better to modify sdev->type, and set 646 * It would be better to modify sdev->type, and set
656 * sdev->removable, but then the print_inquiry() output 647 * sdev->removable; this can now be done since
657 * would not show TYPE_ROM; if print_inquiry() is removed 648 * print_inquiry has gone away.
658 * the issue goes away.
659 */ 649 */
660 inq_result[0] = TYPE_ROM; 650 inq_result[0] = TYPE_ROM;
661 inq_result[1] |= 0x80; /* removable */ 651 inq_result[1] |= 0x80; /* removable */
@@ -684,8 +674,6 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
684 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type); 674 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
685 } 675 }
686 676
687 print_inquiry(inq_result);
688
689 /* 677 /*
690 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI 678 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
691 * spec says: The device server is capable of supporting the 679 * spec says: The device server is capable of supporting the
@@ -715,6 +703,12 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
715 if (inq_result[7] & 0x10) 703 if (inq_result[7] & 0x10)
716 sdev->sdtr = 1; 704 sdev->sdtr = 1;
717 705
706 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
707 "ANSI: %d%s\n", scsi_device_type(sdev->type),
708 sdev->vendor, sdev->model, sdev->rev,
709 sdev->inq_periph_qual, inq_result[2] & 0x07,
710 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
711
718 /* 712 /*
719 * End sysfs code. 713 * End sysfs code.
720 */ 714 */
@@ -943,11 +937,26 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
943 } 937 }
944 938
945 /* 939 /*
946 * Non-standard SCSI targets may set the PDT to 0x1f (unknown or 940 * Some targets may set slight variations of PQ and PDT to signal
947 * no device type) instead of using the Peripheral Qualifier to 941 * that no LUN is present, so don't add sdev in these cases.
948 * indicate that no LUN is present. For example, USB UFI does this. 942 * Two specific examples are:
943 * 1) NetApp targets: return PQ=1, PDT=0x1f
944 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
945 * in the UFI 1.0 spec (we cannot rely on reserved bits).
946 *
947 * References:
948 * 1) SCSI SPC-3, pp. 145-146
949 * PQ=1: "A peripheral device having the specified peripheral
950 * device type is not connected to this logical unit. However, the
951 * device server is capable of supporting the specified peripheral
952 * device type on this logical unit."
953 * PDT=0x1f: "Unknown or no device type"
954 * 2) USB UFI 1.0, p. 20
955 * PDT=00h Direct-access device (floppy)
956 * PDT=1Fh none (no FDD connected to the requested logical unit)
949 */ 957 */
950 if (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f) { 958 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
959 (result[0] & 0x1f) == 0x1f) {
951 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 960 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
952 "scsi scan: peripheral device type" 961 "scsi scan: peripheral device type"
953 " of 31, no device added\n")); 962 " of 31, no device added\n"));
@@ -1345,7 +1354,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1345 if (!starget) 1354 if (!starget)
1346 return ERR_PTR(-ENOMEM); 1355 return ERR_PTR(-ENOMEM);
1347 1356
1348 get_device(&starget->dev);
1349 mutex_lock(&shost->scan_mutex); 1357 mutex_lock(&shost->scan_mutex);
1350 if (scsi_host_scan_allowed(shost)) 1358 if (scsi_host_scan_allowed(shost))
1351 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1359 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
@@ -1404,7 +1412,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1404 if (!starget) 1412 if (!starget)
1405 return; 1413 return;
1406 1414
1407 get_device(&starget->dev);
1408 if (lun != SCAN_WILD_CARD) { 1415 if (lun != SCAN_WILD_CARD) {
1409 /* 1416 /*
1410 * Scan for a specific host/chan/id/lun. 1417 * Scan for a specific host/chan/id/lun.
@@ -1586,7 +1593,8 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1586 if (sdev) { 1593 if (sdev) {
1587 sdev->sdev_gendev.parent = get_device(&starget->dev); 1594 sdev->sdev_gendev.parent = get_device(&starget->dev);
1588 sdev->borken = 0; 1595 sdev->borken = 0;
1589 } 1596 } else
1597 scsi_target_reap(starget);
1590 put_device(&starget->dev); 1598 put_device(&starget->dev);
1591 out: 1599 out:
1592 mutex_unlock(&shost->scan_mutex); 1600 mutex_unlock(&shost->scan_mutex);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b03aa85108e5..38c215a78f69 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -32,6 +32,9 @@
32#include <scsi/scsi_transport.h> 32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
35#include <linux/netlink.h>
36#include <net/netlink.h>
37#include <scsi/scsi_netlink_fc.h>
35#include "scsi_priv.h" 38#include "scsi_priv.h"
36 39
37static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 40static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
@@ -93,6 +96,29 @@ fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
93#define FC_PORTTYPE_MAX_NAMELEN 50 96#define FC_PORTTYPE_MAX_NAMELEN 50
94 97
95 98
99/* Convert fc_host_event_code values to ascii string name */
100static const struct {
101 enum fc_host_event_code value;
102 char *name;
103} fc_host_event_code_names[] = {
104 { FCH_EVT_LIP, "lip" },
105 { FCH_EVT_LINKUP, "link_up" },
106 { FCH_EVT_LINKDOWN, "link_down" },
107 { FCH_EVT_LIPRESET, "lip_reset" },
108 { FCH_EVT_RSCN, "rscn" },
109 { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" },
110 { FCH_EVT_PORT_UNKNOWN, "port_unknown" },
111 { FCH_EVT_PORT_ONLINE, "port_online" },
112 { FCH_EVT_PORT_OFFLINE, "port_offline" },
113 { FCH_EVT_PORT_FABRIC, "port_fabric" },
114 { FCH_EVT_LINK_UNKNOWN, "link_unknown" },
115 { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
116};
117fc_enum_name_search(host_event_code, fc_host_event_code,
118 fc_host_event_code_names)
119#define FC_HOST_EVENT_CODE_MAX_NAMELEN 30
120
121
96/* Convert fc_port_state values to ascii string name */ 122/* Convert fc_port_state values to ascii string name */
97static struct { 123static struct {
98 enum fc_port_state value; 124 enum fc_port_state value;
@@ -216,6 +242,7 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
216 242
217 243
218static void fc_timeout_deleted_rport(void *data); 244static void fc_timeout_deleted_rport(void *data);
245static void fc_timeout_fail_rport_io(void *data);
219static void fc_scsi_scan_rport(void *data); 246static void fc_scsi_scan_rport(void *data);
220 247
221/* 248/*
@@ -223,7 +250,7 @@ static void fc_scsi_scan_rport(void *data);
223 * Increase these values if you add attributes 250 * Increase these values if you add attributes
224 */ 251 */
225#define FC_STARGET_NUM_ATTRS 3 252#define FC_STARGET_NUM_ATTRS 3
226#define FC_RPORT_NUM_ATTRS 9 253#define FC_RPORT_NUM_ATTRS 10
227#define FC_HOST_NUM_ATTRS 17 254#define FC_HOST_NUM_ATTRS 17
228 255
229struct fc_internal { 256struct fc_internal {
@@ -301,8 +328,6 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
301 fc_host->supported_classes = FC_COS_UNSPECIFIED; 328 fc_host->supported_classes = FC_COS_UNSPECIFIED;
302 memset(fc_host->supported_fc4s, 0, 329 memset(fc_host->supported_fc4s, 0,
303 sizeof(fc_host->supported_fc4s)); 330 sizeof(fc_host->supported_fc4s));
304 memset(fc_host->symbolic_name, 0,
305 sizeof(fc_host->symbolic_name));
306 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; 331 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
307 fc_host->maxframe_size = -1; 332 fc_host->maxframe_size = -1;
308 memset(fc_host->serial_number, 0, 333 memset(fc_host->serial_number, 0,
@@ -315,6 +340,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
315 sizeof(fc_host->active_fc4s)); 340 sizeof(fc_host->active_fc4s));
316 fc_host->speed = FC_PORTSPEED_UNKNOWN; 341 fc_host->speed = FC_PORTSPEED_UNKNOWN;
317 fc_host->fabric_name = -1; 342 fc_host->fabric_name = -1;
343 memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
344 memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
318 345
319 fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN; 346 fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
320 347
@@ -377,10 +404,184 @@ MODULE_PARM_DESC(dev_loss_tmo,
377 " exceeded, the scsi target is removed. Value should be" 404 " exceeded, the scsi target is removed. Value should be"
378 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); 405 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
379 406
407/**
408 * Netlink Infrastructure
409 **/
410
411static atomic_t fc_event_seq;
412
413/**
414 * fc_get_event_number - Obtain the next sequential FC event number
415 *
416 * Notes:
417 * We could have inline'd this, but it would have required fc_event_seq to
418 * be exposed. For now, live with the subroutine call.
419 * Atomic used to avoid lock/unlock...
420 **/
421u32
422fc_get_event_number(void)
423{
424 return atomic_add_return(1, &fc_event_seq);
425}
426EXPORT_SYMBOL(fc_get_event_number);
427
428
429/**
 430 * fc_host_post_event - called to post an event on an fc_host.
431 *
432 * @shost: host the event occurred on
433 * @event_number: fc event number obtained from get_fc_event_number()
434 * @event_code: fc_host event being posted
435 * @event_data: 32bits of data for the event being posted
436 *
437 * Notes:
438 * This routine assumes no locks are held on entry.
439 **/
440void
441fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
442 enum fc_host_event_code event_code, u32 event_data)
443{
444 struct sk_buff *skb;
445 struct nlmsghdr *nlh;
446 struct fc_nl_event *event;
447 const char *name;
448 u32 len, skblen;
449 int err;
450
451 if (!scsi_nl_sock) {
452 err = -ENOENT;
453 goto send_fail;
454 }
455
456 len = FC_NL_MSGALIGN(sizeof(*event));
457 skblen = NLMSG_SPACE(len);
458
459 skb = alloc_skb(skblen, GFP_KERNEL);
460 if (!skb) {
461 err = -ENOBUFS;
462 goto send_fail;
463 }
464
465 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
466 skblen - sizeof(*nlh), 0);
467 if (!nlh) {
468 err = -ENOBUFS;
469 goto send_fail_skb;
470 }
471 event = NLMSG_DATA(nlh);
472
473 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
474 FC_NL_ASYNC_EVENT, len);
475 event->seconds = get_seconds();
476 event->vendor_id = 0;
477 event->host_no = shost->host_no;
478 event->event_datalen = sizeof(u32); /* bytes */
479 event->event_num = event_number;
480 event->event_code = event_code;
481 event->event_data = event_data;
482
483 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
484 GFP_KERNEL);
485 if (err && (err != -ESRCH)) /* filter no recipient errors */
486 /* nlmsg_multicast already kfree_skb'd */
487 goto send_fail;
488
489 return;
490
491send_fail_skb:
492 kfree_skb(skb);
493send_fail:
494 name = get_fc_host_event_code_name(event_code);
495 printk(KERN_WARNING
496 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
497 __FUNCTION__, shost->host_no,
498 (name) ? name : "<unknown>", event_data, err);
499 return;
500}
501EXPORT_SYMBOL(fc_host_post_event);
502
503
504/**
 505 * fc_host_post_vendor_event - called to post a vendor unique event on
 506 * an fc_host
507 *
508 * @shost: host the event occurred on
509 * @event_number: fc event number obtained from get_fc_event_number()
510 * @data_len: amount, in bytes, of vendor unique data
511 * @data_buf: pointer to vendor unique data
512 *
513 * Notes:
514 * This routine assumes no locks are held on entry.
515 **/
516void
517fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
518 u32 data_len, char * data_buf, u64 vendor_id)
519{
520 struct sk_buff *skb;
521 struct nlmsghdr *nlh;
522 struct fc_nl_event *event;
523 u32 len, skblen;
524 int err;
525
526 if (!scsi_nl_sock) {
527 err = -ENOENT;
528 goto send_vendor_fail;
529 }
530
531 len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
532 skblen = NLMSG_SPACE(len);
533
534 skb = alloc_skb(skblen, GFP_KERNEL);
535 if (!skb) {
536 err = -ENOBUFS;
537 goto send_vendor_fail;
538 }
539
540 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
541 skblen - sizeof(*nlh), 0);
542 if (!nlh) {
543 err = -ENOBUFS;
544 goto send_vendor_fail_skb;
545 }
546 event = NLMSG_DATA(nlh);
547
548 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
549 FC_NL_ASYNC_EVENT, len);
550 event->seconds = get_seconds();
551 event->vendor_id = vendor_id;
552 event->host_no = shost->host_no;
553 event->event_datalen = data_len; /* bytes */
554 event->event_num = event_number;
555 event->event_code = FCH_EVT_VENDOR_UNIQUE;
556 memcpy(&event->event_data, data_buf, data_len);
557
558 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
559 GFP_KERNEL);
560 if (err && (err != -ESRCH)) /* filter no recipient errors */
561 /* nlmsg_multicast already kfree_skb'd */
562 goto send_vendor_fail;
563
564 return;
565
566send_vendor_fail_skb:
567 kfree_skb(skb);
568send_vendor_fail:
569 printk(KERN_WARNING
570 "%s: Dropped Event : host %d vendor_unique - err %d\n",
571 __FUNCTION__, shost->host_no, err);
572 return;
573}
574EXPORT_SYMBOL(fc_host_post_vendor_event);
575
576
380 577
381static __init int fc_transport_init(void) 578static __init int fc_transport_init(void)
382{ 579{
383 int error = transport_class_register(&fc_host_class); 580 int error;
581
582 atomic_set(&fc_event_seq, 0);
583
584 error = transport_class_register(&fc_host_class);
384 if (error) 585 if (error)
385 return error; 586 return error;
386 error = transport_class_register(&fc_rport_class); 587 error = transport_class_register(&fc_rport_class);
@@ -424,11 +625,14 @@ store_fc_rport_##field(struct class_device *cdev, const char *buf, \
424 struct fc_rport *rport = transport_class_to_rport(cdev); \ 625 struct fc_rport *rport = transport_class_to_rport(cdev); \
425 struct Scsi_Host *shost = rport_to_shost(rport); \ 626 struct Scsi_Host *shost = rport_to_shost(rport); \
426 struct fc_internal *i = to_fc_internal(shost->transportt); \ 627 struct fc_internal *i = to_fc_internal(shost->transportt); \
628 char *cp; \
427 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \ 629 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
428 (rport->port_state == FC_PORTSTATE_DELETED) || \ 630 (rport->port_state == FC_PORTSTATE_DELETED) || \
429 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \ 631 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
430 return -EBUSY; \ 632 return -EBUSY; \
431 val = simple_strtoul(buf, NULL, 0); \ 633 val = simple_strtoul(buf, &cp, 0); \
634 if (*cp && (*cp != '\n')) \
635 return -EINVAL; \
432 i->f->set_rport_##field(rport, val); \ 636 i->f->set_rport_##field(rport, val); \
433 return count; \ 637 return count; \
434} 638}
@@ -510,6 +714,13 @@ static FC_CLASS_DEVICE_ATTR(rport, title, S_IRUGO, \
510 if (i->f->show_rport_##field) \ 714 if (i->f->show_rport_##field) \
511 count++ 715 count++
512 716
717#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \
718{ \
719 i->private_rport_attrs[count] = class_device_attr_rport_##field; \
720 i->rport_attrs[count] = &i->private_rport_attrs[count]; \
721 count++; \
722}
723
513 724
514/* The FC Transport Remote Port Attributes: */ 725/* The FC Transport Remote Port Attributes: */
515 726
@@ -542,12 +753,14 @@ store_fc_rport_dev_loss_tmo(struct class_device *cdev, const char *buf,
542 struct fc_rport *rport = transport_class_to_rport(cdev); 753 struct fc_rport *rport = transport_class_to_rport(cdev);
543 struct Scsi_Host *shost = rport_to_shost(rport); 754 struct Scsi_Host *shost = rport_to_shost(rport);
544 struct fc_internal *i = to_fc_internal(shost->transportt); 755 struct fc_internal *i = to_fc_internal(shost->transportt);
756 char *cp;
545 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || 757 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
546 (rport->port_state == FC_PORTSTATE_DELETED) || 758 (rport->port_state == FC_PORTSTATE_DELETED) ||
547 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) 759 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
548 return -EBUSY; 760 return -EBUSY;
549 val = simple_strtoul(buf, NULL, 0); 761 val = simple_strtoul(buf, &cp, 0);
550 if ((val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) 762 if ((*cp && (*cp != '\n')) ||
763 (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
551 return -EINVAL; 764 return -EINVAL;
552 i->f->set_rport_dev_loss_tmo(rport, val); 765 i->f->set_rport_dev_loss_tmo(rport, val);
553 return count; 766 return count;
@@ -597,6 +810,44 @@ static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO,
597fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); 810fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
598fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); 811fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
599 812
813/*
814 * fast_io_fail_tmo attribute
815 */
816static ssize_t
817show_fc_rport_fast_io_fail_tmo (struct class_device *cdev, char *buf)
818{
819 struct fc_rport *rport = transport_class_to_rport(cdev);
820
821 if (rport->fast_io_fail_tmo == -1)
822 return snprintf(buf, 5, "off\n");
823 return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
824}
825
826static ssize_t
827store_fc_rport_fast_io_fail_tmo(struct class_device *cdev, const char *buf,
828 size_t count)
829{
830 int val;
831 char *cp;
832 struct fc_rport *rport = transport_class_to_rport(cdev);
833
834 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
835 (rport->port_state == FC_PORTSTATE_DELETED) ||
836 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
837 return -EBUSY;
838 if (strncmp(buf, "off", 3) == 0)
839 rport->fast_io_fail_tmo = -1;
840 else {
841 val = simple_strtoul(buf, &cp, 0);
842 if ((*cp && (*cp != '\n')) ||
843 (val < 0) || (val >= rport->dev_loss_tmo))
844 return -EINVAL;
845 rport->fast_io_fail_tmo = val;
846 }
847 return count;
848}
849static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
850 show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
600 851
601 852
602/* 853/*
@@ -682,12 +933,34 @@ store_fc_host_##field(struct class_device *cdev, const char *buf, \
682 int val; \ 933 int val; \
683 struct Scsi_Host *shost = transport_class_to_shost(cdev); \ 934 struct Scsi_Host *shost = transport_class_to_shost(cdev); \
684 struct fc_internal *i = to_fc_internal(shost->transportt); \ 935 struct fc_internal *i = to_fc_internal(shost->transportt); \
936 char *cp; \
685 \ 937 \
686 val = simple_strtoul(buf, NULL, 0); \ 938 val = simple_strtoul(buf, &cp, 0); \
939 if (*cp && (*cp != '\n')) \
940 return -EINVAL; \
687 i->f->set_host_##field(shost, val); \ 941 i->f->set_host_##field(shost, val); \
688 return count; \ 942 return count; \
689} 943}
690 944
945#define fc_host_store_str_function(field, slen) \
946static ssize_t \
947store_fc_host_##field(struct class_device *cdev, const char *buf, \
948 size_t count) \
949{ \
950 struct Scsi_Host *shost = transport_class_to_shost(cdev); \
951 struct fc_internal *i = to_fc_internal(shost->transportt); \
952 unsigned int cnt=count; \
953 \
954 /* count may include a LF at end of string */ \
955 if (buf[cnt-1] == '\n') \
956 cnt--; \
957 if (cnt > ((slen) - 1)) \
958 return -EINVAL; \
959 memcpy(fc_host_##field(shost), buf, cnt); \
960 i->f->set_host_##field(shost); \
961 return count; \
962}
963
691#define fc_host_rd_attr(field, format_string, sz) \ 964#define fc_host_rd_attr(field, format_string, sz) \
692 fc_host_show_function(field, format_string, sz, ) \ 965 fc_host_show_function(field, format_string, sz, ) \
693static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ 966static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \
@@ -815,7 +1088,6 @@ fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
815fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); 1088fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
816fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, 1089fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
817 unsigned long long); 1090 unsigned long long);
818fc_private_host_rd_attr(symbolic_name, "%s\n", (FC_SYMBOLIC_NAME_SIZE +1));
819fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); 1091fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
820fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); 1092fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
821 1093
@@ -858,6 +1130,13 @@ fc_host_rd_attr(port_id, "0x%06x\n", 20);
858fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); 1130fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
859fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); 1131fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
860fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); 1132fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1133fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1134
1135fc_private_host_show_function(system_hostname, "%s\n",
1136 FC_SYMBOLIC_NAME_SIZE + 1, )
1137fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1138static FC_CLASS_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1139 show_fc_host_system_hostname, store_fc_host_system_hostname);
861 1140
862 1141
863/* Private Host Attributes */ 1142/* Private Host Attributes */
@@ -1223,7 +1502,6 @@ fc_attach_transport(struct fc_function_template *ft)
1223 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name); 1502 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
1224 SETUP_HOST_ATTRIBUTE_RD(supported_classes); 1503 SETUP_HOST_ATTRIBUTE_RD(supported_classes);
1225 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); 1504 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
1226 SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
1227 SETUP_HOST_ATTRIBUTE_RD(supported_speeds); 1505 SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
1228 SETUP_HOST_ATTRIBUTE_RD(maxframe_size); 1506 SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
1229 SETUP_HOST_ATTRIBUTE_RD(serial_number); 1507 SETUP_HOST_ATTRIBUTE_RD(serial_number);
@@ -1234,6 +1512,8 @@ fc_attach_transport(struct fc_function_template *ft)
1234 SETUP_HOST_ATTRIBUTE_RD(active_fc4s); 1512 SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
1235 SETUP_HOST_ATTRIBUTE_RD(speed); 1513 SETUP_HOST_ATTRIBUTE_RD(speed);
1236 SETUP_HOST_ATTRIBUTE_RD(fabric_name); 1514 SETUP_HOST_ATTRIBUTE_RD(fabric_name);
1515 SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
1516 SETUP_HOST_ATTRIBUTE_RW(system_hostname);
1237 1517
1238 /* Transport-managed attributes */ 1518 /* Transport-managed attributes */
1239 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 1519 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
@@ -1257,6 +1537,8 @@ fc_attach_transport(struct fc_function_template *ft)
1257 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); 1537 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
1258 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); 1538 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
1259 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); 1539 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
1540 if (ft->terminate_rport_io)
1541 SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
1260 1542
1261 BUG_ON(count > FC_RPORT_NUM_ATTRS); 1543 BUG_ON(count > FC_RPORT_NUM_ATTRS);
1262 1544
@@ -1328,7 +1610,7 @@ fc_flush_work(struct Scsi_Host *shost)
1328 * @delay: jiffies to delay the work queuing 1610 * @delay: jiffies to delay the work queuing
1329 * 1611 *
1330 * Return value: 1612 * Return value:
1331 * 0 on success / != 0 for error 1613 * 1 on success / 0 already queued / < 0 for error
1332 **/ 1614 **/
1333static int 1615static int
1334fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, 1616fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
@@ -1343,6 +1625,9 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
1343 return -EINVAL; 1625 return -EINVAL;
1344 } 1626 }
1345 1627
1628 if (delay == 0)
1629 return queue_work(fc_host_devloss_work_q(shost), work);
1630
1346 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 1631 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
1347} 1632}
1348 1633
@@ -1435,10 +1720,23 @@ fc_starget_delete(void *data)
1435 struct fc_rport *rport = (struct fc_rport *)data; 1720 struct fc_rport *rport = (struct fc_rport *)data;
1436 struct Scsi_Host *shost = rport_to_shost(rport); 1721 struct Scsi_Host *shost = rport_to_shost(rport);
1437 unsigned long flags; 1722 unsigned long flags;
1723 struct fc_internal *i = to_fc_internal(shost->transportt);
1724
1725 /*
1726 * Involve the LLDD if possible. All io on the rport is to
1727 * be terminated, either as part of the dev_loss_tmo callback
1728 * processing, or via the terminate_rport_io function.
1729 */
1730 if (i->f->dev_loss_tmo_callbk)
1731 i->f->dev_loss_tmo_callbk(rport);
1732 else if (i->f->terminate_rport_io)
1733 i->f->terminate_rport_io(rport);
1438 1734
1439 spin_lock_irqsave(shost->host_lock, flags); 1735 spin_lock_irqsave(shost->host_lock, flags);
1440 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { 1736 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
1441 spin_unlock_irqrestore(shost->host_lock, flags); 1737 spin_unlock_irqrestore(shost->host_lock, flags);
1738 if (!cancel_delayed_work(&rport->fail_io_work))
1739 fc_flush_devloss(shost);
1442 if (!cancel_delayed_work(&rport->dev_loss_work)) 1740 if (!cancel_delayed_work(&rport->dev_loss_work))
1443 fc_flush_devloss(shost); 1741 fc_flush_devloss(shost);
1444 spin_lock_irqsave(shost->host_lock, flags); 1742 spin_lock_irqsave(shost->host_lock, flags);
@@ -1461,10 +1759,7 @@ fc_rport_final_delete(void *data)
1461 struct fc_rport *rport = (struct fc_rport *)data; 1759 struct fc_rport *rport = (struct fc_rport *)data;
1462 struct device *dev = &rport->dev; 1760 struct device *dev = &rport->dev;
1463 struct Scsi_Host *shost = rport_to_shost(rport); 1761 struct Scsi_Host *shost = rport_to_shost(rport);
1464 1762 struct fc_internal *i = to_fc_internal(shost->transportt);
1465 /* Delete SCSI target and sdevs */
1466 if (rport->scsi_target_id != -1)
1467 fc_starget_delete(data);
1468 1763
1469 /* 1764 /*
1470 * if a scan is pending, flush the SCSI Host work_q so that 1765 * if a scan is pending, flush the SCSI Host work_q so that
@@ -1473,6 +1768,14 @@ fc_rport_final_delete(void *data)
1473 if (rport->flags & FC_RPORT_SCAN_PENDING) 1768 if (rport->flags & FC_RPORT_SCAN_PENDING)
1474 scsi_flush_work(shost); 1769 scsi_flush_work(shost);
1475 1770
1771 /* Delete SCSI target and sdevs */
1772 if (rport->scsi_target_id != -1)
1773 fc_starget_delete(data);
1774 else if (i->f->dev_loss_tmo_callbk)
1775 i->f->dev_loss_tmo_callbk(rport);
1776 else if (i->f->terminate_rport_io)
1777 i->f->terminate_rport_io(rport);
1778
1476 transport_remove_device(dev); 1779 transport_remove_device(dev);
1477 device_del(dev); 1780 device_del(dev);
1478 transport_destroy_device(dev); 1781 transport_destroy_device(dev);
@@ -1524,8 +1827,10 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1524 if (fci->f->dd_fcrport_size) 1827 if (fci->f->dd_fcrport_size)
1525 rport->dd_data = &rport[1]; 1828 rport->dd_data = &rport[1];
1526 rport->channel = channel; 1829 rport->channel = channel;
1830 rport->fast_io_fail_tmo = -1;
1527 1831
1528 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); 1832 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
1833 INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
1529 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); 1834 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
1530 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); 1835 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
1531 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); 1836 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
@@ -1689,11 +1994,13 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1689 /* restart the target */ 1994 /* restart the target */
1690 1995
1691 /* 1996 /*
1692 * Stop the target timer first. Take no action 1997 * Stop the target timers first. Take no action
1693 * on the del_timer failure as the state 1998 * on the del_timer failure as the state
1694 * machine state change will validate the 1999 * machine state change will validate the
1695 * transaction. 2000 * transaction.
1696 */ 2001 */
2002 if (!cancel_delayed_work(&rport->fail_io_work))
2003 fc_flush_devloss(shost);
1697 if (!cancel_delayed_work(work)) 2004 if (!cancel_delayed_work(work))
1698 fc_flush_devloss(shost); 2005 fc_flush_devloss(shost);
1699 2006
@@ -1837,6 +2144,7 @@ void
1837fc_remote_port_delete(struct fc_rport *rport) 2144fc_remote_port_delete(struct fc_rport *rport)
1838{ 2145{
1839 struct Scsi_Host *shost = rport_to_shost(rport); 2146 struct Scsi_Host *shost = rport_to_shost(rport);
2147 struct fc_internal *i = to_fc_internal(shost->transportt);
1840 int timeout = rport->dev_loss_tmo; 2148 int timeout = rport->dev_loss_tmo;
1841 unsigned long flags; 2149 unsigned long flags;
1842 2150
@@ -1867,6 +2175,12 @@ fc_remote_port_delete(struct fc_rport *rport)
1867 2175
1868 scsi_target_block(&rport->dev); 2176 scsi_target_block(&rport->dev);
1869 2177
2178 /* see if we need to kill io faster than waiting for device loss */
2179 if ((rport->fast_io_fail_tmo != -1) &&
2180 (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
2181 fc_queue_devloss_work(shost, &rport->fail_io_work,
2182 rport->fast_io_fail_tmo * HZ);
2183
1870 /* cap the length the devices can be blocked until they are deleted */ 2184 /* cap the length the devices can be blocked until they are deleted */
1871 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ); 2185 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
1872} 2186}
@@ -1926,6 +2240,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
1926 * machine state change will validate the 2240 * machine state change will validate the
1927 * transaction. 2241 * transaction.
1928 */ 2242 */
2243 if (!cancel_delayed_work(&rport->fail_io_work))
2244 fc_flush_devloss(shost);
1929 if (!cancel_delayed_work(&rport->dev_loss_work)) 2245 if (!cancel_delayed_work(&rport->dev_loss_work))
1930 fc_flush_devloss(shost); 2246 fc_flush_devloss(shost);
1931 2247
@@ -2047,6 +2363,28 @@ fc_timeout_deleted_rport(void *data)
2047} 2363}
2048 2364
2049/** 2365/**
2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
2367 * disconnected SCSI target.
2368 *
2369 * @data: rport to terminate io on.
2370 *
2371 * Notes: Only requests the failure of the io, not that all are flushed
2372 * prior to returning.
2373 **/
2374static void
2375fc_timeout_fail_rport_io(void *data)
2376{
2377 struct fc_rport *rport = (struct fc_rport *)data;
2378 struct Scsi_Host *shost = rport_to_shost(rport);
2379 struct fc_internal *i = to_fc_internal(shost->transportt);
2380
2381 if (rport->port_state != FC_PORTSTATE_BLOCKED)
2382 return;
2383
2384 i->f->terminate_rport_io(rport);
2385}
2386
2387/**
2050 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2388 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
2051 * 2389 *
2052 * @data: remote port to be scanned. 2390 * @data: remote port to be scanned.
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 2ecd14188574..7b0019cccce3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,7 +34,7 @@
34#define ISCSI_SESSION_ATTRS 11 34#define ISCSI_SESSION_ATTRS 11
35#define ISCSI_CONN_ATTRS 11 35#define ISCSI_CONN_ATTRS 11
36#define ISCSI_HOST_ATTRS 0 36#define ISCSI_HOST_ATTRS 0
37#define ISCSI_TRANSPORT_VERSION "1.1-646" 37#define ISCSI_TRANSPORT_VERSION "2.0-685"
38 38
39struct iscsi_internal { 39struct iscsi_internal {
40 int daemon_pid; 40 int daemon_pid;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 5a625c3fddae..b5b0c2cba96b 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -77,6 +77,24 @@ get_sas_##title##_names(u32 table_key, char *buf) \
77 return len; \ 77 return len; \
78} 78}
79 79
80#define sas_bitfield_name_set(title, table) \
81static ssize_t \
82set_sas_##title##_names(u32 *table_key, const char *buf) \
83{ \
84 ssize_t len = 0; \
85 int i; \
86 \
87 for (i = 0; i < ARRAY_SIZE(table); i++) { \
88 len = strlen(table[i].name); \
89 if (strncmp(buf, table[i].name, len) == 0 && \
90 (buf[len] == '\n' || buf[len] == '\0')) { \
91 *table_key = table[i].value; \
92 return 0; \
93 } \
94 } \
95 return -EINVAL; \
96}
97
80#define sas_bitfield_name_search(title, table) \ 98#define sas_bitfield_name_search(title, table) \
81static ssize_t \ 99static ssize_t \
82get_sas_##title##_names(u32 table_key, char *buf) \ 100get_sas_##title##_names(u32 table_key, char *buf) \
@@ -131,7 +149,7 @@ static struct {
131 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, 149 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
132}; 150};
133sas_bitfield_name_search(linkspeed, sas_linkspeed_names) 151sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
134 152sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
135 153
136/* 154/*
137 * SAS host attributes 155 * SAS host attributes
@@ -253,10 +271,39 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \
253 return get_sas_linkspeed_names(phy->field, buf); \ 271 return get_sas_linkspeed_names(phy->field, buf); \
254} 272}
255 273
274/* Fudge to tell if we're minimum or maximum */
275#define sas_phy_store_linkspeed(field) \
276static ssize_t \
277store_sas_phy_##field(struct class_device *cdev, const char *buf, \
278 size_t count) \
279{ \
280 struct sas_phy *phy = transport_class_to_phy(cdev); \
281 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \
282 struct sas_internal *i = to_sas_internal(shost->transportt); \
283 u32 value; \
284 struct sas_phy_linkrates rates = {0}; \
285 int error; \
286 \
287 error = set_sas_linkspeed_names(&value, buf); \
288 if (error) \
289 return error; \
290 rates.field = value; \
291 error = i->f->set_phy_speed(phy, &rates); \
292 \
293 return error ? error : count; \
294}
295
296#define sas_phy_linkspeed_rw_attr(field) \
297 sas_phy_show_linkspeed(field) \
298 sas_phy_store_linkspeed(field) \
299static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \
300 store_sas_phy_##field)
301
256#define sas_phy_linkspeed_attr(field) \ 302#define sas_phy_linkspeed_attr(field) \
257 sas_phy_show_linkspeed(field) \ 303 sas_phy_show_linkspeed(field) \
258static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) 304static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL)
259 305
306
260#define sas_phy_show_linkerror(field) \ 307#define sas_phy_show_linkerror(field) \
261static ssize_t \ 308static ssize_t \
262show_sas_phy_##field(struct class_device *cdev, char *buf) \ 309show_sas_phy_##field(struct class_device *cdev, char *buf) \
@@ -266,9 +313,6 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \
266 struct sas_internal *i = to_sas_internal(shost->transportt); \ 313 struct sas_internal *i = to_sas_internal(shost->transportt); \
267 int error; \ 314 int error; \
268 \ 315 \
269 if (!phy->local_attached) \
270 return -EINVAL; \
271 \
272 error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \ 316 error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \
273 if (error) \ 317 if (error) \
274 return error; \ 318 return error; \
@@ -299,9 +343,6 @@ static ssize_t do_sas_phy_reset(struct class_device *cdev,
299 struct sas_internal *i = to_sas_internal(shost->transportt); 343 struct sas_internal *i = to_sas_internal(shost->transportt);
300 int error; 344 int error;
301 345
302 if (!phy->local_attached)
303 return -EINVAL;
304
305 error = i->f->phy_reset(phy, hard_reset); 346 error = i->f->phy_reset(phy, hard_reset);
306 if (error) 347 if (error)
307 return error; 348 return error;
@@ -332,9 +373,9 @@ sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
332//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int); 373//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
333sas_phy_linkspeed_attr(negotiated_linkrate); 374sas_phy_linkspeed_attr(negotiated_linkrate);
334sas_phy_linkspeed_attr(minimum_linkrate_hw); 375sas_phy_linkspeed_attr(minimum_linkrate_hw);
335sas_phy_linkspeed_attr(minimum_linkrate); 376sas_phy_linkspeed_rw_attr(minimum_linkrate);
336sas_phy_linkspeed_attr(maximum_linkrate_hw); 377sas_phy_linkspeed_attr(maximum_linkrate_hw);
337sas_phy_linkspeed_attr(maximum_linkrate); 378sas_phy_linkspeed_rw_attr(maximum_linkrate);
338sas_phy_linkerror_attr(invalid_dword_count); 379sas_phy_linkerror_attr(invalid_dword_count);
339sas_phy_linkerror_attr(running_disparity_error_count); 380sas_phy_linkerror_attr(running_disparity_error_count);
340sas_phy_linkerror_attr(loss_of_dword_sync_count); 381sas_phy_linkerror_attr(loss_of_dword_sync_count);
@@ -849,7 +890,7 @@ show_sas_rphy_enclosure_identifier(struct class_device *cdev, char *buf)
849 * Only devices behind an expander are supported, because the 890 * Only devices behind an expander are supported, because the
850 * enclosure identifier is a SMP feature. 891 * enclosure identifier is a SMP feature.
851 */ 892 */
852 if (phy->local_attached) 893 if (scsi_is_sas_phy_local(phy))
853 return -EINVAL; 894 return -EINVAL;
854 895
855 error = i->f->get_enclosure_identifier(rphy, &identifier); 896 error = i->f->get_enclosure_identifier(rphy, &identifier);
@@ -870,7 +911,7 @@ show_sas_rphy_bay_identifier(struct class_device *cdev, char *buf)
870 struct sas_internal *i = to_sas_internal(shost->transportt); 911 struct sas_internal *i = to_sas_internal(shost->transportt);
871 int val; 912 int val;
872 913
873 if (phy->local_attached) 914 if (scsi_is_sas_phy_local(phy))
874 return -EINVAL; 915 return -EINVAL;
875 916
876 val = i->f->get_bay_identifier(rphy); 917 val = i->f->get_bay_identifier(rphy);
@@ -1316,13 +1357,23 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1316 * Setup / Teardown code 1357 * Setup / Teardown code
1317 */ 1358 */
1318 1359
1319#define SETUP_TEMPLATE(attrb, field, perm, test) \ 1360#define SETUP_TEMPLATE(attrb, field, perm, test) \
1320 i->private_##attrb[count] = class_device_attr_##field; \ 1361 i->private_##attrb[count] = class_device_attr_##field; \
1321 i->private_##attrb[count].attr.mode = perm; \ 1362 i->private_##attrb[count].attr.mode = perm; \
1322 i->attrb[count] = &i->private_##attrb[count]; \ 1363 i->attrb[count] = &i->private_##attrb[count]; \
1323 if (test) \ 1364 if (test) \
1324 count++ 1365 count++
1325 1366
1367#define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \
1368 i->private_##attrb[count] = class_device_attr_##field; \
1369 i->private_##attrb[count].attr.mode = perm; \
1370 if (ro_test) { \
1371 i->private_##attrb[count].attr.mode = ro_perm; \
1372 i->private_##attrb[count].store = NULL; \
1373 } \
1374 i->attrb[count] = &i->private_##attrb[count]; \
1375 if (test) \
1376 count++
1326 1377
1327#define SETUP_RPORT_ATTRIBUTE(field) \ 1378#define SETUP_RPORT_ATTRIBUTE(field) \
1328 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1) 1379 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1)
@@ -1333,6 +1384,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1333#define SETUP_PHY_ATTRIBUTE(field) \ 1384#define SETUP_PHY_ATTRIBUTE(field) \
1334 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) 1385 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1)
1335 1386
1387#define SETUP_PHY_ATTRIBUTE_RW(field) \
1388 SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \
1389 !i->f->set_phy_speed, S_IRUGO)
1390
1336#define SETUP_PORT_ATTRIBUTE(field) \ 1391#define SETUP_PORT_ATTRIBUTE(field) \
1337 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) 1392 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
1338 1393
@@ -1413,9 +1468,9 @@ sas_attach_transport(struct sas_function_template *ft)
1413 //SETUP_PHY_ATTRIBUTE(port_identifier); 1468 //SETUP_PHY_ATTRIBUTE(port_identifier);
1414 SETUP_PHY_ATTRIBUTE(negotiated_linkrate); 1469 SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
1415 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw); 1470 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
1416 SETUP_PHY_ATTRIBUTE(minimum_linkrate); 1471 SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
1417 SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw); 1472 SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw);
1418 SETUP_PHY_ATTRIBUTE(maximum_linkrate); 1473 SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate);
1419 1474
1420 SETUP_PHY_ATTRIBUTE(invalid_dword_count); 1475 SETUP_PHY_ATTRIBUTE(invalid_dword_count);
1421 SETUP_PHY_ATTRIBUTE(running_disparity_error_count); 1476 SETUP_PHY_ATTRIBUTE(running_disparity_error_count);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 29a9a53cdd1a..9f070f0d0f2b 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -47,6 +47,7 @@
47 47
48/* Private data accessors (keep these out of the header file) */ 48/* Private data accessors (keep these out of the header file) */
49#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending) 49#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending)
50#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
50#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) 51#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
51 52
52struct spi_internal { 53struct spi_internal {
@@ -240,6 +241,7 @@ static int spi_setup_transport_attrs(struct transport_container *tc,
240 spi_pcomp_en(starget) = 0; 241 spi_pcomp_en(starget) = 0;
241 spi_hold_mcs(starget) = 0; 242 spi_hold_mcs(starget) = 0;
242 spi_dv_pending(starget) = 0; 243 spi_dv_pending(starget) = 0;
244 spi_dv_in_progress(starget) = 0;
243 spi_initial_dv(starget) = 0; 245 spi_initial_dv(starget) = 0;
244 mutex_init(&spi_dv_mutex(starget)); 246 mutex_init(&spi_dv_mutex(starget));
245 247
@@ -830,28 +832,37 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
830 DV_SET(period, spi_min_period(starget)); 832 DV_SET(period, spi_min_period(starget));
831 /* try QAS requests; this should be harmless to set if the 833 /* try QAS requests; this should be harmless to set if the
832 * target supports it */ 834 * target supports it */
833 if (scsi_device_qas(sdev)) 835 if (scsi_device_qas(sdev)) {
834 DV_SET(qas, 1); 836 DV_SET(qas, 1);
835 /* Also try IU transfers */ 837 } else {
836 if (scsi_device_ius(sdev)) 838 DV_SET(qas, 0);
839 }
840
841 if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
842 /* This u320 (or u640). Set IU transfers */
837 DV_SET(iu, 1); 843 DV_SET(iu, 1);
838 if (spi_min_period(starget) < 9) { 844 /* Then set the optional parameters */
839 /* This u320 (or u640). Ignore the coupled parameters
840 * like DT and IU, but set the optional ones */
841 DV_SET(rd_strm, 1); 845 DV_SET(rd_strm, 1);
842 DV_SET(wr_flow, 1); 846 DV_SET(wr_flow, 1);
843 DV_SET(rti, 1); 847 DV_SET(rti, 1);
844 if (spi_min_period(starget) == 8) 848 if (spi_min_period(starget) == 8)
845 DV_SET(pcomp_en, 1); 849 DV_SET(pcomp_en, 1);
850 } else {
851 DV_SET(iu, 0);
846 } 852 }
853
847 /* now that we've done all this, actually check the bus 854 /* now that we've done all this, actually check the bus
848 * signal type (if known). Some devices are stupid on 855 * signal type (if known). Some devices are stupid on
849 * a SE bus and still claim they can try LVD only settings */ 856 * a SE bus and still claim they can try LVD only settings */
850 if (i->f->get_signalling) 857 if (i->f->get_signalling)
851 i->f->get_signalling(shost); 858 i->f->get_signalling(shost);
852 if (spi_signalling(shost) == SPI_SIGNAL_SE || 859 if (spi_signalling(shost) == SPI_SIGNAL_SE ||
853 spi_signalling(shost) == SPI_SIGNAL_HVD) 860 spi_signalling(shost) == SPI_SIGNAL_HVD ||
861 !scsi_device_dt(sdev)) {
854 DV_SET(dt, 0); 862 DV_SET(dt, 0);
863 } else {
864 DV_SET(dt, 1);
865 }
855 /* Do the read only INQUIRY tests */ 866 /* Do the read only INQUIRY tests */
856 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, 867 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
857 spi_dv_device_compare_inquiry); 868 spi_dv_device_compare_inquiry);
@@ -907,6 +918,10 @@ spi_dv_device(struct scsi_device *sdev)
907 if (unlikely(scsi_device_get(sdev))) 918 if (unlikely(scsi_device_get(sdev)))
908 return; 919 return;
909 920
921 if (unlikely(spi_dv_in_progress(starget)))
922 return;
923 spi_dv_in_progress(starget) = 1;
924
910 buffer = kzalloc(len, GFP_KERNEL); 925 buffer = kzalloc(len, GFP_KERNEL);
911 926
912 if (unlikely(!buffer)) 927 if (unlikely(!buffer))
@@ -938,6 +953,7 @@ spi_dv_device(struct scsi_device *sdev)
938 out_free: 953 out_free:
939 kfree(buffer); 954 kfree(buffer);
940 out_put: 955 out_put:
956 spi_dv_in_progress(starget) = 0;
941 scsi_device_put(sdev); 957 scsi_device_put(sdev);
942} 958}
943EXPORT_SYMBOL(spi_dv_device); 959EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 98bd3aab9739..638cff41d436 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1215,7 +1215,7 @@ repeat:
1215 /* Either no media are present but the drive didn't tell us, 1215 /* Either no media are present but the drive didn't tell us,
1216 or they are present but the read capacity command fails */ 1216 or they are present but the read capacity command fails */
1217 /* sdkp->media_present = 0; -- not always correct */ 1217 /* sdkp->media_present = 0; -- not always correct */
1218 sdkp->capacity = 0x200000; /* 1 GB - random */ 1218 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
1219 1219
1220 return; 1220 return;
1221 } else if (the_result && longrc) { 1221 } else if (the_result && longrc) {
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 7cd366fcc571..4f1db6f2aae8 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -97,7 +97,7 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id, struct pt_regs *regs)
97} 97}
98 98
99static inline 99static inline
100void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp) 100void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
101{ 101{
102 unsigned long len = cmd->SCp.this_residual; 102 unsigned long len = cmd->SCp.this_residual;
103 void *addr = cmd->SCp.ptr; 103 void *addr = cmd->SCp.ptr;
@@ -129,7 +129,7 @@ void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp)
129 hcp->desc.cntinfo = HPCDMA_EOX; 129 hcp->desc.cntinfo = HPCDMA_EOX;
130} 130}
131 131
132static int dma_setup(Scsi_Cmnd *cmd, int datainp) 132static int dma_setup(struct scsi_cmnd *cmd, int datainp)
133{ 133{
134 struct ip22_hostdata *hdata = HDATA(cmd->device->host); 134 struct ip22_hostdata *hdata = HDATA(cmd->device->host);
135 struct hpc3_scsiregs *hregs = 135 struct hpc3_scsiregs *hregs =
@@ -163,7 +163,7 @@ static int dma_setup(Scsi_Cmnd *cmd, int datainp)
163 return 0; 163 return 0;
164} 164}
165 165
166static void dma_stop(struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 166static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
167 int status) 167 int status)
168{ 168{
169 struct ip22_hostdata *hdata = HDATA(instance); 169 struct ip22_hostdata *hdata = HDATA(instance);
@@ -305,7 +305,7 @@ static int sgiwd93_release(struct Scsi_Host *instance)
305 return 1; 305 return 1;
306} 306}
307 307
308static int sgiwd93_bus_reset(Scsi_Cmnd *cmd) 308static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
309{ 309{
310 /* FIXME perform bus-specific reset */ 310 /* FIXME perform bus-specific reset */
311 311
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
new file mode 100644
index 000000000000..3cf3106a29b8
--- /dev/null
+++ b/drivers/scsi/stex.c
@@ -0,0 +1,1252 @@
1/*
2 * SuperTrak EX Series Storage Controller driver for Linux
3 *
4 * Copyright (C) 2005, 2006 Promise Technology Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Written By:
12 * Ed Lin <promise_linux@promise.com>
13 *
14 * Version: 2.9.0.13
15 *
16 */
17
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23#include <linux/time.h>
24#include <linux/pci.h>
25#include <linux/blkdev.h>
26#include <linux/interrupt.h>
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/spinlock.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <asm/byteorder.h>
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_cmnd.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_tcq.h>
38
39#define DRV_NAME "stex"
40#define ST_DRIVER_VERSION "2.9.0.13"
41#define ST_VER_MAJOR 2
42#define ST_VER_MINOR 9
43#define ST_OEM 0
44#define ST_BUILD_VER 13
45
46enum {
47 /* MU register offset */
48 IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */
49 IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */
50 OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */
51 OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */
52 IDBL = 0x20, /* MU_INBOUND_DOORBELL */
53 IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */
54 IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */
55 ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */
56 OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */
57 OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */
58
59 /* MU register value */
60 MU_INBOUND_DOORBELL_HANDSHAKE = 1,
61 MU_INBOUND_DOORBELL_REQHEADCHANGED = 2,
62 MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4,
63 MU_INBOUND_DOORBELL_HMUSTOPPED = 8,
64 MU_INBOUND_DOORBELL_RESET = 16,
65
66 MU_OUTBOUND_DOORBELL_HANDSHAKE = 1,
67 MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2,
68 MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4,
69 MU_OUTBOUND_DOORBELL_BUSCHANGE = 8,
70 MU_OUTBOUND_DOORBELL_HASEVENT = 16,
71
72 /* MU status code */
73 MU_STATE_STARTING = 1,
74 MU_STATE_FMU_READY_FOR_HANDSHAKE = 2,
75 MU_STATE_SEND_HANDSHAKE_FRAME = 3,
76 MU_STATE_STARTED = 4,
77 MU_STATE_RESETTING = 5,
78
79 MU_MAX_DELAY_TIME = 240000,
80 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
81 HMU_PARTNER_TYPE = 2,
82
83 /* firmware returned values */
84 SRB_STATUS_SUCCESS = 0x01,
85 SRB_STATUS_ERROR = 0x04,
86 SRB_STATUS_BUSY = 0x05,
87 SRB_STATUS_INVALID_REQUEST = 0x06,
88 SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
89 SRB_SEE_SENSE = 0x80,
90
91 /* task attribute */
92 TASK_ATTRIBUTE_SIMPLE = 0x0,
93 TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
94 TASK_ATTRIBUTE_ORDERED = 0x2,
95 TASK_ATTRIBUTE_ACA = 0x4,
96
97 /* request count, etc. */
98 MU_MAX_REQUEST = 32,
99
100 /* one message wasted, use MU_MAX_REQUEST+1
101 to handle MU_MAX_REQUEST messages */
102 MU_REQ_COUNT = (MU_MAX_REQUEST + 1),
103 MU_STATUS_COUNT = (MU_MAX_REQUEST + 1),
104
105 STEX_CDB_LENGTH = MAX_COMMAND_SIZE,
106 REQ_VARIABLE_LEN = 1024,
107 STATUS_VAR_LEN = 128,
108 ST_CAN_QUEUE = MU_MAX_REQUEST,
109 ST_CMD_PER_LUN = MU_MAX_REQUEST,
110 ST_MAX_SG = 32,
111
112 /* sg flags */
113 SG_CF_EOT = 0x80, /* end of table */
114 SG_CF_64B = 0x40, /* 64 bit item */
115 SG_CF_HOST = 0x20, /* sg in host memory */
116
117 ST_MAX_ARRAY_SUPPORTED = 16,
118 ST_MAX_TARGET_NUM = (ST_MAX_ARRAY_SUPPORTED+1),
119 ST_MAX_LUN_PER_TARGET = 16,
120
121 st_shasta = 0,
122 st_vsc = 1,
123
124 PASSTHRU_REQ_TYPE = 0x00000001,
125 PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
126 ST_INTERNAL_TIMEOUT = 30,
127
128 /* vendor specific commands of Promise */
129 ARRAY_CMD = 0xe0,
130 CONTROLLER_CMD = 0xe1,
131 DEBUGGING_CMD = 0xe2,
132 PASSTHRU_CMD = 0xe3,
133
134 PASSTHRU_GET_ADAPTER = 0x05,
135 PASSTHRU_GET_DRVVER = 0x10,
136 CTLR_POWER_STATE_CHANGE = 0x0e,
137 CTLR_POWER_SAVING = 0x01,
138
139 PASSTHRU_SIGNATURE = 0x4e415041,
140
141 INQUIRY_EVPD = 0x01,
142};
143
144struct st_sgitem {
145 u8 ctrl; /* SG_CF_xxx */
146 u8 reserved[3];
147 __le32 count;
148 __le32 addr;
149 __le32 addr_hi;
150};
151
152struct st_sgtable {
153 __le16 sg_count;
154 __le16 max_sg_count;
155 __le32 sz_in_byte;
156 struct st_sgitem table[ST_MAX_SG];
157};
158
159struct handshake_frame {
160 __le32 rb_phy; /* request payload queue physical address */
161 __le32 rb_phy_hi;
162 __le16 req_sz; /* size of each request payload */
163 __le16 req_cnt; /* count of reqs the buffer can hold */
164 __le16 status_sz; /* size of each status payload */
165 __le16 status_cnt; /* count of status the buffer can hold */
166 __le32 hosttime; /* seconds from Jan 1, 1970 (GMT) */
167 __le32 hosttime_hi;
168 u8 partner_type; /* who sends this frame */
169 u8 reserved0[7];
170 __le32 partner_ver_major;
171 __le32 partner_ver_minor;
172 __le32 partner_ver_oem;
173 __le32 partner_ver_build;
174 u32 reserved1[4];
175};
176
177struct req_msg {
178 __le16 tag;
179 u8 lun;
180 u8 target;
181 u8 task_attr;
182 u8 task_manage;
183 u8 prd_entry;
184 u8 payload_sz; /* payload size in 4-byte */
185 u8 cdb[STEX_CDB_LENGTH];
186 u8 variable[REQ_VARIABLE_LEN];
187};
188
189struct status_msg {
190 __le16 tag;
191 u8 lun;
192 u8 target;
193 u8 srb_status;
194 u8 scsi_status;
195 u8 reserved;
196 u8 payload_sz; /* payload size in 4-byte */
197 u8 variable[STATUS_VAR_LEN];
198};
199
200struct ver_info {
201 u32 major;
202 u32 minor;
203 u32 oem;
204 u32 build;
205 u32 reserved[2];
206};
207
208struct st_frame {
209 u32 base[6];
210 u32 rom_addr;
211
212 struct ver_info drv_ver;
213 struct ver_info bios_ver;
214
215 u32 bus;
216 u32 slot;
217 u32 irq_level;
218 u32 irq_vec;
219 u32 id;
220 u32 subid;
221
222 u32 dimm_size;
223 u8 dimm_type;
224 u8 reserved[3];
225
226 u32 channel;
227 u32 reserved1;
228};
229
230struct st_drvver {
231 u32 major;
232 u32 minor;
233 u32 oem;
234 u32 build;
235 u32 signature[2];
236 u8 console_id;
237 u8 host_no;
238 u8 reserved0[2];
239 u32 reserved[3];
240};
241
242#define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg))
243#define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg))
244#define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE)
245#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + sizeof(struct st_frame))
246
247struct st_ccb {
248 struct req_msg *req;
249 struct scsi_cmnd *cmd;
250
251 void *sense_buffer;
252 unsigned int sense_bufflen;
253 int sg_count;
254
255 u32 req_type;
256 u8 srb_status;
257 u8 scsi_status;
258};
259
260struct st_hba {
261 void __iomem *mmio_base; /* iomapped PCI memory space */
262 void *dma_mem;
263 dma_addr_t dma_handle;
264
265 struct Scsi_Host *host;
266 struct pci_dev *pdev;
267
268 u32 req_head;
269 u32 req_tail;
270 u32 status_head;
271 u32 status_tail;
272
273 struct status_msg *status_buffer;
274 void *copy_buffer; /* temp buffer for driver-handled commands */
275 struct st_ccb ccb[MU_MAX_REQUEST];
276 struct st_ccb *wait_ccb;
277 wait_queue_head_t waitq;
278
279 unsigned int mu_status;
280 int out_req_cnt;
281
282 unsigned int cardtype;
283};
284
285static const char console_inq_page[] =
286{
287 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
288 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */
289 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */
290 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */
291 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */
292 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */
293 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */
294 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
295};
296
297MODULE_AUTHOR("Ed Lin");
298MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
299MODULE_LICENSE("GPL");
300MODULE_VERSION(ST_DRIVER_VERSION);
301
302static void stex_gettime(__le32 *time)
303{
304 struct timeval tv;
305 do_gettimeofday(&tv);
306
307 *time = cpu_to_le32(tv.tv_sec & 0xffffffff);
308 *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16);
309}
310
311static struct status_msg *stex_get_status(struct st_hba *hba)
312{
313 struct status_msg *status =
314 hba->status_buffer + hba->status_tail;
315
316 ++hba->status_tail;
317 hba->status_tail %= MU_STATUS_COUNT;
318
319 return status;
320}
321
322static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
323{
324 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
325
326 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
327 cmd->sense_buffer[2] = sk;
328 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
329 cmd->sense_buffer[12] = asc;
330 cmd->sense_buffer[13] = ascq;
331}
332
333static void stex_invalid_field(struct scsi_cmnd *cmd,
334 void (*done)(struct scsi_cmnd *))
335{
336 /* "Invalid field in cbd" */
337 stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
338 done(cmd);
339}
340
341static struct req_msg *stex_alloc_req(struct st_hba *hba)
342{
343 struct req_msg *req = ((struct req_msg *)hba->dma_mem) +
344 hba->req_head;
345
346 ++hba->req_head;
347 hba->req_head %= MU_REQ_COUNT;
348
349 return req;
350}
351
/*
 * Map the data buffer of ccb->cmd for DMA and fill in the firmware
 * scatter/gather table embedded in @req's variable payload.
 *
 * Returns 0 on success, or -EIO if pci_map_sg() maps no elements.
 * On the single-buffer path the mapping is remembered in
 * cmd->SCp.dma_handle for the later pci_unmap_single() in
 * stex_unmap_sg().
 */
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct pci_dev *pdev = hba->pdev;
	struct scsi_cmnd *cmd;
	dma_addr_t dma_handle;
	struct scatterlist *src;
	struct st_sgtable *dst;
	int i;

	cmd = ccb->cmd;
	dst = (struct st_sgtable *)req->variable;
	dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
	dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);

	if (cmd->use_sg) {
		int n_elem;

		src = (struct scatterlist *) cmd->request_buffer;
		n_elem = pci_map_sg(pdev, src,
			cmd->use_sg, cmd->sc_data_direction);
		if (n_elem <= 0)
			return -EIO;

		ccb->sg_count = n_elem;
		dst->sg_count = cpu_to_le16((u16)n_elem);

		for (i = 0; i < n_elem; i++, src++) {
			dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
			/* split the bus address into two LE32 halves; the
			   double 16-bit shift is safe on 32-bit dma_addr_t */
			dst->table[i].addr =
				cpu_to_le32(sg_dma_address(src) & 0xffffffff);
			dst->table[i].addr_hi =
				cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
			dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
		}
		/* flag the last entry as end-of-table */
		dst->table[--i].ctrl |= SG_CF_EOT;
		return 0;
	}

	/* single linear buffer: build a one-entry S/G table */
	dma_handle = pci_map_single(pdev, cmd->request_buffer,
		cmd->request_bufflen, cmd->sc_data_direction);
	cmd->SCp.dma_handle = dma_handle;

	ccb->sg_count = 1;
	dst->sg_count = cpu_to_le16(1);
	dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
	dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
	dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
	dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;

	return 0;
}
404
/*
 * Copy up to *count bytes from @src into the data buffer of @cmd,
 * clamping *count to cmd->request_bufflen.  A scatter/gather buffer is
 * walked chunk by chunk with scsi_kmap_atomic_sg(); if a kmap fails,
 * *count is reduced by the bytes that were NOT copied so the caller
 * can detect truncation.
 */
static void stex_internal_copy(struct scsi_cmnd *cmd,
	const void *src, size_t *count, int sg_count)
{
	size_t lcount;	/* bytes still to copy */
	size_t len;	/* bytes handled this iteration */
	void *s, *d, *base = NULL;

	if (*count > cmd->request_bufflen)
		*count = cmd->request_bufflen;
	lcount = *count;
	while (lcount) {
		len = lcount;
		s = (void *)src;
		if (cmd->use_sg) {
			size_t offset = *count - lcount;
			s += offset;
			/* map the S/G chunk containing 'offset'; the kmap
			   rewrites offset/len relative to that chunk */
			base = scsi_kmap_atomic_sg(cmd->request_buffer,
				sg_count, &offset, &len);
			if (base == NULL) {
				*count -= lcount;
				return;
			}
			d = base + offset;
		} else
			d = cmd->request_buffer;

		memcpy(d, s, len);

		lcount -= len;
		if (cmd->use_sg)
			scsi_kunmap_atomic_sg(base);
	}
}
437
/*
 * Synchronously copy @count bytes from @src into the data buffer of
 * @cmd, mapping/unmapping its scatterlist around the copy when one is
 * in use.  Returns non-zero only if the full @count bytes were copied
 * (stex_internal_copy() may clamp or truncate cp_len), 0 on mapping
 * failure or truncation.
 */
static int stex_direct_copy(struct scsi_cmnd *cmd,
	const void *src, size_t count)
{
	struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
	size_t cp_len = count;
	int n_elem = 0;

	if (cmd->use_sg) {
		n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
			cmd->use_sg, cmd->sc_data_direction);
		if (n_elem <= 0)
			return 0;
	}

	stex_internal_copy(cmd, src, &cp_len, n_elem);

	if (cmd->use_sg)
		pci_unmap_sg(hba->pdev, cmd->request_buffer,
			cmd->use_sg, cmd->sc_data_direction);
	return cp_len == count;
}
459
/*
 * Answer a PASSTHRU_GET_ADAPTER request: build an st_frame describing
 * the controller (BAR0 base address, driver version, PCI bus/slot/irq
 * and vendor/device IDs) in the shared copy buffer, then copy it into
 * the command's data buffer.
 */
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
	struct st_frame *p;
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
	p->rom_addr = 0;

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_level = 0;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count);
}
485
/*
 * Finalize @req (tag, task attribute, payload size), account it in the
 * ccb table, and ring the inbound doorbell so the firmware picks up
 * the new request-queue head.  Callers invoke this under the host lock
 * (see stex_hba_stop()).  The trailing readl() flushes posted writes.
 */
static void
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	req->tag = cpu_to_le16(tag);
	req->task_attr = TASK_ATTRIBUTE_SIMPLE;
	req->task_manage = 0; /* not supported yet */
	req->payload_sz = (u8)(sizeof(struct req_msg)/sizeof(u32));

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
}
501
502static int
503stex_slave_alloc(struct scsi_device *sdev)
504{
505 /* Cheat: usually extracted from Inquiry data */
506 sdev->tagged_supported = 1;
507
508 scsi_activate_tcq(sdev, sdev->host->can_queue);
509
510 return 0;
511}
512
513static int
514stex_slave_config(struct scsi_device *sdev)
515{
516 sdev->use_10_for_rw = 1;
517 sdev->use_10_for_ms = 1;
518 sdev->timeout = 60 * HZ;
519 sdev->tagged_supported = 1;
520
521 return 0;
522}
523
/* Undo stex_slave_alloc(): drop back to untagged operation, depth 1. */
static void
stex_slave_destroy(struct scsi_device *sdev)
{
	scsi_deactivate_tcq(sdev, 1);
}
529
530static int
531stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
532{
533 struct st_hba *hba;
534 struct Scsi_Host *host;
535 unsigned int id,lun;
536 struct req_msg *req;
537 u16 tag;
538 host = cmd->device->host;
539 id = cmd->device->id;
540 lun = cmd->device->channel; /* firmware lun issue work around */
541 hba = (struct st_hba *) &host->hostdata[0];
542
543 switch (cmd->cmnd[0]) {
544 case MODE_SENSE_10:
545 {
546 static char ms10_caching_page[12] =
547 { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
548 unsigned char page;
549 page = cmd->cmnd[2] & 0x3f;
550 if (page == 0x8 || page == 0x3f) {
551 stex_direct_copy(cmd, ms10_caching_page,
552 sizeof(ms10_caching_page));
553 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
554 done(cmd);
555 } else
556 stex_invalid_field(cmd, done);
557 return 0;
558 }
559 case INQUIRY:
560 if (id != ST_MAX_ARRAY_SUPPORTED)
561 break;
562 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
563 stex_direct_copy(cmd, console_inq_page,
564 sizeof(console_inq_page));
565 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
566 done(cmd);
567 } else
568 stex_invalid_field(cmd, done);
569 return 0;
570 case PASSTHRU_CMD:
571 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
572 struct st_drvver ver;
573 ver.major = ST_VER_MAJOR;
574 ver.minor = ST_VER_MINOR;
575 ver.oem = ST_OEM;
576 ver.build = ST_BUILD_VER;
577 ver.signature[0] = PASSTHRU_SIGNATURE;
578 ver.console_id = ST_MAX_ARRAY_SUPPORTED;
579 ver.host_no = hba->host->host_no;
580 cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ?
581 DID_OK << 16 | COMMAND_COMPLETE << 8 :
582 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
583 done(cmd);
584 return 0;
585 }
586 default:
587 break;
588 }
589
590 cmd->scsi_done = done;
591
592 tag = cmd->request->tag;
593
594 if (unlikely(tag >= host->can_queue))
595 return SCSI_MLQUEUE_HOST_BUSY;
596
597 req = stex_alloc_req(hba);
598 req->lun = lun;
599 req->target = id;
600
601 /* cdb */
602 memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
603
604 hba->ccb[tag].cmd = cmd;
605 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
606 hba->ccb[tag].sense_buffer = cmd->sense_buffer;
607 hba->ccb[tag].req_type = 0;
608
609 if (cmd->sc_data_direction != DMA_NONE)
610 stex_map_sg(hba, req, &hba->ccb[tag]);
611
612 stex_send_cmd(hba, req, tag);
613 return 0;
614}
615
616static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
617{
618 if (cmd->sc_data_direction != DMA_NONE) {
619 if (cmd->use_sg)
620 pci_unmap_sg(hba->pdev, cmd->request_buffer,
621 cmd->use_sg, cmd->sc_data_direction);
622 else
623 pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
624 cmd->request_bufflen, cmd->sc_data_direction);
625 }
626}
627
628static void stex_scsi_done(struct st_ccb *ccb)
629{
630 struct scsi_cmnd *cmd = ccb->cmd;
631 int result;
632
633 if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
634 result = ccb->scsi_status;
635 switch (ccb->scsi_status) {
636 case SAM_STAT_GOOD:
637 result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
638 break;
639 case SAM_STAT_CHECK_CONDITION:
640 result |= DRIVER_SENSE << 24;
641 break;
642 case SAM_STAT_BUSY:
643 result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
644 break;
645 default:
646 result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
647 break;
648 }
649 }
650 else if (ccb->srb_status & SRB_SEE_SENSE)
651 result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
652 else switch (ccb->srb_status) {
653 case SRB_STATUS_SELECTION_TIMEOUT:
654 result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
655 break;
656 case SRB_STATUS_BUSY:
657 result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
658 break;
659 case SRB_STATUS_INVALID_REQUEST:
660 case SRB_STATUS_ERROR:
661 default:
662 result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
663 break;
664 }
665
666 cmd->result = result;
667 cmd->scsi_done(cmd);
668}
669
/*
 * Copy a completed request's variable status payload back to the host:
 * for a non-GOOD SCSI status the payload carries sense data and goes
 * into the saved sense buffer; for GOOD status it is data and is
 * copied into the command's data buffer.
 */
static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
{
	size_t count = variable;

	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));
		return;
	}

	if (ccb->cmd == NULL)
		return;
	stex_internal_copy(ccb->cmd, resp->variable, &count, ccb->sg_count);
}
685
/*
 * Drain the outbound status ring after a STATUSHEADCHANGED doorbell.
 *
 * For each new status_msg: validate the tag, copy back sense/data via
 * stex_copy_data(), then either complete the SCSI command or handle an
 * internal passthrough completion (waking any waiter).  Runs under the
 * host lock (called from stex_intr() and stex_abort()).
 */
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;
	struct st_ccb *ccb;
	unsigned int size;
	u16 tag;

	if (!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))
		return;

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head >= MU_STATUS_COUNT)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));
		return;
	}

	/* MU not started (e.g. mid-reset) or nothing outstanding:
	   discard everything the firmware posted */
	if (unlikely(hba->mu_status != MU_STATE_STARTED ||
		hba->out_req_cnt <= 0)) {
		hba->status_tail = hba->status_head;
		goto update_status;
	}

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		ccb = &hba->ccb[tag];
		/* tell a concurrent stex_abort() the command finished */
		if (hba->wait_ccb == ccb)
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		size = resp->payload_sz * sizeof(u32); /* payload size */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));
		} else {
			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
			if (size)
				stex_copy_data(ccb, resp, size);
		}

		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			/* PASSTHRU_GET_ADAPTER wants controller info filled
			   into its data buffer before completion */
			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);
			stex_unmap_sg(hba, ccb->cmd);
			stex_scsi_done(ccb);
			hba->out_req_cnt--;
		} else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
			/* internal request (e.g. stex_hba_stop()) */
			hba->out_req_cnt--;
			if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) {
				ccb->req_type = 0;
				continue;
			}
			ccb->req_type = 0;
			if (waitqueue_active(&hba->waitq))
				wake_up(&hba->waitq);
		}
	}

update_status:
	/* acknowledge the consumed entries to the firmware */
	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */
}
766
/*
 * Shared interrupt handler: read and clear the outbound doorbell, then
 * process posted status messages.  A value of 0 or 0xffffffff means
 * the interrupt is not ours (or the device is gone), so it is reported
 * as unhandled.
 */
static irqreturn_t stex_intr(int irq, void *__hba, struct pt_regs *regs)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;
	int handled = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
		handled = 1;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_RETVAL(handled);
}
791
/*
 * Perform the message-unit handshake with the firmware.
 *
 * Waits for the handshake signature in OMR0 (requesting it through the
 * inbound doorbell if absent), then publishes the DMA ring layout —
 * ring base address, request/status element sizes and counts, host
 * time and partner type — via a handshake_frame placed right after the
 * request ring, rings the doorbell and waits for the firmware to post
 * the signature again as acknowledgement.  Finally clears the mailbox
 * registers and marks the MU started.
 *
 * Returns 0 on success, -1 if either wait times out
 * (MU_MAX_DELAY_TIME milliseconds).
 */
static int stex_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	int i;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
		readl(base + IDBL);
		for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
			&& i < MU_MAX_DELAY_TIME; i++) {
			rmb();
			msleep(1);
		}

		if (i == MU_MAX_DELAY_TIME) {
			printk(KERN_ERR DRV_NAME
				"(%s): no handshake signature\n",
				pci_name(hba->pdev));
			return -1;
		}
	}

	udelay(10);

	/* build the handshake frame in the (currently idle) status area */
	h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
	h->rb_phy = cpu_to_le32(hba->dma_handle);
	/* double 16-bit shift: safe even when dma_addr_t is 32 bits */
	h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
	h->req_sz = cpu_to_le16(sizeof(struct req_msg));
	h->req_cnt = cpu_to_le16(MU_REQ_COUNT);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
	stex_gettime(&h->hosttime);
	h->partner_type = HMU_PARTNER_TYPE;

	/* hand the status ring's physical address to the firmware */
	status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
	writel(status_phys, base + IMR0);
	readl(base + IMR0);
	writel((status_phys >> 16) >> 16, base + IMR1);
	readl(base + IMR1);

	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
	readl(base + OMR0);
	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	udelay(10);
	for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
		&& i < MU_MAX_DELAY_TIME; i++) {
		rmb();
		msleep(1);
	}

	if (i == MU_MAX_DELAY_TIME) {
		printk(KERN_ERR DRV_NAME
			"(%s): no signature after handshake frame\n",
			pci_name(hba->pdev));
		return -1;
	}

	/* clear all mailbox registers before normal operation */
	writel(0, base + IMR0);
	readl(base + IMR0);
	writel(0, base + OMR0);
	readl(base + OMR0);
	writel(0, base + IMR1);
	readl(base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */
	hba->mu_status = MU_STATE_STARTED;
	return 0;
}
864
/*
 * eh_abort handler.
 *
 * Locate the ccb for @cmd and mark it as waited-on, then read/clear
 * the doorbell once and run stex_mu_intr() by hand in case the
 * completion interrupt was lost.  If the command completed during that
 * pass (stex_mu_intr() cleared wait_ccb) the abort reports SUCCESS;
 * otherwise the request is nullified, the command unmapped, and FAILED
 * is returned.  All of this runs under the host lock.
 */
static int stex_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = cmd->request->tag;
	void __iomem *base;
	u32 data;
	int result = SUCCESS;
	unsigned long flags;

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];
	else {
		/* tag lookup failed; scan the whole ccb table */
		for (tag = 0; tag < host->can_queue; tag++)
			if (hba->ccb[tag].cmd == cmd) {
				hba->wait_ccb = &hba->ccb[tag];
				break;
			}
		if (tag >= host->can_queue)
			goto out;	/* command unknown: report SUCCESS */
	}

	data = readl(base + ODBL);
	if (data == 0 || data == 0xffffffff)
		goto fail_out;	/* no pending doorbell to reap */

	writel(data, base + ODBL);
	readl(base + ODBL); /* flush */

	stex_mu_intr(hba, data);

	if (hba->wait_ccb == NULL) {
		/* the command completed during the manual poll */
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));
		goto out;
	}

fail_out:
	stex_unmap_sg(hba, cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;
	result = FAILED;
out:
	spin_unlock_irqrestore(host->host_lock, flags);
	return result;
}
912
/*
 * Hard-reset a shasta-class controller.
 *
 * Saves the 64-byte PCI config header by hand, pulses the
 * secondary-bus reset bit of the upstream bridge, waits for the
 * controller to re-enable bus mastering, and restores the config space
 * that the reset wiped out.
 */
static void stex_hard_reset(struct st_hba *hba)
{
	struct pci_bus *bus;
	int i;
	u16 pci_cmd;
	u8 pci_bctl;

	/* save the config header (16 dwords) before the reset clears it */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/* Reset secondary bus. Our controller(MU/ATU) is the only device on
	   secondary bus. Consult Intel 80331/3 developer's manual for detail */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
	msleep(1);
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/* wait (up to MU_MAX_DELAY_TIME ms) for bus mastering to return */
	for (i = 0; i < MU_MAX_DELAY_TIME; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_MASTER)
			break;
		msleep(1);
	}

	ssleep(5);
	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
}
946
/*
 * eh_host_reset handler: hard-reset shasta boards, redo the firmware
 * handshake, then reinitialize all ring indices and the outstanding
 * request count under the host lock.  Responses for requests that were
 * in flight across the reset are discarded by stex_mu_intr() while
 * mu_status != MU_STATE_STARTED.
 */
static int stex_reset(struct scsi_cmnd *cmd)
{
	struct st_hba *hba;
	unsigned long flags;

	hba = (struct st_hba *) &cmd->device->host->hostdata[0];

	hba->mu_status = MU_STATE_RESETTING;

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);

	if (stex_handshake(hba)) {
		printk(KERN_WARNING DRV_NAME
			"(%s): resetting: handshake failed\n",
			pci_name(hba->pdev));
		return FAILED;
	}
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->req_head = 0;
	hba->req_tail = 0;
	hba->status_head = 0;
	hba->status_tail = 0;
	hba->out_req_cnt = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return SUCCESS;
}
974
975static int stex_biosparam(struct scsi_device *sdev,
976 struct block_device *bdev, sector_t capacity, int geom[])
977{
978 int heads = 255, sectors = 63, cylinders;
979
980 if (capacity < 0x200000) {
981 heads = 64;
982 sectors = 32;
983 }
984
985 cylinders = sector_div(capacity, heads * sectors);
986
987 geom[0] = heads;
988 geom[1] = sectors;
989 geom[2] = cylinders;
990
991 return 0;
992}
993
/*
 * SCSI host template shared by all supported boards.  Tagged queuing
 * uses a shared tag map of ST_CAN_QUEUE entries (see stex_probe()).
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.proc_name = DRV_NAME,
	.bios_param = stex_biosparam,
	.queuecommand = stex_queuecommand,
	.slave_alloc = stex_slave_alloc,
	.slave_configure = stex_slave_config,
	.slave_destroy = stex_slave_destroy,
	.eh_abort_handler = stex_abort,
	.eh_host_reset_handler = stex_reset,
	.can_queue = ST_CAN_QUEUE,
	.this_id = -1,
	.sg_tablesize = ST_MAX_SG,
	.cmd_per_lun = ST_CMD_PER_LUN,
};
1010
1011static int stex_set_dma_mask(struct pci_dev * pdev)
1012{
1013 int ret;
1014 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
1015 && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1016 return 0;
1017 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1018 if (!ret)
1019 ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1020 return ret;
1021}
1022
/*
 * PCI probe: bring up one SuperTrak EX controller.
 *
 * Enables the device, allocates a Scsi_Host with an embedded st_hba,
 * maps BAR0, sets the DMA mask (64-bit preferred), allocates the
 * coherent request/status/copy buffer, hooks the shared IRQ, performs
 * the firmware handshake and registers the host with the midlayer.
 * All failure paths unwind in reverse acquisition order.
 */
static int __devinit
stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_disable;
	}

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);
	if (err < 0) {
		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
			pci_name(pdev));
		goto out_scsi_host_put;
	}

	hba->mmio_base = ioremap(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	if ( !hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_release_regions;
	}

	err = stex_set_dma_mask(pdev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
			pci_name(pdev));
		goto out_iounmap;
	}

	/* one coherent allocation holds the request ring, the status
	   ring and the controller-info copy buffer */
	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		err = -ENOMEM;
		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
			pci_name(pdev));
		goto out_iounmap;
	}

	hba->status_buffer =
		(struct status_msg *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
	hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
	hba->mu_status = MU_STATE_STARTING;

	/* st_shasta or st_vsc, from the matching pci_device_id entry */
	hba->cardtype = (unsigned int) id->driver_data;

	/* firmware uses id/lun pair for a logical drive, but lun would be
	   always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use
	   channel to map lun here */
	host->max_channel = ST_MAX_LUN_PER_TARGET - 1;
	host->max_id = ST_MAX_TARGET_NUM;
	host->max_lun = 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	hba->host = host;
	hba->pdev = pdev;
	init_waitqueue_head(&hba->waitq);

	err = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
			pci_name(pdev));
		goto out_pci_free;
	}

	err = stex_handshake(hba);
	if (err)
		goto out_free_irq;

	err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
			pci_name(pdev));
		goto out_free_irq;
	}

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
			pci_name(pdev));
		goto out_free_irq;
	}

	scsi_scan_host(host);

	return 0;

out_free_irq:
	free_irq(pdev->irq, hba);
out_pci_free:
	dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE,
		hba->dma_mem, hba->dma_handle);
out_iounmap:
	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);

	return err;
}
1147
/*
 * Ask the firmware to enter its power-saving state before the driver
 * goes away (remove/shutdown paths).  Builds an internal
 * CONTROLLER_CMD request on tag 0 under the host lock, then polls
 * (up to ST_INTERNAL_TIMEOUT seconds) for stex_mu_intr() to clear the
 * PASSTHRU_REQ_TYPE flag on completion.
 * NOTE(review): tag 0 is used unconditionally — assumed safe because
 * the host is already quiesced when this runs; confirm against callers.
 */
static void stex_hba_stop(struct st_hba *hba)
{
	struct req_msg *req;
	unsigned long flags;
	unsigned long before;
	u16 tag = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);
	req = stex_alloc_req(hba);
	memset(req->cdb, 0, STEX_CDB_LENGTH);

	req->cdb[0] = CONTROLLER_CMD;
	req->cdb[1] = CTLR_POWER_STATE_CHANGE;
	req->cdb[2] = CTLR_POWER_SAVING;

	/* internal request: no scsi_cmnd attached */
	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE;

	stex_send_cmd(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	before = jiffies;
	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ))
			return;	/* firmware never answered; give up */
		msleep(10);
	}
}
1179
1180static void stex_hba_free(struct st_hba *hba)
1181{
1182 free_irq(hba->pdev->irq, hba);
1183
1184 iounmap(hba->mmio_base);
1185
1186 pci_release_regions(hba->pdev);
1187
1188 dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE,
1189 hba->dma_mem, hba->dma_handle);
1190}
1191
/*
 * PCI remove: detach the host from the midlayer first (no new
 * commands), put the firmware into power-saving mode, release all
 * hardware resources, then drop the Scsi_Host reference and disable
 * the PCI device.
 */
static void stex_remove(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	scsi_remove_host(hba->host);

	pci_set_drvdata(pdev, NULL);

	stex_hba_stop(hba);

	stex_hba_free(hba);

	scsi_host_put(hba->host);

	pci_disable_device(pdev);
}
1208
/* PCI shutdown hook: put the firmware into its power-saving state. */
static void stex_shutdown(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	stex_hba_stop(hba);
}
1215
/*
 * Supported boards.  All vendor 0x105a (Promise) entries are handled
 * as st_shasta; the 0x1725/0x7250 entry as st_vsc.  driver_data
 * selects the card type (consumed in stex_probe()).
 */
static struct pci_device_id stex_pci_tbl[] = {
	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
	{ 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
	{ 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
	{ 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
	{ 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
	{ 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
1228
/* PCI driver glue; __devexit_p() discards stex_remove on non-hotplug
 * builds. */
static struct pci_driver stex_pci_driver = {
	.name = DRV_NAME,
	.id_table = stex_pci_tbl,
	.probe = stex_probe,
	.remove = __devexit_p(stex_remove),
	.shutdown = stex_shutdown,
};
1236
1237static int __init stex_init(void)
1238{
1239 printk(KERN_INFO DRV_NAME
1240 ": Promise SuperTrak EX Driver version: %s\n",
1241 ST_DRIVER_VERSION);
1242
1243 return pci_register_driver(&stex_pci_driver);
1244}
1245
1246static void __exit stex_exit(void)
1247{
1248 pci_unregister_driver(&stex_pci_driver);
1249}
1250
1251module_init(stex_init);
1252module_exit(stex_exit);
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index e681681ab7a2..0372aa9fa190 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -196,8 +196,8 @@ struct mscp {
196 u32 sense_data PACKED; 196 u32 sense_data PACKED;
197 /* The following fields are for software only. They are included in 197 /* The following fields are for software only. They are included in
198 the MSCP structure because they are associated with SCSI requests. */ 198 the MSCP structure because they are associated with SCSI requests. */
199 void (*done)(Scsi_Cmnd *); 199 void (*done) (struct scsi_cmnd *);
200 Scsi_Cmnd *SCint; 200 struct scsi_cmnd *SCint;
201 ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */ 201 ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
202}; 202};
203 203
@@ -289,7 +289,7 @@ static const unsigned short ultrastor_ports_14f[] = {
289 289
290static void ultrastor_interrupt(int, void *, struct pt_regs *); 290static void ultrastor_interrupt(int, void *, struct pt_regs *);
291static irqreturn_t do_ultrastor_interrupt(int, void *, struct pt_regs *); 291static irqreturn_t do_ultrastor_interrupt(int, void *, struct pt_regs *);
292static inline void build_sg_list(struct mscp *, Scsi_Cmnd *SCpnt); 292static inline void build_sg_list(struct mscp *, struct scsi_cmnd *SCpnt);
293 293
294 294
295/* Always called with host lock held */ 295/* Always called with host lock held */
@@ -673,7 +673,7 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt)
673 return buf; 673 return buf;
674} 674}
675 675
676static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt) 676static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
677{ 677{
678 struct scatterlist *sl; 678 struct scatterlist *sl;
679 long transfer_length = 0; 679 long transfer_length = 0;
@@ -694,7 +694,8 @@ static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt)
694 mscp->transfer_data_length = transfer_length; 694 mscp->transfer_data_length = transfer_length;
695} 695}
696 696
697static int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) 697static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
698 void (*done) (struct scsi_cmnd *))
698{ 699{
699 struct mscp *my_mscp; 700 struct mscp *my_mscp;
700#if ULTRASTOR_MAX_CMDS > 1 701#if ULTRASTOR_MAX_CMDS > 1
@@ -833,7 +834,7 @@ retry:
833 834
834 */ 835 */
835 836
836static int ultrastor_abort(Scsi_Cmnd *SCpnt) 837static int ultrastor_abort(struct scsi_cmnd *SCpnt)
837{ 838{
838#if ULTRASTOR_DEBUG & UD_ABORT 839#if ULTRASTOR_DEBUG & UD_ABORT
839 char out[108]; 840 char out[108];
@@ -843,7 +844,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt)
843 unsigned int mscp_index; 844 unsigned int mscp_index;
844 unsigned char old_aborted; 845 unsigned char old_aborted;
845 unsigned long flags; 846 unsigned long flags;
846 void (*done)(Scsi_Cmnd *); 847 void (*done)(struct scsi_cmnd *);
847 struct Scsi_Host *host = SCpnt->device->host; 848 struct Scsi_Host *host = SCpnt->device->host;
848 849
849 if(config.slot) 850 if(config.slot)
@@ -960,7 +961,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt)
960 return SUCCESS; 961 return SUCCESS;
961} 962}
962 963
963static int ultrastor_host_reset(Scsi_Cmnd * SCpnt) 964static int ultrastor_host_reset(struct scsi_cmnd * SCpnt)
964{ 965{
965 unsigned long flags; 966 unsigned long flags;
966 int i; 967 int i;
@@ -1045,8 +1046,8 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1045 unsigned int mscp_index; 1046 unsigned int mscp_index;
1046#endif 1047#endif
1047 struct mscp *mscp; 1048 struct mscp *mscp;
1048 void (*done)(Scsi_Cmnd *); 1049 void (*done) (struct scsi_cmnd *);
1049 Scsi_Cmnd *SCtmp; 1050 struct scsi_cmnd *SCtmp;
1050 1051
1051#if ULTRASTOR_MAX_CMDS == 1 1052#if ULTRASTOR_MAX_CMDS == 1
1052 mscp = &config.mscp[0]; 1053 mscp = &config.mscp[0];
@@ -1079,7 +1080,7 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1079 return; 1080 return;
1080 } 1081 }
1081 if (icm_status == 3) { 1082 if (icm_status == 3) {
1082 void (*done)(Scsi_Cmnd *) = mscp->done; 1083 void (*done)(struct scsi_cmnd *) = mscp->done;
1083 if (done) { 1084 if (done) {
1084 mscp->done = NULL; 1085 mscp->done = NULL;
1085 mscp->SCint->result = DID_ABORT << 16; 1086 mscp->SCint->result = DID_ABORT << 16;
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h
index da759a11deff..a692905f95f7 100644
--- a/drivers/scsi/ultrastor.h
+++ b/drivers/scsi/ultrastor.h
@@ -14,11 +14,13 @@
14#define _ULTRASTOR_H 14#define _ULTRASTOR_H
15 15
16static int ultrastor_detect(struct scsi_host_template *); 16static int ultrastor_detect(struct scsi_host_template *);
17static const char *ultrastor_info(struct Scsi_Host * shpnt); 17static const char *ultrastor_info(struct Scsi_Host *shpnt);
18static int ultrastor_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); 18static int ultrastor_queuecommand(struct scsi_cmnd *,
19static int ultrastor_abort(Scsi_Cmnd *); 19 void (*done)(struct scsi_cmnd *));
20static int ultrastor_host_reset(Scsi_Cmnd *); 20static int ultrastor_abort(struct scsi_cmnd *);
21static int ultrastor_biosparam(struct scsi_device *, struct block_device *, sector_t, int *); 21static int ultrastor_host_reset(struct scsi_cmnd *);
22static int ultrastor_biosparam(struct scsi_device *, struct block_device *,
23 sector_t, int *);
22 24
23 25
24#define ULTRASTOR_14F_MAX_SG 16 26#define ULTRASTOR_14F_MAX_SG 16
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index a2c56b2de589..3305fb6079eb 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -1818,7 +1818,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1818 int n, len, insize = 0; 1818 int n, len, insize = 0;
1819 1819
1820 /* Ignore all Wacom devices */ 1820 /* Ignore all Wacom devices */
1821 if (dev->descriptor.idVendor == USB_VENDOR_ID_WACOM) 1821 if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
1822 return NULL; 1822 return NULL;
1823 1823
1824 for (n = 0; hid_blacklist[n].idVendor; n++) 1824 for (n = 0; hid_blacklist[n].idVendor; n++)
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 390439b3d899..1b4f75d1f8a9 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3197,11 +3197,11 @@ static void fbcon_exit(void)
3197 return; 3197 return;
3198 3198
3199#ifdef CONFIG_ATARI 3199#ifdef CONFIG_ATARI
3200 free_irq(IRQ_AUTO_4, fbcon_vbl_handler); 3200 free_irq(IRQ_AUTO_4, fb_vbl_handler);
3201#endif 3201#endif
3202#ifdef CONFIG_MAC 3202#ifdef CONFIG_MAC
3203 if (MACH_IS_MAC && vbl_detected) 3203 if (MACH_IS_MAC && vbl_detected)
3204 free_irq(IRQ_MAC_VBL, fbcon_vbl_handler); 3204 free_irq(IRQ_MAC_VBL, fb_vbl_handler);
3205#endif 3205#endif
3206 3206
3207 kfree((void *)softback_buf); 3207 kfree((void *)softback_buf);
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 67d1e1c8813d..4acde4f7dbf8 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1826,8 +1826,8 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
1826{ 1826{
1827 struct riva_par *par = info->par; 1827 struct riva_par *par = info->par;
1828 struct device_node *dp; 1828 struct device_node *dp;
1829 unsigned char *pedid = NULL; 1829 const unsigned char *pedid = NULL;
1830 unsigned char *disptype = NULL; 1830 const unsigned char *disptype = NULL;
1831 static char *propnames[] = { 1831 static char *propnames[] = {
1832 "DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL }; 1832 "DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL };
1833 int i; 1833 int i;