author	Steve French <sfrench@us.ibm.com>	2008-04-28 00:01:34 -0400
committer	Steve French <sfrench@us.ibm.com>	2008-04-28 00:01:34 -0400
commit	1dbbb6077426f8ce63d6a59c5ac6613e1689cbde (patch)
tree	6141d4d7a8eb7c557705bdfa764137d4fd2e4924 /drivers
parent	d09e860cf07e7c9ee12920a09f5080e30a12a23a (diff)
parent	064922a805ec7aadfafdd27aa6b4908d737c3c1d (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/processor_idle.c19
-rw-r--r--drivers/char/agp/amd-k7-agp.c3
-rw-r--r--drivers/char/agp/frontend.c4
-rw-r--r--drivers/char/drm/ati_pcigart.c7
-rw-r--r--drivers/char/drm/drm.h17
-rw-r--r--drivers/char/drm/drmP.h133
-rw-r--r--drivers/char/drm/drm_agpsupport.c2
-rw-r--r--drivers/char/drm/drm_drv.c60
-rw-r--r--drivers/char/drm/drm_fops.c41
-rw-r--r--drivers/char/drm/drm_irq.c381
-rw-r--r--drivers/char/drm/drm_proc.c61
-rw-r--r--drivers/char/drm/drm_stub.c138
-rw-r--r--drivers/char/drm/drm_sysfs.c46
-rw-r--r--drivers/char/drm/drm_vm.c22
-rw-r--r--drivers/char/drm/i810_dma.c4
-rw-r--r--drivers/char/drm/i830_dma.c4
-rw-r--r--drivers/char/drm/i915_dma.c160
-rw-r--r--drivers/char/drm/i915_drm.h45
-rw-r--r--drivers/char/drm/i915_drv.c8
-rw-r--r--drivers/char/drm/i915_drv.h103
-rw-r--r--drivers/char/drm/i915_irq.c605
-rw-r--r--drivers/char/drm/mga_drv.c7
-rw-r--r--drivers/char/drm/mga_drv.h6
-rw-r--r--drivers/char/drm/mga_irq.c69
-rw-r--r--drivers/char/drm/r128_drv.c7
-rw-r--r--drivers/char/drm/r128_drv.h9
-rw-r--r--drivers/char/drm/r128_irq.c55
-rw-r--r--drivers/char/drm/radeon_drv.c8
-rw-r--r--drivers/char/drm/radeon_drv.h19
-rw-r--r--drivers/char/drm/radeon_irq.c171
-rw-r--r--drivers/char/drm/via_drv.c6
-rw-r--r--drivers/char/drm/via_drv.h7
-rw-r--r--drivers/char/drm/via_irq.c81
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/arm/bast-ide.c25
-rw-r--r--drivers/ide/arm/icside.c69
-rw-r--r--drivers/ide/arm/ide_arm.c20
-rw-r--r--drivers/ide/arm/palm_bk3710.c64
-rw-r--r--drivers/ide/arm/rapide.c11
-rw-r--r--drivers/ide/cris/ide-cris.c53
-rw-r--r--drivers/ide/h8300/ide-h8300.c10
-rw-r--r--drivers/ide/ide-acpi.c30
-rw-r--r--drivers/ide/ide-cd.c917
-rw-r--r--drivers/ide/ide-cd.h4
-rw-r--r--drivers/ide/ide-disk.c159
-rw-r--r--drivers/ide/ide-dma.c153
-rw-r--r--drivers/ide/ide-floppy.c34
-rw-r--r--drivers/ide/ide-generic.c36
-rw-r--r--drivers/ide/ide-io.c59
-rw-r--r--drivers/ide/ide-iops.c110
-rw-r--r--drivers/ide/ide-lib.c44
-rw-r--r--drivers/ide/ide-pnp.c45
-rw-r--r--drivers/ide/ide-probe.c285
-rw-r--r--drivers/ide/ide-proc.c169
-rw-r--r--drivers/ide/ide-scan-pci.c2
-rw-r--r--drivers/ide/ide-tape.c1204
-rw-r--r--drivers/ide/ide-taskfile.c48
-rw-r--r--drivers/ide/ide.c491
-rw-r--r--drivers/ide/legacy/ali14xx.c44
-rw-r--r--drivers/ide/legacy/buddha.c18
-rw-r--r--drivers/ide/legacy/dtc2278.c39
-rw-r--r--drivers/ide/legacy/falconide.c14
-rw-r--r--drivers/ide/legacy/gayle.c22
-rw-r--r--drivers/ide/legacy/hd.c78
-rw-r--r--drivers/ide/legacy/ht6560b.c57
-rw-r--r--drivers/ide/legacy/ide-4drives.c52
-rw-r--r--drivers/ide/legacy/ide-cs.c84
-rw-r--r--drivers/ide/legacy/ide_platform.c16
-rw-r--r--drivers/ide/legacy/macide.c8
-rw-r--r--drivers/ide/legacy/q40ide.c9
-rw-r--r--drivers/ide/legacy/qd65xx.c238
-rw-r--r--drivers/ide/legacy/qd65xx.h1
-rw-r--r--drivers/ide/legacy/umc8672.c92
-rw-r--r--drivers/ide/mips/au1xxx-ide.c130
-rw-r--r--drivers/ide/mips/swarm.c19
-rw-r--r--drivers/ide/pci/aec62xx.c39
-rw-r--r--drivers/ide/pci/alim15x3.c332
-rw-r--r--drivers/ide/pci/amd74xx.c19
-rw-r--r--drivers/ide/pci/atiixp.c29
-rw-r--r--drivers/ide/pci/cmd640.c294
-rw-r--r--drivers/ide/pci/cmd64x.c153
-rw-r--r--drivers/ide/pci/cs5520.c29
-rw-r--r--drivers/ide/pci/cs5530.c18
-rw-r--r--drivers/ide/pci/cs5535.c24
-rw-r--r--drivers/ide/pci/cy82c693.c97
-rw-r--r--drivers/ide/pci/delkin_cb.c20
-rw-r--r--drivers/ide/pci/generic.c10
-rw-r--r--drivers/ide/pci/hpt34x.c17
-rw-r--r--drivers/ide/pci/hpt366.c132
-rw-r--r--drivers/ide/pci/it8213.c34
-rw-r--r--drivers/ide/pci/it821x.c52
-rw-r--r--drivers/ide/pci/jmicron.c29
-rw-r--r--drivers/ide/pci/ns87415.c40
-rw-r--r--drivers/ide/pci/opti621.c82
-rw-r--r--drivers/ide/pci/pdc202xx_new.c23
-rw-r--r--drivers/ide/pci/pdc202xx_old.c126
-rw-r--r--drivers/ide/pci/piix.c17
-rw-r--r--drivers/ide/pci/rz1000.c2
-rw-r--r--drivers/ide/pci/sc1200.c39
-rw-r--r--drivers/ide/pci/scc_pata.c95
-rw-r--r--drivers/ide/pci/serverworks.c38
-rw-r--r--drivers/ide/pci/sgiioc4.c140
-rw-r--r--drivers/ide/pci/siimage.c142
-rw-r--r--drivers/ide/pci/sis5513.c253
-rw-r--r--drivers/ide/pci/sl82c105.c83
-rw-r--r--drivers/ide/pci/slc90e66.c22
-rw-r--r--drivers/ide/pci/tc86c001.c54
-rw-r--r--drivers/ide/pci/triflex.c12
-rw-r--r--drivers/ide/pci/trm290.c47
-rw-r--r--drivers/ide/pci/via82cxxx.c20
-rw-r--r--drivers/ide/ppc/mpc8xx.c70
-rw-r--r--drivers/ide/ppc/pmac.c183
-rw-r--r--drivers/ide/setup-pci.c226
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c75
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c16
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c15
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c51
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c6
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c23
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c122
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h33
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c6
-rw-r--r--drivers/infiniband/hw/nes/nes.c15
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c27
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c20
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c18
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c125
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/input/joystick/xpad.c34
-rw-r--r--drivers/macintosh/mac_hid.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c4
-rw-r--r--drivers/media/dvb/frontends/Kconfig8
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/mt312.h2
-rw-r--r--drivers/media/dvb/frontends/s5h1411.c888
-rw-r--r--drivers/media/dvb/frontends/s5h1411.h90
-rw-r--r--drivers/media/video/au0828/Kconfig2
-rw-r--r--drivers/media/video/au0828/au0828-cards.c1
-rw-r--r--drivers/media/video/au0828/au0828-core.c26
-rw-r--r--drivers/media/video/au0828/au0828-dvb.c2
-rw-r--r--drivers/media/video/au0828/au0828-i2c.c6
-rw-r--r--drivers/media/video/au0828/au0828.h8
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c4
-rw-r--r--drivers/media/video/cx88/Kconfig1
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c6
-rw-r--r--drivers/media/video/cx88/cx88-cards.c1
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c32
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c21
-rw-r--r--drivers/media/video/pvrusb2/Kconfig1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.c28
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.h22
-rw-r--r--drivers/media/video/tuner-core.c92
-rw-r--r--drivers/media/video/tuner-xc2028.c2
-rw-r--r--drivers/media/video/vivi.c2
-rw-r--r--drivers/misc/enclosure.c100
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/mlx4/alloc.c157
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/main.c3
-rw-r--r--drivers/net/mlx4/mlx4.h3
-rw-r--r--drivers/net/mlx4/qp.c31
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/kvm/Makefile9
-rw-r--r--drivers/s390/kvm/kvm_virtio.c338
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c39
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c114
-rw-r--r--drivers/scsi/FlashPoint.c2
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aha152x.c7
-rw-r--r--drivers/scsi/aha1542.c26
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h23
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.reg115
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c835
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_inline.h859
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c181
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h177
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c33
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg.h_shipped1145
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped1555
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_seq.h_shipped6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h55
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.reg45
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.c16
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c676
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_inline.h616
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c95
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h142
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c73
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c9
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped233
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped6
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c6
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y105
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l19
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c25
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h1
-rw-r--r--drivers/scsi/eata.c11
-rw-r--r--drivers/scsi/esp_scsi.c35
-rw-r--r--drivers/scsi/esp_scsi.h13
-rw-r--r--drivers/scsi/hosts.c29
-rw-r--r--drivers/scsi/ide-scsi.c19
-rw-r--r--drivers/scsi/jazz_esp.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c10
-rw-r--r--drivers/scsi/mac_esp.c657
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c394
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c19
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_proc.c7
-rw-r--r--drivers/scsi/scsi_scan.c84
-rw-r--r--drivers/scsi/scsi_sysfs.c142
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_sas.c22
-rw-r--r--drivers/scsi/scsi_transport_spi.c33
-rw-r--r--drivers/scsi/sgiwd93.c4
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/st.c10
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/u14-34f.c9
244 files changed, 10946 insertions, 10859 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 788da9781f80..0d90ff5fd117 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -418,13 +418,12 @@ static void acpi_processor_idle(void)
 
 	cx = pr->power.state;
 	if (!cx || acpi_idle_suspend) {
-		if (pm_idle_save)
-			pm_idle_save();
-		else
+		if (pm_idle_save) {
+			pm_idle_save(); /* enables IRQs */
+		} else {
 			acpi_safe_halt();
-
-		if (irqs_disabled())
 			local_irq_enable();
+		}
 
 		return;
 	}
@@ -520,10 +519,12 @@ static void acpi_processor_idle(void)
 		 * Use the appropriate idle routine, the one that would
 		 * be used without acpi C-states.
 		 */
-		if (pm_idle_save)
-			pm_idle_save();
-		else
+		if (pm_idle_save) {
+			pm_idle_save(); /* enables IRQs */
+		} else {
 			acpi_safe_halt();
+			local_irq_enable();
+		}
 
 		/*
 		 * TBD: Can't get time duration while in C1, as resumes
@@ -534,8 +535,6 @@ static void acpi_processor_idle(void)
 		 * skew otherwise.
 		 */
 		sleep_ticks = 0xFFFFFFFF;
-		if (irqs_disabled())
-			local_irq_enable();
 
 		break;
 
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index d28669992147..96bdb9296b07 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -436,8 +436,9 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
 	   system controller may experience noise due to strong drive strengths
 	 */
 	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
-		u8 cap_ptr=0;
 		struct pci_dev *gfxcard=NULL;
+
+		cap_ptr = 0;
 		while (!cap_ptr) {
 			gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
 			if (!gfxcard) {
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 55d7a82bd071..857b26227d87 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -967,7 +967,7 @@ int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
 	return 0;
 }
 
-static int agp_ioctl(struct inode *inode, struct file *file,
+static long agp_ioctl(struct file *file,
 		     unsigned int cmd, unsigned long arg)
 {
 	struct agp_file_private *curr_priv = file->private_data;
@@ -1058,7 +1058,7 @@ static const struct file_operations agp_fops =
 	.llseek		= no_llseek,
 	.read		= agp_read,
 	.write		= agp_write,
-	.ioctl		= agp_ioctl,
+	.unlocked_ioctl	= agp_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= compat_agp_ioctl,
 #endif
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 141f4dfa0a11..b710426bab3e 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -167,13 +167,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 			page_base += ATI_PCIGART_PAGE_SIZE;
 		}
 	}
-
-	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
-		dma_sync_single_for_device(&dev->pdev->dev,
-					   bus_address,
-					   max_pages * sizeof(u32),
-					   PCI_DMA_TODEVICE);
-
 	ret = 1;
 
 #if defined(__i386__) || defined(__x86_64__)
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 3a05c6d5ebe1..6874f31ca8ca 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -471,6 +471,7 @@ struct drm_irq_busid {
 enum drm_vblank_seq_type {
 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
+	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
 	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
@@ -503,6 +504,21 @@ union drm_wait_vblank {
 	struct drm_wait_vblank_reply reply;
 };
 
+enum drm_modeset_ctl_cmd {
+	_DRM_PRE_MODESET = 1,
+	_DRM_POST_MODESET = 2,
+};
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+	unsigned long arg;
+	enum drm_modeset_ctl_cmd cmd;
+};
+
 /**
  * DRM_IOCTL_AGP_ENABLE ioctl argument type.
  *
@@ -587,6 +603,7 @@ struct drm_set_version {
 #define DRM_IOCTL_GET_CLIENT		DRM_IOWR(0x05, struct drm_client)
 #define DRM_IOCTL_GET_STATS		DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL		DRM_IOW(0x08, struct drm_modeset_ctl)
 
 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 6540948d5176..ecee3547a13f 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -100,10 +100,8 @@ struct drm_device;
 #define DRIVER_HAVE_DMA    0x20
 #define DRIVER_HAVE_IRQ    0x40
 #define DRIVER_IRQ_SHARED  0x80
-#define DRIVER_IRQ_VBL     0x100
 #define DRIVER_DMA_QUEUE   0x200
 #define DRIVER_FB_DMA      0x400
-#define DRIVER_IRQ_VBL2    0x800
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -379,13 +377,12 @@ struct drm_buf_entry {
 struct drm_file {
 	int authenticated;
 	int master;
-	int minor;
 	pid_t pid;
 	uid_t uid;
 	drm_magic_t magic;
 	unsigned long ioctl_count;
 	struct list_head lhead;
-	struct drm_head *head;
+	struct drm_minor *minor;
 	int remove_auth_on_close;
 	unsigned long lock_count;
 	struct file *filp;
@@ -580,10 +577,52 @@ struct drm_driver {
 	int (*context_dtor) (struct drm_device *dev, int context);
 	int (*kernel_context_switch) (struct drm_device *dev, int old,
 				      int new);
-	void (*kernel_context_switch_unlock) (struct drm_device *dev);
-	int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
-	int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
-	int (*dri_library_name) (struct drm_device *dev, char *buf);
+	void (*kernel_context_switch_unlock) (struct drm_device * dev);
+	/**
+	 * get_vblank_counter - get raw hardware vblank counter
+	 * @dev: DRM device
+	 * @crtc: counter to fetch
+	 *
+	 * Driver callback for fetching a raw hardware vblank counter
+	 * for @crtc.  If a device doesn't have a hardware counter, the
+	 * driver can simply return the value of drm_vblank_count and
+	 * make the enable_vblank() and disable_vblank() hooks into no-ops,
+	 * leaving interrupts enabled at all times.
+	 *
+	 * Wraparound handling and loss of events due to modesetting is dealt
+	 * with in the DRM core code.
+	 *
+	 * RETURNS
+	 * Raw vblank counter value.
+	 */
+	u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+	/**
+	 * enable_vblank - enable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to enable
+	 *
+	 * Enable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 *
+	 * RETURNS
+	 * Zero on success, appropriate errno if the given @crtc's vblank
+	 * interrupt cannot be enabled.
+	 */
+	int (*enable_vblank) (struct drm_device *dev, int crtc);
+
+	/**
+	 * disable_vblank - disable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to enable
+	 *
+	 * Disable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 */
+	void (*disable_vblank) (struct drm_device *dev, int crtc);
+	int (*dri_library_name) (struct drm_device *dev, char * buf);
 
 	/**
 	 * Called by \c drm_device_is_agp. Typically used to determine if a
@@ -602,7 +641,7 @@ struct drm_driver {
 
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
 	void (*irq_preinstall) (struct drm_device *dev);
-	void (*irq_postinstall) (struct drm_device *dev);
+	int (*irq_postinstall) (struct drm_device *dev);
 	void (*irq_uninstall) (struct drm_device *dev);
 	void (*reclaim_buffers) (struct drm_device *dev,
 				 struct drm_file * file_priv);
@@ -630,16 +669,19 @@ struct drm_driver {
 	struct pci_driver pci_driver;
 };
 
+#define DRM_MINOR_UNASSIGNED 0
+#define DRM_MINOR_LEGACY 1
+
 /**
- * DRM head structure. This structure represent a video head on a card
- * that may contain multiple heads. Embed one per head of these in the
- * private drm_device structure.
+ * DRM minor structure. This structure represents a drm minor number.
  */
-struct drm_head {
-	int minor;			/**< Minor device number */
+struct drm_minor {
+	int index;			/**< Minor device number */
+	int type;			/**< Control or render */
+	dev_t device;			/**< Device number for mknod */
+	struct device kdev;		/**< Linux device */
 	struct drm_device *dev;
 	struct proc_dir_entry *dev_root;  /**< proc directory entry */
-	dev_t device;			/**< Device number for mknod */
 };
 
 /**
@@ -647,7 +689,6 @@ struct drm_head {
  * may contain multiple heads.
  */
 struct drm_device {
-	struct device dev;		/**< Linux device */
 	char *unique;			/**< Unique identifier: e.g., busid */
 	int unique_len;			/**< Length of unique field */
 	char *devname;			/**< For /proc/interrupts */
@@ -729,13 +770,21 @@ struct drm_device {
 	/** \name VBLANK IRQ support */
 	/*@{ */
 
-	wait_queue_head_t vbl_queue;	/**< VBLANK wait queue */
-	atomic_t vbl_received;
-	atomic_t vbl_received2;		/**< number of secondary VBLANK interrupts */
+	wait_queue_head_t *vbl_queue;	/**< VBLANK wait queue */
+	atomic_t *_vblank_count;	/**< number of VBLANK interrupts (driver must alloc the right number of counters) */
 	spinlock_t vbl_lock;
-	struct list_head vbl_sigs;	/**< signal list to send on VBLANK */
-	struct list_head vbl_sigs2;	/**< signals to send on secondary VBLANK */
-	unsigned int vbl_pending;
+	struct list_head *vbl_sigs;	/**< signal list to send on VBLANK */
+	atomic_t vbl_signal_pending;	/* number of signals pending on all crtcs*/
+	atomic_t *vblank_refcount;	/* number of users of vblank interrupts per crtc */
+	u32 *last_vblank;		/* protected by dev->vbl_lock, used */
+					/* for wraparound handling */
+	u32 *vblank_offset;		/* used to track how many vblanks */
+	int *vblank_enabled;		/* so we don't call enable more than
+					   once per disable */
+	u32 *vblank_premodeset;		/* were lost during modeset */
+	struct timer_list vblank_disable_timer;
+
+	unsigned long max_vblank_count;	/**< size of vblank counter register */
 	spinlock_t tasklet_lock;	/**< For drm_locked_tasklet */
 	void (*locked_tasklet_func)(struct drm_device *dev);
 
@@ -755,6 +804,7 @@ struct drm_device {
 #ifdef __alpha__
 	struct pci_controller *hose;
 #endif
+	int num_crtcs;			/**< Number of CRTCs on this device */
 	struct drm_sg_mem *sg;		/**< Scatter gather memory */
 	void *dev_private;		/**< device private data */
 	struct drm_sigdata sigdata;	/**< For block_all_signals */
@@ -763,7 +813,7 @@ struct drm_device {
 	struct drm_driver *driver;
 	drm_local_map_t *agp_buffer_map;
 	unsigned int agp_buffer_token;
-	struct drm_head primary;	/**< primary screen head */
+	struct drm_minor *primary;	/**< render type primary screen head */
 
 	/** \name Drawable information */
 	/*@{ */
@@ -989,11 +1039,19 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
 extern void drm_driver_irq_postinstall(struct drm_device *dev);
 extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
-extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(struct drm_device *dev);
+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
+extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, int crtc);
+extern void drm_vblank_put(struct drm_device *dev, int crtc);
+
+				/* Modesetting support */
+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 
 				/* AGP/GART support (drm_agpsupport.h) */
 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
@@ -1030,23 +1088,20 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		       struct drm_driver *driver);
 extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_head(struct drm_head *head);
+extern int drm_put_minor(struct drm_minor **minor);
 extern unsigned int drm_debug;
-extern unsigned int drm_cards_limit;
-extern struct drm_head **drm_heads;
+
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 
+extern struct idr drm_minors_idr;
+
 extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
 
 				/* Proc support (drm_proc.h) */
-extern int drm_proc_init(struct drm_device *dev,
-			 int minor,
-			 struct proc_dir_entry *root,
-			 struct proc_dir_entry **dev_root);
-extern int drm_proc_cleanup(int minor,
-			    struct proc_dir_entry *root,
-			    struct proc_dir_entry *dev_root);
+extern int drm_proc_init(struct drm_minor *minor, int minor_id,
+			 struct proc_dir_entry *root);
+extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
 
 				/* Scatter Gather Support (drm_scatter.h) */
 extern void drm_sg_cleanup(struct drm_sg_mem * entry);
@@ -1071,8 +1126,8 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 struct drm_sysfs_class;
 extern struct class *drm_sysfs_create(struct module *owner, char *name);
 extern void drm_sysfs_destroy(void);
-extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
-extern void drm_sysfs_device_remove(struct drm_device *dev);
+extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern void drm_sysfs_device_remove(struct drm_minor *minor);
 
 /*
  * Basic memory manager support (drm_mm.c)
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 9468c7889ff1..aefa5ac4c0b1 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -122,7 +122,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
-	return drm_agp_acquire((struct drm_device *) file_priv->head->dev);
+	return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
 }
 
 /**
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 0e7af53c87de..fc54140551a7 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -313,35 +313,36 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ht_remove(&dev->map_hash);
 	drm_ctxbitmap_cleanup(dev);
 
-	drm_put_head(&dev->primary);
+	drm_put_minor(&dev->primary);
 	if (drm_put_dev(dev))
 		DRM_ERROR("Cannot unload module\n");
 }
 
-void drm_exit(struct drm_driver *driver)
+int drm_minors_cleanup(int id, void *ptr, void *data)
 {
-	int i;
-	struct drm_device *dev = NULL;
-	struct drm_head *head;
+	struct drm_minor *minor = ptr;
+	struct drm_device *dev;
+	struct drm_driver *driver = data;
+
+	dev = minor->dev;
+	if (minor->dev->driver != driver)
+		return 0;
+
+	if (minor->type != DRM_MINOR_LEGACY)
+		return 0;
 
+	if (dev)
+		pci_dev_put(dev->pdev);
+	drm_cleanup(dev);
+	return 1;
+}
+
+void drm_exit(struct drm_driver *driver)
+{
 	DRM_DEBUG("\n");
 
-	for (i = 0; i < drm_cards_limit; i++) {
-		head = drm_heads[i];
-		if (!head)
-			continue;
-		if (!head->dev)
-			continue;
-		if (head->dev->driver != driver)
-			continue;
-		dev = head->dev;
-		if (dev) {
-			/* release the pci driver */
-			if (dev->pdev)
-				pci_dev_put(dev->pdev);
-			drm_cleanup(dev);
-		}
-	}
+	idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
+
 	DRM_INFO("Module unloaded\n");
 }
 
@@ -357,13 +358,7 @@ static int __init drm_core_init(void)
 {
 	int ret = -ENOMEM;
 
-	drm_cards_limit =
-	    (drm_cards_limit <
-	     DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
-	drm_heads =
-	    drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
-	if (!drm_heads)
-		goto err_p1;
+	idr_init(&drm_minors_idr);
 
 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
 		goto err_p1;
@@ -391,7 +386,8 @@ err_p3:
 	drm_sysfs_destroy();
 err_p2:
 	unregister_chrdev(DRM_MAJOR, "drm");
-	drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
+
+	idr_destroy(&drm_minors_idr);
 err_p1:
 	return ret;
 }
@@ -403,7 +399,7 @@ static void __exit drm_core_exit(void)
 
 	unregister_chrdev(DRM_MAJOR, "drm");
 
-	drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
+	idr_destroy(&drm_minors_idr);
 }
 
 module_init(drm_core_init);
@@ -452,7 +448,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 	      unsigned int cmd, unsigned long arg)
 {
 	struct drm_file *file_priv = filp->private_data;
-	struct drm_device *dev = file_priv->head->dev;
+	struct drm_device *dev = file_priv->minor->dev;
 	struct drm_ioctl_desc *ioctl;
 	drm_ioctl_t *func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -465,7 +461,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 
 	DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
 		  task_pid_nr(current), cmd, nr,
-		  (long)old_encode_dev(file_priv->head->device),
+		  (long)old_encode_dev(file_priv->minor->device),
 		  file_priv->authenticated);
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index f09d4b5002b0..68f0da801ed8 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -129,16 +129,15 @@ static int drm_setup(struct drm_device * dev)
 int drm_open(struct inode *inode, struct file *filp)
 {
 	struct drm_device *dev = NULL;
-	int minor = iminor(inode);
+	int minor_id = iminor(inode);
+	struct drm_minor *minor;
 	int retcode = 0;
 
-	if (!((minor >= 0) && (minor < drm_cards_limit)))
+	minor = idr_find(&drm_minors_idr, minor_id);
+	if (!minor)
 		return -ENODEV;
 
-	if (!drm_heads[minor])
-		return -ENODEV;
-
-	if (!(dev = drm_heads[minor]->dev))
+	if (!(dev = minor->dev))
 		return -ENODEV;
 
 	retcode = drm_open_helper(inode, filp, dev);
@@ -168,19 +167,18 @@ EXPORT_SYMBOL(drm_open);
 int drm_stub_open(struct inode *inode, struct file *filp)
 {
 	struct drm_device *dev = NULL;
-	int minor = iminor(inode);
+	struct drm_minor *minor;
+	int minor_id = iminor(inode);
 	int err = -ENODEV;
 	const struct file_operations *old_fops;
 
 	DRM_DEBUG("\n");
 
-	if (!((minor >= 0) && (minor < drm_cards_limit)))
-		return -ENODEV;
-
-	if (!drm_heads[minor])
+	minor = idr_find(&drm_minors_idr, minor_id);
+	if (!minor)
 		return -ENODEV;
 
-	if (!(dev = drm_heads[minor]->dev))
+	if (!(dev = minor->dev))
 		return -ENODEV;
 
 	old_fops = filp->f_op;
@@ -225,7 +223,7 @@ static int drm_cpu_valid(void)
 static int drm_open_helper(struct inode *inode, struct file *filp,
 			   struct drm_device * dev)
 {
-	int minor = iminor(inode);
+	int minor_id = iminor(inode);
 	struct drm_file *priv;
 	int ret;
 
@@ -234,7 +232,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	if (!drm_cpu_valid())
 		return -EINVAL;
 
-	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor);
+	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
 
 	priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
 	if (!priv)
@@ -245,8 +243,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	priv->filp = filp;
 	priv->uid = current->euid;
 	priv->pid = task_pid_nr(current);
-	priv->minor = minor;
-	priv->head = drm_heads[minor];
+	priv->minor = idr_find(&drm_minors_idr, minor_id);
 	priv->ioctl_count = 0;
 	/* for compatibility root is always authenticated */
 	priv->authenticated = capable(CAP_SYS_ADMIN);
@@ -297,11 +294,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 int drm_fasync(int fd, struct file *filp, int on)
 {
 	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_device *dev = priv->minor->dev;
 	int retcode;
 
 	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
-		  (long)old_encode_dev(priv->head->device));
+		  (long)old_encode_dev(priv->minor->device));
 	retcode = fasync_helper(fd, filp, on, &dev->buf_async);
 	if (retcode < 0)
 		return retcode;
@@ -324,7 +321,7 @@ EXPORT_SYMBOL(drm_fasync);
 int drm_release(struct inode *inode, struct file *filp)
 {
 	struct drm_file *file_priv = filp->private_data;
-	struct drm_device *dev = file_priv->head->dev;
+	struct drm_device *dev = file_priv->minor->dev;
 	int retcode = 0;
 	unsigned long irqflags;
 
@@ -341,14 +338,14 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
 		  task_pid_nr(current),
-		  (long)old_encode_dev(file_priv->head->device),
+		  (long)old_encode_dev(file_priv->minor->device),
 		  dev->open_count);
 
 	if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
 		if (drm_i_have_hw_lock(dev, file_priv)) {
 			dev->driver->reclaim_buffers_locked(dev, file_priv);
 		} else {
-			unsigned long _end=jiffies + 3*DRM_HZ;
+			unsigned long endtime = jiffies + 3 * DRM_HZ;
 			int locked = 0;
 
 			drm_idlelock_take(&dev->lock);
@@ -366,7 +363,7 @@ int drm_release(struct inode *inode, struct file *filp)
 				if (locked)
 					break;
 				schedule();
-			} while (!time_after_eq(jiffies, _end));
+			} while (!time_after_eq(jiffies, endtime));
 
 			if (!locked) {
 				DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 089c015c01d1..286f9d61e7d5 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -71,6 +71,117 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void vblank_disable_fn(unsigned long arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	unsigned long irqflags;
+	int i;
+
+	for (i = 0; i < dev->num_crtcs; i++) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+		    dev->vblank_enabled[i]) {
+			dev->driver->disable_vblank(dev, i);
+			dev->vblank_enabled[i] = 0;
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+}
+
+static void drm_vblank_cleanup(struct drm_device *dev)
+{
+	/* Bail if the driver didn't call drm_vblank_init() */
+	if (dev->num_crtcs == 0)
+		return;
+
+	del_timer(&dev->vblank_disable_timer);
+
+	vblank_disable_fn((unsigned long)dev);
+
+	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+
+	dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+	int i, ret = -ENOMEM;
+
+	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+		    (unsigned long)dev);
+	spin_lock_init(&dev->vbl_lock);
+	atomic_set(&dev->vbl_signal_pending, 0);
+	dev->num_crtcs = num_crtcs;
+
+	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
+				   DRM_MEM_DRIVER);
+	if (!dev->vbl_queue)
+		goto err;
+
+	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
+				  DRM_MEM_DRIVER);
+	if (!dev->vbl_sigs)
+		goto err;
+
+	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
+				       DRM_MEM_DRIVER);
+	if (!dev->_vblank_count)
+		goto err;
+
+	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_refcount)
+		goto err;
+
+	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_enabled)
+		goto err;
+
+	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+	if (!dev->last_vblank)
+		goto err;
+
+	dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
+					    DRM_MEM_DRIVER);
+	if (!dev->vblank_premodeset)
+		goto err;
+
+	dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+	if (!dev->vblank_offset)
+		goto err;
+
+	/* Zero per-crtc vblank stuff */
+	for (i = 0; i < num_crtcs; i++) {
+		init_waitqueue_head(&dev->vbl_queue[i]);
+		INIT_LIST_HEAD(&dev->vbl_sigs[i]);
+		atomic_set(&dev->_vblank_count[i], 0);
+		atomic_set(&dev->vblank_refcount[i], 0);
+	}
+
+	return 0;
+
+err:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
 /**
  * Install IRQ handler.
  *
@@ -109,17 +220,6 @@ static int drm_irq_install(struct drm_device * dev)
 
 	DRM_DEBUG("irq=%d\n", dev->irq);
 
-	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
-		init_waitqueue_head(&dev->vbl_queue);
-
-		spin_lock_init(&dev->vbl_lock);
-
-		INIT_LIST_HEAD(&dev->vbl_sigs);
-		INIT_LIST_HEAD(&dev->vbl_sigs2);
-
-		dev->vbl_pending = 0;
-	}
-
 	/* Before installing handler */
 	dev->driver->irq_preinstall(dev);
 
@@ -137,9 +237,14 @@ static int drm_irq_install(struct drm_device * dev)
 	}
 
 	/* After installing handler */
-	dev->driver->irq_postinstall(dev);
+	ret = dev->driver->irq_postinstall(dev);
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+	}
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -170,6 +275,8 @@ int drm_irq_uninstall(struct drm_device * dev)
 
 	free_irq(dev->irq, dev);
 
+	drm_vblank_cleanup(dev);
+
 	dev->locked_tasklet_func = NULL;
 
 	return 0;
@@ -214,6 +321,148 @@ int drm_control(struct drm_device *dev, void *data,
 }
 
 /**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+	return atomic_read(&dev->_vblank_count[crtc]) +
+		dev->vblank_offset[crtc];
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ */
+void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	u32 cur_vblank, diff;
+
+	/*
+	 * Interrupts were disabled prior to this call, so deal with counter
+	 * wrap if needed.
+	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
+	 * here if the register is small or we had vblank interrupts off for
+	 * a long time.
+	 */
+	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	if (cur_vblank < dev->last_vblank[crtc]) {
+		diff = dev->max_vblank_count -
+			dev->last_vblank[crtc];
+		diff += cur_vblank;
+	} else {
+		diff = cur_vblank - dev->last_vblank[crtc];
+	}
+	dev->last_vblank[crtc] = cur_vblank;
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+	atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_update_vblank_count);
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.  Note callers will probably want to update the master counter
+ * using drm_update_vblank_count() above before calling this routine so that
+ * wakeups occur on the right vblank event.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	/* Going from 0->1 means we have to enable interrupts again */
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
+	    !dev->vblank_enabled[crtc]) {
+		ret = dev->driver->enable_vblank(dev, crtc);
+		if (ret)
+			atomic_dec(&dev->vblank_refcount[crtc]);
+		else
+			dev->vblank_enabled[crtc] = 1;
+	}
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+	/* Last user schedules interrupt disable */
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_modeset_ctl *modeset = data;
+	int crtc, ret = 0;
+	u32 new;
+
+	crtc = modeset->arg;
+	if (crtc >= dev->num_crtcs) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (modeset->cmd) {
+	case _DRM_PRE_MODESET:
+		dev->vblank_premodeset[crtc] =
+			dev->driver->get_vblank_counter(dev, crtc);
+		break;
+	case _DRM_POST_MODESET:
+		new = dev->driver->get_vblank_counter(dev, crtc);
+		dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+out:
+	return ret;
+}
+
+/**
  * Wait for VBLANK.
  *
  * \param inode device inode.
@@ -232,12 +481,13 @@ int drm_control(struct drm_device *dev, void *data,
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_wait_vblank(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
 {
 	union drm_wait_vblank *vblwait = data;
 	struct timeval now;
 	int ret = 0;
-	unsigned int flags, seq;
+	unsigned int flags, seq, crtc;
 
 	if ((!dev->irq) || (!dev->irq_enabled))
 		return -EINVAL;
@@ -251,13 +501,13 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
 
-	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
-				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+	if (crtc >= dev->num_crtcs)
 		return -EINVAL;
 
-	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
-			  : &dev->vbl_received);
+	drm_update_vblank_count(dev, crtc);
+	seq = drm_vblank_count(dev, crtc);
 
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -276,8 +526,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 	if (flags & _DRM_VBLANK_SIGNAL) {
 		unsigned long irqflags;
-		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
-			? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
 		struct drm_vbl_sig *vbl_sig;
 
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -298,22 +547,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 			}
 		}
 
-		if (dev->vbl_pending >= 100) {
+		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
 			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 			return -EBUSY;
 		}
 
-		dev->vbl_pending++;
-
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
-		if (!
-		    (vbl_sig =
-		     drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
+		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
+				     DRM_MEM_DRIVER);
+		if (!vbl_sig)
 			return -ENOMEM;
+
+		ret = drm_vblank_get(dev, crtc);
+		if (ret) {
+			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
+				 DRM_MEM_DRIVER);
+			return ret;
 		}
 
-		memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+		atomic_inc(&dev->vbl_signal_pending);
 
 		vbl_sig->sequence = vblwait->request.sequence;
 		vbl_sig->info.si_signo = vblwait->request.signal;
@@ -327,17 +580,20 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 		vblwait->reply.sequence = seq;
 	} else {
-		if (flags & _DRM_VBLANK_SECONDARY) {
-			if (dev->driver->vblank_wait2)
-				ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
-		} else if (dev->driver->vblank_wait)
-			ret =
-			    dev->driver->vblank_wait(dev,
-						     &vblwait->request.sequence);
-
+		unsigned long cur_vblank;
+
+		ret = drm_vblank_get(dev, crtc);
+		if (ret)
+			return ret;
+		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+			    (((cur_vblank = drm_vblank_count(dev, crtc))
+			      - vblwait->request.sequence) <= (1 << 23)));
+		drm_vblank_put(dev, crtc);
 		do_gettimeofday(&now);
+
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
+		vblwait->reply.sequence = cur_vblank;
 	}
 
   done:
@@ -348,44 +604,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
  * Send the VBLANK signals.
  *
  * \param dev DRM device.
+ * \param crtc CRTC where the vblank event occurred
  *
  * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-void drm_vbl_send_signals(struct drm_device * dev)
+static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
 {
+	struct drm_vbl_sig *vbl_sig, *tmp;
+	struct list_head *vbl_sigs;
+	unsigned int vbl_seq;
 	unsigned long flags;
-	int i;
 
 	spin_lock_irqsave(&dev->vbl_lock, flags);
 
-	for (i = 0; i < 2; i++) {
-		struct drm_vbl_sig *vbl_sig, *tmp;
-		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
-						   &dev->vbl_received);
+	vbl_sigs = &dev->vbl_sigs[crtc];
+	vbl_seq = drm_vblank_count(dev, crtc);
 
 	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
 		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
 			vbl_sig->info.si_code = vbl_seq;
 			send_sig_info(vbl_sig->info.si_signo,
 				      &vbl_sig->info, vbl_sig->task);
 
 			list_del(&vbl_sig->head);
 
 			drm_free(vbl_sig, sizeof(*vbl_sig),
 				 DRM_MEM_DRIVER);
-
-			dev->vbl_pending--;
+			atomic_dec(&dev->vbl_signal_pending);
+			drm_vblank_put(dev, crtc);
 		}
-	}
 	}
 
 	spin_unlock_irqrestore(&dev->vbl_lock, flags);
 }
 
-EXPORT_SYMBOL(drm_vbl_send_signals);
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+	drm_update_vblank_count(dev, crtc);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_vbl_send_signals(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
 
 /**
  * Tasklet wrapper function.
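
The drm_handle_vblank() helper added above centralizes the per-CRTC bookkeeping: per its kernel-doc, a driver's interrupt handler only has to work out which pipe fired and call it. A minimal sketch of such a handler follows; my_ack_irq() and the MY_VBLANK_* status bits are hypothetical stand-ins for whatever the real driver reads from its hardware, not names from this patch.

    /* Hypothetical driver IRQ handler built on the new drm_handle_vblank().
     * my_ack_irq() and the MY_VBLANK_* bits are placeholders. */
    static irqreturn_t my_driver_irq_handler(DRM_IRQ_ARGS)
    {
        struct drm_device *dev = (struct drm_device *) arg;
        u32 status = my_ack_irq(dev);       /* read and clear the IRQ status */

        if (status & MY_VBLANK_PIPEA)
            drm_handle_vblank(dev, 0);      /* bump counter, wake waiters, send signals */
        if (status & MY_VBLANK_PIPEB)
            drm_handle_vblank(dev, 1);

        return status ? IRQ_HANDLED : IRQ_NONE;
    }
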
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index d9b560fe9bbe..93b1e0475c93 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -87,34 +87,35 @@ static struct drm_proc_list {
  * "/proc/dri/%minor%/", and each entry in proc_list as
  * "/proc/dri/%minor%/%name%".
  */
-int drm_proc_init(struct drm_device * dev, int minor,
-		  struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+		  struct proc_dir_entry *root)
 {
 	struct proc_dir_entry *ent;
 	int i, j;
 	char name[64];
 
-	sprintf(name, "%d", minor);
-	*dev_root = proc_mkdir(name, root);
-	if (!*dev_root) {
+	sprintf(name, "%d", minor_id);
+	minor->dev_root = proc_mkdir(name, root);
+	if (!minor->dev_root) {
 		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
 		return -1;
 	}
 
 	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
 		ent = create_proc_entry(drm_proc_list[i].name,
-					S_IFREG | S_IRUGO, *dev_root);
+					S_IFREG | S_IRUGO, minor->dev_root);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
 				  name, drm_proc_list[i].name);
 			for (j = 0; j < i; j++)
 				remove_proc_entry(drm_proc_list[i].name,
-						  *dev_root);
+						  minor->dev_root);
 			remove_proc_entry(name, root);
+			minor->dev_root = NULL;
 			return -1;
 		}
 		ent->read_proc = drm_proc_list[i].f;
-		ent->data = dev;
+		ent->data = minor;
 	}
 
 	return 0;
@@ -130,18 +131,17 @@ int drm_proc_init(struct drm_device * dev, int minor,
  *
  * Remove all proc entries created by proc_init().
  */
-int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
-		     struct proc_dir_entry *dev_root)
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
 	int i;
 	char name[64];
 
-	if (!root || !dev_root)
+	if (!root || !minor->dev_root)
 		return 0;
 
 	for (i = 0; i < DRM_PROC_ENTRIES; i++)
-		remove_proc_entry(drm_proc_list[i].name, dev_root);
-	sprintf(name, "%d", minor);
+		remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
+	sprintf(name, "%d", minor->index);
 	remove_proc_entry(name, root);
 
 	return 0;
@@ -163,7 +163,8 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
163static int drm_name_info(char *buf, char **start, off_t offset, int request, 163static int drm_name_info(char *buf, char **start, off_t offset, int request,
164 int *eof, void *data) 164 int *eof, void *data)
165{ 165{
166 struct drm_device *dev = (struct drm_device *) data; 166 struct drm_minor *minor = (struct drm_minor *) data;
167 struct drm_device *dev = minor->dev;
167 int len = 0; 168 int len = 0;
168 169
169 if (offset > DRM_PROC_LIMIT) { 170 if (offset > DRM_PROC_LIMIT) {
@@ -205,7 +206,8 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
205static int drm__vm_info(char *buf, char **start, off_t offset, int request, 206static int drm__vm_info(char *buf, char **start, off_t offset, int request,
206 int *eof, void *data) 207 int *eof, void *data)
207{ 208{
208 struct drm_device *dev = (struct drm_device *) data; 209 struct drm_minor *minor = (struct drm_minor *) data;
210 struct drm_device *dev = minor->dev;
209 int len = 0; 211 int len = 0;
210 struct drm_map *map; 212 struct drm_map *map;
211 struct drm_map_list *r_list; 213 struct drm_map_list *r_list;
@@ -261,7 +263,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
261static int drm_vm_info(char *buf, char **start, off_t offset, int request, 263static int drm_vm_info(char *buf, char **start, off_t offset, int request,
262 int *eof, void *data) 264 int *eof, void *data)
263{ 265{
264 struct drm_device *dev = (struct drm_device *) data; 266 struct drm_minor *minor = (struct drm_minor *) data;
267 struct drm_device *dev = minor->dev;
265 int ret; 268 int ret;
266 269
267 mutex_lock(&dev->struct_mutex); 270 mutex_lock(&dev->struct_mutex);
@@ -284,7 +287,8 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
284static int drm__queues_info(char *buf, char **start, off_t offset, 287static int drm__queues_info(char *buf, char **start, off_t offset,
285 int request, int *eof, void *data) 288 int request, int *eof, void *data)
286{ 289{
287 struct drm_device *dev = (struct drm_device *) data; 290 struct drm_minor *minor = (struct drm_minor *) data;
291 struct drm_device *dev = minor->dev;
288 int len = 0; 292 int len = 0;
289 int i; 293 int i;
290 struct drm_queue *q; 294 struct drm_queue *q;
@@ -334,7 +338,8 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
334static int drm_queues_info(char *buf, char **start, off_t offset, int request, 338static int drm_queues_info(char *buf, char **start, off_t offset, int request,
335 int *eof, void *data) 339 int *eof, void *data)
336{ 340{
337 struct drm_device *dev = (struct drm_device *) data; 341 struct drm_minor *minor = (struct drm_minor *) data;
342 struct drm_device *dev = minor->dev;
338 int ret; 343 int ret;
339 344
340 mutex_lock(&dev->struct_mutex); 345 mutex_lock(&dev->struct_mutex);
@@ -357,7 +362,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
357static int drm__bufs_info(char *buf, char **start, off_t offset, int request, 362static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
358 int *eof, void *data) 363 int *eof, void *data)
359{ 364{
360 struct drm_device *dev = (struct drm_device *) data; 365 struct drm_minor *minor = (struct drm_minor *) data;
366 struct drm_device *dev = minor->dev;
361 int len = 0; 367 int len = 0;
362 struct drm_device_dma *dma = dev->dma; 368 struct drm_device_dma *dma = dev->dma;
363 int i; 369 int i;
@@ -406,7 +412,8 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
406static int drm_bufs_info(char *buf, char **start, off_t offset, int request, 412static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
407 int *eof, void *data) 413 int *eof, void *data)
408{ 414{
409 struct drm_device *dev = (struct drm_device *) data; 415 struct drm_minor *minor = (struct drm_minor *) data;
416 struct drm_device *dev = minor->dev;
410 int ret; 417 int ret;
411 418
412 mutex_lock(&dev->struct_mutex); 419 mutex_lock(&dev->struct_mutex);
@@ -429,7 +436,8 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
429static int drm__clients_info(char *buf, char **start, off_t offset, 436static int drm__clients_info(char *buf, char **start, off_t offset,
430 int request, int *eof, void *data) 437 int request, int *eof, void *data)
431{ 438{
432 struct drm_device *dev = (struct drm_device *) data; 439 struct drm_minor *minor = (struct drm_minor *) data;
440 struct drm_device *dev = minor->dev;
433 int len = 0; 441 int len = 0;
434 struct drm_file *priv; 442 struct drm_file *priv;
435 443
@@ -445,7 +453,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
445 list_for_each_entry(priv, &dev->filelist, lhead) { 453 list_for_each_entry(priv, &dev->filelist, lhead) {
446 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", 454 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
447 priv->authenticated ? 'y' : 'n', 455 priv->authenticated ? 'y' : 'n',
448 priv->minor, 456 priv->minor->index,
449 priv->pid, 457 priv->pid,
450 priv->uid, priv->magic, priv->ioctl_count); 458 priv->uid, priv->magic, priv->ioctl_count);
451 } 459 }
@@ -462,7 +470,8 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
462static int drm_clients_info(char *buf, char **start, off_t offset, 470static int drm_clients_info(char *buf, char **start, off_t offset,
463 int request, int *eof, void *data) 471 int request, int *eof, void *data)
464{ 472{
465 struct drm_device *dev = (struct drm_device *) data; 473 struct drm_minor *minor = (struct drm_minor *) data;
474 struct drm_device *dev = minor->dev;
466 int ret; 475 int ret;
467 476
468 mutex_lock(&dev->struct_mutex); 477 mutex_lock(&dev->struct_mutex);
@@ -476,7 +485,8 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
476static int drm__vma_info(char *buf, char **start, off_t offset, int request, 485static int drm__vma_info(char *buf, char **start, off_t offset, int request,
477 int *eof, void *data) 486 int *eof, void *data)
478{ 487{
479 struct drm_device *dev = (struct drm_device *) data; 488 struct drm_minor *minor = (struct drm_minor *) data;
489 struct drm_device *dev = minor->dev;
480 int len = 0; 490 int len = 0;
481 struct drm_vma_entry *pt; 491 struct drm_vma_entry *pt;
482 struct vm_area_struct *vma; 492 struct vm_area_struct *vma;
@@ -535,7 +545,8 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
535static int drm_vma_info(char *buf, char **start, off_t offset, int request, 545static int drm_vma_info(char *buf, char **start, off_t offset, int request,
536 int *eof, void *data) 546 int *eof, void *data)
537{ 547{
538 struct drm_device *dev = (struct drm_device *) data; 548 struct drm_minor *minor = (struct drm_minor *) data;
549 struct drm_device *dev = minor->dev;
539 int ret; 550 int ret;
540 551
541 mutex_lock(&dev->struct_mutex); 552 mutex_lock(&dev->struct_mutex);
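
Every handler in this file now follows the same pattern: drm_proc_init() stores the struct drm_minor in the proc entry's ->data, and each read callback casts it back and chases minor->dev. A sketch of one such callback against the create_proc_entry()/read_proc interface used here; the entry name "foo" and the printed fields are only illustrative, and the offset/limit handling of the real handlers is omitted.

    /* Hypothetical /proc/dri/<minor>/foo read callback, following the
     * converted handlers above: ->data carries the drm_minor. */
    static int foo_info(char *buf, char **start, off_t offset, int request,
                        int *eof, void *data)
    {
        struct drm_minor *minor = (struct drm_minor *) data;
        struct drm_device *dev = minor->dev;
        int len = 0;

        *eof = 1;
        len += sprintf(&buf[len], "minor: %d\n", minor->index);
        len += sprintf(&buf[len], "irq:   %d\n", dev->irq);
        return len;
    }

Registration would mirror drm_proc_init(): create_proc_entry("foo", S_IFREG | S_IRUGO, minor->dev_root), then set ent->read_proc = foo_info and ent->data = minor.
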
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index d93a217f856a..c2f584f3b46c 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -36,23 +36,49 @@
 #include "drmP.h"
 #include "drm_core.h"
 
-unsigned int drm_cards_limit = 16;	/* Enough for one machine */
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
 MODULE_PARM_DESC(debug, "Enable debug output");
 
-module_param_named(cards_limit, drm_cards_limit, int, 0444);
 module_param_named(debug, drm_debug, int, 0600);
 
-struct drm_head **drm_heads;
+struct idr drm_minors_idr;
+
 struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
 
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+	int new_id;
+	int ret;
+	int base = 0, limit = 63;
+
+again:
+	if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory expanding drawable idr\n");
+		return -ENOMEM;
+	}
+	mutex_lock(&dev->struct_mutex);
+	ret = idr_get_new_above(&drm_minors_idr, NULL,
+				base, &new_id);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret == -EAGAIN) {
+		goto again;
+	} else if (ret) {
+		return ret;
+	}
+
+	if (new_id >= limit) {
+		idr_remove(&drm_minors_idr, new_id);
+		return -EINVAL;
+	}
+	return new_id;
+}
+
 static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 			   const struct pci_device_id *ent,
 			   struct drm_driver *driver)
@@ -145,48 +171,60 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
  * create the proc init entry via proc_init(). This routines assigns
  * minor numbers to secondary heads of multi-headed cards
  */
-static int drm_get_head(struct drm_device * dev, struct drm_head * head)
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
 {
-	struct drm_head **heads = drm_heads;
+	struct drm_minor *new_minor;
 	int ret;
-	int minor;
+	int minor_id;
 
 	DRM_DEBUG("\n");
 
-	for (minor = 0; minor < drm_cards_limit; minor++, heads++) {
-		if (!*heads) {
-
-			*head = (struct drm_head) {
-			.dev = dev,.device =
-				    MKDEV(DRM_MAJOR, minor),.minor = minor,};
-
-			if ((ret =
-			     drm_proc_init(dev, minor, drm_proc_root,
-					   &head->dev_root))) {
-				printk(KERN_ERR
-				       "DRM: Failed to initialize /proc/dri.\n");
-				goto err_g1;
-			}
+	minor_id = drm_minor_get_id(dev, type);
+	if (minor_id < 0)
+		return minor_id;
+
+	new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+	if (!new_minor) {
+		ret = -ENOMEM;
+		goto err_idr;
+	}
+
+	new_minor->type = type;
+	new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+	new_minor->dev = dev;
+	new_minor->index = minor_id;
 
-			ret = drm_sysfs_device_add(dev, head);
-			if (ret) {
-				printk(KERN_ERR
-				       "DRM: Error sysfs_device_add.\n");
-				goto err_g2;
-			}
-			*heads = head;
-
-			DRM_DEBUG("new minor assigned %d\n", minor);
-			return 0;
+	idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+	if (type == DRM_MINOR_LEGACY) {
+		ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
+		if (ret) {
+			DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
+			goto err_mem;
 		}
+	} else
+		new_minor->dev_root = NULL;
+
+	ret = drm_sysfs_device_add(new_minor);
+	if (ret) {
+		printk(KERN_ERR
+		       "DRM: Error sysfs_device_add.\n");
+		goto err_g2;
 	}
-	DRM_ERROR("out of minors\n");
-	return -ENOMEM;
-      err_g2:
-	drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
-      err_g1:
-	*head = (struct drm_head) {
-	.dev = NULL};
+	*minor = new_minor;
+
+	DRM_DEBUG("new minor assigned %d\n", minor_id);
+	return 0;
+
+
+err_g2:
+	if (new_minor->type == DRM_MINOR_LEGACY)
+		drm_proc_cleanup(new_minor, drm_proc_root);
+err_mem:
+	kfree(new_minor);
+err_idr:
+	idr_remove(&drm_minors_idr, minor_id);
+	*minor = NULL;
 	return ret;
 }
 
@@ -222,12 +260,12 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 		goto err_g2;
 	}
-	if ((ret = drm_get_head(dev, &dev->primary)))
+	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
 		goto err_g2;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
-		 driver->date, dev->primary.minor);
+		 driver->date, dev->primary->index);
 
 	return 0;
 
@@ -276,18 +314,18 @@ int drm_put_dev(struct drm_device * dev)
  * last minor released.
  *
  */
-int drm_put_head(struct drm_head * head)
+int drm_put_minor(struct drm_minor **minor_p)
 {
-	int minor = head->minor;
-
-	DRM_DEBUG("release secondary minor %d\n", minor);
-
-	drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
-	drm_sysfs_device_remove(head->dev);
+	struct drm_minor *minor = *minor_p;
+	DRM_DEBUG("release secondary minor %d\n", minor->index);
 
-	*head = (struct drm_head) {.dev = NULL};
+	if (minor->type == DRM_MINOR_LEGACY)
+		drm_proc_cleanup(minor, drm_proc_root);
+	drm_sysfs_device_remove(minor);
 
-	drm_heads[minor] = NULL;
+	idr_remove(&drm_minors_idr, minor->index);
 
+	kfree(minor);
+	*minor_p = NULL;
 	return 0;
 }
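
drm_minor_get_id() above uses the two-step idr interface of this kernel generation: idr_pre_get() preallocates outside the lock, idr_get_new_above() does the insertion under the lock, and -EAGAIN means the preallocation was consumed by another caller and the loop must retry. The same pattern reduced to a self-contained helper; my_alloc_id, my_idr and my_lock are made-up names for the example, and unlike the code above it stores the pointer directly instead of reserving the slot with NULL and calling idr_replace() later.

    /* Generic idr_pre_get()/idr_get_new_above() retry loop (sketch). */
    static int my_alloc_id(struct idr *my_idr, struct mutex *my_lock, void *ptr)
    {
        int id, ret;

    again:
        if (idr_pre_get(my_idr, GFP_KERNEL) == 0)
            return -ENOMEM;                /* no memory to grow the idr */

        mutex_lock(my_lock);
        ret = idr_get_new_above(my_idr, ptr, 0, &id);
        mutex_unlock(my_lock);

        if (ret == -EAGAIN)
            goto again;                    /* preallocation raced away, retry */
        if (ret)
            return ret;

        return id;
    }
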
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 05ed5043254f..7a1d9a782ddb 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -19,7 +19,7 @@
19#include "drm_core.h" 19#include "drm_core.h"
20#include "drmP.h" 20#include "drmP.h"
21 21
22#define to_drm_device(d) container_of(d, struct drm_device, dev) 22#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
23 23
24/** 24/**
25 * drm_sysfs_suspend - DRM class suspend hook 25 * drm_sysfs_suspend - DRM class suspend hook
@@ -31,7 +31,8 @@
31 */ 31 */
32static int drm_sysfs_suspend(struct device *dev, pm_message_t state) 32static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
33{ 33{
34 struct drm_device *drm_dev = to_drm_device(dev); 34 struct drm_minor *drm_minor = to_drm_minor(dev);
35 struct drm_device *drm_dev = drm_minor->dev;
35 36
36 printk(KERN_ERR "%s\n", __FUNCTION__); 37 printk(KERN_ERR "%s\n", __FUNCTION__);
37 38
@@ -50,7 +51,8 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
50 */ 51 */
51static int drm_sysfs_resume(struct device *dev) 52static int drm_sysfs_resume(struct device *dev)
52{ 53{
53 struct drm_device *drm_dev = to_drm_device(dev); 54 struct drm_minor *drm_minor = to_drm_minor(dev);
55 struct drm_device *drm_dev = drm_minor->dev;
54 56
55 if (drm_dev->driver->resume) 57 if (drm_dev->driver->resume)
56 return drm_dev->driver->resume(drm_dev); 58 return drm_dev->driver->resume(drm_dev);
@@ -120,10 +122,11 @@ void drm_sysfs_destroy(void)
 static ssize_t show_dri(struct device *device, struct device_attribute *attr,
 			char *buf)
 {
-	struct drm_device *dev = to_drm_device(device);
-	if (dev->driver->dri_library_name)
-		return dev->driver->dri_library_name(dev, buf);
-	return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
+	struct drm_minor *drm_minor = to_drm_minor(device);
+	struct drm_device *drm_dev = drm_minor->dev;
+	if (drm_dev->driver->dri_library_name)
+		return drm_dev->driver->dri_library_name(drm_dev, buf);
+	return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
 }
 
 static struct device_attribute device_attrs[] = {
@@ -152,25 +155,28 @@ static void drm_sysfs_device_release(struct device *dev)
  * as the parent for the Linux device, and make sure it has a file containing
  * the driver we're using (for userspace compatibility).
  */
-int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
+int drm_sysfs_device_add(struct drm_minor *minor)
 {
 	int err;
 	int i, j;
+	char *minor_str;
 
-	dev->dev.parent = &dev->pdev->dev;
-	dev->dev.class = drm_class;
-	dev->dev.release = drm_sysfs_device_release;
-	dev->dev.devt = head->device;
-	snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
+	minor->kdev.parent = &minor->dev->pdev->dev;
+	minor->kdev.class = drm_class;
+	minor->kdev.release = drm_sysfs_device_release;
+	minor->kdev.devt = minor->device;
+	minor_str = "card%d";
 
-	err = device_register(&dev->dev);
+	snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
+
+	err = device_register(&minor->kdev);
 	if (err) {
 		DRM_ERROR("device add failed: %d\n", err);
 		goto err_out;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
-		err = device_create_file(&dev->dev, &device_attrs[i]);
+		err = device_create_file(&minor->kdev, &device_attrs[i]);
 		if (err)
 			goto err_out_files;
 	}
@@ -180,8 +186,8 @@ int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
 err_out_files:
 	if (i > 0)
 		for (j = 0; j < i; j++)
-			device_remove_file(&dev->dev, &device_attrs[i]);
-	device_unregister(&dev->dev);
+			device_remove_file(&minor->kdev, &device_attrs[i]);
+	device_unregister(&minor->kdev);
 err_out:
 
 	return err;
@@ -194,11 +200,11 @@ err_out:
  * This call unregisters and cleans up a class device that was created with a
  * call to drm_sysfs_device_add()
  */
-void drm_sysfs_device_remove(struct drm_device *dev)
+void drm_sysfs_device_remove(struct drm_minor *minor)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
-		device_remove_file(&dev->dev, &device_attrs[i]);
-	device_unregister(&dev->dev);
+		device_remove_file(&minor->kdev, &device_attrs[i]);
+	device_unregister(&minor->kdev);
 }
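
The to_drm_minor() macro at the top of this file is the usual container_of() idiom: struct drm_minor embeds its struct device as the kdev member, so a callback that is handed the device pointer can step back to the enclosing minor. The arithmetic is easy to see in a stand-alone userspace toy (the fake_* types are invented purely for illustration):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_device { int devt; };

    struct fake_minor {
        int index;
        struct fake_device kdev;    /* embedded, like drm_minor::kdev */
    };

    int main(void)
    {
        struct fake_minor m = { .index = 3 };
        struct fake_device *d = &m.kdev;              /* what a callback sees */
        struct fake_minor *back = container_of(d, struct fake_minor, kdev);

        printf("minor index = %d\n", back->index);    /* prints 3 */
        return 0;
    }
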
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 945df72a51a9..c234c6f24a8d 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -90,7 +90,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
90static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 90static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
91{ 91{
92 struct drm_file *priv = vma->vm_file->private_data; 92 struct drm_file *priv = vma->vm_file->private_data;
93 struct drm_device *dev = priv->head->dev; 93 struct drm_device *dev = priv->minor->dev;
94 struct drm_map *map = NULL; 94 struct drm_map *map = NULL;
95 struct drm_map_list *r_list; 95 struct drm_map_list *r_list;
96 struct drm_hash_item *hash; 96 struct drm_hash_item *hash;
@@ -207,7 +207,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
207static void drm_vm_shm_close(struct vm_area_struct *vma) 207static void drm_vm_shm_close(struct vm_area_struct *vma)
208{ 208{
209 struct drm_file *priv = vma->vm_file->private_data; 209 struct drm_file *priv = vma->vm_file->private_data;
210 struct drm_device *dev = priv->head->dev; 210 struct drm_device *dev = priv->minor->dev;
211 struct drm_vma_entry *pt, *temp; 211 struct drm_vma_entry *pt, *temp;
212 struct drm_map *map; 212 struct drm_map *map;
213 struct drm_map_list *r_list; 213 struct drm_map_list *r_list;
@@ -286,7 +286,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
286static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 286static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
287{ 287{
288 struct drm_file *priv = vma->vm_file->private_data; 288 struct drm_file *priv = vma->vm_file->private_data;
289 struct drm_device *dev = priv->head->dev; 289 struct drm_device *dev = priv->minor->dev;
290 struct drm_device_dma *dma = dev->dma; 290 struct drm_device_dma *dma = dev->dma;
291 unsigned long offset; 291 unsigned long offset;
292 unsigned long page_nr; 292 unsigned long page_nr;
@@ -321,7 +321,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
321{ 321{
322 struct drm_map *map = (struct drm_map *) vma->vm_private_data; 322 struct drm_map *map = (struct drm_map *) vma->vm_private_data;
323 struct drm_file *priv = vma->vm_file->private_data; 323 struct drm_file *priv = vma->vm_file->private_data;
324 struct drm_device *dev = priv->head->dev; 324 struct drm_device *dev = priv->minor->dev;
325 struct drm_sg_mem *entry = dev->sg; 325 struct drm_sg_mem *entry = dev->sg;
326 unsigned long offset; 326 unsigned long offset;
327 unsigned long map_offset; 327 unsigned long map_offset;
@@ -402,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
402static void drm_vm_open_locked(struct vm_area_struct *vma) 402static void drm_vm_open_locked(struct vm_area_struct *vma)
403{ 403{
404 struct drm_file *priv = vma->vm_file->private_data; 404 struct drm_file *priv = vma->vm_file->private_data;
405 struct drm_device *dev = priv->head->dev; 405 struct drm_device *dev = priv->minor->dev;
406 struct drm_vma_entry *vma_entry; 406 struct drm_vma_entry *vma_entry;
407 407
408 DRM_DEBUG("0x%08lx,0x%08lx\n", 408 DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -420,7 +420,7 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
420static void drm_vm_open(struct vm_area_struct *vma) 420static void drm_vm_open(struct vm_area_struct *vma)
421{ 421{
422 struct drm_file *priv = vma->vm_file->private_data; 422 struct drm_file *priv = vma->vm_file->private_data;
423 struct drm_device *dev = priv->head->dev; 423 struct drm_device *dev = priv->minor->dev;
424 424
425 mutex_lock(&dev->struct_mutex); 425 mutex_lock(&dev->struct_mutex);
426 drm_vm_open_locked(vma); 426 drm_vm_open_locked(vma);
@@ -438,7 +438,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
438static void drm_vm_close(struct vm_area_struct *vma) 438static void drm_vm_close(struct vm_area_struct *vma)
439{ 439{
440 struct drm_file *priv = vma->vm_file->private_data; 440 struct drm_file *priv = vma->vm_file->private_data;
441 struct drm_device *dev = priv->head->dev; 441 struct drm_device *dev = priv->minor->dev;
442 struct drm_vma_entry *pt, *temp; 442 struct drm_vma_entry *pt, *temp;
443 443
444 DRM_DEBUG("0x%08lx,0x%08lx\n", 444 DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -473,7 +473,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
473 struct drm_device_dma *dma; 473 struct drm_device_dma *dma;
474 unsigned long length = vma->vm_end - vma->vm_start; 474 unsigned long length = vma->vm_end - vma->vm_start;
475 475
476 dev = priv->head->dev; 476 dev = priv->minor->dev;
477 dma = dev->dma; 477 dma = dev->dma;
478 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", 478 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
479 vma->vm_start, vma->vm_end, vma->vm_pgoff); 479 vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@ -543,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
543static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) 543static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
544{ 544{
545 struct drm_file *priv = filp->private_data; 545 struct drm_file *priv = filp->private_data;
546 struct drm_device *dev = priv->head->dev; 546 struct drm_device *dev = priv->minor->dev;
547 struct drm_map *map = NULL; 547 struct drm_map *map = NULL;
548 unsigned long offset = 0; 548 unsigned long offset = 0;
549 struct drm_hash_item *hash; 549 struct drm_hash_item *hash;
@@ -640,12 +640,12 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
640 /* Don't let this area swap. Change when 640 /* Don't let this area swap. Change when
641 DRM_KERNEL advisory is supported. */ 641 DRM_KERNEL advisory is supported. */
642 vma->vm_flags |= VM_RESERVED; 642 vma->vm_flags |= VM_RESERVED;
643 vma->vm_page_prot = drm_dma_prot(map->type, vma);
644 break; 643 break;
645 case _DRM_SCATTER_GATHER: 644 case _DRM_SCATTER_GATHER:
646 vma->vm_ops = &drm_vm_sg_ops; 645 vma->vm_ops = &drm_vm_sg_ops;
647 vma->vm_private_data = (void *)map; 646 vma->vm_private_data = (void *)map;
648 vma->vm_flags |= VM_RESERVED; 647 vma->vm_flags |= VM_RESERVED;
648 vma->vm_page_prot = drm_dma_prot(map->type, vma);
649 break; 649 break;
650 default: 650 default:
651 return -EINVAL; /* This should never happen. */ 651 return -EINVAL; /* This should never happen. */
@@ -661,7 +661,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
661int drm_mmap(struct file *filp, struct vm_area_struct *vma) 661int drm_mmap(struct file *filp, struct vm_area_struct *vma)
662{ 662{
663 struct drm_file *priv = filp->private_data; 663 struct drm_file *priv = filp->private_data;
664 struct drm_device *dev = priv->head->dev; 664 struct drm_device *dev = priv->minor->dev;
665 int ret; 665 int ret;
666 666
667 mutex_lock(&dev->struct_mutex); 667 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 8d7ea81c4b66..e5de8ea41544 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -94,7 +94,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
94 drm_i810_buf_priv_t *buf_priv; 94 drm_i810_buf_priv_t *buf_priv;
95 95
96 lock_kernel(); 96 lock_kernel();
97 dev = priv->head->dev; 97 dev = priv->minor->dev;
98 dev_priv = dev->dev_private; 98 dev_priv = dev->dev_private;
99 buf = dev_priv->mmap_buffer; 99 buf = dev_priv->mmap_buffer;
100 buf_priv = buf->dev_private; 100 buf_priv = buf->dev_private;
@@ -122,7 +122,7 @@ static const struct file_operations i810_buffer_fops = {
122 122
123static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) 123static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
124{ 124{
125 struct drm_device *dev = file_priv->head->dev; 125 struct drm_device *dev = file_priv->minor->dev;
126 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 126 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
127 drm_i810_private_t *dev_priv = dev->dev_private; 127 drm_i810_private_t *dev_priv = dev->dev_private;
128 const struct file_operations *old_fops; 128 const struct file_operations *old_fops;
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 9df08105f4f3..60c9376be486 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -96,7 +96,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
96 drm_i830_buf_priv_t *buf_priv; 96 drm_i830_buf_priv_t *buf_priv;
97 97
98 lock_kernel(); 98 lock_kernel();
99 dev = priv->head->dev; 99 dev = priv->minor->dev;
100 dev_priv = dev->dev_private; 100 dev_priv = dev->dev_private;
101 buf = dev_priv->mmap_buffer; 101 buf = dev_priv->mmap_buffer;
102 buf_priv = buf->dev_private; 102 buf_priv = buf->dev_private;
@@ -124,7 +124,7 @@ static const struct file_operations i830_buffer_fops = {
124 124
125static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) 125static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
126{ 126{
127 struct drm_device *dev = file_priv->head->dev; 127 struct drm_device *dev = file_priv->minor->dev;
128 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 128 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
129 drm_i830_private_t *dev_priv = dev->dev_private; 129 drm_i830_private_t *dev_priv = dev->dev_private;
130 const struct file_operations *old_fops; 130 const struct file_operations *old_fops;
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index a043bb12301a..ef7bf143a80c 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -415,10 +415,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
 
-	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+	if (++dev_priv->counter > BREADCRUMB_MASK) {
+		dev_priv->counter = 1;
+		DRM_DEBUG("Breadcrumb counter wrapped around\n");
+	}
 
-	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
 	BEGIN_LP_RING(4);
 	OUT_RING(CMD_STORE_DWORD_IDX);
@@ -428,6 +431,26 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
428 ADVANCE_LP_RING(); 431 ADVANCE_LP_RING();
429} 432}
430 433
434int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
435{
436 drm_i915_private_t *dev_priv = dev->dev_private;
437 uint32_t flush_cmd = CMD_MI_FLUSH;
438 RING_LOCALS;
439
440 flush_cmd |= flush;
441
442 i915_kernel_lost_context(dev);
443
444 BEGIN_LP_RING(4);
445 OUT_RING(flush_cmd);
446 OUT_RING(0);
447 OUT_RING(0);
448 OUT_RING(0);
449 ADVANCE_LP_RING();
450
451 return 0;
452}
453
431static int i915_dispatch_cmdbuffer(struct drm_device * dev, 454static int i915_dispatch_cmdbuffer(struct drm_device * dev,
432 drm_i915_cmdbuffer_t * cmd) 455 drm_i915_cmdbuffer_t * cmd)
433{ 456{
@@ -511,52 +534,74 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 	return 0;
 }
 
-static int i915_dispatch_flip(struct drm_device * dev)
+static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 num_pages, current_page, next_page, dspbase;
+	int shift = 2 * plane, x, y;
 	RING_LOCALS;
 
-	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
-		  __FUNCTION__,
-		  dev_priv->current_page,
-		  dev_priv->sarea_priv->pf_current_page);
+	/* Calculate display base offset */
+	num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
+	current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
+	next_page = (current_page + 1) % num_pages;
 
-	i915_kernel_lost_context(dev);
-
-	BEGIN_LP_RING(2);
-	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	switch (next_page) {
+	default:
+	case 0:
+		dspbase = dev_priv->sarea_priv->front_offset;
+		break;
+	case 1:
+		dspbase = dev_priv->sarea_priv->back_offset;
+		break;
+	case 2:
+		dspbase = dev_priv->sarea_priv->third_offset;
+		break;
+	}
 
-	BEGIN_LP_RING(6);
-	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
-	OUT_RING(0);
-	if (dev_priv->current_page == 0) {
-		OUT_RING(dev_priv->back_offset);
-		dev_priv->current_page = 1;
+	if (plane == 0) {
+		x = dev_priv->sarea_priv->planeA_x;
+		y = dev_priv->sarea_priv->planeA_y;
 	} else {
-		OUT_RING(dev_priv->front_offset);
-		dev_priv->current_page = 0;
+		x = dev_priv->sarea_priv->planeB_x;
+		y = dev_priv->sarea_priv->planeB_y;
 	}
-	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(2);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
 
-	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+	DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
+		  dspbase);
 
 	BEGIN_LP_RING(4);
-	OUT_RING(CMD_STORE_DWORD_IDX);
-	OUT_RING(20);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
+	OUT_RING(sync ? 0 :
+		 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
+				       MI_WAIT_FOR_PLANE_A_FLIP)));
+	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
+		 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
+	OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
+	OUT_RING(dspbase);
 	ADVANCE_LP_RING();
 
-	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-	return 0;
+	dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
+	dev_priv->sarea_priv->pf_current_page |= next_page << shift;
+}
+
+void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
+		  planes, dev_priv->sarea_priv->pf_current_page);
+
+	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
+
+	for (i = 0; i < 2; i++)
+		if (planes & (1 << i))
+			i915_do_dispatch_flip(dev, i, sync);
+
+	i915_emit_breadcrumb(dev);
+
 }
 
 static int i915_quiescent(struct drm_device * dev)
@@ -579,7 +624,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
579 struct drm_file *file_priv) 624 struct drm_file *file_priv)
580{ 625{
581 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
582 u32 *hw_status = dev_priv->hw_status_page;
583 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 627 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
584 dev_priv->sarea_priv; 628 dev_priv->sarea_priv;
585 drm_i915_batchbuffer_t *batch = data; 629 drm_i915_batchbuffer_t *batch = data;
@@ -602,7 +646,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
602 646
603 ret = i915_dispatch_batchbuffer(dev, batch); 647 ret = i915_dispatch_batchbuffer(dev, batch);
604 648
605 sarea_priv->last_dispatch = (int)hw_status[5]; 649 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
606 return ret; 650 return ret;
607} 651}
608 652
@@ -610,7 +654,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
610 struct drm_file *file_priv) 654 struct drm_file *file_priv)
611{ 655{
612 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 656 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
613 u32 *hw_status = dev_priv->hw_status_page;
614 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 657 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
615 dev_priv->sarea_priv; 658 dev_priv->sarea_priv;
616 drm_i915_cmdbuffer_t *cmdbuf = data; 659 drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -635,18 +678,51 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	sarea_priv->last_dispatch = (int)hw_status[5];
+	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+	return 0;
+}
+
+static int i915_do_cleanup_pageflip(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
+
+	DRM_DEBUG("\n");
+
+	for (i = 0, planes = 0; i < 2; i++)
+		if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
+			dev_priv->sarea_priv->pf_current_page =
+				(dev_priv->sarea_priv->pf_current_page &
+				 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
+
+			planes |= 1 << i;
+		}
+
+	if (planes)
+		i915_dispatch_flip(dev, planes, 0);
+
 	return 0;
 }
 
 static int i915_flip_bufs(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	drm_i915_flip_t *param = data;
+
+	DRM_DEBUG("\n");
 
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	return i915_dispatch_flip(dev);
+	/* This is really planes */
+	if (param->pipes & ~0x3) {
+		DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
+			  param->pipes);
+		return -EINVAL;
+	}
+
+	i915_dispatch_flip(dev, param->pipes, 0);
+
+	return 0;
 }
 
 static int i915_getparam(struct drm_device *dev, void *data,
@@ -807,6 +883,8 @@ void i915_driver_lastclose(struct drm_device * dev)
807 if (!dev_priv) 883 if (!dev_priv)
808 return; 884 return;
809 885
886 if (drm_getsarea(dev) && dev_priv->sarea_priv)
887 i915_do_cleanup_pageflip(dev);
810 if (dev_priv->agp_heap) 888 if (dev_priv->agp_heap)
811 i915_mem_takedown(&(dev_priv->agp_heap)); 889 i915_mem_takedown(&(dev_priv->agp_heap));
812 890
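
i915_do_dispatch_flip() above packs the current page of each display plane into a two-bit field of sarea_priv->pf_current_page at shift 2 * plane, and i915_do_cleanup_pageflip() relies on the same layout. The encode/advance arithmetic in isolation, as a userspace sketch (advance_page is an invented name, not part of the patch):

    #include <stdio.h>

    /* Two bits per plane: plane N's current page lives at bits [2N+1:2N]. */
    static unsigned int advance_page(unsigned int pf_current_page,
                                     int plane, unsigned int num_pages)
    {
        int shift = 2 * plane;
        unsigned int current_page = (pf_current_page >> shift) & 0x3;
        unsigned int next_page = (current_page + 1) % num_pages;

        pf_current_page &= ~(0x3 << shift);
        pf_current_page |= next_page << shift;
        return pf_current_page;
    }

    int main(void)
    {
        unsigned int pf = 0;

        pf = advance_page(pf, 0, 2);    /* plane A, double buffered: 0 -> 1 */
        pf = advance_page(pf, 1, 3);    /* plane B, triple buffered: 0 -> 1 */
        printf("pf_current_page = 0x%x\n", pf);    /* prints 0x5 */
        return 0;
    }
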
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 05c66cf03a9e..0431c00e2289 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -105,14 +105,29 @@ typedef struct _drm_i915_sarea {
105 unsigned int rotated_tiled; 105 unsigned int rotated_tiled;
106 unsigned int rotated2_tiled; 106 unsigned int rotated2_tiled;
107 107
108 int pipeA_x; 108 int planeA_x;
109 int pipeA_y; 109 int planeA_y;
110 int pipeA_w; 110 int planeA_w;
111 int pipeA_h; 111 int planeA_h;
112 int pipeB_x; 112 int planeB_x;
113 int pipeB_y; 113 int planeB_y;
114 int pipeB_w; 114 int planeB_w;
115 int pipeB_h; 115 int planeB_h;
116
117 /* Triple buffering */
118 drm_handle_t third_handle;
119 int third_offset;
120 int third_size;
121 unsigned int third_tiled;
122
123 /* buffer object handles for the static buffers. May change
124 * over the lifetime of the client, though it doesn't in our current
125 * implementation.
126 */
127 unsigned int front_bo_handle;
128 unsigned int back_bo_handle;
129 unsigned int third_bo_handle;
130 unsigned int depth_bo_handle;
116} drm_i915_sarea_t; 131} drm_i915_sarea_t;
117 132
118/* Flags for perf_boxes 133/* Flags for perf_boxes
@@ -146,7 +161,7 @@ typedef struct _drm_i915_sarea {
146 161
147#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 162#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
148#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 163#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
149#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 164#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
150#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 165#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
151#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 166#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
152#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 167#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
@@ -161,6 +176,18 @@ typedef struct _drm_i915_sarea {
161#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 176#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
162#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 177#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
163 178
179/* Asynchronous page flipping:
180 */
181typedef struct drm_i915_flip {
182 /*
183 * This is really talking about planes, and we could rename it
184 * except for the fact that some of the duplicated i915_drm.h files
185 * out there check for HAVE_I915_FLIP and so might pick up this
186 * version.
187 */
188 int pipes;
189} drm_i915_flip_t;
190
164/* Allow drivers to submit batchbuffers directly to hardware, relying 191/* Allow drivers to submit batchbuffers directly to hardware, relying
165 * on the security mechanisms provided by hardware. 192 * on the security mechanisms provided by hardware.
166 */ 193 */
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index b2b451dc4460..bb8f1b2fb383 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -533,8 +533,7 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
-	    DRIVER_IRQ_VBL2,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
 	.lastclose = i915_driver_lastclose,
@@ -542,8 +541,9 @@ static struct drm_driver driver = {
 	.suspend = i915_suspend,
 	.resume = i915_resume,
 	.device_is_agp = i915_driver_device_is_agp,
-	.vblank_wait = i915_driver_vblank_wait,
-	.vblank_wait2 = i915_driver_vblank_wait2,
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i915_enable_vblank,
+	.disable_vblank = i915_disable_vblank,
 	.irq_preinstall = i915_driver_irq_preinstall,
 	.irq_postinstall = i915_driver_irq_postinstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 675d88bda066..c614d78b3dfd 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -76,8 +76,9 @@ struct mem_block {
76typedef struct _drm_i915_vbl_swap { 76typedef struct _drm_i915_vbl_swap {
77 struct list_head head; 77 struct list_head head;
78 drm_drawable_t drw_id; 78 drm_drawable_t drw_id;
79 unsigned int pipe; 79 unsigned int plane;
80 unsigned int sequence; 80 unsigned int sequence;
81 int flip;
81} drm_i915_vbl_swap_t; 82} drm_i915_vbl_swap_t;
82 83
83typedef struct drm_i915_private { 84typedef struct drm_i915_private {
@@ -90,7 +91,7 @@ typedef struct drm_i915_private {
90 drm_dma_handle_t *status_page_dmah; 91 drm_dma_handle_t *status_page_dmah;
91 void *hw_status_page; 92 void *hw_status_page;
92 dma_addr_t dma_status_page; 93 dma_addr_t dma_status_page;
93 unsigned long counter; 94 uint32_t counter;
94 unsigned int status_gfx_addr; 95 unsigned int status_gfx_addr;
95 drm_local_map_t hws_map; 96 drm_local_map_t hws_map;
96 97
@@ -103,13 +104,18 @@ typedef struct drm_i915_private {
103 104
104 wait_queue_head_t irq_queue; 105 wait_queue_head_t irq_queue;
105 atomic_t irq_received; 106 atomic_t irq_received;
106 atomic_t irq_emitted; 107 atomic_t irq_emited;
107 108
108 int tex_lru_log_granularity; 109 int tex_lru_log_granularity;
109 int allow_batchbuffer; 110 int allow_batchbuffer;
110 struct mem_block *agp_heap; 111 struct mem_block *agp_heap;
111 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 112 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
112 int vblank_pipe; 113 int vblank_pipe;
114 spinlock_t user_irq_lock;
115 int user_irq_refcount;
116 int fence_irq_on;
117 uint32_t irq_enable_reg;
118 int irq_enabled;
113 119
114 spinlock_t swaps_lock; 120 spinlock_t swaps_lock;
115 drm_i915_vbl_swap_t vbl_swaps; 121 drm_i915_vbl_swap_t vbl_swaps;
@@ -216,7 +222,7 @@ extern void i915_driver_preclose(struct drm_device *dev,
216extern int i915_driver_device_is_agp(struct drm_device * dev); 222extern int i915_driver_device_is_agp(struct drm_device * dev);
217extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 223extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
218 unsigned long arg); 224 unsigned long arg);
219 225extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
220/* i915_irq.c */ 226/* i915_irq.c */
221extern int i915_irq_emit(struct drm_device *dev, void *data, 227extern int i915_irq_emit(struct drm_device *dev, void *data,
222 struct drm_file *file_priv); 228 struct drm_file *file_priv);
@@ -227,7 +233,7 @@ extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequenc
227extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 233extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
228extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 234extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
229extern void i915_driver_irq_preinstall(struct drm_device * dev); 235extern void i915_driver_irq_preinstall(struct drm_device * dev);
230extern void i915_driver_irq_postinstall(struct drm_device * dev); 236extern int i915_driver_irq_postinstall(struct drm_device * dev);
231extern void i915_driver_irq_uninstall(struct drm_device * dev); 237extern void i915_driver_irq_uninstall(struct drm_device * dev);
232extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 238extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
233 struct drm_file *file_priv); 239 struct drm_file *file_priv);
@@ -235,6 +241,9 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
235 struct drm_file *file_priv); 241 struct drm_file *file_priv);
236extern int i915_vblank_swap(struct drm_device *dev, void *data, 242extern int i915_vblank_swap(struct drm_device *dev, void *data,
237 struct drm_file *file_priv); 243 struct drm_file *file_priv);
244extern int i915_enable_vblank(struct drm_device *dev, int crtc);
245extern void i915_disable_vblank(struct drm_device *dev, int crtc);
246extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
238 247
239/* i915_mem.c */ 248/* i915_mem.c */
240extern int i915_mem_alloc(struct drm_device *dev, void *data, 249extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -379,21 +388,91 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
379 388
380/* Interrupt bits: 389/* Interrupt bits:
381 */ 390 */
382#define USER_INT_FLAG (1<<1) 391#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
383#define VSYNC_PIPEB_FLAG (1<<5) 392#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
384#define VSYNC_PIPEA_FLAG (1<<7) 393#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
385#define HWB_OOM_FLAG (1<<13) /* binner out of memory */ 394#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
395#define I915_HWB_OOM_INTERRUPT (1<<13) /* binner out of memory */
396#define I915_SYNC_STATUS_INTERRUPT (1<<12)
397#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
398#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
399#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
400#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
401#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
402#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
403#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
404#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
405#define I915_DEBUG_INTERRUPT (1<<2)
406#define I915_USER_INTERRUPT (1<<1)
407
386 408
387#define I915REG_HWSTAM 0x02098 409#define I915REG_HWSTAM 0x02098
388#define I915REG_INT_IDENTITY_R 0x020a4 410#define I915REG_INT_IDENTITY_R 0x020a4
389#define I915REG_INT_MASK_R 0x020a8 411#define I915REG_INT_MASK_R 0x020a8
390#define I915REG_INT_ENABLE_R 0x020a0 412#define I915REG_INT_ENABLE_R 0x020a0
413#define I915REG_INSTPM 0x020c0
414
415#define PIPEADSL 0x70000
416#define PIPEBDSL 0x71000
391 417
392#define I915REG_PIPEASTAT 0x70024 418#define I915REG_PIPEASTAT 0x70024
393#define I915REG_PIPEBSTAT 0x71024 419#define I915REG_PIPEBSTAT 0x71024
420/*
421 * The two pipe frame counter registers are not synchronized, so
422 * reading a stable value is somewhat tricky. The following code
423 * should work:
424 *
425 * do {
426 * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
427 * PIPE_FRAME_HIGH_SHIFT;
428 * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
429 * PIPE_FRAME_LOW_SHIFT);
430 * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
431 * PIPE_FRAME_HIGH_SHIFT);
432 * } while (high1 != high2);
433 * frame = (high1 << 8) | low1;
434 */
435#define PIPEAFRAMEHIGH 0x70040
436#define PIPEBFRAMEHIGH 0x71040
437#define PIPE_FRAME_HIGH_MASK 0x0000ffff
438#define PIPE_FRAME_HIGH_SHIFT 0
439#define PIPEAFRAMEPIXEL 0x70044
440#define PIPEBFRAMEPIXEL 0x71044
394 441
395#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) 442#define PIPE_FRAME_LOW_MASK 0xff000000
396#define I915_VBLANK_CLEAR (1UL<<1) 443#define PIPE_FRAME_LOW_SHIFT 24
444/*
445 * Pixel within the current frame is counted in the PIPEAFRAMEPIXEL register
446 * and is 24 bits wide.
447 */
448#define PIPE_PIXEL_MASK 0x00ffffff
449#define PIPE_PIXEL_SHIFT 0
450
451#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
452#define I915_CRC_ERROR_ENABLE (1UL<<29)
453#define I915_CRC_DONE_ENABLE (1UL<<28)
454#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
455#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
456#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
457#define I915_DPST_EVENT_ENABLE (1UL<<23)
458#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
459#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
460#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
461#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
462#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
463#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
464#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
465#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
466#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
467#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
468#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
469#define I915_DPST_EVENT_STATUS (1UL<<7)
470#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
471#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
472#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
473#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
474#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
475#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
397 476
398#define SRX_INDEX 0x3c4 477#define SRX_INDEX 0x3c4
399#define SRX_DATA 0x3c5 478#define SRX_DATA 0x3c5
@@ -566,6 +645,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
566#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) 645#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
567#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) 646#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
568#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) 647#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
648#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
649#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
569 650
570#define MI_BATCH_BUFFER ((0x30<<23)|1) 651#define MI_BATCH_BUFFER ((0x30<<23)|1)
571#define MI_BATCH_BUFFER_START (0x31<<23) 652#define MI_BATCH_BUFFER_START (0x31<<23)
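
The comment added above PIPEAFRAMEHIGH spells out how to combine the two unsynchronized frame-count registers into one stable value. Written out as a helper for pipe A, assuming the I915_READ() accessor and the masks/shifts this header defines (the function name is invented and not part of the patch):

    /* Stable pipe A frame counter read, per the PIPEAFRAMEHIGH comment. */
    static u32 read_pipea_frame_count(drm_i915_private_t *dev_priv)
    {
        u32 high1, high2, low;

        do {
            high1 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
                    PIPE_FRAME_HIGH_SHIFT;
            low = (I915_READ(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
                  PIPE_FRAME_LOW_SHIFT;
            high2 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
                    PIPE_FRAME_HIGH_SHIFT;
        } while (high1 != high2);    /* high word rolled over mid-read; retry */

        return (high1 << 8) | low;
    }
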
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 92653b38e64c..023ce66ef3ab 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -38,6 +38,109 @@
38#define MAX_NOPID ((u32)~0) 38#define MAX_NOPID ((u32)~0)
39 39
40/** 40/**
41 * i915_get_pipe - return the the pipe associated with a given plane
42 * @dev: DRM device
43 * @plane: plane to look for
44 *
45 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
46 * rather than a pipe number, since they may not always be equal. This routine
47 * maps the given @plane back to a pipe number.
48 */
49static int
50i915_get_pipe(struct drm_device *dev, int plane)
51{
52 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
53 u32 dspcntr;
54
55 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
56
57 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
58}
59
60/**
 61 * i915_get_plane - return the plane associated with a given pipe
62 * @dev: DRM device
63 * @pipe: pipe to look for
64 *
65 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
 66 * rather than a pipe number, since they may not always be equal. This routine
67 * maps the given @pipe back to a plane number.
68 */
69static int
70i915_get_plane(struct drm_device *dev, int pipe)
71{
72 if (i915_get_pipe(dev, 0) == pipe)
73 return 0;
74 return 1;
75}
76
77/**
78 * i915_pipe_enabled - check if a pipe is enabled
79 * @dev: DRM device
80 * @pipe: pipe to check
81 *
82 * Reading certain registers when the pipe is disabled can hang the chip.
83 * Use this routine to make sure the PLL is running and the pipe is active
84 * before reading such registers if unsure.
85 */
86static int
87i915_pipe_enabled(struct drm_device *dev, int pipe)
88{
89 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
90 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
91
92 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
93 return 1;
94
95 return 0;
96}
97
98/**
99 * Emit a synchronous flip.
100 *
101 * This function must be called with the drawable spinlock held.
102 */
103static void
104i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
105 int plane)
106{
107 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
108 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
109 u16 x1, y1, x2, y2;
110 int pf_planes = 1 << plane;
111
112 /* If the window is visible on the other plane, we have to flip on that
113 * plane as well.
114 */
115 if (plane == 1) {
116 x1 = sarea_priv->planeA_x;
117 y1 = sarea_priv->planeA_y;
118 x2 = x1 + sarea_priv->planeA_w;
119 y2 = y1 + sarea_priv->planeA_h;
120 } else {
121 x1 = sarea_priv->planeB_x;
122 y1 = sarea_priv->planeB_y;
123 x2 = x1 + sarea_priv->planeB_w;
124 y2 = y1 + sarea_priv->planeB_h;
125 }
126
127 if (x2 > 0 && y2 > 0) {
128 int i, num_rects = drw->num_rects;
129 struct drm_clip_rect *rect = drw->rects;
130
131 for (i = 0; i < num_rects; i++)
132 if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
133 rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
134 pf_planes = 0x3;
135
136 break;
137 }
138 }
139
140 i915_dispatch_flip(dev, pf_planes, 1);
141}
142
143/**
41 * Emit blits for scheduled buffer swaps. 144 * Emit blits for scheduled buffer swaps.
42 * 145 *
43 * This function will be called with the HW lock held. 146 * This function will be called with the HW lock held.
@@ -45,40 +148,59 @@
45static void i915_vblank_tasklet(struct drm_device *dev) 148static void i915_vblank_tasklet(struct drm_device *dev)
46{ 149{
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 150 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
48 unsigned long irqflags;
49 struct list_head *list, *tmp, hits, *hit; 151 struct list_head *list, *tmp, hits, *hit;
50 int nhits, nrects, slice[2], upper[2], lower[2], i; 152 int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
51 unsigned counter[2] = { atomic_read(&dev->vbl_received), 153 unsigned counter[2];
52 atomic_read(&dev->vbl_received2) };
53 struct drm_drawable_info *drw; 154 struct drm_drawable_info *drw;
54 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 155 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
55 u32 cpp = dev_priv->cpp; 156 u32 cpp = dev_priv->cpp, offsets[3];
56 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | 157 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
57 XY_SRC_COPY_BLT_WRITE_ALPHA | 158 XY_SRC_COPY_BLT_WRITE_ALPHA |
58 XY_SRC_COPY_BLT_WRITE_RGB) 159 XY_SRC_COPY_BLT_WRITE_RGB)
59 : XY_SRC_COPY_BLT_CMD; 160 : XY_SRC_COPY_BLT_CMD;
60 u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) | 161 u32 src_pitch = sarea_priv->pitch * cpp;
61 (cpp << 23) | (1 << 24); 162 u32 dst_pitch = sarea_priv->pitch * cpp;
163 /* COPY rop (0xcc), map cpp to magic color depth constants */
164 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
62 RING_LOCALS; 165 RING_LOCALS;
63 166
167 if (sarea_priv->front_tiled) {
168 cmd |= XY_SRC_COPY_BLT_DST_TILED;
169 dst_pitch >>= 2;
170 }
171 if (sarea_priv->back_tiled) {
172 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
173 src_pitch >>= 2;
174 }
175
176 counter[0] = drm_vblank_count(dev, 0);
177 counter[1] = drm_vblank_count(dev, 1);
178
64 DRM_DEBUG("\n"); 179 DRM_DEBUG("\n");
65 180
66 INIT_LIST_HEAD(&hits); 181 INIT_LIST_HEAD(&hits);
67 182
68 nhits = nrects = 0; 183 nhits = nrects = 0;
69 184
70 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 185 /* No irqsave/restore necessary. This tasklet may be run in an
186 * interrupt context or normal context, but we don't have to worry
187 * about getting interrupted by something acquiring the lock, because
 188 * we are the interrupt-context path that acquires the lock.
189 */
190 spin_lock(&dev_priv->swaps_lock);
71 191
72 /* Find buffer swaps scheduled for this vertical blank */ 192 /* Find buffer swaps scheduled for this vertical blank */
73 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 193 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
74 drm_i915_vbl_swap_t *vbl_swap = 194 drm_i915_vbl_swap_t *vbl_swap =
75 list_entry(list, drm_i915_vbl_swap_t, head); 195 list_entry(list, drm_i915_vbl_swap_t, head);
196 int pipe = i915_get_pipe(dev, vbl_swap->plane);
76 197
77 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) 198 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
78 continue; 199 continue;
79 200
80 list_del(list); 201 list_del(list);
81 dev_priv->swaps_pending--; 202 dev_priv->swaps_pending--;
203 drm_vblank_put(dev, pipe);
82 204
83 spin_unlock(&dev_priv->swaps_lock); 205 spin_unlock(&dev_priv->swaps_lock);
84 spin_lock(&dev->drw_lock); 206 spin_lock(&dev->drw_lock);
@@ -116,33 +238,23 @@ static void i915_vblank_tasklet(struct drm_device *dev)
116 spin_lock(&dev_priv->swaps_lock); 238 spin_lock(&dev_priv->swaps_lock);
117 } 239 }
118 240
119 if (nhits == 0) {
120 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
121 return;
122 }
123
124 spin_unlock(&dev_priv->swaps_lock); 241 spin_unlock(&dev_priv->swaps_lock);
125 242
126 i915_kernel_lost_context(dev); 243 if (nhits == 0)
127 244 return;
128 BEGIN_LP_RING(6);
129
130 OUT_RING(GFX_OP_DRAWRECT_INFO);
131 OUT_RING(0);
132 OUT_RING(0);
133 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
134 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
135 OUT_RING(0);
136
137 ADVANCE_LP_RING();
138 245
139 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; 246 i915_kernel_lost_context(dev);
140 247
141 upper[0] = upper[1] = 0; 248 upper[0] = upper[1] = 0;
142 slice[0] = max(sarea_priv->pipeA_h / nhits, 1); 249 slice[0] = max(sarea_priv->planeA_h / nhits, 1);
143 slice[1] = max(sarea_priv->pipeB_h / nhits, 1); 250 slice[1] = max(sarea_priv->planeB_h / nhits, 1);
144 lower[0] = sarea_priv->pipeA_y + slice[0]; 251 lower[0] = sarea_priv->planeA_y + slice[0];
145 lower[1] = sarea_priv->pipeB_y + slice[0]; 252 lower[1] = sarea_priv->planeB_y + slice[0];
253
254 offsets[0] = sarea_priv->front_offset;
255 offsets[1] = sarea_priv->back_offset;
256 offsets[2] = sarea_priv->third_offset;
257 num_pages = sarea_priv->third_handle ? 3 : 2;
146 258
147 spin_lock(&dev->drw_lock); 259 spin_lock(&dev->drw_lock);
148 260
@@ -154,6 +266,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
154 for (i = 0; i++ < nhits; 266 for (i = 0; i++ < nhits;
155 upper[0] = lower[0], lower[0] += slice[0], 267 upper[0] = lower[0], lower[0] += slice[0],
156 upper[1] = lower[1], lower[1] += slice[1]) { 268 upper[1] = lower[1], lower[1] += slice[1]) {
269 int init_drawrect = 1;
270
157 if (i == nhits) 271 if (i == nhits)
158 lower[0] = lower[1] = sarea_priv->height; 272 lower[0] = lower[1] = sarea_priv->height;
159 273
@@ -161,7 +275,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
161 drm_i915_vbl_swap_t *swap_hit = 275 drm_i915_vbl_swap_t *swap_hit =
162 list_entry(hit, drm_i915_vbl_swap_t, head); 276 list_entry(hit, drm_i915_vbl_swap_t, head);
163 struct drm_clip_rect *rect; 277 struct drm_clip_rect *rect;
164 int num_rects, pipe; 278 int num_rects, plane, front, back;
165 unsigned short top, bottom; 279 unsigned short top, bottom;
166 280
167 drw = drm_get_drawable_info(dev, swap_hit->drw_id); 281 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -169,10 +283,50 @@ static void i915_vblank_tasklet(struct drm_device *dev)
169 if (!drw) 283 if (!drw)
170 continue; 284 continue;
171 285
286 plane = swap_hit->plane;
287
288 if (swap_hit->flip) {
289 i915_dispatch_vsync_flip(dev, drw, plane);
290 continue;
291 }
292
293 if (init_drawrect) {
294 int width = sarea_priv->width;
295 int height = sarea_priv->height;
296 if (IS_I965G(dev)) {
297 BEGIN_LP_RING(4);
298
299 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
300 OUT_RING(0);
301 OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
302 OUT_RING(0);
303
304 ADVANCE_LP_RING();
305 } else {
306 BEGIN_LP_RING(6);
307
308 OUT_RING(GFX_OP_DRAWRECT_INFO);
309 OUT_RING(0);
310 OUT_RING(0);
311 OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
312 OUT_RING(0);
313 OUT_RING(0);
314
315 ADVANCE_LP_RING();
316 }
317
318 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
319
320 init_drawrect = 0;
321 }
322
172 rect = drw->rects; 323 rect = drw->rects;
173 pipe = swap_hit->pipe; 324 top = upper[plane];
174 top = upper[pipe]; 325 bottom = lower[plane];
175 bottom = lower[pipe]; 326
327 front = (dev_priv->sarea_priv->pf_current_page >>
328 (2 * plane)) & 0x3;
329 back = (front + 1) % num_pages;
176 330
177 for (num_rects = drw->num_rects; num_rects--; rect++) { 331 for (num_rects = drw->num_rects; num_rects--; rect++) {
178 int y1 = max(rect->y1, top); 332 int y1 = max(rect->y1, top);
@@ -184,20 +338,20 @@ static void i915_vblank_tasklet(struct drm_device *dev)
184 BEGIN_LP_RING(8); 338 BEGIN_LP_RING(8);
185 339
186 OUT_RING(cmd); 340 OUT_RING(cmd);
187 OUT_RING(pitchropcpp); 341 OUT_RING(ropcpp | dst_pitch);
188 OUT_RING((y1 << 16) | rect->x1); 342 OUT_RING((y1 << 16) | rect->x1);
189 OUT_RING((y2 << 16) | rect->x2); 343 OUT_RING((y2 << 16) | rect->x2);
190 OUT_RING(sarea_priv->front_offset); 344 OUT_RING(offsets[front]);
191 OUT_RING((y1 << 16) | rect->x1); 345 OUT_RING((y1 << 16) | rect->x1);
192 OUT_RING(pitchropcpp & 0xffff); 346 OUT_RING(src_pitch);
193 OUT_RING(sarea_priv->back_offset); 347 OUT_RING(offsets[back]);
194 348
195 ADVANCE_LP_RING(); 349 ADVANCE_LP_RING();
196 } 350 }
197 } 351 }
198 } 352 }
199 353
200 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 354 spin_unlock(&dev->drw_lock);
201 355
202 list_for_each_safe(hit, tmp, &hits) { 356 list_for_each_safe(hit, tmp, &hits) {
203 drm_i915_vbl_swap_t *swap_hit = 357 drm_i915_vbl_swap_t *swap_hit =
@@ -209,67 +363,112 @@ static void i915_vblank_tasklet(struct drm_device *dev)
209 } 363 }
210} 364}
211 365
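The front/back computation in the tasklet above packs the current page of each display plane into a small bitfield. As a hedged sketch (the two-bits-per-plane layout is inferred from the `>> (2 * plane)) & 0x3` expression above, not stated elsewhere in the patch), the bookkeeping reduces to:

#include <stdio.h>

/* Each plane's current page index occupies a two-bit field in
 * pf_current_page: bits 1:0 for plane A, bits 3:2 for plane B
 * (layout inferred from the shift by 2 * plane in the tasklet). */
static int current_front_page(unsigned int pf_current_page, int plane)
{
	return (pf_current_page >> (2 * plane)) & 0x3;
}

/* The blit target is simply the next page in the ring: two pages for
 * double buffering, three when a third buffer handle is present. */
static int next_back_page(int front, int num_pages)
{
	return (front + 1) % num_pages;
}

int main(void)
{
	unsigned int pf_current_page = 0x6;	/* plane A on page 2, plane B on page 1 */
	int front = current_front_page(pf_current_page, 1);

	printf("plane B: front %d, back %d\n", front, next_back_page(front, 3));
	return 0;
}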
366u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
367{
368 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
369 unsigned long high_frame;
370 unsigned long low_frame;
371 u32 high1, high2, low, count;
372 int pipe;
373
374 pipe = i915_get_pipe(dev, plane);
375 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
376 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
377
378 if (!i915_pipe_enabled(dev, pipe)) {
379 printk(KERN_ERR "trying to get vblank count for disabled "
380 "pipe %d\n", pipe);
381 return 0;
382 }
383
384 /*
385 * High & low register fields aren't synchronized, so make sure
386 * we get a low value that's stable across two reads of the high
387 * register.
388 */
389 do {
390 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
391 PIPE_FRAME_HIGH_SHIFT);
392 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
393 PIPE_FRAME_LOW_SHIFT);
394 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
395 PIPE_FRAME_HIGH_SHIFT);
396 } while (high1 != high2);
397
398 count = (high1 << 8) | low;
399
 400 /* The count may be reset by another driver (e.g. the 2D driver);
 401 we have no way to know whether it wrapped or was reset
 402 when it reads zero, so make a rough guess.
403 */
404 if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
405 dev->last_vblank[pipe] = 0;
406
407 return count;
408}
409
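The scheduling checks in this patch compare vblank sequence numbers with expressions of the form (counter - sequence) <= (1 << 23). A small self-contained sketch of that idiom follows; the half-range threshold of 1 << 23 matches the 24-bit frame counter noted above, and unsigned wraparound does the rest.

#include <stdint.h>
#include <stdio.h>

/* A sequence counts as "already passed" when it trails the current counter
 * by no more than half the counter range; larger differences are treated
 * as requests still in the future (or hopelessly stale). */
static int vblank_seq_passed(uint32_t current, uint32_t sequence)
{
	return (current - sequence) <= (1u << 23);
}

int main(void)
{
	/* Counter wrapped past the requested sequence: still detected as passed. */
	printf("%d\n", vblank_seq_passed(3, 0xfffffffeu));	/* prints 1 */
	/* Sequence lies ahead of the counter: not passed yet. */
	printf("%d\n", vblank_seq_passed(3, 100));		/* prints 0 */
	return 0;
}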
212irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 410irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
213{ 411{
214 struct drm_device *dev = (struct drm_device *) arg; 412 struct drm_device *dev = (struct drm_device *) arg;
215 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 413 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
216 u16 temp; 414 u32 iir;
217 u32 pipea_stats, pipeb_stats; 415 u32 pipea_stats, pipeb_stats;
218 416 int vblank = 0;
219 pipea_stats = I915_READ(I915REG_PIPEASTAT); 417
220 pipeb_stats = I915_READ(I915REG_PIPEBSTAT); 418 iir = I915_READ(I915REG_INT_IDENTITY_R);
221 419 if (iir == 0) {
222 temp = I915_READ16(I915REG_INT_IDENTITY_R); 420 DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
223 421 iir,
224 temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG); 422 I915_READ(I915REG_INT_MASK_R),
225 423 I915_READ(I915REG_INT_ENABLE_R),
226 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 424 I915_READ(I915REG_PIPEASTAT),
227 425 I915_READ(I915REG_PIPEBSTAT));
228 if (temp == 0)
229 return IRQ_NONE; 426 return IRQ_NONE;
427 }
230 428
231 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 429 /*
232 (void) I915_READ16(I915REG_INT_IDENTITY_R); 430 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
233 DRM_READMEMORYBARRIER(); 431 * we may get extra interrupts.
234 432 */
235 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 433 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
434 pipea_stats = I915_READ(I915REG_PIPEASTAT);
435 if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
436 I915_VBLANK_INTERRUPT_STATUS))
437 {
438 vblank++;
439 drm_handle_vblank(dev, i915_get_plane(dev, 0));
440 }
441 I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
442 }
443 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
444 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
445 if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
446 I915_VBLANK_INTERRUPT_STATUS))
447 {
448 vblank++;
449 drm_handle_vblank(dev, i915_get_plane(dev, 1));
450 }
451 I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
452 }
236 453
237 if (temp & USER_INT_FLAG) 454 if (dev_priv->sarea_priv)
238 DRM_WAKEUP(&dev_priv->irq_queue); 455 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
239 456
240 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { 457 I915_WRITE(I915REG_INT_IDENTITY_R, iir);
241 int vblank_pipe = dev_priv->vblank_pipe; 458 (void) I915_READ(I915REG_INT_IDENTITY_R); /* Flush posted write */
242
243 if ((vblank_pipe &
244 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
245 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
246 if (temp & VSYNC_PIPEA_FLAG)
247 atomic_inc(&dev->vbl_received);
248 if (temp & VSYNC_PIPEB_FLAG)
249 atomic_inc(&dev->vbl_received2);
250 } else if (((temp & VSYNC_PIPEA_FLAG) &&
251 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
252 ((temp & VSYNC_PIPEB_FLAG) &&
253 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
254 atomic_inc(&dev->vbl_received);
255
256 DRM_WAKEUP(&dev->vbl_queue);
257 drm_vbl_send_signals(dev);
258 459
460 if (iir & I915_USER_INTERRUPT) {
461 DRM_WAKEUP(&dev_priv->irq_queue);
462 }
463 if (vblank) {
259 if (dev_priv->swaps_pending > 0) 464 if (dev_priv->swaps_pending > 0)
260 drm_locked_tasklet(dev, i915_vblank_tasklet); 465 drm_locked_tasklet(dev, i915_vblank_tasklet);
261 I915_WRITE(I915REG_PIPEASTAT,
262 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
263 I915_VBLANK_CLEAR);
264 I915_WRITE(I915REG_PIPEBSTAT,
265 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
266 I915_VBLANK_CLEAR);
267 } 466 }
268 467
269 return IRQ_HANDLED; 468 return IRQ_HANDLED;
270} 469}
271 470
272static int i915_emit_irq(struct drm_device * dev) 471static int i915_emit_irq(struct drm_device *dev)
273{ 472{
274 drm_i915_private_t *dev_priv = dev->dev_private; 473 drm_i915_private_t *dev_priv = dev->dev_private;
275 RING_LOCALS; 474 RING_LOCALS;
@@ -316,42 +515,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
316 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 515 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
317 } 516 }
318 517
319 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 518 if (dev_priv->sarea_priv)
320 return ret; 519 dev_priv->sarea_priv->last_dispatch =
321} 520 READ_BREADCRUMB(dev_priv);
322
323static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
324 atomic_t *counter)
325{
326 drm_i915_private_t *dev_priv = dev->dev_private;
327 unsigned int cur_vblank;
328 int ret = 0;
329
330 if (!dev_priv) {
331 DRM_ERROR("called with no initialization\n");
332 return -EINVAL;
333 }
334
335 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
336 (((cur_vblank = atomic_read(counter))
337 - *sequence) <= (1<<23)));
338
339 *sequence = cur_vblank;
340
341 return ret; 521 return ret;
342} 522}
343 523
344
345int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
346{
347 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
348}
349
350int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
351{
352 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
353}
354
355/* Needs the lock as it touches the ring. 524/* Needs the lock as it touches the ring.
356 */ 525 */
357int i915_irq_emit(struct drm_device *dev, void *data, 526int i915_irq_emit(struct drm_device *dev, void *data,
@@ -394,18 +563,96 @@ int i915_irq_wait(struct drm_device *dev, void *data,
394 return i915_wait_irq(dev, irqwait->irq_seq); 563 return i915_wait_irq(dev, irqwait->irq_seq);
395} 564}
396 565
566int i915_enable_vblank(struct drm_device *dev, int plane)
567{
568 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
569 int pipe = i915_get_pipe(dev, plane);
570 u32 pipestat_reg = 0;
571 u32 pipestat;
572
573 switch (pipe) {
574 case 0:
575 pipestat_reg = I915REG_PIPEASTAT;
576 dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
577 break;
578 case 1:
579 pipestat_reg = I915REG_PIPEBSTAT;
580 dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
581 break;
582 default:
583 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
584 pipe);
585 break;
586 }
587
588 if (pipestat_reg)
589 {
590 pipestat = I915_READ (pipestat_reg);
591 /*
 592 * Older chips didn't have the start vblank interrupt,
 593 * so fall back to the plain vblank interrupt on those.
594 */
595 if (IS_I965G (dev))
596 pipestat |= I915_START_VBLANK_INTERRUPT_ENABLE;
597 else
598 pipestat |= I915_VBLANK_INTERRUPT_ENABLE;
599 /*
600 * Clear any pending status
601 */
602 pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
603 I915_VBLANK_INTERRUPT_STATUS);
604 I915_WRITE(pipestat_reg, pipestat);
605 }
606 I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
607
608 return 0;
609}
610
611void i915_disable_vblank(struct drm_device *dev, int plane)
612{
613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
614 int pipe = i915_get_pipe(dev, plane);
615 u32 pipestat_reg = 0;
616 u32 pipestat;
617
618 switch (pipe) {
619 case 0:
620 pipestat_reg = I915REG_PIPEASTAT;
621 dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
622 break;
623 case 1:
624 pipestat_reg = I915REG_PIPEBSTAT;
625 dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
626 break;
627 default:
628 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
629 pipe);
630 break;
631 }
632
633 I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
634 if (pipestat_reg)
635 {
636 pipestat = I915_READ (pipestat_reg);
637 pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE |
638 I915_VBLANK_INTERRUPT_ENABLE);
639 /*
640 * Clear any pending status
641 */
642 pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
643 I915_VBLANK_INTERRUPT_STATUS);
644 I915_WRITE(pipestat_reg, pipestat);
645 }
646}
647
397static void i915_enable_interrupt (struct drm_device *dev) 648static void i915_enable_interrupt (struct drm_device *dev)
398{ 649{
399 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 650 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
400 u16 flag;
401 651
402 flag = 0; 652 dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
403 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
404 flag |= VSYNC_PIPEA_FLAG;
405 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
406 flag |= VSYNC_PIPEB_FLAG;
407 653
408 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); 654 I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
655 dev_priv->irq_enabled = 1;
409} 656}
410 657
411/* Set the vblank monitor pipe 658/* Set the vblank monitor pipe
@@ -428,8 +675,6 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
428 675
429 dev_priv->vblank_pipe = pipe->pipe; 676 dev_priv->vblank_pipe = pipe->pipe;
430 677
431 i915_enable_interrupt (dev);
432
433 return 0; 678 return 0;
434} 679}
435 680
@@ -447,9 +692,9 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
447 692
448 flag = I915_READ(I915REG_INT_ENABLE_R); 693 flag = I915_READ(I915REG_INT_ENABLE_R);
449 pipe->pipe = 0; 694 pipe->pipe = 0;
450 if (flag & VSYNC_PIPEA_FLAG) 695 if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
451 pipe->pipe |= DRM_I915_VBLANK_PIPE_A; 696 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
452 if (flag & VSYNC_PIPEB_FLAG) 697 if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
453 pipe->pipe |= DRM_I915_VBLANK_PIPE_B; 698 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
454 699
455 return 0; 700 return 0;
@@ -464,27 +709,30 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
464 drm_i915_private_t *dev_priv = dev->dev_private; 709 drm_i915_private_t *dev_priv = dev->dev_private;
465 drm_i915_vblank_swap_t *swap = data; 710 drm_i915_vblank_swap_t *swap = data;
466 drm_i915_vbl_swap_t *vbl_swap; 711 drm_i915_vbl_swap_t *vbl_swap;
467 unsigned int pipe, seqtype, curseq; 712 unsigned int pipe, seqtype, curseq, plane;
468 unsigned long irqflags; 713 unsigned long irqflags;
469 struct list_head *list; 714 struct list_head *list;
715 int ret;
470 716
471 if (!dev_priv) { 717 if (!dev_priv) {
472 DRM_ERROR("%s called with no initialization\n", __func__); 718 DRM_ERROR("%s called with no initialization\n", __func__);
473 return -EINVAL; 719 return -EINVAL;
474 } 720 }
475 721
476 if (dev_priv->sarea_priv->rotation) { 722 if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) {
477 DRM_DEBUG("Rotation not supported\n"); 723 DRM_DEBUG("Rotation not supported\n");
478 return -EINVAL; 724 return -EINVAL;
479 } 725 }
480 726
481 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | 727 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
482 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 728 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
729 _DRM_VBLANK_FLIP)) {
483 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); 730 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
484 return -EINVAL; 731 return -EINVAL;
485 } 732 }
486 733
487 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 734 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
735 pipe = i915_get_pipe(dev, plane);
488 736
489 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 737 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
490 738
@@ -495,6 +743,11 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
495 743
496 spin_lock_irqsave(&dev->drw_lock, irqflags); 744 spin_lock_irqsave(&dev->drw_lock, irqflags);
497 745
746 /* It makes no sense to schedule a swap for a drawable that doesn't have
747 * valid information at this point. E.g. this could mean that the X
748 * server is too old to push drawable information to the DRM, in which
749 * case all such swaps would become ineffective.
750 */
498 if (!drm_get_drawable_info(dev, swap->drawable)) { 751 if (!drm_get_drawable_info(dev, swap->drawable)) {
499 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 752 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
500 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); 753 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
@@ -503,7 +756,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
503 756
504 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 757 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
505 758
506 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); 759 drm_update_vblank_count(dev, pipe);
760 curseq = drm_vblank_count(dev, pipe);
507 761
508 if (seqtype == _DRM_VBLANK_RELATIVE) 762 if (seqtype == _DRM_VBLANK_RELATIVE)
509 swap->sequence += curseq; 763 swap->sequence += curseq;
@@ -517,14 +771,43 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
517 } 771 }
518 } 772 }
519 773
774 if (swap->seqtype & _DRM_VBLANK_FLIP) {
775 swap->sequence--;
776
777 if ((curseq - swap->sequence) <= (1<<23)) {
778 struct drm_drawable_info *drw;
779
780 LOCK_TEST_WITH_RETURN(dev, file_priv);
781
782 spin_lock_irqsave(&dev->drw_lock, irqflags);
783
784 drw = drm_get_drawable_info(dev, swap->drawable);
785
786 if (!drw) {
787 spin_unlock_irqrestore(&dev->drw_lock,
788 irqflags);
789 DRM_DEBUG("Invalid drawable ID %d\n",
790 swap->drawable);
791 return -EINVAL;
792 }
793
794 i915_dispatch_vsync_flip(dev, drw, plane);
795
796 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
797
798 return 0;
799 }
800 }
801
520 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 802 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
521 803
522 list_for_each(list, &dev_priv->vbl_swaps.head) { 804 list_for_each(list, &dev_priv->vbl_swaps.head) {
523 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 805 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
524 806
525 if (vbl_swap->drw_id == swap->drawable && 807 if (vbl_swap->drw_id == swap->drawable &&
526 vbl_swap->pipe == pipe && 808 vbl_swap->plane == plane &&
527 vbl_swap->sequence == swap->sequence) { 809 vbl_swap->sequence == swap->sequence) {
810 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
528 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 811 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
529 DRM_DEBUG("Already scheduled\n"); 812 DRM_DEBUG("Already scheduled\n");
530 return 0; 813 return 0;
@@ -547,9 +830,19 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
547 830
548 DRM_DEBUG("\n"); 831 DRM_DEBUG("\n");
549 832
833 ret = drm_vblank_get(dev, pipe);
834 if (ret) {
835 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
836 return ret;
837 }
838
550 vbl_swap->drw_id = swap->drawable; 839 vbl_swap->drw_id = swap->drawable;
551 vbl_swap->pipe = pipe; 840 vbl_swap->plane = plane;
552 vbl_swap->sequence = swap->sequence; 841 vbl_swap->sequence = swap->sequence;
842 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
843
844 if (vbl_swap->flip)
845 swap->sequence++;
553 846
554 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 847 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
555 848
@@ -567,37 +860,57 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
567{ 860{
568 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 861 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
569 862
570 I915_WRITE16(I915REG_HWSTAM, 0xfffe); 863 I915_WRITE16(I915REG_HWSTAM, 0xeffe);
571 I915_WRITE16(I915REG_INT_MASK_R, 0x0); 864 I915_WRITE16(I915REG_INT_MASK_R, 0x0);
572 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 865 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
573} 866}
574 867
575void i915_driver_irq_postinstall(struct drm_device * dev) 868int i915_driver_irq_postinstall(struct drm_device * dev)
576{ 869{
577 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 870 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
871 int ret, num_pipes = 2;
578 872
579 spin_lock_init(&dev_priv->swaps_lock); 873 spin_lock_init(&dev_priv->swaps_lock);
580 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 874 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
581 dev_priv->swaps_pending = 0; 875 dev_priv->swaps_pending = 0;
582 876
583 if (!dev_priv->vblank_pipe) 877 dev_priv->user_irq_refcount = 0;
584 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; 878 dev_priv->irq_enable_reg = 0;
879
880 ret = drm_vblank_init(dev, num_pipes);
881 if (ret)
882 return ret;
883
884 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
885
585 i915_enable_interrupt(dev); 886 i915_enable_interrupt(dev);
586 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 887 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
888
889 /*
890 * Initialize the hardware status page IRQ location.
891 */
892
893 I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
894 return 0;
587} 895}
588 896
589void i915_driver_irq_uninstall(struct drm_device * dev) 897void i915_driver_irq_uninstall(struct drm_device * dev)
590{ 898{
591 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 899 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
592 u16 temp; 900 u32 temp;
593 901
594 if (!dev_priv) 902 if (!dev_priv)
595 return; 903 return;
596 904
597 I915_WRITE16(I915REG_HWSTAM, 0xffff); 905 dev_priv->irq_enabled = 0;
598 I915_WRITE16(I915REG_INT_MASK_R, 0xffff); 906 I915_WRITE(I915REG_HWSTAM, 0xffffffff);
599 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 907 I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
600 908 I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
601 temp = I915_READ16(I915REG_INT_IDENTITY_R); 909
602 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 910 temp = I915_READ(I915REG_PIPEASTAT);
911 I915_WRITE(I915REG_PIPEASTAT, temp);
912 temp = I915_READ(I915REG_PIPEBSTAT);
913 I915_WRITE(I915REG_PIPEBSTAT, temp);
914 temp = I915_READ(I915REG_INT_IDENTITY_R);
915 I915_WRITE(I915REG_INT_IDENTITY_R, temp);
603} 916}
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 5572939fc7d1..6b3790939e76 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
45static struct drm_driver driver = { 45static struct drm_driver driver = {
46 .driver_features = 46 .driver_features =
47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
48 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 48 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
49 DRIVER_IRQ_VBL,
50 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 49 .dev_priv_size = sizeof(drm_mga_buf_priv_t),
51 .load = mga_driver_load, 50 .load = mga_driver_load,
52 .unload = mga_driver_unload, 51 .unload = mga_driver_unload,
53 .lastclose = mga_driver_lastclose, 52 .lastclose = mga_driver_lastclose,
54 .dma_quiescent = mga_driver_dma_quiescent, 53 .dma_quiescent = mga_driver_dma_quiescent,
55 .device_is_agp = mga_driver_device_is_agp, 54 .device_is_agp = mga_driver_device_is_agp,
56 .vblank_wait = mga_driver_vblank_wait, 55 .get_vblank_counter = mga_get_vblank_counter,
56 .enable_vblank = mga_enable_vblank,
57 .disable_vblank = mga_disable_vblank,
57 .irq_preinstall = mga_driver_irq_preinstall, 58 .irq_preinstall = mga_driver_irq_preinstall,
58 .irq_postinstall = mga_driver_irq_postinstall, 59 .irq_postinstall = mga_driver_irq_postinstall,
59 .irq_uninstall = mga_driver_irq_uninstall, 60 .irq_uninstall = mga_driver_irq_uninstall,
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index f6ebd24bd587..8f7291f36363 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -120,6 +120,7 @@ typedef struct drm_mga_private {
120 u32 clear_cmd; 120 u32 clear_cmd;
121 u32 maccess; 121 u32 maccess;
122 122
123 atomic_t vbl_received; /**< Number of vblanks received. */
123 wait_queue_head_t fence_queue; 124 wait_queue_head_t fence_queue;
124 atomic_t last_fence_retired; 125 atomic_t last_fence_retired;
125 u32 next_fence_to_post; 126 u32 next_fence_to_post;
@@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
181extern int mga_warp_init(drm_mga_private_t * dev_priv); 182extern int mga_warp_init(drm_mga_private_t * dev_priv);
182 183
183 /* mga_irq.c */ 184 /* mga_irq.c */
185extern int mga_enable_vblank(struct drm_device *dev, int crtc);
186extern void mga_disable_vblank(struct drm_device *dev, int crtc);
187extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
184extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); 188extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
185extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 189extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
186extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); 190extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
187extern void mga_driver_irq_preinstall(struct drm_device * dev); 191extern void mga_driver_irq_preinstall(struct drm_device * dev);
188extern void mga_driver_irq_postinstall(struct drm_device * dev); 192extern int mga_driver_irq_postinstall(struct drm_device * dev);
189extern void mga_driver_irq_uninstall(struct drm_device * dev); 193extern void mga_driver_irq_uninstall(struct drm_device * dev);
190extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 194extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
191 unsigned long arg); 195 unsigned long arg);
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index 9302cb8f0f83..06852fb4b278 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -35,6 +35,20 @@
35#include "mga_drm.h" 35#include "mga_drm.h"
36#include "mga_drv.h" 36#include "mga_drv.h"
37 37
38u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
39{
40 const drm_mga_private_t *const dev_priv =
41 (drm_mga_private_t *) dev->dev_private;
42
43 if (crtc != 0) {
44 return 0;
45 }
46
47
48 return atomic_read(&dev_priv->vbl_received);
49}
50
51
38irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) 52irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39{ 53{
40 struct drm_device *dev = (struct drm_device *) arg; 54 struct drm_device *dev = (struct drm_device *) arg;
@@ -47,9 +61,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
47 /* VBLANK interrupt */ 61 /* VBLANK interrupt */
48 if (status & MGA_VLINEPEN) { 62 if (status & MGA_VLINEPEN) {
49 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); 63 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
50 atomic_inc(&dev->vbl_received); 64 atomic_inc(&dev_priv->vbl_received);
51 DRM_WAKEUP(&dev->vbl_queue); 65 drm_handle_vblank(dev, 0);
52 drm_vbl_send_signals(dev);
53 handled = 1; 66 handled = 1;
54 } 67 }
55 68
@@ -78,22 +91,34 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
78 return IRQ_NONE; 91 return IRQ_NONE;
79} 92}
80 93
81int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 94int mga_enable_vblank(struct drm_device *dev, int crtc)
82{ 95{
83 unsigned int cur_vblank; 96 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
84 int ret = 0;
85 97
86 /* Assume that the user has missed the current sequence number 98 if (crtc != 0) {
87 * by about a day rather than she wants to wait for years 99 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
88 * using vertical blanks... 100 crtc);
89 */ 101 return 0;
90 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 102 }
91 (((cur_vblank = atomic_read(&dev->vbl_received))
92 - *sequence) <= (1 << 23)));
93 103
94 *sequence = cur_vblank; 104 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
105 return 0;
106}
95 107
96 return ret; 108
109void mga_disable_vblank(struct drm_device *dev, int crtc)
110{
111 if (crtc != 0) {
112 DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
113 crtc);
114 }
115
116 /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
117 * a nice hardware counter that tracks the number of refreshes when
118 * the interrupt is disabled, and the kernel doesn't know the refresh
119 * rate to calculate an estimate.
120 */
121 /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
97} 122}
98 123
99int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) 124int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
@@ -125,14 +150,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
125 MGA_WRITE(MGA_ICLEAR, ~0); 150 MGA_WRITE(MGA_ICLEAR, ~0);
126} 151}
127 152
128void mga_driver_irq_postinstall(struct drm_device * dev) 153int mga_driver_irq_postinstall(struct drm_device * dev)
129{ 154{
130 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 155 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
156 int ret;
157
158 ret = drm_vblank_init(dev, 1);
159 if (ret)
160 return ret;
131 161
132 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); 162 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
133 163
134 /* Turn on vertical blank interrupt and soft trap interrupt. */ 164 /* Turn on soft trap interrupt. Vertical blank interrupts are enabled
135 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); 165 * in mga_enable_vblank.
166 */
167 MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
168 return 0;
136} 169}
137 170
138void mga_driver_irq_uninstall(struct drm_device * dev) 171void mga_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/char/drm/r128_drv.c b/drivers/char/drm/r128_drv.c
index 6108e7587e12..2888aa01ebc7 100644
--- a/drivers/char/drm/r128_drv.c
+++ b/drivers/char/drm/r128_drv.c
@@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
43static struct drm_driver driver = { 43static struct drm_driver driver = {
44 .driver_features = 44 .driver_features =
45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
47 DRIVER_IRQ_VBL,
48 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 47 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
49 .preclose = r128_driver_preclose, 48 .preclose = r128_driver_preclose,
50 .lastclose = r128_driver_lastclose, 49 .lastclose = r128_driver_lastclose,
51 .vblank_wait = r128_driver_vblank_wait, 50 .get_vblank_counter = r128_get_vblank_counter,
51 .enable_vblank = r128_enable_vblank,
52 .disable_vblank = r128_disable_vblank,
52 .irq_preinstall = r128_driver_irq_preinstall, 53 .irq_preinstall = r128_driver_irq_preinstall,
53 .irq_postinstall = r128_driver_irq_postinstall, 54 .irq_postinstall = r128_driver_irq_postinstall,
54 .irq_uninstall = r128_driver_irq_uninstall, 55 .irq_uninstall = r128_driver_irq_uninstall,
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 011105e51ac6..80af9e09e75d 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -97,6 +97,8 @@ typedef struct drm_r128_private {
97 u32 crtc_offset; 97 u32 crtc_offset;
98 u32 crtc_offset_cntl; 98 u32 crtc_offset_cntl;
99 99
100 atomic_t vbl_received;
101
100 u32 color_fmt; 102 u32 color_fmt;
101 unsigned int front_offset; 103 unsigned int front_offset;
102 unsigned int front_pitch; 104 unsigned int front_pitch;
@@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
149extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); 151extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
150extern int r128_do_cleanup_cce(struct drm_device * dev); 152extern int r128_do_cleanup_cce(struct drm_device * dev);
151 153
152extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 154extern int r128_enable_vblank(struct drm_device *dev, int crtc);
153 155extern void r128_disable_vblank(struct drm_device *dev, int crtc);
156extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
154extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); 157extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
155extern void r128_driver_irq_preinstall(struct drm_device * dev); 158extern void r128_driver_irq_preinstall(struct drm_device * dev);
156extern void r128_driver_irq_postinstall(struct drm_device * dev); 159extern int r128_driver_irq_postinstall(struct drm_device * dev);
157extern void r128_driver_irq_uninstall(struct drm_device * dev); 160extern void r128_driver_irq_uninstall(struct drm_device * dev);
158extern void r128_driver_lastclose(struct drm_device * dev); 161extern void r128_driver_lastclose(struct drm_device * dev);
159extern void r128_driver_preclose(struct drm_device * dev, 162extern void r128_driver_preclose(struct drm_device * dev,
diff --git a/drivers/char/drm/r128_irq.c b/drivers/char/drm/r128_irq.c
index c76fdca7662d..5b95bd898f95 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/char/drm/r128_irq.c
@@ -35,6 +35,16 @@
35#include "r128_drm.h" 35#include "r128_drm.h"
36#include "r128_drv.h" 36#include "r128_drv.h"
37 37
38u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
39{
40 const drm_r128_private_t *dev_priv = dev->dev_private;
41
42 if (crtc != 0)
43 return 0;
44
45 return atomic_read(&dev_priv->vbl_received);
46}
47
38irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) 48irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
39{ 49{
40 struct drm_device *dev = (struct drm_device *) arg; 50 struct drm_device *dev = (struct drm_device *) arg;
@@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
46 /* VBLANK interrupt */ 56 /* VBLANK interrupt */
47 if (status & R128_CRTC_VBLANK_INT) { 57 if (status & R128_CRTC_VBLANK_INT) {
48 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 58 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
49 atomic_inc(&dev->vbl_received); 59 atomic_inc(&dev_priv->vbl_received);
50 DRM_WAKEUP(&dev->vbl_queue); 60 drm_handle_vblank(dev, 0);
51 drm_vbl_send_signals(dev);
52 return IRQ_HANDLED; 61 return IRQ_HANDLED;
53 } 62 }
54 return IRQ_NONE; 63 return IRQ_NONE;
55} 64}
56 65
57int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 66int r128_enable_vblank(struct drm_device *dev, int crtc)
58{ 67{
59 unsigned int cur_vblank; 68 drm_r128_private_t *dev_priv = dev->dev_private;
60 int ret = 0;
61 69
62 /* Assume that the user has missed the current sequence number 70 if (crtc != 0) {
63 * by about a day rather than she wants to wait for years 71 DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
64 * using vertical blanks... 72 return -EINVAL;
65 */ 73 }
66 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
67 (((cur_vblank = atomic_read(&dev->vbl_received))
68 - *sequence) <= (1 << 23)));
69 74
70 *sequence = cur_vblank; 75 R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
76 return 0;
77}
78
79void r128_disable_vblank(struct drm_device *dev, int crtc)
80{
81 if (crtc != 0)
82 DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
71 83
72 return ret; 84 /*
85 * FIXME: implement proper interrupt disable by using the vblank
86 * counter register (if available)
87 *
88 * R128_WRITE(R128_GEN_INT_CNTL,
89 * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
90 */
73} 91}
74 92
75void r128_driver_irq_preinstall(struct drm_device * dev) 93void r128_driver_irq_preinstall(struct drm_device * dev)
@@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
82 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 100 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
83} 101}
84 102
85void r128_driver_irq_postinstall(struct drm_device * dev) 103int r128_driver_irq_postinstall(struct drm_device * dev)
86{ 104{
87 drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; 105 return drm_vblank_init(dev, 1);
88
89 /* Turn on VBL interrupt */
90 R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
91} 106}
92 107
93void r128_driver_irq_uninstall(struct drm_device * dev) 108void r128_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 349ac3d3b848..a2610319624d 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -59,8 +59,7 @@ static struct pci_device_id pciidlist[] = {
59static struct drm_driver driver = { 59static struct drm_driver driver = {
60 .driver_features = 60 .driver_features =
61 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 61 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
62 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | 62 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
63 DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
64 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 63 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
65 .load = radeon_driver_load, 64 .load = radeon_driver_load,
66 .firstopen = radeon_driver_firstopen, 65 .firstopen = radeon_driver_firstopen,
@@ -69,8 +68,9 @@ static struct drm_driver driver = {
69 .postclose = radeon_driver_postclose, 68 .postclose = radeon_driver_postclose,
70 .lastclose = radeon_driver_lastclose, 69 .lastclose = radeon_driver_lastclose,
71 .unload = radeon_driver_unload, 70 .unload = radeon_driver_unload,
72 .vblank_wait = radeon_driver_vblank_wait, 71 .get_vblank_counter = radeon_get_vblank_counter,
73 .vblank_wait2 = radeon_driver_vblank_wait2, 72 .enable_vblank = radeon_enable_vblank,
73 .disable_vblank = radeon_disable_vblank,
74 .dri_library_name = dri_library_name, 74 .dri_library_name = dri_library_name,
75 .irq_preinstall = radeon_driver_irq_preinstall, 75 .irq_preinstall = radeon_driver_irq_preinstall,
76 .irq_postinstall = radeon_driver_irq_postinstall, 76 .irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 173ae620223a..b791420bd3d9 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -304,6 +304,9 @@ typedef struct drm_radeon_private {
304 304
305 u32 scratch_ages[5]; 305 u32 scratch_ages[5];
306 306
307 unsigned int crtc_last_cnt;
308 unsigned int crtc2_last_cnt;
309
307 /* starting from here on, data is preserved accross an open */ 310 /* starting from here on, data is preserved accross an open */
308 uint32_t flags; /* see radeon_chip_flags */ 311 uint32_t flags; /* see radeon_chip_flags */
309 unsigned long fb_aper_offset; 312 unsigned long fb_aper_offset;
@@ -374,13 +377,13 @@ extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *
374extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); 377extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
375 378
376extern void radeon_do_release(struct drm_device * dev); 379extern void radeon_do_release(struct drm_device * dev);
377extern int radeon_driver_vblank_wait(struct drm_device * dev, 380extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
378 unsigned int *sequence); 381extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
379extern int radeon_driver_vblank_wait2(struct drm_device * dev, 382extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
380 unsigned int *sequence); 383extern void radeon_do_release(struct drm_device * dev);
381extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); 384extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
382extern void radeon_driver_irq_preinstall(struct drm_device * dev); 385extern void radeon_driver_irq_preinstall(struct drm_device * dev);
383extern void radeon_driver_irq_postinstall(struct drm_device * dev); 386extern int radeon_driver_irq_postinstall(struct drm_device * dev);
384extern void radeon_driver_irq_uninstall(struct drm_device * dev); 387extern void radeon_driver_irq_uninstall(struct drm_device * dev);
385extern int radeon_vblank_crtc_get(struct drm_device *dev); 388extern int radeon_vblank_crtc_get(struct drm_device *dev);
386extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); 389extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
@@ -558,6 +561,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
558 ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \ 561 ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
559 : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) 562 : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
560 563
564#define RADEON_CRTC_CRNT_FRAME 0x0214
565#define RADEON_CRTC2_CRNT_FRAME 0x0314
566
567#define RADEON_CRTC_STATUS 0x005c
568#define RADEON_CRTC2_STATUS 0x03fc
569
561#define RADEON_GEN_INT_CNTL 0x0040 570#define RADEON_GEN_INT_CNTL 0x0040
562# define RADEON_CRTC_VBLANK_MASK (1 << 0) 571# define RADEON_CRTC_VBLANK_MASK (1 << 0)
563# define RADEON_CRTC2_VBLANK_MASK (1 << 9) 572# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index 009af3814b6f..507d6b747a13 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -35,12 +35,61 @@
35#include "radeon_drm.h" 35#include "radeon_drm.h"
36#include "radeon_drv.h" 36#include "radeon_drv.h"
37 37
38static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, 38static void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
39 u32 mask)
40{ 39{
41 u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask; 40 drm_radeon_private_t *dev_priv = dev->dev_private;
41
42 if (state)
43 dev_priv->irq_enable_reg |= mask;
44 else
45 dev_priv->irq_enable_reg &= ~mask;
46
47 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
48}
49
50int radeon_enable_vblank(struct drm_device *dev, int crtc)
51{
52 switch (crtc) {
53 case 0:
54 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
55 break;
56 case 1:
57 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
58 break;
59 default:
60 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
61 crtc);
 62 return -EINVAL;
63 }
64
65 return 0;
66}
67
68void radeon_disable_vblank(struct drm_device *dev, int crtc)
69{
70 switch (crtc) {
71 case 0:
72 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
73 break;
74 case 1:
75 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
76 break;
77 default:
 78 DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
79 crtc);
80 break;
81 }
82}
83
84static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv)
85{
86 u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) &
87 (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
88 RADEON_CRTC2_VBLANK_STAT);
89
42 if (irqs) 90 if (irqs)
43 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); 91 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
92
44 return irqs; 93 return irqs;
45} 94}
46 95
@@ -72,39 +121,21 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
72 /* Only consider the bits we're interested in - others could be used 121 /* Only consider the bits we're interested in - others could be used
73 * outside the DRM 122 * outside the DRM
74 */ 123 */
75 stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 124 stat = radeon_acknowledge_irqs(dev_priv);
76 RADEON_CRTC_VBLANK_STAT |
77 RADEON_CRTC2_VBLANK_STAT));
78 if (!stat) 125 if (!stat)
79 return IRQ_NONE; 126 return IRQ_NONE;
80 127
81 stat &= dev_priv->irq_enable_reg; 128 stat &= dev_priv->irq_enable_reg;
82 129
83 /* SW interrupt */ 130 /* SW interrupt */
84 if (stat & RADEON_SW_INT_TEST) { 131 if (stat & RADEON_SW_INT_TEST)
85 DRM_WAKEUP(&dev_priv->swi_queue); 132 DRM_WAKEUP(&dev_priv->swi_queue);
86 }
87 133
88 /* VBLANK interrupt */ 134 /* VBLANK interrupt */
89 if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) { 135 if (stat & RADEON_CRTC_VBLANK_STAT)
90 int vblank_crtc = dev_priv->vblank_crtc; 136 drm_handle_vblank(dev, 0);
91 137 if (stat & RADEON_CRTC2_VBLANK_STAT)
92 if ((vblank_crtc & 138 drm_handle_vblank(dev, 1);
93 (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
94 (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
95 if (stat & RADEON_CRTC_VBLANK_STAT)
96 atomic_inc(&dev->vbl_received);
97 if (stat & RADEON_CRTC2_VBLANK_STAT)
98 atomic_inc(&dev->vbl_received2);
99 } else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
100 (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
101 ((stat & RADEON_CRTC2_VBLANK_STAT) &&
102 (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
103 atomic_inc(&dev->vbl_received);
104
105 DRM_WAKEUP(&dev->vbl_queue);
106 drm_vbl_send_signals(dev);
107 }
108 139
109 return IRQ_HANDLED; 140 return IRQ_HANDLED;
110} 141}
@@ -144,54 +175,27 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
144 return ret; 175 return ret;
145} 176}
146 177
147static int radeon_driver_vblank_do_wait(struct drm_device * dev, 178u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
148 unsigned int *sequence, int crtc)
149{ 179{
150 drm_radeon_private_t *dev_priv = 180 drm_radeon_private_t *dev_priv = dev->dev_private;
151 (drm_radeon_private_t *) dev->dev_private; 181 u32 crtc_cnt_reg, crtc_status_reg;
152 unsigned int cur_vblank; 182
153 int ret = 0;
154 int ack = 0;
155 atomic_t *counter;
156 if (!dev_priv) { 183 if (!dev_priv) {
157 DRM_ERROR("called with no initialization\n"); 184 DRM_ERROR("called with no initialization\n");
158 return -EINVAL; 185 return -EINVAL;
159 } 186 }
160 187
161 if (crtc == DRM_RADEON_VBLANK_CRTC1) { 188 if (crtc == 0) {
162 counter = &dev->vbl_received; 189 crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME;
163 ack |= RADEON_CRTC_VBLANK_STAT; 190 crtc_status_reg = RADEON_CRTC_STATUS;
164 } else if (crtc == DRM_RADEON_VBLANK_CRTC2) { 191 } else if (crtc == 1) {
165 counter = &dev->vbl_received2; 192 crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME;
166 ack |= RADEON_CRTC2_VBLANK_STAT; 193 crtc_status_reg = RADEON_CRTC2_STATUS;
167 } else 194 } else {
168 return -EINVAL; 195 return -EINVAL;
196 }
169 197
170 radeon_acknowledge_irqs(dev_priv, ack); 198 return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
171
172 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
173
174 /* Assume that the user has missed the current sequence number
175 * by about a day rather than she wants to wait for years
176 * using vertical blanks...
177 */
178 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
179 (((cur_vblank = atomic_read(counter))
180 - *sequence) <= (1 << 23)));
181
182 *sequence = cur_vblank;
183
184 return ret;
185}
186
187int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
188{
189 return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
190}
191
192int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
193{
194 return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
195} 199}
196 200
197/* Needs the lock as it touches the ring. 201/* Needs the lock as it touches the ring.
@@ -234,21 +238,6 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
234 return radeon_wait_irq(dev, irqwait->irq_seq); 238 return radeon_wait_irq(dev, irqwait->irq_seq);
235} 239}
236 240
237static void radeon_enable_interrupt(struct drm_device *dev)
238{
239 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
240
241 dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
242 if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
243 dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
244
245 if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
246 dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
247
248 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
249 dev_priv->irq_enabled = 1;
250}
251
252/* drm_dma.h hooks 241/* drm_dma.h hooks
253*/ 242*/
254void radeon_driver_irq_preinstall(struct drm_device * dev) 243void radeon_driver_irq_preinstall(struct drm_device * dev)
@@ -260,20 +249,27 @@ void radeon_driver_irq_preinstall(struct drm_device * dev)
260 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 249 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
261 250
262 /* Clear bits if they're already high */ 251 /* Clear bits if they're already high */
263 radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 252 radeon_acknowledge_irqs(dev_priv);
264 RADEON_CRTC_VBLANK_STAT |
265 RADEON_CRTC2_VBLANK_STAT));
266} 253}
267 254
268void radeon_driver_irq_postinstall(struct drm_device * dev) 255int radeon_driver_irq_postinstall(struct drm_device * dev)
269{ 256{
270 drm_radeon_private_t *dev_priv = 257 drm_radeon_private_t *dev_priv =
271 (drm_radeon_private_t *) dev->dev_private; 258 (drm_radeon_private_t *) dev->dev_private;
259 int ret;
272 260
273 atomic_set(&dev_priv->swi_emitted, 0); 261 atomic_set(&dev_priv->swi_emitted, 0);
274 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); 262 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
275 263
276 radeon_enable_interrupt(dev); 264 ret = drm_vblank_init(dev, 2);
265 if (ret)
266 return ret;
267
268 dev->max_vblank_count = 0x001fffff;
269
270 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
271
272 return 0;
277} 273}
278 274
279void radeon_driver_irq_uninstall(struct drm_device * dev) 275void radeon_driver_irq_uninstall(struct drm_device * dev)
@@ -315,6 +311,5 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
315 return -EINVAL; 311 return -EINVAL;
316 } 312 }
317 dev_priv->vblank_crtc = (unsigned int)value; 313 dev_priv->vblank_crtc = (unsigned int)value;
318 radeon_enable_interrupt(dev);
319 return 0; 314 return 0;
320} 315}
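
The radeon hunks above drop the driver-private vblank wait path (radeon_driver_vblank_do_wait and friends) in favour of the DRM core's vblank machinery: the driver now only reports a hardware frame counter and enables its interrupt source, while waiting and signalling live in the core. A rough sketch of that counter-plus-postinstall split follows; the EXAMPLE_* register names and example_mmio_read() are invented for illustration, not radeon symbols.

	/*
	 * Hedged sketch of the new get_vblank_counter / irq_postinstall split.
	 * Register names and the MMIO accessor are placeholders.
	 */
	u32 example_get_vblank_counter(struct drm_device *dev, int crtc)
	{
		void *dev_priv = dev->dev_private;
		u32 cnt_reg, status_reg;

		if (crtc == 0) {
			cnt_reg = EXAMPLE_CRTC_CRNT_FRAME;
			status_reg = EXAMPLE_CRTC_STATUS;
		} else if (crtc == 1) {
			cnt_reg = EXAMPLE_CRTC2_CRNT_FRAME;
			status_reg = EXAMPLE_CRTC2_STATUS;
		} else {
			return 0;
		}

		/* frame counter plus the in-vblank status bit, as in the hunk above */
		return example_mmio_read(dev_priv, cnt_reg) +
		       (example_mmio_read(dev_priv, status_reg) & 1);
	}

	int example_driver_irq_postinstall(struct drm_device *dev)
	{
		int ret;

		ret = drm_vblank_init(dev, 2);	/* two CRTCs worth of counters */
		if (ret)
			return ret;

		/* the hardware frame counter wraps at 21 bits on this part */
		dev->max_vblank_count = 0x001fffff;

		/* enabling the actual interrupt source stays driver-specific */
		return 0;
	}

With this split the core tracks waiters per CRTC, which is why radeon_vblank_crtc_set() no longer needs to call radeon_enable_interrupt() itself in the last hunk.
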
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index 80c01cdfa37d..37870a4a3dc7 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -40,11 +40,13 @@ static struct pci_device_id pciidlist[] = {
40static struct drm_driver driver = { 40static struct drm_driver driver = {
41 .driver_features = 41 .driver_features =
42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
43 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 43 DRIVER_IRQ_SHARED,
44 .load = via_driver_load, 44 .load = via_driver_load,
45 .unload = via_driver_unload, 45 .unload = via_driver_unload,
46 .context_dtor = via_final_context, 46 .context_dtor = via_final_context,
47 .vblank_wait = via_driver_vblank_wait, 47 .get_vblank_counter = via_get_vblank_counter,
48 .enable_vblank = via_enable_vblank,
49 .disable_vblank = via_disable_vblank,
48 .irq_preinstall = via_driver_irq_preinstall, 50 .irq_preinstall = via_driver_irq_preinstall,
49 .irq_postinstall = via_driver_irq_postinstall, 51 .irq_postinstall = via_driver_irq_postinstall,
50 .irq_uninstall = via_driver_irq_uninstall, 52 .irq_uninstall = via_driver_irq_uninstall,
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 2daae81874cd..fe67030e39ac 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -75,6 +75,7 @@ typedef struct drm_via_private {
75 struct timeval last_vblank; 75 struct timeval last_vblank;
76 int last_vblank_valid; 76 int last_vblank_valid;
77 unsigned usec_per_vblank; 77 unsigned usec_per_vblank;
78 atomic_t vbl_received;
78 drm_via_state_t hc_state; 79 drm_via_state_t hc_state;
79 char pci_buf[VIA_PCI_BUF_SIZE]; 80 char pci_buf[VIA_PCI_BUF_SIZE];
80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; 81 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
@@ -130,11 +131,13 @@ extern int via_init_context(struct drm_device * dev, int context);
130extern int via_final_context(struct drm_device * dev, int context); 131extern int via_final_context(struct drm_device * dev, int context);
131 132
132extern int via_do_cleanup_map(struct drm_device * dev); 133extern int via_do_cleanup_map(struct drm_device * dev);
133extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 134extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
135extern int via_enable_vblank(struct drm_device *dev, int crtc);
136extern void via_disable_vblank(struct drm_device *dev, int crtc);
134 137
135extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); 138extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
136extern void via_driver_irq_preinstall(struct drm_device * dev); 139extern void via_driver_irq_preinstall(struct drm_device * dev);
137extern void via_driver_irq_postinstall(struct drm_device * dev); 140extern int via_driver_irq_postinstall(struct drm_device * dev);
138extern void via_driver_irq_uninstall(struct drm_device * dev); 141extern void via_driver_irq_uninstall(struct drm_device * dev);
139 142
140extern int via_dma_cleanup(struct drm_device * dev); 143extern int via_dma_cleanup(struct drm_device * dev);
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index c6bb978a1106..f1ab6fc7c07e 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -92,8 +92,17 @@ static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
92static unsigned time_diff(struct timeval *now, struct timeval *then) 92static unsigned time_diff(struct timeval *now, struct timeval *then)
93{ 93{
94 return (now->tv_usec >= then->tv_usec) ? 94 return (now->tv_usec >= then->tv_usec) ?
95 now->tv_usec - then->tv_usec : 95 now->tv_usec - then->tv_usec :
96 1000000 - (then->tv_usec - now->tv_usec); 96 1000000 - (then->tv_usec - now->tv_usec);
97}
98
99u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
100{
101 drm_via_private_t *dev_priv = dev->dev_private;
102 if (crtc != 0)
103 return 0;
104
105 return atomic_read(&dev_priv->vbl_received);
97} 106}
98 107
99irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) 108irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -108,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
108 117
109 status = VIA_READ(VIA_REG_INTERRUPT); 118 status = VIA_READ(VIA_REG_INTERRUPT);
110 if (status & VIA_IRQ_VBLANK_PENDING) { 119 if (status & VIA_IRQ_VBLANK_PENDING) {
111 atomic_inc(&dev->vbl_received); 120 atomic_inc(&dev_priv->vbl_received);
112 if (!(atomic_read(&dev->vbl_received) & 0x0F)) { 121 if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
113 do_gettimeofday(&cur_vblank); 122 do_gettimeofday(&cur_vblank);
114 if (dev_priv->last_vblank_valid) { 123 if (dev_priv->last_vblank_valid) {
115 dev_priv->usec_per_vblank = 124 dev_priv->usec_per_vblank =
@@ -119,12 +128,11 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
119 dev_priv->last_vblank = cur_vblank; 128 dev_priv->last_vblank = cur_vblank;
120 dev_priv->last_vblank_valid = 1; 129 dev_priv->last_vblank_valid = 1;
121 } 130 }
122 if (!(atomic_read(&dev->vbl_received) & 0xFF)) { 131 if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
123 DRM_DEBUG("US per vblank is: %u\n", 132 DRM_DEBUG("US per vblank is: %u\n",
124 dev_priv->usec_per_vblank); 133 dev_priv->usec_per_vblank);
125 } 134 }
126 DRM_WAKEUP(&dev->vbl_queue); 135 drm_handle_vblank(dev, 0);
127 drm_vbl_send_signals(dev);
128 handled = 1; 136 handled = 1;
129 } 137 }
130 138
@@ -163,31 +171,34 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
163 } 171 }
164} 172}
165 173
166int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 174int via_enable_vblank(struct drm_device *dev, int crtc)
167{ 175{
168 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 176 drm_via_private_t *dev_priv = dev->dev_private;
169 unsigned int cur_vblank; 177 u32 status;
170 int ret = 0;
171 178
172 DRM_DEBUG("\n"); 179 if (crtc != 0) {
173 if (!dev_priv) { 180 DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
174 DRM_ERROR("called with no initialization\n");
175 return -EINVAL; 181 return -EINVAL;
176 } 182 }
177 183
178 viadrv_acknowledge_irqs(dev_priv); 184 status = VIA_READ(VIA_REG_INTERRUPT);
185 VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
179 186
180 /* Assume that the user has missed the current sequence number 187 VIA_WRITE8(0x83d4, 0x11);
181 * by about a day rather than she wants to wait for years 188 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
182 * using vertical blanks...
183 */
184 189
185 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 190 return 0;
186 (((cur_vblank = atomic_read(&dev->vbl_received)) - 191}
187 *sequence) <= (1 << 23)));
188 192
189 *sequence = cur_vblank; 193void via_disable_vblank(struct drm_device *dev, int crtc)
190 return ret; 194{
195 drm_via_private_t *dev_priv = dev->dev_private;
196
197 VIA_WRITE8(0x83d4, 0x11);
198 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
199
200 if (crtc != 0)
201 DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
191} 202}
192 203
193static int 204static int
@@ -292,23 +303,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
292 } 303 }
293} 304}
294 305
295void via_driver_irq_postinstall(struct drm_device * dev) 306int via_driver_irq_postinstall(struct drm_device * dev)
296{ 307{
297 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 308 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
298 u32 status; 309 u32 status;
299 310
300 DRM_DEBUG("\n"); 311 DRM_DEBUG("via_driver_irq_postinstall\n");
301 if (dev_priv) { 312 if (!dev_priv)
302 status = VIA_READ(VIA_REG_INTERRUPT); 313 return -EINVAL;
303 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
304 | dev_priv->irq_enable_mask);
305 314
306 /* Some magic, oh for some data sheets ! */ 315 drm_vblank_init(dev, 1);
316 status = VIA_READ(VIA_REG_INTERRUPT);
317 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
318 | dev_priv->irq_enable_mask);
307 319
308 VIA_WRITE8(0x83d4, 0x11); 320 /* Some magic, oh for some data sheets ! */
309 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 321 VIA_WRITE8(0x83d4, 0x11);
322 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
310 323
311 } 324 return 0;
312} 325}
313 326
314void via_driver_irq_uninstall(struct drm_device * dev) 327void via_driver_irq_uninstall(struct drm_device * dev)
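
In via_irq.c the per-device vbl_received counter moves into dev_priv, the handler reports vblanks through drm_handle_vblank() instead of waking dev->vbl_queue, and irq_postinstall now returns an int and calls drm_vblank_init(dev, 1). A minimal sketch of that interrupt path; the EXAMPLE_* names are placeholders, and it assumes the usual DRM_IRQ_ARGS convention of passing the drm_device through the void-pointer argument.

	/*
	 * Hedged sketch only: register names are placeholders, and the handler
	 * assumes the second DRM_IRQ_ARGS parameter ("arg") is the drm_device.
	 */
	irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
	{
		struct drm_device *dev = (struct drm_device *) arg;
		u32 status = EXAMPLE_READ(EXAMPLE_REG_INTERRUPT);

		if (status & EXAMPLE_IRQ_VBLANK_PENDING) {
			/* per-CRTC counting and waiter wakeups now happen in the core */
			drm_handle_vblank(dev, 0);
			return IRQ_HANDLED;
		}

		return IRQ_NONE;
	}

	int example_irq_postinstall(struct drm_device *dev)
	{
		int ret;

		ret = drm_vblank_init(dev, 1);	/* single CRTC */
		if (ret)
			return ret;

		/* then unmask the vblank interrupt in hardware, as the real code does */
		return 0;
	}
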
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 87532034d105..3f9e10001e19 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1031,7 +1031,7 @@ comment "Other IDE chipsets support"
1031comment "Note: most of these also require special kernel boot parameters" 1031comment "Note: most of these also require special kernel boot parameters"
1032 1032
1033config BLK_DEV_4DRIVES 1033config BLK_DEV_4DRIVES
1034 bool "Generic 4 drives/port support" 1034 tristate "Generic 4 drives/port support"
1035 help 1035 help
1036 Certain older chipsets, including the Tekram 690CD, use a single set 1036 Certain older chipsets, including the Tekram 690CD, use a single set
1037 of I/O ports at 0x1f0 to control up to four drives, instead of the 1037 of I/O ports at 0x1f0 to control up to four drives, instead of the
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index ec46c44b061c..713cef20622e 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -21,6 +21,8 @@
21#include <asm/arch/bast-map.h> 21#include <asm/arch/bast-map.h>
22#include <asm/arch/bast-irq.h> 22#include <asm/arch/bast-irq.h>
23 23
24#define DRV_NAME "bast-ide"
25
24static int __init bastide_register(unsigned int base, unsigned int aux, int irq) 26static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
25{ 27{
26 ide_hwif_t *hwif; 28 ide_hwif_t *hwif;
@@ -33,27 +35,23 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
33 base += BAST_IDE_CS; 35 base += BAST_IDE_CS;
34 aux += BAST_IDE_CS; 36 aux += BAST_IDE_CS;
35 37
36 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { 38 for (i = 0; i <= 7; i++) {
37 hw.io_ports[i] = (unsigned long)base; 39 hw.io_ports_array[i] = (unsigned long)base;
38 base += 0x20; 40 base += 0x20;
39 } 41 }
40 42
41 hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20); 43 hw.io_ports.ctl_addr = aux + (6 * 0x20);
42 hw.irq = irq; 44 hw.irq = irq;
43 45
44 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 46 hwif = ide_find_port();
45 if (hwif == NULL) 47 if (hwif == NULL)
46 goto out; 48 goto out;
47 49
48 i = hwif->index; 50 i = hwif->index;
49 51
50 if (hwif->present) 52 ide_init_port_data(hwif, i);
51 ide_unregister(i);
52 else
53 ide_init_port_data(hwif, i);
54
55 ide_init_port_hw(hwif, &hw); 53 ide_init_port_hw(hwif, &hw);
56 hwif->quirkproc = NULL; 54 hwif->port_ops = NULL;
57 55
58 idx[0] = i; 56 idx[0] = i;
59 57
@@ -64,6 +62,8 @@ out:
64 62
65static int __init bastide_init(void) 63static int __init bastide_init(void)
66{ 64{
65 unsigned long base = BAST_VA_IDEPRI + BAST_IDE_CS;
66
67 /* we can treat the VR1000 and the BAST the same */ 67 /* we can treat the VR1000 and the BAST the same */
68 68
69 if (!(machine_is_bast() || machine_is_vr1000())) 69 if (!(machine_is_bast() || machine_is_vr1000()))
@@ -71,6 +71,11 @@ static int __init bastide_init(void)
71 71
72 printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n"); 72 printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n");
73 73
74 if (!request_mem_region(base, 0x400000, DRV_NAME)) {
75 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
76 return -EBUSY;
77 }
78
74 bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0); 79 bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0);
75 bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1); 80 bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1);
76 81
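
The bast-ide hunk shows the new hw_regs_t layout used throughout this series: the eight taskfile registers are filled in through io_ports_array[], the control register through the named io_ports.ctl_addr field, ide_find_port() no longer takes a port argument, and the driver claims its MMIO window with request_mem_region() before registering. A condensed sketch of that setup; the 0x20 register stride and error handling are assumptions mirroring the code above, not quotations of it.

	static int __init example_ide_register(unsigned long base, unsigned long aux,
					       int irq)
	{
		hw_regs_t hw;
		ide_hwif_t *hwif;
		int i;

		memset(&hw, 0, sizeof(hw));

		for (i = 0; i <= 7; i++) {		/* data .. status */
			hw.io_ports_array[i] = base;
			base += 0x20;
		}
		hw.io_ports.ctl_addr = aux + (6 * 0x20);
		hw.irq = irq;

		hwif = ide_find_port();			/* no port argument any more */
		if (hwif == NULL)
			return -ENOENT;

		ide_init_port_data(hwif, hwif->index);
		ide_init_port_hw(hwif, &hw);

		return 0;
	}
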
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index e816b0ffcfe6..124445c20921 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -191,6 +191,10 @@ static void icside_maskproc(ide_drive_t *drive, int mask)
191 local_irq_restore(flags); 191 local_irq_restore(flags);
192} 192}
193 193
194static const struct ide_port_ops icside_v6_no_dma_port_ops = {
195 .maskproc = icside_maskproc,
196};
197
194#ifdef CONFIG_BLK_DEV_IDEDMA_ICS 198#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
195/* 199/*
196 * SG-DMA support. 200 * SG-DMA support.
@@ -266,6 +270,11 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
266 ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data); 270 ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
267} 271}
268 272
273static const struct ide_port_ops icside_v6_port_ops = {
274 .set_dma_mode = icside_set_dma_mode,
275 .maskproc = icside_maskproc,
276};
277
269static void icside_dma_host_set(ide_drive_t *drive, int on) 278static void icside_dma_host_set(ide_drive_t *drive, int on)
270{ 279{
271} 280}
@@ -375,32 +384,40 @@ static void icside_dma_lost_irq(ide_drive_t *drive)
375 printk(KERN_ERR "%s: IRQ lost\n", drive->name); 384 printk(KERN_ERR "%s: IRQ lost\n", drive->name);
376} 385}
377 386
378static void icside_dma_init(ide_hwif_t *hwif) 387static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
379{ 388{
380 hwif->dmatable_cpu = NULL; 389 hwif->dmatable_cpu = NULL;
381 hwif->dmatable_dma = 0; 390 hwif->dmatable_dma = 0;
382 hwif->set_dma_mode = icside_set_dma_mode; 391
383 392 return 0;
384 hwif->dma_host_set = icside_dma_host_set;
385 hwif->dma_setup = icside_dma_setup;
386 hwif->dma_exec_cmd = icside_dma_exec_cmd;
387 hwif->dma_start = icside_dma_start;
388 hwif->ide_dma_end = icside_dma_end;
389 hwif->ide_dma_test_irq = icside_dma_test_irq;
390 hwif->dma_timeout = icside_dma_timeout;
391 hwif->dma_lost_irq = icside_dma_lost_irq;
392} 393}
394
395static const struct ide_dma_ops icside_v6_dma_ops = {
396 .dma_host_set = icside_dma_host_set,
397 .dma_setup = icside_dma_setup,
398 .dma_exec_cmd = icside_dma_exec_cmd,
399 .dma_start = icside_dma_start,
400 .dma_end = icside_dma_end,
401 .dma_test_irq = icside_dma_test_irq,
402 .dma_timeout = icside_dma_timeout,
403 .dma_lost_irq = icside_dma_lost_irq,
404};
393#else 405#else
394#define icside_dma_init(hwif) (0) 406#define icside_v6_dma_ops NULL
395#endif 407#endif
396 408
409static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
410{
411 return -EOPNOTSUPP;
412}
413
397static ide_hwif_t * 414static ide_hwif_t *
398icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec) 415icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec)
399{ 416{
400 unsigned long port = (unsigned long)base + info->dataoffset; 417 unsigned long port = (unsigned long)base + info->dataoffset;
401 ide_hwif_t *hwif; 418 ide_hwif_t *hwif;
402 419
403 hwif = ide_find_port(port); 420 hwif = ide_find_port();
404 if (hwif) { 421 if (hwif) {
405 int i; 422 int i;
406 423
@@ -408,15 +425,14 @@ icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *e
408 * Ensure we're using MMIO 425 * Ensure we're using MMIO
409 */ 426 */
410 default_hwif_mmiops(hwif); 427 default_hwif_mmiops(hwif);
411 hwif->mmio = 1;
412 428
413 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { 429 for (i = 0; i <= 7; i++) {
414 hwif->io_ports[i] = port; 430 hwif->io_ports_array[i] = port;
415 port += 1 << info->stepping; 431 port += 1 << info->stepping;
416 } 432 }
417 hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset; 433 hwif->io_ports.ctl_addr =
434 (unsigned long)base + info->ctrloffset;
418 hwif->irq = ec->irq; 435 hwif->irq = ec->irq;
419 hwif->noprobe = 0;
420 hwif->chipset = ide_acorn; 436 hwif->chipset = ide_acorn;
421 hwif->gendev.parent = &ec->dev; 437 hwif->gendev.parent = &ec->dev;
422 hwif->dev = &ec->dev; 438 hwif->dev = &ec->dev;
@@ -462,9 +478,10 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
462} 478}
463 479
464static const struct ide_port_info icside_v6_port_info __initdata = { 480static const struct ide_port_info icside_v6_port_info __initdata = {
465 .host_flags = IDE_HFLAG_SERIALIZE | 481 .init_dma = icside_dma_off_init,
466 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */ 482 .port_ops = &icside_v6_no_dma_port_ops,
467 IDE_HFLAG_NO_AUTOTUNE, 483 .dma_ops = &icside_v6_dma_ops,
484 .host_flags = IDE_HFLAG_SERIALIZE,
468 .mwdma_mask = ATA_MWDMA2, 485 .mwdma_mask = ATA_MWDMA2,
469 .swdma_mask = ATA_SWDMA2, 486 .swdma_mask = ATA_SWDMA2,
470}; 487};
@@ -526,21 +543,19 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
526 state->hwif[0] = hwif; 543 state->hwif[0] = hwif;
527 state->hwif[1] = mate; 544 state->hwif[1] = mate;
528 545
529 hwif->maskproc = icside_maskproc;
530 hwif->hwif_data = state; 546 hwif->hwif_data = state;
531 hwif->config_data = (unsigned long)ioc_base; 547 hwif->config_data = (unsigned long)ioc_base;
532 hwif->select_data = sel; 548 hwif->select_data = sel;
533 549
534 mate->maskproc = icside_maskproc;
535 mate->hwif_data = state; 550 mate->hwif_data = state;
536 mate->config_data = (unsigned long)ioc_base; 551 mate->config_data = (unsigned long)ioc_base;
537 mate->select_data = sel | 1; 552 mate->select_data = sel | 1;
538 553
539 if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) { 554 if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
540 icside_dma_init(hwif); 555 d.init_dma = icside_dma_init;
541 icside_dma_init(mate); 556 d.port_ops = &icside_v6_port_ops;
542 } else 557 d.dma_ops = NULL;
543 d.mwdma_mask = d.swdma_mask = 0; 558 }
544 559
545 idx[0] = hwif->index; 560 idx[0] = hwif->index;
546 idx[1] = mate->index; 561 idx[1] = mate->index;
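
The icside conversion is representative of the rest of the IDE changes below: per-hwif method pointers (set_dma_mode, maskproc, the dma_* hooks) are replaced by shared const ops tables, and DMA setup moves behind an init_dma() callback in the ide_port_info. The shape of that arrangement, with placeholder example_* function names:

	/* All example_* functions are hypothetical; only the structure is the point. */
	static const struct ide_port_ops example_port_ops = {
		.set_dma_mode	= example_set_dma_mode,
		.maskproc	= example_maskproc,
	};

	static const struct ide_dma_ops example_dma_ops = {
		.dma_host_set	= example_dma_host_set,
		.dma_setup	= example_dma_setup,
		.dma_exec_cmd	= example_dma_exec_cmd,
		.dma_start	= example_dma_start,
		.dma_end	= example_dma_end,
		.dma_test_irq	= example_dma_test_irq,
		.dma_timeout	= example_dma_timeout,
		.dma_lost_irq	= example_dma_lost_irq,
	};

	static const struct ide_port_info example_port_info __initdata = {
		.init_dma	= example_init_dma,	/* 0 on success, -errno if DMA is unavailable */
		.port_ops	= &example_port_ops,
		.dma_ops	= &example_dma_ops,
		.host_flags	= IDE_HFLAG_SERIALIZE,
		.mwdma_mask	= ATA_MWDMA2,
		.swdma_mask	= ATA_SWDMA2,
	};

The same const-table pattern appears again in the cris and palm_bk3710 hunks further down.
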
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index be9ff7334c52..4263ffd4ab20 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -14,6 +14,8 @@
14#include <asm/mach-types.h> 14#include <asm/mach-types.h>
15#include <asm/irq.h> 15#include <asm/irq.h>
16 16
17#define DRV_NAME "ide_arm"
18
17#ifdef CONFIG_ARCH_CLPS7500 19#ifdef CONFIG_ARCH_CLPS7500
18# include <asm/arch/hardware.h> 20# include <asm/arch/hardware.h>
19# 21#
@@ -28,13 +30,27 @@ static int __init ide_arm_init(void)
28{ 30{
29 ide_hwif_t *hwif; 31 ide_hwif_t *hwif;
30 hw_regs_t hw; 32 hw_regs_t hw;
33 unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
31 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 34 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
32 35
36 if (!request_region(base, 8, DRV_NAME)) {
37 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
38 DRV_NAME, base, base + 7);
39 return -EBUSY;
40 }
41
42 if (!request_region(ctl, 1, DRV_NAME)) {
43 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
44 DRV_NAME, ctl);
45 release_region(base, 8);
46 return -EBUSY;
47 }
48
33 memset(&hw, 0, sizeof(hw)); 49 memset(&hw, 0, sizeof(hw));
34 ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206); 50 ide_std_init_ports(&hw, base, ctl);
35 hw.irq = IDE_ARM_IRQ; 51 hw.irq = IDE_ARM_IRQ;
36 52
37 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 53 hwif = ide_find_port();
38 if (hwif) { 54 if (hwif) {
39 ide_init_port_hw(hwif, &hw); 55 ide_init_port_hw(hwif, &hw);
40 idx[0] = hwif->index; 56 idx[0] = hwif->index;
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 420fcb78a7cd..aaf32541622d 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -96,11 +96,11 @@ static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
96 u16 val16; 96 u16 val16;
97 97
98 /* DMA Data Setup */ 98 /* DMA Data Setup */
99 t0 = (palm_bk3710_udmatimings[mode].cycletime + ide_palm_clk - 1) 99 t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
100 / ide_palm_clk - 1; 100 ide_palm_clk) - 1;
101 tenv = (20 + ide_palm_clk - 1) / ide_palm_clk - 1; 101 tenv = DIV_ROUND_UP(20, ide_palm_clk) - 1;
102 trp = (palm_bk3710_udmatimings[mode].rptime + ide_palm_clk - 1) 102 trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
103 / ide_palm_clk - 1; 103 ide_palm_clk) - 1;
104 104
105 /* udmatim Register */ 105 /* udmatim Register */
106 val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0); 106 val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
@@ -141,8 +141,8 @@ static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
141 cycletime = max_t(int, t->cycle, min_cycle); 141 cycletime = max_t(int, t->cycle, min_cycle);
142 142
143 /* DMA Data Setup */ 143 /* DMA Data Setup */
144 t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk; 144 t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
145 td = (t->active + ide_palm_clk - 1) / ide_palm_clk; 145 td = DIV_ROUND_UP(t->active, ide_palm_clk);
146 tkw = t0 - td - 1; 146 tkw = t0 - td - 1;
147 td -= 1; 147 td -= 1;
148 148
@@ -168,9 +168,9 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
168 struct ide_timing *t; 168 struct ide_timing *t;
169 169
170 /* PIO Data Setup */ 170 /* PIO Data Setup */
171 t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk; 171 t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
172 t2 = (ide_timing_find_mode(XFER_PIO_0 + mode)->active + 172 t2 = DIV_ROUND_UP(ide_timing_find_mode(XFER_PIO_0 + mode)->active,
173 ide_palm_clk - 1) / ide_palm_clk; 173 ide_palm_clk);
174 174
175 t2i = t0 - t2 - 1; 175 t2i = t0 - t2 - 1;
176 t2 -= 1; 176 t2 -= 1;
@@ -192,8 +192,8 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
192 192
193 /* TASKFILE Setup */ 193 /* TASKFILE Setup */
194 t = ide_timing_find_mode(XFER_PIO_0 + mode); 194 t = ide_timing_find_mode(XFER_PIO_0 + mode);
195 t0 = (t->cyc8b + ide_palm_clk - 1) / ide_palm_clk; 195 t0 = DIV_ROUND_UP(t->cyc8b, ide_palm_clk);
196 t2 = (t->act8b + ide_palm_clk - 1) / ide_palm_clk; 196 t2 = DIV_ROUND_UP(t->act8b, ide_palm_clk);
197 197
198 t2i = t0 - t2 - 1; 198 t2i = t0 - t2 - 1;
199 t2 -= 1; 199 t2 -= 1;
@@ -317,17 +317,31 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
317 return ATA_CBL_PATA80; 317 return ATA_CBL_PATA80;
318} 318}
319 319
320static void __devinit palm_bk3710_init_hwif(ide_hwif_t *hwif) 320static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
321 const struct ide_port_info *d)
321{ 322{
322 hwif->set_pio_mode = palm_bk3710_set_pio_mode; 323 unsigned long base =
323 hwif->set_dma_mode = palm_bk3710_set_dma_mode; 324 hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
324 325
325 hwif->cable_detect = palm_bk3710_cable_detect; 326 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
327
328 if (ide_allocate_dma_engine(hwif))
329 return -1;
330
331 ide_setup_dma(hwif, base);
332
333 return 0;
326} 334}
327 335
336static const struct ide_port_ops palm_bk3710_ports_ops = {
337 .set_pio_mode = palm_bk3710_set_pio_mode,
338 .set_dma_mode = palm_bk3710_set_dma_mode,
339 .cable_detect = palm_bk3710_cable_detect,
340};
341
328static const struct ide_port_info __devinitdata palm_bk3710_port_info = { 342static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
329 .init_hwif = palm_bk3710_init_hwif, 343 .init_dma = palm_bk3710_init_dma,
330 .host_flags = IDE_HFLAG_NO_DMA, /* hack (no PCI) */ 344 .port_ops = &palm_bk3710_ports_ops,
331 .pio_mask = ATA_PIO4, 345 .pio_mask = ATA_PIO4,
332 .udma_mask = ATA_UDMA4, /* (input clk 99MHz) */ 346 .udma_mask = ATA_UDMA4, /* (input clk 99MHz) */
333 .mwdma_mask = ATA_MWDMA2, 347 .mwdma_mask = ATA_MWDMA2,
@@ -372,30 +386,24 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
372 386
373 pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET; 387 pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
374 for (i = 0; i < IDE_NR_PORTS - 2; i++) 388 for (i = 0; i < IDE_NR_PORTS - 2; i++)
375 hw.io_ports[i] = pribase + i; 389 hw.io_ports_array[i] = pribase + i;
376 hw.io_ports[IDE_CONTROL_OFFSET] = mem->start + 390 hw.io_ports.ctl_addr = mem->start +
377 IDE_PALM_ATA_PRI_CTL_OFFSET; 391 IDE_PALM_ATA_PRI_CTL_OFFSET;
378 hw.irq = irq->start; 392 hw.irq = irq->start;
379 hw.chipset = ide_palm3710; 393 hw.chipset = ide_palm3710;
380 394
381 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 395 hwif = ide_find_port();
382 if (hwif == NULL) 396 if (hwif == NULL)
383 goto out; 397 goto out;
384 398
385 i = hwif->index; 399 i = hwif->index;
386 400
387 if (hwif->present) 401 ide_init_port_data(hwif, i);
388 ide_unregister(i);
389 else
390 ide_init_port_data(hwif, i);
391
392 ide_init_port_hw(hwif, &hw); 402 ide_init_port_hw(hwif, &hw);
393 403
394 hwif->mmio = 1; 404 hwif->mmio = 1;
395 default_hwif_mmiops(hwif); 405 default_hwif_mmiops(hwif);
396 406
397 ide_setup_dma(hwif, mem->start);
398
399 idx[0] = i; 407 idx[0] = i;
400 408
401 ide_device_add(idx, &palm_bk3710_port_info); 409 ide_device_add(idx, &palm_bk3710_port_info);
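
Several palm_bk3710 timing calculations above are rewritten with DIV_ROUND_UP(), the kernel.h helper for ceiling division; it is equivalent to the open-coded (n + d - 1) / d the old code spelled out by hand. A small illustration with made-up argument names:

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/*
	 * Round a required cycle time up to a whole number of clock periods.
	 * With cycletime = 100 ns and a 7 ns clock this yields
	 * DIV_ROUND_UP(100, 7) = 15 periods.
	 */
	static unsigned int example_clk_periods(unsigned int cycletime,
						unsigned int clk_period)
	{
		return DIV_ROUND_UP(cycletime, clk_period);
	}
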
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index b30adcf321c3..babc1a5e128d 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -17,11 +17,11 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
17 unsigned long port = (unsigned long)base; 17 unsigned long port = (unsigned long)base;
18 int i; 18 int i;
19 19
20 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { 20 for (i = 0; i <= 7; i++) {
21 hw->io_ports[i] = port; 21 hw->io_ports_array[i] = port;
22 port += sz; 22 port += sz;
23 } 23 }
24 hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; 24 hw->io_ports.ctl_addr = (unsigned long)ctrl;
25 hw->irq = irq; 25 hw->irq = irq;
26} 26}
27 27
@@ -44,7 +44,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
44 goto release; 44 goto release;
45 } 45 }
46 46
47 hwif = ide_find_port((unsigned long)base); 47 hwif = ide_find_port();
48 if (hwif) { 48 if (hwif) {
49 memset(&hw, 0, sizeof(hw)); 49 memset(&hw, 0, sizeof(hw));
50 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); 50 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
@@ -53,7 +53,6 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
53 53
54 ide_init_port_hw(hwif, &hw); 54 ide_init_port_hw(hwif, &hw);
55 55
56 hwif->mmio = 1;
57 default_hwif_mmiops(hwif); 56 default_hwif_mmiops(hwif);
58 57
59 idx[0] = hwif->index; 58 idx[0] = hwif->index;
@@ -76,7 +75,7 @@ static void __devexit rapide_remove(struct expansion_card *ec)
76 75
77 ecard_set_drvdata(ec, NULL); 76 ecard_set_drvdata(ec, NULL);
78 77
79 ide_unregister(hwif->index); 78 ide_unregister(hwif);
80 79
81 ecard_release_resources(ec); 80 ecard_release_resources(ec);
82} 81}
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 31266d278095..9df26855bc05 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -88,8 +88,8 @@ enum /* Transfer types */
88int 88int
89cris_ide_ack_intr(ide_hwif_t* hwif) 89cris_ide_ack_intr(ide_hwif_t* hwif)
90{ 90{
91 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, 91 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
92 int, hwif->io_ports[0]); 92 hwif->io_ports.data_addr);
93 REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel); 93 REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
94 return 1; 94 return 1;
95} 95}
@@ -231,7 +231,7 @@ cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,
231 ide_hwif_t *hwif = drive->hwif; 231 ide_hwif_t *hwif = drive->hwif;
232 232
233 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, 233 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
234 hwif->io_ports[IDE_DATA_OFFSET]); 234 hwif->io_ports.data_addr);
235 reg_ata_rw_trf_cnt trf_cnt = {0}; 235 reg_ata_rw_trf_cnt trf_cnt = {0};
236 236
237 mycontext.saved_data = (dma_descr_data*)virt_to_phys(d); 237 mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
@@ -271,7 +271,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
271 int intr = REG_RD_INT(ata, regi_ata, r_intr); 271 int intr = REG_RD_INT(ata, regi_ata, r_intr);
272 272
273 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, 273 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
274 hwif->io_ports[IDE_DATA_OFFSET]); 274 hwif->io_ports.data_addr);
275 275
276 return intr & (1 << ctrl2.sel) ? 1 : 0; 276 return intr & (1 << ctrl2.sel) ? 1 : 0;
277} 277}
@@ -531,7 +531,7 @@ static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int d
531 *R_ATA_CTRL_DATA = 531 *R_ATA_CTRL_DATA =
532 cmd | 532 cmd |
533 IO_FIELD(R_ATA_CTRL_DATA, data, 533 IO_FIELD(R_ATA_CTRL_DATA, data,
534 drive->hwif->io_ports[IDE_DATA_OFFSET]) | 534 drive->hwif->io_ports.data_addr) |
535 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | 535 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
536 IO_STATE(R_ATA_CTRL_DATA, multi, on) | 536 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
537 IO_STATE(R_ATA_CTRL_DATA, dma_size, word); 537 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
@@ -550,7 +550,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
550{ 550{
551 int intr = *R_IRQ_MASK0_RD; 551 int intr = *R_IRQ_MASK0_RD;
552 int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel, 552 int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
553 drive->hwif->io_ports[IDE_DATA_OFFSET]); 553 drive->hwif->io_ports.data_addr);
554 554
555 return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0; 555 return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
556} 556}
@@ -644,7 +644,7 @@ cris_ide_inw(unsigned long reg) {
644 * call will also timeout on busy, but as long as the 644 * call will also timeout on busy, but as long as the
645 * write is still performed, everything will be fine. 645 * write is still performed, everything will be fine.
646 */ 646 */
647 if (cris_ide_get_reg(reg) == IDE_STATUS_OFFSET) 647 if (cris_ide_get_reg(reg) == 7)
648 return BUSY_STAT; 648 return BUSY_STAT;
649 else 649 else
650 /* For other rare cases we assume 0 is good enough. */ 650 /* For other rare cases we assume 0 is good enough. */
@@ -673,11 +673,6 @@ cris_ide_inb(unsigned long reg)
673 return (unsigned char)cris_ide_inw(reg); 673 return (unsigned char)cris_ide_inw(reg);
674} 674}
675 675
676static int cris_dma_end (ide_drive_t *drive);
677static int cris_dma_setup (ide_drive_t *drive);
678static void cris_dma_exec_cmd (ide_drive_t *drive, u8 command);
679static int cris_dma_test_irq(ide_drive_t *drive);
680static void cris_dma_start(ide_drive_t *drive);
681static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int); 676static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
682static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int); 677static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
683static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int); 678static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
@@ -770,20 +765,29 @@ static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
770 memset(hw, 0, sizeof(*hw)); 765 memset(hw, 0, sizeof(*hw));
771 766
772 for (i = 0; i <= 7; i++) 767 for (i = 0; i <= 7; i++)
773 hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1); 768 hw->io_ports_array[i] = base + cris_ide_reg_addr(i, 0, 1);
774 769
775 /* 770 /*
776 * the IDE control register is at ATA address 6, 771 * the IDE control register is at ATA address 6,
777 * with CS1 active instead of CS0 772 * with CS1 active instead of CS0
778 */ 773 */
779 hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0); 774 hw->io_ports.ctl_addr = base + cris_ide_reg_addr(6, 1, 0);
780 775
781 hw->irq = ide_default_irq(0); 776 hw->irq = ide_default_irq(0);
782 hw->ack_intr = cris_ide_ack_intr; 777 hw->ack_intr = cris_ide_ack_intr;
783} 778}
784 779
780static const struct ide_port_ops cris_port_ops = {
781 .set_pio_mode = cris_set_pio_mode,
782 .set_dma_mode = cris_set_dma_mode,
783};
784
785static const struct ide_dma_ops cris_dma_ops;
786
785static const struct ide_port_info cris_port_info __initdata = { 787static const struct ide_port_info cris_port_info __initdata = {
786 .chipset = ide_etrax100, 788 .chipset = ide_etrax100,
789 .port_ops = &cris_port_ops,
790 .dma_ops = &cris_dma_ops,
787 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 791 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
788 IDE_HFLAG_NO_DMA, /* no SFF-style DMA */ 792 IDE_HFLAG_NO_DMA, /* no SFF-style DMA */
789 .pio_mask = ATA_PIO4, 793 .pio_mask = ATA_PIO4,
@@ -804,24 +808,16 @@ static int __init init_e100_ide(void)
804 808
805 cris_setup_ports(&hw, cris_ide_base_address(h)); 809 cris_setup_ports(&hw, cris_ide_base_address(h));
806 810
807 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 811 hwif = ide_find_port();
808 if (hwif == NULL) 812 if (hwif == NULL)
809 continue; 813 continue;
810 ide_init_port_data(hwif, hwif->index); 814 ide_init_port_data(hwif, hwif->index);
811 ide_init_port_hw(hwif, &hw); 815 ide_init_port_hw(hwif, &hw);
812 hwif->mmio = 1; 816
813 hwif->set_pio_mode = &cris_set_pio_mode;
814 hwif->set_dma_mode = &cris_set_dma_mode;
815 hwif->ata_input_data = &cris_ide_input_data; 817 hwif->ata_input_data = &cris_ide_input_data;
816 hwif->ata_output_data = &cris_ide_output_data; 818 hwif->ata_output_data = &cris_ide_output_data;
817 hwif->atapi_input_bytes = &cris_atapi_input_bytes; 819 hwif->atapi_input_bytes = &cris_atapi_input_bytes;
818 hwif->atapi_output_bytes = &cris_atapi_output_bytes; 820 hwif->atapi_output_bytes = &cris_atapi_output_bytes;
819 hwif->dma_host_set = &cris_dma_host_set;
820 hwif->ide_dma_end = &cris_dma_end;
821 hwif->dma_setup = &cris_dma_setup;
822 hwif->dma_exec_cmd = &cris_dma_exec_cmd;
823 hwif->ide_dma_test_irq = &cris_dma_test_irq;
824 hwif->dma_start = &cris_dma_start;
825 hwif->OUTB = &cris_ide_outb; 821 hwif->OUTB = &cris_ide_outb;
826 hwif->OUTW = &cris_ide_outw; 822 hwif->OUTW = &cris_ide_outw;
827 hwif->OUTBSYNC = &cris_ide_outbsync; 823 hwif->OUTBSYNC = &cris_ide_outbsync;
@@ -1076,6 +1072,15 @@ static void cris_dma_start(ide_drive_t *drive)
1076 } 1072 }
1077} 1073}
1078 1074
1075static const struct ide_dma_ops cris_dma_ops = {
1076 .dma_host_set = cris_dma_host_set,
1077 .dma_setup = cris_dma_setup,
1078 .dma_exec_cmd = cris_dma_exec_cmd,
1079 .dma_start = cris_dma_start,
1080 .dma_end = cris_dma_end,
1081 .dma_test_irq = cris_dma_test_irq,
1082};
1083
1079module_init(init_e100_ide); 1084module_init(init_e100_ide);
1080 1085
1081MODULE_LICENSE("GPL"); 1086MODULE_LICENSE("GPL");
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 4108ec4ffa7f..fd23f12e17aa 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -63,9 +63,9 @@ static inline void hw_setup(hw_regs_t *hw)
63 int i; 63 int i;
64 64
65 memset(hw, 0, sizeof(hw_regs_t)); 65 memset(hw, 0, sizeof(hw_regs_t));
66 for (i = 0; i <= IDE_STATUS_OFFSET; i++) 66 for (i = 0; i <= 7; i++)
67 hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; 67 hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
68 hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT; 68 hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
69 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; 69 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
70 hw->chipset = ide_generic; 70 hw->chipset = ide_generic;
71} 71}
@@ -74,7 +74,6 @@ static inline void hwif_setup(ide_hwif_t *hwif)
74{ 74{
75 default_hwif_iops(hwif); 75 default_hwif_iops(hwif);
76 76
77 hwif->mmio = 1;
78 hwif->OUTW = mm_outw; 77 hwif->OUTW = mm_outw;
79 hwif->OUTSW = mm_outsw; 78 hwif->OUTSW = mm_outsw;
80 hwif->INW = mm_inw; 79 hwif->INW = mm_inw;
@@ -99,8 +98,7 @@ static int __init h8300_ide_init(void)
99 98
100 hw_setup(&hw); 99 hw_setup(&hw);
101 100
102 /* register if */ 101 hwif = ide_find_port();
103 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
104 if (hwif == NULL) { 102 if (hwif == NULL) {
105 printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); 103 printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
106 return -ENOENT; 104 return -ENOENT;
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 0f6fb6b72dd9..9d3601fa5680 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -55,14 +55,22 @@ struct ide_acpi_hwif_link {
55/* note: adds function name and KERN_DEBUG */ 55/* note: adds function name and KERN_DEBUG */
56#ifdef DEBUGGING 56#ifdef DEBUGGING
57#define DEBPRINT(fmt, args...) \ 57#define DEBPRINT(fmt, args...) \
58 printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args) 58 printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
59#else 59#else
60#define DEBPRINT(fmt, args...) do {} while (0) 60#define DEBPRINT(fmt, args...) do {} while (0)
61#endif /* DEBUGGING */ 61#endif /* DEBUGGING */
62 62
63extern int ide_noacpi; 63int ide_noacpi;
64extern int ide_noacpitfs; 64module_param_named(noacpi, ide_noacpi, bool, 0);
65extern int ide_noacpionboot; 65MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
66
67int ide_acpigtf;
68module_param_named(acpigtf, ide_acpigtf, bool, 0);
69MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
70
71int ide_acpionboot;
72module_param_named(acpionboot, ide_acpionboot, bool, 0);
73MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
66 74
67static bool ide_noacpi_psx; 75static bool ide_noacpi_psx;
68static int no_acpi_psx(const struct dmi_system_id *id) 76static int no_acpi_psx(const struct dmi_system_id *id)
@@ -309,7 +317,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
309 if (ACPI_FAILURE(status)) { 317 if (ACPI_FAILURE(status)) {
310 printk(KERN_DEBUG 318 printk(KERN_DEBUG
311 "%s: Run _GTF error: status = 0x%x\n", 319 "%s: Run _GTF error: status = 0x%x\n",
312 __FUNCTION__, status); 320 __func__, status);
313 goto out; 321 goto out;
314 } 322 }
315 323
@@ -335,7 +343,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
335 out_obj->buffer.length % REGS_PER_GTF) { 343 out_obj->buffer.length % REGS_PER_GTF) {
336 printk(KERN_ERR 344 printk(KERN_ERR
337 "%s: unexpected GTF length (%d) or addr (0x%p)\n", 345 "%s: unexpected GTF length (%d) or addr (0x%p)\n",
338 __FUNCTION__, out_obj->buffer.length, 346 __func__, out_obj->buffer.length,
339 out_obj->buffer.pointer); 347 out_obj->buffer.pointer);
340 err = -ENOENT; 348 err = -ENOENT;
341 kfree(output.pointer); 349 kfree(output.pointer);
@@ -376,7 +384,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
376 memcpy(&args.tf_array[7], &gtf->tfa, 7); 384 memcpy(&args.tf_array[7], &gtf->tfa, 7);
377 args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 385 args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
378 386
379 if (ide_noacpitfs) { 387 if (!ide_acpigtf) {
380 DEBPRINT("_GTF execution disabled\n"); 388 DEBPRINT("_GTF execution disabled\n");
381 return err; 389 return err;
382 } 390 }
@@ -384,7 +392,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
384 err = ide_no_data_taskfile(drive, &args); 392 err = ide_no_data_taskfile(drive, &args);
385 if (err) 393 if (err)
386 printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n", 394 printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
387 __FUNCTION__, err); 395 __func__, err);
388 396
389 return err; 397 return err;
390} 398}
@@ -422,7 +430,7 @@ static int do_drive_set_taskfiles(ide_drive_t *drive,
422 430
423 if (gtf_length % REGS_PER_GTF) { 431 if (gtf_length % REGS_PER_GTF) {
424 printk(KERN_ERR "%s: unexpected GTF length (%d)\n", 432 printk(KERN_ERR "%s: unexpected GTF length (%d)\n",
425 __FUNCTION__, gtf_length); 433 __func__, gtf_length);
426 goto out; 434 goto out;
427 } 435 }
428 436
@@ -547,7 +555,7 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
547 printk(KERN_ERR 555 printk(KERN_ERR
548 "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or " 556 "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
549 "addr (0x%p)\n", 557 "addr (0x%p)\n",
550 __FUNCTION__, out_obj->buffer.length, 558 __func__, out_obj->buffer.length,
551 sizeof(struct GTM_buffer), out_obj->buffer.pointer); 559 sizeof(struct GTM_buffer), out_obj->buffer.pointer);
552 return; 560 return;
553 } 561 }
@@ -721,7 +729,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
721 drive->name, err); 729 drive->name, err);
722 } 730 }
723 731
724 if (ide_noacpionboot) { 732 if (!ide_acpionboot) {
725 DEBPRINT("ACPI methods disabled on boot\n"); 733 DEBPRINT("ACPI methods disabled on boot\n");
726 return; 734 return;
727 } 735 }
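
The ide-acpi hunk replaces the extern flags previously parsed elsewhere with module parameters owned by this file, inverting the negative options into acpigtf/acpionboot, and switches the debug strings from __FUNCTION__ to the standard __func__. The module_param_named() idiom it relies on binds a command-line name to a differently named C variable; a minimal, hypothetical example:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* "noacpi" on the command line is bound to example_noacpi in C. */
	static int example_noacpi;
	module_param_named(noacpi, example_noacpi, bool, 0);
	MODULE_PARM_DESC(noacpi, "disable ACPI support in the example driver");

At load time the option is passed as noacpi=1 against whichever module the file is linked into (module name assumed here).
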
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index fe5aefbf8339..b34fd2bde96f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -13,8 +13,8 @@
13 * 13 *
14 * Suggestions are welcome. Patches that work are more welcome though. ;-) 14 * Suggestions are welcome. Patches that work are more welcome though. ;-)
15 * For those wishing to work on this driver, please be sure you download 15 * For those wishing to work on this driver, please be sure you download
16 * and comply with the latest Mt. Fuji (SFF8090 version 4) and ATAPI 16 * and comply with the latest Mt. Fuji (SFF8090 version 4) and ATAPI
17 * (SFF-8020i rev 2.6) standards. These documents can be obtained by 17 * (SFF-8020i rev 2.6) standards. These documents can be obtained by
18 * anonymous ftp from: 18 * anonymous ftp from:
19 * ftp://fission.dt.wdc.com/pub/standards/SFF_atapi/spec/SFF8020-r2.6/PS/8020r26.ps 19 * ftp://fission.dt.wdc.com/pub/standards/SFF_atapi/spec/SFF8020-r2.6/PS/8020r26.ps
20 * ftp://ftp.avc-pioneer.com/Mtfuji4/Spec/Fuji4r10.pdf 20 * ftp://ftp.avc-pioneer.com/Mtfuji4/Spec/Fuji4r10.pdf
@@ -39,19 +39,20 @@
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/bcd.h> 40#include <linux/bcd.h>
41 41
42#include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */ 42/* For SCSI -> ATAPI command conversion */
43#include <scsi/scsi.h>
43 44
44#include <asm/irq.h> 45#include <linux/irq.h>
45#include <asm/io.h> 46#include <linux/io.h>
46#include <asm/byteorder.h> 47#include <asm/byteorder.h>
47#include <asm/uaccess.h> 48#include <linux/uaccess.h>
48#include <asm/unaligned.h> 49#include <asm/unaligned.h>
49 50
50#include "ide-cd.h" 51#include "ide-cd.h"
51 52
52static DEFINE_MUTEX(idecd_ref_mutex); 53static DEFINE_MUTEX(idecd_ref_mutex);
53 54
54#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref) 55#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref)
55 56
56#define ide_cd_g(disk) \ 57#define ide_cd_g(disk) \
57 container_of((disk)->private_data, struct cdrom_info, driver) 58 container_of((disk)->private_data, struct cdrom_info, driver)
@@ -77,19 +78,17 @@ static void ide_cd_put(struct cdrom_info *cd)
77 mutex_unlock(&idecd_ref_mutex); 78 mutex_unlock(&idecd_ref_mutex);
78} 79}
79 80
80/**************************************************************************** 81/*
81 * Generic packet command support and error handling routines. 82 * Generic packet command support and error handling routines.
82 */ 83 */
83 84
84/* Mark that we've seen a media change, and invalidate our internal 85/* Mark that we've seen a media change and invalidate our internal buffers. */
85 buffers. */ 86static void cdrom_saw_media_change(ide_drive_t *drive)
86static void cdrom_saw_media_change (ide_drive_t *drive)
87{ 87{
88 struct cdrom_info *cd = drive->driver_data; 88 struct cdrom_info *cd = drive->driver_data;
89 89
90 cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED; 90 cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
91 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID; 91 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
92 cd->nsectors_buffered = 0;
93} 92}
94 93
95static int cdrom_log_sense(ide_drive_t *drive, struct request *rq, 94static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -101,44 +100,43 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
101 return 0; 100 return 0;
102 101
103 switch (sense->sense_key) { 102 switch (sense->sense_key) {
104 case NO_SENSE: case RECOVERED_ERROR: 103 case NO_SENSE:
105 break; 104 case RECOVERED_ERROR:
106 case NOT_READY: 105 break;
107 /* 106 case NOT_READY:
108 * don't care about tray state messages for 107 /*
109 * e.g. capacity commands or in-progress or 108 * don't care about tray state messages for e.g. capacity
110 * becoming ready 109 * commands or in-progress or becoming ready
111 */ 110 */
112 if (sense->asc == 0x3a || sense->asc == 0x04) 111 if (sense->asc == 0x3a || sense->asc == 0x04)
113 break;
114 log = 1;
115 break;
116 case ILLEGAL_REQUEST:
117 /*
118 * don't log START_STOP unit with LoEj set, since
119 * we cannot reliably check if drive can auto-close
120 */
121 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
122 break;
123 log = 1;
124 break;
125 case UNIT_ATTENTION:
126 /*
127 * Make good and sure we've seen this potential media
128 * change. Some drives (i.e. Creative) fail to present
129 * the correct sense key in the error register.
130 */
131 cdrom_saw_media_change(drive);
132 break; 112 break;
133 default: 113 log = 1;
134 log = 1; 114 break;
115 case ILLEGAL_REQUEST:
116 /*
117 * don't log START_STOP unit with LoEj set, since we cannot
118 * reliably check if drive can auto-close
119 */
120 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
135 break; 121 break;
122 log = 1;
123 break;
124 case UNIT_ATTENTION:
125 /*
126 * Make good and sure we've seen this potential media change.
127 * Some drives (i.e. Creative) fail to present the correct sense
128 * key in the error register.
129 */
130 cdrom_saw_media_change(drive);
131 break;
132 default:
133 log = 1;
134 break;
136 } 135 }
137 return log; 136 return log;
138} 137}
139 138
140static 139static void cdrom_analyze_sense_data(ide_drive_t *drive,
141void cdrom_analyze_sense_data(ide_drive_t *drive,
142 struct request *failed_command, 140 struct request *failed_command,
143 struct request_sense *sense) 141 struct request_sense *sense)
144{ 142{
@@ -151,16 +149,17 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
151 return; 149 return;
152 150
153 /* 151 /*
154 * If a read toc is executed for a CD-R or CD-RW medium where 152 * If a read toc is executed for a CD-R or CD-RW medium where the first
155 * the first toc has not been recorded yet, it will fail with 153 * toc has not been recorded yet, it will fail with 05/24/00 (which is a
156 * 05/24/00 (which is a confusing error) 154 * confusing error)
157 */ 155 */
158 if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) 156 if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
159 if (sense->sense_key == 0x05 && sense->asc == 0x24) 157 if (sense->sense_key == 0x05 && sense->asc == 0x24)
160 return; 158 return;
161 159
162 if (sense->error_code == 0x70) { /* Current Error */ 160 /* current error */
163 switch(sense->sense_key) { 161 if (sense->error_code == 0x70) {
162 switch (sense->sense_key) {
164 case MEDIUM_ERROR: 163 case MEDIUM_ERROR:
165 case VOLUME_OVERFLOW: 164 case VOLUME_OVERFLOW:
166 case ILLEGAL_REQUEST: 165 case ILLEGAL_REQUEST:
@@ -178,25 +177,23 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
178 if (bio_sectors < 4) 177 if (bio_sectors < 4)
179 bio_sectors = 4; 178 bio_sectors = 4;
180 if (drive->queue->hardsect_size == 2048) 179 if (drive->queue->hardsect_size == 2048)
181 sector <<= 2; /* Device sector size is 2K */ 180 /* device sector size is 2K */
182 sector &= ~(bio_sectors -1); 181 sector <<= 2;
182 sector &= ~(bio_sectors - 1);
183 valid = (sector - failed_command->sector) << 9; 183 valid = (sector - failed_command->sector) << 9;
184 184
185 if (valid < 0) 185 if (valid < 0)
186 valid = 0; 186 valid = 0;
187 if (sector < get_capacity(info->disk) && 187 if (sector < get_capacity(info->disk) &&
188 drive->probed_capacity - sector < 4 * 75) { 188 drive->probed_capacity - sector < 4 * 75)
189 set_capacity(info->disk, sector); 189 set_capacity(info->disk, sector);
190 } 190 }
191 } 191 }
192 }
193 192
194 ide_cd_log_error(drive->name, failed_command, sense); 193 ide_cd_log_error(drive->name, failed_command, sense);
195} 194}
196 195
197/* 196/* Initialize a ide-cd packet command request */
198 * Initialize a ide-cd packet command request
199 */
200void ide_cd_init_rq(ide_drive_t *drive, struct request *rq) 197void ide_cd_init_rq(ide_drive_t *drive, struct request *rq)
201{ 198{
202 struct cdrom_info *cd = drive->driver_data; 199 struct cdrom_info *cd = drive->driver_data;
@@ -220,7 +217,8 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
220 217
221 rq->data = sense; 218 rq->data = sense;
222 rq->cmd[0] = GPCMD_REQUEST_SENSE; 219 rq->cmd[0] = GPCMD_REQUEST_SENSE;
223 rq->cmd[4] = rq->data_len = 18; 220 rq->cmd[4] = 18;
221 rq->data_len = 18;
224 222
225 rq->cmd_type = REQ_TYPE_SENSE; 223 rq->cmd_type = REQ_TYPE_SENSE;
226 224
@@ -230,7 +228,7 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
230 (void) ide_do_drive_cmd(drive, rq, ide_preempt); 228 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
231} 229}
232 230
233static void cdrom_end_request (ide_drive_t *drive, int uptodate) 231static void cdrom_end_request(ide_drive_t *drive, int uptodate)
234{ 232{
235 struct request *rq = HWGROUP(drive)->rq; 233 struct request *rq = HWGROUP(drive)->rq;
236 int nsectors = rq->hard_cur_sectors; 234 int nsectors = rq->hard_cur_sectors;
@@ -252,7 +250,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
252 } 250 }
253 cdrom_analyze_sense_data(drive, failed, sense); 251 cdrom_analyze_sense_data(drive, failed, sense);
254 /* 252 /*
255 * now end failed request 253 * now end the failed request
256 */ 254 */
257 if (blk_fs_request(failed)) { 255 if (blk_fs_request(failed)) {
258 if (ide_end_dequeued_request(drive, failed, 0, 256 if (ide_end_dequeued_request(drive, failed, 0,
@@ -280,21 +278,24 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
280 ide_end_request(drive, uptodate, nsectors); 278 ide_end_request(drive, uptodate, nsectors);
281} 279}
282 280
283static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 stat) 281static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
284{ 282{
285 if (stat & 0x80) 283 if (st & 0x80)
286 return; 284 return;
287 ide_dump_status(drive, msg, stat); 285 ide_dump_status(drive, msg, st);
288} 286}
289 287
290/* Returns 0 if the request should be continued. 288/*
291 Returns 1 if the request was ended. */ 289 * Returns:
290 * 0: if the request should be continued.
291 * 1: if the request was ended.
292 */
292static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 293static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
293{ 294{
294 struct request *rq = HWGROUP(drive)->rq; 295 struct request *rq = HWGROUP(drive)->rq;
295 int stat, err, sense_key; 296 int stat, err, sense_key;
296 297
297 /* Check for errors. */ 298 /* check for errors */
298 stat = ide_read_status(drive); 299 stat = ide_read_status(drive);
299 300
300 if (stat_ret) 301 if (stat_ret)
@@ -303,20 +304,22 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
303 if (OK_STAT(stat, good_stat, BAD_R_STAT)) 304 if (OK_STAT(stat, good_stat, BAD_R_STAT))
304 return 0; 305 return 0;
305 306
306 /* Get the IDE error register. */ 307 /* get the IDE error register */
307 err = ide_read_error(drive); 308 err = ide_read_error(drive);
308 sense_key = err >> 4; 309 sense_key = err >> 4;
309 310
310 if (rq == NULL) { 311 if (rq == NULL) {
311 printk("%s: missing rq in cdrom_decode_status\n", drive->name); 312 printk(KERN_ERR "%s: missing rq in %s\n",
313 drive->name, __func__);
312 return 1; 314 return 1;
313 } 315 }
314 316
315 if (blk_sense_request(rq)) { 317 if (blk_sense_request(rq)) {
316 /* We got an error trying to get sense info 318 /*
317 from the drive (probably while trying 319 * We got an error trying to get sense info from the drive
318 to recover from a former error). Just give up. */ 320 * (probably while trying to recover from a former error).
319 321 * Just give up.
322 */
320 rq->cmd_flags |= REQ_FAILED; 323 rq->cmd_flags |= REQ_FAILED;
321 cdrom_end_request(drive, 0); 324 cdrom_end_request(drive, 0);
322 ide_error(drive, "request sense failure", stat); 325 ide_error(drive, "request sense failure", stat);
@@ -332,28 +335,27 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
332 if (blk_pc_request(rq) && !rq->errors) 335 if (blk_pc_request(rq) && !rq->errors)
333 rq->errors = SAM_STAT_CHECK_CONDITION; 336 rq->errors = SAM_STAT_CHECK_CONDITION;
334 337
335 /* Check for tray open. */ 338 /* check for tray open */
336 if (sense_key == NOT_READY) { 339 if (sense_key == NOT_READY) {
337 cdrom_saw_media_change (drive); 340 cdrom_saw_media_change(drive);
338 } else if (sense_key == UNIT_ATTENTION) { 341 } else if (sense_key == UNIT_ATTENTION) {
339 /* Check for media change. */ 342 /* check for media change */
340 cdrom_saw_media_change (drive); 343 cdrom_saw_media_change(drive);
341 /*printk("%s: media changed\n",drive->name);*/
342 return 0; 344 return 0;
343 } else if ((sense_key == ILLEGAL_REQUEST) && 345 } else if (sense_key == ILLEGAL_REQUEST &&
344 (rq->cmd[0] == GPCMD_START_STOP_UNIT)) { 346 rq->cmd[0] == GPCMD_START_STOP_UNIT) {
345 /* 347 /*
346 * Don't print error message for this condition-- 348 * Don't print error message for this condition--
347 * SFF8090i indicates that 5/24/00 is the correct 349 * SFF8090i indicates that 5/24/00 is the correct
348 * response to a request to close the tray if the 350 * response to a request to close the tray if the
349 * drive doesn't have that capability. 351 * drive doesn't have that capability.
350 * cdrom_log_sense() knows this! 352 * cdrom_log_sense() knows this!
351 */ 353 */
352 } else if (!(rq->cmd_flags & REQ_QUIET)) { 354 } else if (!(rq->cmd_flags & REQ_QUIET)) {
353 /* Otherwise, print an error. */ 355 /* otherwise, print an error */
354 ide_dump_status(drive, "packet command error", stat); 356 ide_dump_status(drive, "packet command error", stat);
355 } 357 }
356 358
357 rq->cmd_flags |= REQ_FAILED; 359 rq->cmd_flags |= REQ_FAILED;
358 360
359 /* 361 /*
@@ -366,27 +368,30 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
366 } else if (blk_fs_request(rq)) { 368 } else if (blk_fs_request(rq)) {
367 int do_end_request = 0; 369 int do_end_request = 0;
368 370
369 /* Handle errors from READ and WRITE requests. */ 371 /* handle errors from READ and WRITE requests */
370 372
371 if (blk_noretry_request(rq)) 373 if (blk_noretry_request(rq))
372 do_end_request = 1; 374 do_end_request = 1;
373 375
374 if (sense_key == NOT_READY) { 376 if (sense_key == NOT_READY) {
375 /* Tray open. */ 377 /* tray open */
376 if (rq_data_dir(rq) == READ) { 378 if (rq_data_dir(rq) == READ) {
377 cdrom_saw_media_change (drive); 379 cdrom_saw_media_change(drive);
378 380
379 /* Fail the request. */ 381 /* fail the request */
380 printk ("%s: tray open\n", drive->name); 382 printk(KERN_ERR "%s: tray open\n", drive->name);
381 do_end_request = 1; 383 do_end_request = 1;
382 } else { 384 } else {
383 struct cdrom_info *info = drive->driver_data; 385 struct cdrom_info *info = drive->driver_data;
384 386
385 /* allow the drive 5 seconds to recover, some 387 /*
388 * Allow the drive 5 seconds to recover, some
386 * devices will return this error while flushing 389 * devices will return this error while flushing
387 * data from cache */ 390 * data from cache.
391 */
388 if (!rq->errors) 392 if (!rq->errors)
389 info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY; 393 info->write_timeout = jiffies +
394 ATAPI_WAIT_WRITE_BUSY;
390 rq->errors = 1; 395 rq->errors = 1;
391 if (time_after(jiffies, info->write_timeout)) 396 if (time_after(jiffies, info->write_timeout))
392 do_end_request = 1; 397 do_end_request = 1;
@@ -394,59 +399,68 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
394 unsigned long flags; 399 unsigned long flags;
395 400
396 /* 401 /*
397 * take a breather relying on the 402 * take a breather relying on the unplug
398 * unplug timer to kick us again 403 * timer to kick us again
399 */ 404 */
400 spin_lock_irqsave(&ide_lock, flags); 405 spin_lock_irqsave(&ide_lock, flags);
401 blk_plug_device(drive->queue); 406 blk_plug_device(drive->queue);
402 spin_unlock_irqrestore(&ide_lock,flags); 407 spin_unlock_irqrestore(&ide_lock,
408 flags);
403 return 1; 409 return 1;
404 } 410 }
405 } 411 }
406 } else if (sense_key == UNIT_ATTENTION) { 412 } else if (sense_key == UNIT_ATTENTION) {
407 /* Media change. */ 413 /* media change */
408 cdrom_saw_media_change (drive); 414 cdrom_saw_media_change(drive);
409 415
410 /* Arrange to retry the request. 416 /*
411 But be sure to give up if we've retried 417 * Arrange to retry the request but be sure to give up
412 too many times. */ 418 * if we've retried too many times.
419 */
413 if (++rq->errors > ERROR_MAX) 420 if (++rq->errors > ERROR_MAX)
414 do_end_request = 1; 421 do_end_request = 1;
415 } else if (sense_key == ILLEGAL_REQUEST || 422 } else if (sense_key == ILLEGAL_REQUEST ||
416 sense_key == DATA_PROTECT) { 423 sense_key == DATA_PROTECT) {
417 /* No point in retrying after an illegal 424 /*
418 request or data protect error.*/ 425 * No point in retrying after an illegal request or data
419 ide_dump_status_no_sense (drive, "command error", stat); 426 * protect error.
427 */
428 ide_dump_status_no_sense(drive, "command error", stat);
420 do_end_request = 1; 429 do_end_request = 1;
421 } else if (sense_key == MEDIUM_ERROR) { 430 } else if (sense_key == MEDIUM_ERROR) {
422 /* No point in re-trying a zillion times on a bad 431 /*
423 * sector... If we got here the error is not correctable */ 432 * No point in re-trying a zillion times on a bad
424 ide_dump_status_no_sense (drive, "media error (bad sector)", stat); 433 * sector. If we got here the error is not correctable.
434 */
435 ide_dump_status_no_sense(drive,
436 "media error (bad sector)",
437 stat);
425 do_end_request = 1; 438 do_end_request = 1;
426 } else if (sense_key == BLANK_CHECK) { 439 } else if (sense_key == BLANK_CHECK) {
427 /* Disk appears blank ?? */ 440 /* disk appears blank ?? */
428 ide_dump_status_no_sense (drive, "media error (blank)", stat); 441 ide_dump_status_no_sense(drive, "media error (blank)",
442 stat);
429 do_end_request = 1; 443 do_end_request = 1;
430 } else if ((err & ~ABRT_ERR) != 0) { 444 } else if ((err & ~ABRT_ERR) != 0) {
431 /* Go to the default handler 445 /* go to the default handler for other errors */
432 for other errors. */
433 ide_error(drive, "cdrom_decode_status", stat); 446 ide_error(drive, "cdrom_decode_status", stat);
434 return 1; 447 return 1;
435 } else if ((++rq->errors > ERROR_MAX)) { 448 } else if ((++rq->errors > ERROR_MAX)) {
436 /* We've racked up too many retries. Abort. */ 449 /* we've racked up too many retries, abort */
437 do_end_request = 1; 450 do_end_request = 1;
438 } 451 }
439 452
440 /* End a request through request sense analysis when we have 453 /*
441 sense data. We need this in order to perform end of media 454 * End a request through request sense analysis when we have
442 processing */ 455 * sense data. We need this in order to perform end of media
443 456 * processing.
457 */
444 if (do_end_request) 458 if (do_end_request)
445 goto end_request; 459 goto end_request;
446 460
447 /* 461 /*
448 * If we got a CHECK_CONDITION status, 462 * If we got a CHECK_CONDITION status, queue
449 * queue a request sense command. 463 * a request sense command.
450 */ 464 */
451 if (stat & ERR_STAT) 465 if (stat & ERR_STAT)
452 cdrom_queue_request_sense(drive, NULL, NULL); 466 cdrom_queue_request_sense(drive, NULL, NULL);
@@ -455,7 +469,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
455 cdrom_end_request(drive, 0); 469 cdrom_end_request(drive, 0);
456 } 470 }
457 471
458 /* Retry, or handle the next request. */ 472 /* retry, or handle the next request */
459 return 1; 473 return 1;
460 474
461end_request: 475end_request:
@@ -480,35 +494,37 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
480 unsigned long wait = 0; 494 unsigned long wait = 0;
481 495
482 /* 496 /*
483 * Some commands are *slow* and normally take a long time to 497 * Some commands are *slow* and normally take a long time to complete.
484 * complete. Usually we can use the ATAPI "disconnect" to bypass 498 * Usually we can use the ATAPI "disconnect" to bypass this, but not all
485 * this, but not all commands/drives support that. Let 499 * commands/drives support that. Let ide_timer_expiry keep polling us
486 * ide_timer_expiry keep polling us for these. 500 * for these.
487 */ 501 */
488 switch (rq->cmd[0]) { 502 switch (rq->cmd[0]) {
489 case GPCMD_BLANK: 503 case GPCMD_BLANK:
490 case GPCMD_FORMAT_UNIT: 504 case GPCMD_FORMAT_UNIT:
491 case GPCMD_RESERVE_RZONE_TRACK: 505 case GPCMD_RESERVE_RZONE_TRACK:
492 case GPCMD_CLOSE_TRACK: 506 case GPCMD_CLOSE_TRACK:
493 case GPCMD_FLUSH_CACHE: 507 case GPCMD_FLUSH_CACHE:
494 wait = ATAPI_WAIT_PC; 508 wait = ATAPI_WAIT_PC;
495 break; 509 break;
496 default: 510 default:
497 if (!(rq->cmd_flags & REQ_QUIET)) 511 if (!(rq->cmd_flags & REQ_QUIET))
498 printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]); 512 printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n",
499 wait = 0; 513 rq->cmd[0]);
500 break; 514 wait = 0;
515 break;
501 } 516 }
502 return wait; 517 return wait;
503} 518}
504 519
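Both the write_timeout handling earlier in cdrom_decode_status() and the expiry handler above rely on jiffies-style deadline arithmetic, where time_after() stays correct even when the tick counter wraps around. A minimal userspace sketch of that comparison idiom, assuming a 32-bit tick counter; tick_after() below is an illustrative stand-in, not the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "is a later than b?" check, modelled on the
 * time_after() idiom: order tick values via a signed difference. */
static int tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* counter about to wrap */
	uint32_t deadline = now + 20u;	/* wraps past zero */

	/* A naive "now > deadline" would misfire across the wrap;
	 * the signed-difference form still orders them correctly. */
	printf("%d\n", tick_after(now, deadline));		/* 0: not yet */
	printf("%d\n", tick_after(deadline + 1, deadline));	/* 1: expired */
	return 0;
}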
505/* Set up the device registers for transferring a packet command on DEV, 520/*
506 expecting to later transfer XFERLEN bytes. HANDLER is the routine 521 * Set up the device registers for transferring a packet command on DEV,
507 which actually transfers the command to the drive. If this is a 522 * expecting to later transfer XFERLEN bytes. HANDLER is the routine
508 drq_interrupt device, this routine will arrange for HANDLER to be 523 * which actually transfers the command to the drive. If this is a
509 called when the interrupt from the drive arrives. Otherwise, HANDLER 524 * drq_interrupt device, this routine will arrange for HANDLER to be
510 will be called immediately after the drive is prepared for the transfer. */ 525 * called when the interrupt from the drive arrives. Otherwise, HANDLER
511 526 * will be called immediately after the drive is prepared for the transfer.
527 */
512static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, 528static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
513 int xferlen, 529 int xferlen,
514 ide_handler_t *handler) 530 ide_handler_t *handler)
@@ -517,15 +533,15 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
517 struct cdrom_info *info = drive->driver_data; 533 struct cdrom_info *info = drive->driver_data;
518 ide_hwif_t *hwif = drive->hwif; 534 ide_hwif_t *hwif = drive->hwif;
519 535
520 /* Wait for the controller to be idle. */ 536 /* wait for the controller to be idle */
521 if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY)) 537 if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
522 return startstop; 538 return startstop;
523 539
524 /* FIXME: for Virtual DMA we must check harder */ 540 /* FIXME: for Virtual DMA we must check harder */
525 if (info->dma) 541 if (info->dma)
526 info->dma = !hwif->dma_setup(drive); 542 info->dma = !hwif->dma_ops->dma_setup(drive);
527 543
528 /* Set up the controller registers. */ 544 /* set up the controller registers */
529 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL | 545 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL |
530 IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma); 546 IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma);
531 547
@@ -535,7 +551,8 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
535 drive->waiting_for_dma = 0; 551 drive->waiting_for_dma = 0;
536 552
537 /* packet command */ 553 /* packet command */
538 ide_execute_command(drive, WIN_PACKETCMD, handler, ATAPI_WAIT_PC, cdrom_timer_expiry); 554 ide_execute_command(drive, WIN_PACKETCMD, handler,
555 ATAPI_WAIT_PC, cdrom_timer_expiry);
539 return ide_started; 556 return ide_started;
540 } else { 557 } else {
541 unsigned long flags; 558 unsigned long flags;
@@ -543,7 +560,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
543 /* packet command */ 560 /* packet command */
544 spin_lock_irqsave(&ide_lock, flags); 561 spin_lock_irqsave(&ide_lock, flags);
545 hwif->OUTBSYNC(drive, WIN_PACKETCMD, 562 hwif->OUTBSYNC(drive, WIN_PACKETCMD,
546 hwif->io_ports[IDE_COMMAND_OFFSET]); 563 hwif->io_ports.command_addr);
547 ndelay(400); 564 ndelay(400);
548 spin_unlock_irqrestore(&ide_lock, flags); 565 spin_unlock_irqrestore(&ide_lock, flags);
549 566
@@ -551,13 +568,14 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
551 } 568 }
552} 569}
553 570
554/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN. 571/*
555 The device registers must have already been prepared 572 * Send a packet command to DRIVE described by CMD_BUF and CMD_LEN. The device
556 by cdrom_start_packet_command. 573 * registers must have already been prepared by cdrom_start_packet_command.
557 HANDLER is the interrupt handler to call when the command completes 574 * HANDLER is the interrupt handler to call when the command completes or
558 or there's data ready. */ 575 * there's data ready.
576 */
559#define ATAPI_MIN_CDB_BYTES 12 577#define ATAPI_MIN_CDB_BYTES 12
560static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive, 578static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
561 struct request *rq, 579 struct request *rq,
562 ide_handler_t *handler) 580 ide_handler_t *handler)
563{ 581{
@@ -567,24 +585,26 @@ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
567 ide_startstop_t startstop; 585 ide_startstop_t startstop;
568 586
569 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 587 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
570 /* Here we should have been called after receiving an interrupt 588 /*
 571			   from the device. DRQ should now be set. */ 589		 * Here we should have been called after receiving an interrupt
 590		 * from the device. DRQ should now be set.
591 */
572 592
573 /* Check for errors. */ 593 /* check for errors */
574 if (cdrom_decode_status(drive, DRQ_STAT, NULL)) 594 if (cdrom_decode_status(drive, DRQ_STAT, NULL))
575 return ide_stopped; 595 return ide_stopped;
576 596
577 /* Ok, next interrupt will be DMA interrupt. */ 597 /* ok, next interrupt will be DMA interrupt */
578 if (info->dma) 598 if (info->dma)
579 drive->waiting_for_dma = 1; 599 drive->waiting_for_dma = 1;
580 } else { 600 } else {
581 /* Otherwise, we must wait for DRQ to get set. */ 601 /* otherwise, we must wait for DRQ to get set */
582 if (ide_wait_stat(&startstop, drive, DRQ_STAT, 602 if (ide_wait_stat(&startstop, drive, DRQ_STAT,
583 BUSY_STAT, WAIT_READY)) 603 BUSY_STAT, WAIT_READY))
584 return startstop; 604 return startstop;
585 } 605 }
586 606
587 /* Arm the interrupt handler. */ 607 /* arm the interrupt handler */
588 ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry); 608 ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry);
589 609
590 /* ATAPI commands get padded out to 12 bytes minimum */ 610 /* ATAPI commands get padded out to 12 bytes minimum */
@@ -592,20 +612,19 @@ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
592 if (cmd_len < ATAPI_MIN_CDB_BYTES) 612 if (cmd_len < ATAPI_MIN_CDB_BYTES)
593 cmd_len = ATAPI_MIN_CDB_BYTES; 613 cmd_len = ATAPI_MIN_CDB_BYTES;
594 614
595 /* Send the command to the device. */ 615 /* send the command to the device */
596 HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len); 616 HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len);
597 617
598 /* Start the DMA if need be */ 618 /* start the DMA if need be */
599 if (info->dma) 619 if (info->dma)
600 hwif->dma_start(drive); 620 hwif->dma_ops->dma_start(drive);
601 621
602 return ide_started; 622 return ide_started;
603} 623}
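cdrom_transfer_packet_command() pads every command descriptor block out to ATAPI_MIN_CDB_BYTES before handing it to the device. A small userspace sketch of that padding step, assuming a zero-filled 12-byte packet is what the device expects; pad_cdb() and the sample CDB are illustrative:

#include <stdio.h>
#include <string.h>

#define ATAPI_MIN_CDB_BYTES 12

/* Copy a possibly shorter CDB into a zero-padded 12-byte packet,
 * mirroring the "padded out to 12 bytes minimum" rule above. */
static void pad_cdb(const unsigned char *cmd, int cmd_len,
		    unsigned char out[ATAPI_MIN_CDB_BYTES])
{
	memset(out, 0, ATAPI_MIN_CDB_BYTES);
	if (cmd_len > ATAPI_MIN_CDB_BYTES)
		cmd_len = ATAPI_MIN_CDB_BYTES;
	memcpy(out, cmd, cmd_len);
}

int main(void)
{
	/* 6-byte START STOP UNIT with the start bit set */
	const unsigned char cdb6[6] = { 0x1b, 0, 0, 0, 0x01, 0 };
	unsigned char packet[ATAPI_MIN_CDB_BYTES];
	int i;

	pad_cdb(cdb6, sizeof(cdb6), packet);
	for (i = 0; i < ATAPI_MIN_CDB_BYTES; i++)
		printf("%02x ", packet[i]);
	printf("\n");	/* 1b 00 00 00 01 00 00 00 00 00 00 00 */
	return 0;
}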
604 624
605/**************************************************************************** 625/*
606 * Block read functions. 626 * Block read functions.
607 */ 627 */
608
609static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len) 628static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len)
610{ 629{
611 while (len > 0) { 630 while (len > 0) {
@@ -626,47 +645,6 @@ static void ide_cd_drain_data(ide_drive_t *drive, int nsects)
626} 645}
627 646
628/* 647/*
629 * Buffer up to SECTORS_TO_TRANSFER sectors from the drive in our sector
630 * buffer. Once the first sector is added, any subsequent sectors are
631 * assumed to be continuous (until the buffer is cleared). For the first
632 * sector added, SECTOR is its sector number. (SECTOR is then ignored until
633 * the buffer is cleared.)
634 */
635static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
636 int sectors_to_transfer)
637{
638 struct cdrom_info *info = drive->driver_data;
639
640 /* Number of sectors to read into the buffer. */
641 int sectors_to_buffer = min_t(int, sectors_to_transfer,
642 (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
643 info->nsectors_buffered);
644
645 char *dest;
646
647 /* If we couldn't get a buffer, don't try to buffer anything... */
648 if (info->buffer == NULL)
649 sectors_to_buffer = 0;
650
651 /* If this is the first sector in the buffer, remember its number. */
652 if (info->nsectors_buffered == 0)
653 info->sector_buffered = sector;
654
655 /* Read the data into the buffer. */
656 dest = info->buffer + info->nsectors_buffered * SECTOR_SIZE;
657 while (sectors_to_buffer > 0) {
658 HWIF(drive)->atapi_input_bytes(drive, dest, SECTOR_SIZE);
659 --sectors_to_buffer;
660 --sectors_to_transfer;
661 ++info->nsectors_buffered;
662 dest += SECTOR_SIZE;
663 }
664
665 /* Throw away any remaining data. */
666 ide_cd_drain_data(drive, sectors_to_transfer);
667}
668
669/*
670 * Check the contents of the interrupt reason register from the cdrom 648 * Check the contents of the interrupt reason register from the cdrom
671 * and attempt to recover if there are problems. Returns 0 if everything's 649 * and attempt to recover if there are problems. Returns 0 if everything's
672 * ok; nonzero if the request has been terminated. 650 * ok; nonzero if the request has been terminated.
@@ -684,22 +662,23 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
684 ide_hwif_t *hwif = drive->hwif; 662 ide_hwif_t *hwif = drive->hwif;
685 xfer_func_t *xf; 663 xfer_func_t *xf;
686 664
687 /* Whoops... */ 665 /* whoops... */
688 printk(KERN_ERR "%s: %s: wrong transfer direction!\n", 666 printk(KERN_ERR "%s: %s: wrong transfer direction!\n",
689 drive->name, __FUNCTION__); 667 drive->name, __func__);
690 668
691 xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes; 669 xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes;
692 ide_cd_pad_transfer(drive, xf, len); 670 ide_cd_pad_transfer(drive, xf, len);
693 } else if (rw == 0 && ireason == 1) { 671 } else if (rw == 0 && ireason == 1) {
694 /* Some drives (ASUS) seem to tell us that status 672 /*
695 * info is available. just get it and ignore. 673 * Some drives (ASUS) seem to tell us that status info is
674 * available. Just get it and ignore.
696 */ 675 */
697 (void)ide_read_status(drive); 676 (void)ide_read_status(drive);
698 return 0; 677 return 0;
699 } else { 678 } else {
700 /* Drive wants a command packet, or invalid ireason... */ 679 /* drive wants a command packet, or invalid ireason... */
701 printk(KERN_ERR "%s: %s: bad interrupt reason 0x%02x\n", 680 printk(KERN_ERR "%s: %s: bad interrupt reason 0x%02x\n",
702 drive->name, __FUNCTION__, ireason); 681 drive->name, __func__, ireason);
703 } 682 }
704 683
705 if (rq->cmd_type == REQ_TYPE_ATA_PC) 684 if (rq->cmd_type == REQ_TYPE_ATA_PC)
@@ -721,7 +700,7 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
721 return 0; 700 return 0;
722 701
723 printk(KERN_ERR "%s: %s: Bad transfer size %d\n", 702 printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
724 drive->name, __FUNCTION__, len); 703 drive->name, __func__, len);
725 704
726 if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES) 705 if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES)
727 printk(KERN_ERR " This drive is not supported by " 706 printk(KERN_ERR " This drive is not supported by "
@@ -734,72 +713,13 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
734 return 1; 713 return 1;
735} 714}
736 715
737/*
738 * Try to satisfy some of the current read request from our cached data.
739 * Returns nonzero if the request has been completed, zero otherwise.
740 */
741static int cdrom_read_from_buffer (ide_drive_t *drive)
742{
743 struct cdrom_info *info = drive->driver_data;
744 struct request *rq = HWGROUP(drive)->rq;
745 unsigned short sectors_per_frame;
746
747 sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS;
748
749 /* Can't do anything if there's no buffer. */
750 if (info->buffer == NULL) return 0;
751
752 /* Loop while this request needs data and the next block is present
753 in our cache. */
754 while (rq->nr_sectors > 0 &&
755 rq->sector >= info->sector_buffered &&
756 rq->sector < info->sector_buffered + info->nsectors_buffered) {
757 if (rq->current_nr_sectors == 0)
758 cdrom_end_request(drive, 1);
759
760 memcpy (rq->buffer,
761 info->buffer +
762 (rq->sector - info->sector_buffered) * SECTOR_SIZE,
763 SECTOR_SIZE);
764 rq->buffer += SECTOR_SIZE;
765 --rq->current_nr_sectors;
766 --rq->nr_sectors;
767 ++rq->sector;
768 }
769
770 /* If we've satisfied the current request,
771 terminate it successfully. */
772 if (rq->nr_sectors == 0) {
773 cdrom_end_request(drive, 1);
774 return -1;
775 }
776
777 /* Move on to the next buffer if needed. */
778 if (rq->current_nr_sectors == 0)
779 cdrom_end_request(drive, 1);
780
781 /* If this condition does not hold, then the kluge i use to
782 represent the number of sectors to skip at the start of a transfer
783 will fail. I think that this will never happen, but let's be
784 paranoid and check. */
785 if (rq->current_nr_sectors < bio_cur_sectors(rq->bio) &&
786 (rq->sector & (sectors_per_frame - 1))) {
787 printk(KERN_ERR "%s: cdrom_read_from_buffer: buffer botch (%ld)\n",
788 drive->name, (long)rq->sector);
789 cdrom_end_request(drive, 0);
790 return -1;
791 }
792
793 return 0;
794}
795
796static ide_startstop_t cdrom_newpc_intr(ide_drive_t *); 716static ide_startstop_t cdrom_newpc_intr(ide_drive_t *);
797 717
798/* 718/*
799 * Routine to send a read/write packet command to the drive. 719 * Routine to send a read/write packet command to the drive. This is usually
800 * This is usually called directly from cdrom_start_{read,write}(). 720 * called directly from cdrom_start_{read,write}(). However, for drq_interrupt
801 * However, for drq_interrupt devices, it is called from an interrupt 721 * devices, it is called from an interrupt when the drive is ready to accept
802 * when the drive is ready to accept the command. 722 * the command.
803 */ 723 */
804static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) 724static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
805{ 725{
@@ -821,11 +741,11 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
821 * is larger than the buffer size. 741 * is larger than the buffer size.
822 */ 742 */
823 if (nskip > 0) { 743 if (nskip > 0) {
824 /* Sanity check... */ 744 /* sanity check... */
825 if (rq->current_nr_sectors != 745 if (rq->current_nr_sectors !=
826 bio_cur_sectors(rq->bio)) { 746 bio_cur_sectors(rq->bio)) {
827 printk(KERN_ERR "%s: %s: buffer botch (%u)\n", 747 printk(KERN_ERR "%s: %s: buffer botch (%u)\n",
828 drive->name, __FUNCTION__, 748 drive->name, __func__,
829 rq->current_nr_sectors); 749 rq->current_nr_sectors);
830 cdrom_end_request(drive, 0); 750 cdrom_end_request(drive, 0);
831 return ide_stopped; 751 return ide_stopped;
@@ -838,10 +758,10 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
838 /* the immediate bit */ 758 /* the immediate bit */
839 rq->cmd[1] = 1 << 3; 759 rq->cmd[1] = 1 << 3;
840#endif 760#endif
841 /* Set up the command */ 761 /* set up the command */
842 rq->timeout = ATAPI_WAIT_PC; 762 rq->timeout = ATAPI_WAIT_PC;
843 763
844 /* Send the command to the drive and return. */ 764 /* send the command to the drive and return */
845 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); 765 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
846} 766}
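cdrom_start_rw_cont() derives nskip, the number of leading 512-byte sectors that sit inside a partially wanted CD frame, from the alignment of the request's start sector. A minimal sketch of that mask-based arithmetic, assuming 2048-byte frames; leading_skip() is an illustrative helper, not the driver's code:

#include <stdio.h>

#define SECTOR_SIZE	512
#define CD_FRAMESIZE	2048

/* Sectors to skip at the start of a transfer when the requested start
 * sector is not aligned to a CD frame boundary. */
static unsigned int leading_skip(unsigned long sector)
{
	unsigned int sectors_per_frame = CD_FRAMESIZE / SECTOR_SIZE; /* 4 */

	/* power-of-two modulo: sector % sectors_per_frame */
	return sector & (sectors_per_frame - 1);
}

int main(void)
{
	printf("%u\n", leading_skip(100));	/* 0: frame aligned */
	printf("%u\n", leading_skip(103));	/* 3: skip three sectors */
	return 0;
}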
847 767
@@ -849,7 +769,7 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
849#define IDECD_SEEK_TIMER (5 * WAIT_MIN_SLEEP) /* 100 ms */ 769#define IDECD_SEEK_TIMER (5 * WAIT_MIN_SLEEP) /* 100 ms */
850#define IDECD_SEEK_TIMEOUT (2 * WAIT_CMD) /* 20 sec */ 770#define IDECD_SEEK_TIMEOUT (2 * WAIT_CMD) /* 20 sec */
851 771
852static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive) 772static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
853{ 773{
854 struct cdrom_info *info = drive->driver_data; 774 struct cdrom_info *info = drive->driver_data;
855 int stat; 775 int stat;
@@ -861,19 +781,13 @@ static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive)
861 info->cd_flags |= IDE_CD_FLAG_SEEKING; 781 info->cd_flags |= IDE_CD_FLAG_SEEKING;
862 782
863 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) { 783 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
864 if (--retry == 0) { 784 if (--retry == 0)
865 /*
866 * this condition is far too common, to bother
867 * users about it
868 */
869 /* printk("%s: disabled DSC seek overlap\n", drive->name);*/
870 drive->dsc_overlap = 0; 785 drive->dsc_overlap = 0;
871 }
872 } 786 }
873 return ide_stopped; 787 return ide_stopped;
874} 788}
875 789
876static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive) 790static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
877{ 791{
878 struct request *rq = HWGROUP(drive)->rq; 792 struct request *rq = HWGROUP(drive)->rq;
879 sector_t frame = rq->sector; 793 sector_t frame = rq->sector;
@@ -888,36 +802,40 @@ static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive)
888 return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr); 802 return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
889} 803}
890 804
891static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block) 805static ide_startstop_t cdrom_start_seek(ide_drive_t *drive, unsigned int block)
892{ 806{
893 struct cdrom_info *info = drive->driver_data; 807 struct cdrom_info *info = drive->driver_data;
894 808
895 info->dma = 0; 809 info->dma = 0;
896 info->start_seek = jiffies; 810 info->start_seek = jiffies;
897 return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation); 811 return cdrom_start_packet_command(drive, 0,
812 cdrom_start_seek_continuation);
898} 813}
899 814
900/* Fix up a possibly partially-processed request so that we can 815/*
901 start it over entirely, or even put it back on the request queue. */ 816 * Fix up a possibly partially-processed request so that we can start it over
902static void restore_request (struct request *rq) 817 * entirely, or even put it back on the request queue.
818 */
819static void restore_request(struct request *rq)
903{ 820{
904 if (rq->buffer != bio_data(rq->bio)) { 821 if (rq->buffer != bio_data(rq->bio)) {
905 sector_t n = (rq->buffer - (char *) bio_data(rq->bio)) / SECTOR_SIZE; 822 sector_t n =
823 (rq->buffer - (char *)bio_data(rq->bio)) / SECTOR_SIZE;
906 824
907 rq->buffer = bio_data(rq->bio); 825 rq->buffer = bio_data(rq->bio);
908 rq->nr_sectors += n; 826 rq->nr_sectors += n;
909 rq->sector -= n; 827 rq->sector -= n;
910 } 828 }
911 rq->hard_cur_sectors = rq->current_nr_sectors = bio_cur_sectors(rq->bio); 829 rq->current_nr_sectors = bio_cur_sectors(rq->bio);
830 rq->hard_cur_sectors = rq->current_nr_sectors;
912 rq->hard_nr_sectors = rq->nr_sectors; 831 rq->hard_nr_sectors = rq->nr_sectors;
913 rq->hard_sector = rq->sector; 832 rq->hard_sector = rq->sector;
914 rq->q->prep_rq_fn(rq->q, rq); 833 rq->q->prep_rq_fn(rq->q, rq);
915} 834}
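restore_request() rewinds a partially processed request by computing from the buffer pointer how many sectors were already consumed and handing them back. A simplified userspace sketch of the same arithmetic; struct fake_rq and rewind_request() are illustrative stand-ins for the real struct request fields:

#include <stdio.h>

#define SECTOR_SIZE 512

/* Simplified stand-in for the fields restore_request() touches. */
struct fake_rq {
	char *bio_start;		/* start of the bio's data */
	char *buffer;			/* current position in that data */
	unsigned long sector;		/* current start sector */
	unsigned long nr_sectors;	/* sectors still to transfer */
};

/* Undo partial progress so the request can be restarted from its
 * original position. */
static void rewind_request(struct fake_rq *rq)
{
	unsigned long n = (rq->buffer - rq->bio_start) / SECTOR_SIZE;

	rq->buffer = rq->bio_start;
	rq->nr_sectors += n;
	rq->sector -= n;
}

int main(void)
{
	static char data[8 * SECTOR_SIZE];
	struct fake_rq rq = {
		.bio_start  = data,
		.buffer     = data + 3 * SECTOR_SIZE,	/* 3 sectors done */
		.sector     = 1003,
		.nr_sectors = 5,
	};

	rewind_request(&rq);
	printf("sector=%lu nr_sectors=%lu\n", rq.sector, rq.nr_sectors);
	/* prints: sector=1000 nr_sectors=8 */
	return 0;
}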
916 835
917/**************************************************************************** 836/*
918 * Execute all other packet commands. 837 * All other packet commands.
919 */ 838 */
920
921static void ide_cd_request_sense_fixup(struct request *rq) 839static void ide_cd_request_sense_fixup(struct request *rq)
922{ 840{
923 /* 841 /*
@@ -941,7 +859,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
941 if (rq->sense == NULL) 859 if (rq->sense == NULL)
942 rq->sense = &sense; 860 rq->sense = &sense;
943 861
944 /* Start of retry loop. */ 862 /* start of retry loop */
945 do { 863 do {
946 int error; 864 int error;
947 unsigned long time = jiffies; 865 unsigned long time = jiffies;
@@ -950,41 +868,45 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
950 error = ide_do_drive_cmd(drive, rq, ide_wait); 868 error = ide_do_drive_cmd(drive, rq, ide_wait);
951 time = jiffies - time; 869 time = jiffies - time;
952 870
953 /* FIXME: we should probably abort/retry or something 871 /*
954 * in case of failure */ 872 * FIXME: we should probably abort/retry or something in case of
873 * failure.
874 */
955 if (rq->cmd_flags & REQ_FAILED) { 875 if (rq->cmd_flags & REQ_FAILED) {
956 /* The request failed. Retry if it was due to a unit 876 /*
957 attention status 877 * The request failed. Retry if it was due to a unit
958 (usually means media was changed). */ 878 * attention status (usually means media was changed).
879 */
959 struct request_sense *reqbuf = rq->sense; 880 struct request_sense *reqbuf = rq->sense;
960 881
961 if (reqbuf->sense_key == UNIT_ATTENTION) 882 if (reqbuf->sense_key == UNIT_ATTENTION)
962 cdrom_saw_media_change(drive); 883 cdrom_saw_media_change(drive);
963 else if (reqbuf->sense_key == NOT_READY && 884 else if (reqbuf->sense_key == NOT_READY &&
964 reqbuf->asc == 4 && reqbuf->ascq != 4) { 885 reqbuf->asc == 4 && reqbuf->ascq != 4) {
965 /* The drive is in the process of loading 886 /*
966 a disk. Retry, but wait a little to give 887 * The drive is in the process of loading
967 the drive time to complete the load. */ 888 * a disk. Retry, but wait a little to give
889 * the drive time to complete the load.
890 */
968 ssleep(2); 891 ssleep(2);
969 } else { 892 } else {
970 /* Otherwise, don't retry. */ 893 /* otherwise, don't retry */
971 retries = 0; 894 retries = 0;
972 } 895 }
973 --retries; 896 --retries;
974 } 897 }
975 898
976 /* End of retry loop. */ 899 /* end of retry loop */
977 } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0); 900 } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0);
978 901
979 /* Return an error if the command failed. */ 902 /* return an error if the command failed */
980 return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0; 903 return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0;
981} 904}
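ide_cd_queue_pc() wraps submission in a bounded retry loop whose policy depends on the sense key: retry after a unit attention, pause while the drive is still loading a disc, give up on anything else. A simplified sketch of that control flow with a stubbed submit function; the helper names are illustrative and the NOT_READY case is simplified (the driver also checks asc/ascq):

#include <stdio.h>
#include <unistd.h>

/* Standard SCSI sense keys. */
#define NOT_READY	0x02
#define UNIT_ATTENTION	0x06

/* Stub: fail twice with UNIT ATTENTION, then succeed.
 * Returns 0 on success, otherwise a sense key. */
static int submit_packet_command(void)
{
	static int calls;

	return (++calls <= 2) ? UNIT_ATTENTION : 0;
}

/* Bounded retry loop in the spirit of the one above. */
static int queue_pc_with_retries(int retries)
{
	int sense;

	do {
		sense = submit_packet_command();
		if (sense == UNIT_ATTENTION) {
			/* media changed: just try again */
		} else if (sense == NOT_READY) {
			sleep(2);	/* drive still loading a disc */
		} else if (sense != 0) {
			break;		/* not worth retrying */
		}
	} while (sense != 0 && --retries >= 0);

	return sense ? -1 : 0;
}

int main(void)
{
	printf("result: %d\n", queue_pc_with_retries(3));	/* result: 0 */
	return 0;
}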
982 905
983/* 906/*
984 * Called from blk_end_request_callback() after the data of the request 907 * Called from blk_end_request_callback() after the data of the request is
985 * is completed and before the request is completed. 908 * completed and before the request itself is completed. By returning value '1',
986 * By returning value '1', blk_end_request_callback() returns immediately 909 * blk_end_request_callback() returns immediately without completing it.
987 * without completing the request.
988 */ 910 */
989static int cdrom_newpc_intr_dummy_cb(struct request *rq) 911static int cdrom_newpc_intr_dummy_cb(struct request *rq)
990{ 912{
@@ -1003,11 +925,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1003 unsigned int timeout; 925 unsigned int timeout;
1004 u8 lowcyl, highcyl; 926 u8 lowcyl, highcyl;
1005 927
1006 /* Check for errors. */ 928 /* check for errors */
1007 dma = info->dma; 929 dma = info->dma;
1008 if (dma) { 930 if (dma) {
1009 info->dma = 0; 931 info->dma = 0;
1010 dma_error = HWIF(drive)->ide_dma_end(drive); 932 dma_error = hwif->dma_ops->dma_end(drive);
1011 if (dma_error) { 933 if (dma_error) {
1012 printk(KERN_ERR "%s: DMA %s error\n", drive->name, 934 printk(KERN_ERR "%s: DMA %s error\n", drive->name,
1013 write ? "write" : "read"); 935 write ? "write" : "read");
@@ -1018,9 +940,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1018 if (cdrom_decode_status(drive, 0, &stat)) 940 if (cdrom_decode_status(drive, 0, &stat))
1019 return ide_stopped; 941 return ide_stopped;
1020 942
1021 /* 943 /* using dma, transfer is complete now */
1022 * using dma, transfer is complete now
1023 */
1024 if (dma) { 944 if (dma) {
1025 if (dma_error) 945 if (dma_error)
1026 return ide_error(drive, "dma error", stat); 946 return ide_error(drive, "dma error", stat);
@@ -1031,12 +951,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1031 goto end_request; 951 goto end_request;
1032 } 952 }
1033 953
1034 /* 954 /* ok we fall to pio :/ */
1035 * ok we fall to pio :/ 955 ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
1036 */ 956 lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
1037 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]) & 0x3; 957 highcyl = hwif->INB(hwif->io_ports.lbah_addr);
1038 lowcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
1039 highcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]);
1040 958
1041 len = lowcyl + (256 * highcyl); 959 len = lowcyl + (256 * highcyl);
1042 960
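The PIO path above reassembles the per-DRQ byte count from the two byte-count registers as lowcyl + 256 * highcyl. A trivial sketch of that reassembly, with register values hard-coded for illustration:

#include <stdio.h>

/* Rebuild a 16-bit ATAPI byte count from its low and high halves,
 * exactly as "len = lowcyl + (256 * highcyl)" does above. */
static unsigned int atapi_byte_count(unsigned char lowcyl,
				     unsigned char highcyl)
{
	return (unsigned int)lowcyl | ((unsigned int)highcyl << 8);
}

int main(void)
{
	/* e.g. a 2048-byte data phase: low 0x00, high 0x08 */
	printf("%u\n", atapi_byte_count(0x00, 0x08));	/* 2048 */
	return 0;
}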
@@ -1044,9 +962,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1044 if (thislen > len) 962 if (thislen > len)
1045 thislen = len; 963 thislen = len;
1046 964
1047 /* 965 /* If DRQ is clear, the command has completed. */
1048 * If DRQ is clear, the command has completed.
1049 */
1050 if ((stat & DRQ_STAT) == 0) { 966 if ((stat & DRQ_STAT) == 0) {
1051 if (blk_fs_request(rq)) { 967 if (blk_fs_request(rq)) {
1052 /* 968 /*
@@ -1057,7 +973,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1057 if (rq->current_nr_sectors > 0) { 973 if (rq->current_nr_sectors > 0) {
1058 printk(KERN_ERR "%s: %s: data underrun " 974 printk(KERN_ERR "%s: %s: data underrun "
1059 "(%d blocks)\n", 975 "(%d blocks)\n",
1060 drive->name, __FUNCTION__, 976 drive->name, __func__,
1061 rq->current_nr_sectors); 977 rq->current_nr_sectors);
1062 if (!write) 978 if (!write)
1063 rq->cmd_flags |= REQ_FAILED; 979 rq->cmd_flags |= REQ_FAILED;
@@ -1067,15 +983,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1067 return ide_stopped; 983 return ide_stopped;
1068 } else if (!blk_pc_request(rq)) { 984 } else if (!blk_pc_request(rq)) {
1069 ide_cd_request_sense_fixup(rq); 985 ide_cd_request_sense_fixup(rq);
1070 /* Complain if we still have data left to transfer. */ 986 /* complain if we still have data left to transfer */
1071 uptodate = rq->data_len ? 0 : 1; 987 uptodate = rq->data_len ? 0 : 1;
1072 } 988 }
1073 goto end_request; 989 goto end_request;
1074 } 990 }
1075 991
1076 /* 992 /* check which way to transfer data */
1077 * check which way to transfer data
1078 */
1079 if (ide_cd_check_ireason(drive, rq, len, ireason, write)) 993 if (ide_cd_check_ireason(drive, rq, len, ireason, write))
1080 return ide_stopped; 994 return ide_stopped;
1081 995
@@ -1111,16 +1025,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1111 xferfunc = HWIF(drive)->atapi_input_bytes; 1025 xferfunc = HWIF(drive)->atapi_input_bytes;
1112 } 1026 }
1113 1027
1114 /* 1028 /* transfer data */
1115 * transfer data
1116 */
1117 while (thislen > 0) { 1029 while (thislen > 0) {
1118 u8 *ptr = blk_fs_request(rq) ? NULL : rq->data; 1030 u8 *ptr = blk_fs_request(rq) ? NULL : rq->data;
1119 int blen = rq->data_len; 1031 int blen = rq->data_len;
1120 1032
1121 /* 1033 /* bio backed? */
1122 * bio backed?
1123 */
1124 if (rq->bio) { 1034 if (rq->bio) {
1125 if (blk_fs_request(rq)) { 1035 if (blk_fs_request(rq)) {
1126 ptr = rq->buffer; 1036 ptr = rq->buffer;
@@ -1134,11 +1044,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1134 if (!ptr) { 1044 if (!ptr) {
1135 if (blk_fs_request(rq) && !write) 1045 if (blk_fs_request(rq) && !write)
1136 /* 1046 /*
1137 * If the buffers are full, cache the rest 1047 * If the buffers are full, pipe the rest into
1138 * of the data in our internal buffer. 1048 * oblivion.
1139 */ 1049 */
1140 cdrom_buffer_sectors(drive, rq->sector, 1050 ide_cd_drain_data(drive, thislen >> 9);
1141 thislen >> 9);
1142 else { 1051 else {
1143 printk(KERN_ERR "%s: confused, missing data\n", 1052 printk(KERN_ERR "%s: confused, missing data\n",
1144 drive->name); 1053 drive->name);
@@ -1184,9 +1093,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1184 rq->sense_len += blen; 1093 rq->sense_len += blen;
1185 } 1094 }
1186 1095
1187 /* 1096 /* pad, if necessary */
1188 * pad, if necessary
1189 */
1190 if (!blk_fs_request(rq) && len > 0) 1097 if (!blk_fs_request(rq) && len > 0)
1191 ide_cd_pad_transfer(drive, xferfunc, len); 1098 ide_cd_pad_transfer(drive, xferfunc, len);
1192 1099
@@ -1230,9 +1137,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1230 queue_hardsect_size(drive->queue) >> SECTOR_BITS; 1137 queue_hardsect_size(drive->queue) >> SECTOR_BITS;
1231 1138
1232 if (write) { 1139 if (write) {
1233 /* 1140 /* disk has become write protected */
1234 * disk has become write protected
1235 */
1236 if (cd->disk->policy) { 1141 if (cd->disk->policy) {
1237 cdrom_end_request(drive, 0); 1142 cdrom_end_request(drive, 0);
1238 return ide_stopped; 1143 return ide_stopped;
@@ -1243,15 +1148,9 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1243 * weirdness which might be present in the request packet. 1148 * weirdness which might be present in the request packet.
1244 */ 1149 */
1245 restore_request(rq); 1150 restore_request(rq);
1246
1247 /* Satisfy whatever we can of this request from our cache. */
1248 if (cdrom_read_from_buffer(drive))
1249 return ide_stopped;
1250 } 1151 }
1251 1152
1252 /* 1153 /* use DMA, if possible / writes *must* be hardware frame aligned */
1253 * use DMA, if possible / writes *must* be hardware frame aligned
1254 */
1255 if ((rq->nr_sectors & (sectors_per_frame - 1)) || 1154 if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
1256 (rq->sector & (sectors_per_frame - 1))) { 1155 (rq->sector & (sectors_per_frame - 1))) {
1257 if (write) { 1156 if (write) {
@@ -1262,13 +1161,10 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1262 } else 1161 } else
1263 cd->dma = drive->using_dma; 1162 cd->dma = drive->using_dma;
1264 1163
1265 /* Clear the local sector buffer. */
1266 cd->nsectors_buffered = 0;
1267
1268 if (write) 1164 if (write)
1269 cd->devinfo.media_written = 1; 1165 cd->devinfo.media_written = 1;
1270 1166
1271 /* Start sending the read/write request to the drive. */ 1167 /* start sending the read/write request to the drive */
1272 return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont); 1168 return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont);
1273} 1169}
1274 1170
@@ -1293,12 +1189,11 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1293 1189
1294 info->dma = 0; 1190 info->dma = 0;
1295 1191
1296 /* 1192 /* sg request */
1297 * sg request
1298 */
1299 if (rq->bio) { 1193 if (rq->bio) {
1300 int mask = drive->queue->dma_alignment; 1194 int mask = drive->queue->dma_alignment;
1301 unsigned long addr = (unsigned long) page_address(bio_page(rq->bio)); 1195 unsigned long addr =
1196 (unsigned long)page_address(bio_page(rq->bio));
1302 1197
1303 info->dma = drive->using_dma; 1198 info->dma = drive->using_dma;
1304 1199
@@ -1312,15 +1207,16 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1312 info->dma = 0; 1207 info->dma = 0;
1313 } 1208 }
1314 1209
1315 /* Start sending the command to the drive. */ 1210 /* start sending the command to the drive */
1316 return cdrom_start_packet_command(drive, rq->data_len, cdrom_do_newpc_cont); 1211 return cdrom_start_packet_command(drive, rq->data_len,
1212 cdrom_do_newpc_cont);
1317} 1213}
1318 1214
1319/**************************************************************************** 1215/*
1320 * cdrom driver request routine. 1216 * cdrom driver request routine.
1321 */ 1217 */
1322static ide_startstop_t 1218static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq,
1323ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block) 1219 sector_t block)
1324{ 1220{
1325 ide_startstop_t action; 1221 ide_startstop_t action;
1326 struct cdrom_info *info = drive->driver_data; 1222 struct cdrom_info *info = drive->driver_data;
@@ -1332,16 +1228,21 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
1332 1228
1333 if ((stat & SEEK_STAT) != SEEK_STAT) { 1229 if ((stat & SEEK_STAT) != SEEK_STAT) {
1334 if (elapsed < IDECD_SEEK_TIMEOUT) { 1230 if (elapsed < IDECD_SEEK_TIMEOUT) {
1335 ide_stall_queue(drive, IDECD_SEEK_TIMER); 1231 ide_stall_queue(drive,
1232 IDECD_SEEK_TIMER);
1336 return ide_stopped; 1233 return ide_stopped;
1337 } 1234 }
1338 printk (KERN_ERR "%s: DSC timeout\n", drive->name); 1235 printk(KERN_ERR "%s: DSC timeout\n",
1236 drive->name);
1339 } 1237 }
1340 info->cd_flags &= ~IDE_CD_FLAG_SEEKING; 1238 info->cd_flags &= ~IDE_CD_FLAG_SEEKING;
1341 } 1239 }
1342 if ((rq_data_dir(rq) == READ) && IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) { 1240 if (rq_data_dir(rq) == READ &&
1241 IDE_LARGE_SEEK(info->last_block, block,
1242 IDECD_SEEK_THRESHOLD) &&
1243 drive->dsc_overlap)
1343 action = cdrom_start_seek(drive, block); 1244 action = cdrom_start_seek(drive, block);
1344 } else 1245 else
1345 action = cdrom_start_rw(drive, rq); 1246 action = cdrom_start_rw(drive, rq);
1346 info->last_block = block; 1247 info->last_block = block;
1347 return action; 1248 return action;
@@ -1349,9 +1250,7 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
1349 rq->cmd_type == REQ_TYPE_ATA_PC) { 1250 rq->cmd_type == REQ_TYPE_ATA_PC) {
1350 return cdrom_do_block_pc(drive, rq); 1251 return cdrom_do_block_pc(drive, rq);
1351 } else if (blk_special_request(rq)) { 1252 } else if (blk_special_request(rq)) {
1352 /* 1253 /* right now this can only be a reset... */
1353 * right now this can only be a reset...
1354 */
1355 cdrom_end_request(drive, 1); 1254 cdrom_end_request(drive, 1);
1356 return ide_stopped; 1255 return ide_stopped;
1357 } 1256 }
@@ -1363,18 +1262,16 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
1363 1262
1364 1263
1365 1264
1366/**************************************************************************** 1265/*
1367 * Ioctl handling. 1266 * Ioctl handling.
1368 * 1267 *
1369 * Routines which queue packet commands take as a final argument a pointer 1268 * Routines which queue packet commands take as a final argument a pointer to a
1370 * to a request_sense struct. If execution of the command results 1269 * request_sense struct. If execution of the command results in an error with a
1371 * in an error with a CHECK CONDITION status, this structure will be filled 1270 * CHECK CONDITION status, this structure will be filled with the results of the
1372 * with the results of the subsequent request sense command. The pointer 1271 * subsequent request sense command. The pointer can also be NULL, in which case
1373 * can also be NULL, in which case no sense information is returned. 1272 * no sense information is returned.
1374 */ 1273 */
1375 1274static void msf_from_bcd(struct atapi_msf *msf)
1376static
1377void msf_from_bcd (struct atapi_msf *msf)
1378{ 1275{
1379 msf->minute = BCD2BIN(msf->minute); 1276 msf->minute = BCD2BIN(msf->minute);
1380 msf->second = BCD2BIN(msf->second); 1277 msf->second = BCD2BIN(msf->second);
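msf_from_bcd() converts the packed-BCD minute/second/frame fields to plain binary. A one-function sketch of the BCD-to-binary conversion it relies on, assuming the usual encoding of one decimal digit per nibble; bcd2bin() is an illustrative equivalent of the BCD2BIN macro:

#include <stdio.h>

/* Packed BCD to binary: low nibble is the ones digit, high nibble the
 * tens digit, so 0x59 becomes 59. */
static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
	printf("%u %u %u\n", bcd2bin(0x00), bcd2bin(0x25), bcd2bin(0x59));
	/* prints: 0 25 59 */
	return 0;
}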
@@ -1394,8 +1291,8 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
1394 req.cmd_flags |= REQ_QUIET; 1291 req.cmd_flags |= REQ_QUIET;
1395 1292
1396 /* 1293 /*
1397 * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to 1294 * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs
1398 * switch CDs instead of supporting the LOAD_UNLOAD opcode. 1295 * instead of supporting the LOAD_UNLOAD opcode.
1399 */ 1296 */
1400 req.cmd[7] = cdi->sanyo_slot % 3; 1297 req.cmd[7] = cdi->sanyo_slot % 3;
1401 1298
@@ -1471,36 +1368,39 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1471 unsigned long sectors_per_frame = SECTORS_PER_FRAME; 1368 unsigned long sectors_per_frame = SECTORS_PER_FRAME;
1472 1369
1473 if (toc == NULL) { 1370 if (toc == NULL) {
1474 /* Try to allocate space. */ 1371 /* try to allocate space */
1475 toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL); 1372 toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
1476 if (toc == NULL) { 1373 if (toc == NULL) {
1477 printk (KERN_ERR "%s: No cdrom TOC buffer!\n", drive->name); 1374 printk(KERN_ERR "%s: No cdrom TOC buffer!\n",
1375 drive->name);
1478 return -ENOMEM; 1376 return -ENOMEM;
1479 } 1377 }
1480 info->toc = toc; 1378 info->toc = toc;
1481 } 1379 }
1482 1380
1483 /* Check to see if the existing data is still valid. 1381 /*
1484 If it is, just return. */ 1382 * Check to see if the existing data is still valid. If it is,
1383 * just return.
1384 */
1485 (void) cdrom_check_status(drive, sense); 1385 (void) cdrom_check_status(drive, sense);
1486 1386
1487 if (info->cd_flags & IDE_CD_FLAG_TOC_VALID) 1387 if (info->cd_flags & IDE_CD_FLAG_TOC_VALID)
1488 return 0; 1388 return 0;
1489 1389
1490 /* Try to get the total cdrom capacity and sector size. */ 1390 /* try to get the total cdrom capacity and sector size */
1491 stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame, 1391 stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
1492 sense); 1392 sense);
1493 if (stat) 1393 if (stat)
1494 toc->capacity = 0x1fffff; 1394 toc->capacity = 0x1fffff;
1495 1395
1496 set_capacity(info->disk, toc->capacity * sectors_per_frame); 1396 set_capacity(info->disk, toc->capacity * sectors_per_frame);
1497 /* Save a private copy of te TOC capacity for error handling */ 1397 /* save a private copy of the TOC capacity for error handling */
1498 drive->probed_capacity = toc->capacity * sectors_per_frame; 1398 drive->probed_capacity = toc->capacity * sectors_per_frame;
1499 1399
1500 blk_queue_hardsect_size(drive->queue, 1400 blk_queue_hardsect_size(drive->queue,
1501 sectors_per_frame << SECTOR_BITS); 1401 sectors_per_frame << SECTOR_BITS);
1502 1402
1503 /* First read just the header, so we know how long the TOC is. */ 1403 /* first read just the header, so we know how long the TOC is */
1504 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, 1404 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
1505 sizeof(struct atapi_toc_header), sense); 1405 sizeof(struct atapi_toc_header), sense);
1506 if (stat) 1406 if (stat)
@@ -1517,7 +1417,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1517 if (ntracks > MAX_TRACKS) 1417 if (ntracks > MAX_TRACKS)
1518 ntracks = MAX_TRACKS; 1418 ntracks = MAX_TRACKS;
1519 1419
1520 /* Now read the whole schmeer. */ 1420 /* now read the whole schmeer */
1521 stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0, 1421 stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
1522 (char *)&toc->hdr, 1422 (char *)&toc->hdr,
1523 sizeof(struct atapi_toc_header) + 1423 sizeof(struct atapi_toc_header) +
@@ -1525,15 +1425,18 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1525 sizeof(struct atapi_toc_entry), sense); 1425 sizeof(struct atapi_toc_entry), sense);
1526 1426
1527 if (stat && toc->hdr.first_track > 1) { 1427 if (stat && toc->hdr.first_track > 1) {
 1528		/* CDs with CDI tracks only don't have any TOC entries, 1428		/*
 1529		   despite this, the returned values are 1429		 * CDs with CDI tracks only don't have any TOC entries, despite
 1530		   first_track == last_track = number of CDI tracks + 1, 1430		 * this, the returned values are
1531 so that this case is indistinguishable from the same 1431 * first_track == last_track = number of CDI tracks + 1,
1532 layout plus an additional audio track. 1432 * so that this case is indistinguishable from the same layout
1533 If we get an error for the regular case, we assume 1433 * plus an additional audio track. If we get an error for the
1534 a CDI without additional audio tracks. In this case 1434 * regular case, we assume a CDI without additional audio
1535 the readable TOC is empty (CDI tracks are not included) 1435 * tracks. In this case the readable TOC is empty (CDI tracks
1536 and only holds the Leadout entry. Heiko Eißfeldt */ 1436 * are not included) and only holds the Leadout entry.
1437 *
1438 * Heiko Eißfeldt.
1439 */
1537 ntracks = 0; 1440 ntracks = 0;
1538 stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0, 1441 stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
1539 (char *)&toc->hdr, 1442 (char *)&toc->hdr,
@@ -1569,14 +1472,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1569 toc->ent[i].track = BCD2BIN(toc->ent[i].track); 1472 toc->ent[i].track = BCD2BIN(toc->ent[i].track);
1570 msf_from_bcd(&toc->ent[i].addr.msf); 1473 msf_from_bcd(&toc->ent[i].addr.msf);
1571 } 1474 }
1572 toc->ent[i].addr.lba = msf_to_lba (toc->ent[i].addr.msf.minute, 1475 toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
1573 toc->ent[i].addr.msf.second, 1476 toc->ent[i].addr.msf.second,
1574 toc->ent[i].addr.msf.frame); 1477 toc->ent[i].addr.msf.frame);
1575 } 1478 }
1576 1479
1577 /* Read the multisession information. */
1578 if (toc->hdr.first_track != CDROM_LEADOUT) { 1480 if (toc->hdr.first_track != CDROM_LEADOUT) {
1579 /* Read the multisession information. */ 1481 /* read the multisession information */
1580 stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp, 1482 stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
1581 sizeof(ms_tmp), sense); 1483 sizeof(ms_tmp), sense);
1582 if (stat) 1484 if (stat)
@@ -1584,26 +1486,27 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1584 1486
1585 toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba); 1487 toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
1586 } else { 1488 } else {
1587 ms_tmp.hdr.first_track = ms_tmp.hdr.last_track = CDROM_LEADOUT; 1489 ms_tmp.hdr.last_track = CDROM_LEADOUT;
1490 ms_tmp.hdr.first_track = ms_tmp.hdr.last_track;
1588 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ 1491 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
1589 } 1492 }
1590 1493
1591 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1494 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
1592 /* Re-read multisession information using MSF format */ 1495 /* re-read multisession information using MSF format */
1593 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, 1496 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
1594 sizeof(ms_tmp), sense); 1497 sizeof(ms_tmp), sense);
1595 if (stat) 1498 if (stat)
1596 return stat; 1499 return stat;
1597 1500
1598 msf_from_bcd (&ms_tmp.ent.addr.msf); 1501 msf_from_bcd(&ms_tmp.ent.addr.msf);
1599 toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute, 1502 toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute,
1600 ms_tmp.ent.addr.msf.second, 1503 ms_tmp.ent.addr.msf.second,
1601 ms_tmp.ent.addr.msf.frame); 1504 ms_tmp.ent.addr.msf.frame);
1602 } 1505 }
1603 1506
1604 toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track); 1507 toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
1605 1508
1606 /* Now try to get the total cdrom capacity. */ 1509 /* now try to get the total cdrom capacity */
1607 stat = cdrom_get_last_written(cdi, &last_written); 1510 stat = cdrom_get_last_written(cdi, &last_written);
1608 if (!stat && (last_written > toc->capacity)) { 1511 if (!stat && (last_written > toc->capacity)) {
1609 toc->capacity = last_written; 1512 toc->capacity = last_written;
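The TOC entries read above are converted from MSF addressing to logical block addresses with msf_to_lba(). A minimal sketch of the conventional Red Book conversion, assuming 75 frames per second, 60 seconds per minute and the usual 150-frame (two second) lead-in offset; the constants and helper are illustrative:

#include <stdio.h>

#define CD_FRAMES	75	/* frames per second */
#define CD_SECS		60	/* seconds per minute */
#define CD_MSF_OFFSET	150	/* two second lead-in */

/* Conventional MSF -> LBA conversion for CD addressing. */
static long msf_to_lba(unsigned char min, unsigned char sec,
		       unsigned char frame)
{
	return ((long)min * CD_SECS + sec) * CD_FRAMES + frame
		- CD_MSF_OFFSET;
}

int main(void)
{
	/* 0m 2s 0f is LBA 0, the start of the data area. */
	printf("%ld\n", msf_to_lba(0, 2, 0));	/* 0 */
	printf("%ld\n", msf_to_lba(1, 0, 0));	/* 4350 */
	return 0;
}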
@@ -1628,7 +1531,8 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
1628 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; 1531 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
1629 1532
1630 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); 1533 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
1631 do { /* we seem to get stat=0x01,err=0x00 the first time (??) */ 1534 do {
1535 /* we seem to get stat=0x01,err=0x00 the first time (??) */
1632 stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0); 1536 stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1633 if (!stat) 1537 if (!stat)
1634 break; 1538 break;
@@ -1679,7 +1583,7 @@ static struct cdrom_device_ops ide_cdrom_dops = {
1679 .generic_packet = ide_cdrom_packet, 1583 .generic_packet = ide_cdrom_packet,
1680}; 1584};
1681 1585
1682static int ide_cdrom_register (ide_drive_t *drive, int nslots) 1586static int ide_cdrom_register(ide_drive_t *drive, int nslots)
1683{ 1587{
1684 struct cdrom_info *info = drive->driver_data; 1588 struct cdrom_info *info = drive->driver_data;
1685 struct cdrom_device_info *devinfo = &info->devinfo; 1589 struct cdrom_device_info *devinfo = &info->devinfo;
@@ -1697,8 +1601,7 @@ static int ide_cdrom_register (ide_drive_t *drive, int nslots)
1697 return register_cdrom(devinfo); 1601 return register_cdrom(devinfo);
1698} 1602}
1699 1603
1700static 1604static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1701int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1702{ 1605{
1703 struct cdrom_info *cd = drive->driver_data; 1606 struct cdrom_info *cd = drive->driver_data;
1704 struct cdrom_device_info *cdi = &cd->devinfo; 1607 struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1712,7 +1615,8 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1712 1615
1713 if (drive->media == ide_optical) { 1616 if (drive->media == ide_optical) {
1714 cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM); 1617 cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM);
1715 printk(KERN_ERR "%s: ATAPI magneto-optical drive\n", drive->name); 1618 printk(KERN_ERR "%s: ATAPI magneto-optical drive\n",
1619 drive->name);
1716 return nslots; 1620 return nslots;
1717 } 1621 }
1718 1622
@@ -1723,11 +1627,10 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1723 } 1627 }
1724 1628
1725 /* 1629 /*
 1726	 * we have to cheat a little here. the packet will eventually 1630	 * We have to cheat a little here. The packet will eventually be queued
1727 * be queued with ide_cdrom_packet(), which extracts the 1631 * with ide_cdrom_packet(), which extracts the drive from cdi->handle.
1728 * drive from cdi->handle. Since this device hasn't been 1632 * Since this device hasn't been registered with the Uniform layer yet,
1729 * registered with the Uniform layer yet, it can't do this. 1633 * it can't do this. Same goes for cdi->ops.
1730 * Same goes for cdi->ops.
1731 */ 1634 */
1732 cdi->handle = drive; 1635 cdi->handle = drive;
1733 cdi->ops = &ide_cdrom_dops; 1636 cdi->ops = &ide_cdrom_dops;
@@ -1796,18 +1699,7 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
1796 return nslots; 1699 return nslots;
1797} 1700}
1798 1701
1799#ifdef CONFIG_IDE_PROC_FS 1702/* standard prep_rq_fn that builds 10 byte cmds */
1800static void ide_cdrom_add_settings(ide_drive_t *drive)
1801{
1802 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
1803}
1804#else
1805static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
1806#endif
1807
1808/*
1809 * standard prep_rq_fn that builds 10 byte cmds
1810 */
1811static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 1703static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1812{ 1704{
1813 int hard_sect = queue_hardsect_size(q); 1705 int hard_sect = queue_hardsect_size(q);
@@ -1846,9 +1738,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
1846{ 1738{
1847 u8 *c = rq->cmd; 1739 u8 *c = rq->cmd;
1848 1740
1849 /* 1741 /* transform 6-byte read/write commands to the 10-byte version */
1850 * Transform 6-byte read/write commands to the 10-byte version
1851 */
1852 if (c[0] == READ_6 || c[0] == WRITE_6) { 1742 if (c[0] == READ_6 || c[0] == WRITE_6) {
1853 c[8] = c[4]; 1743 c[8] = c[4];
1854 c[5] = c[3]; 1744 c[5] = c[3];
@@ -1870,7 +1760,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
1870 rq->errors = ILLEGAL_REQUEST; 1760 rq->errors = ILLEGAL_REQUEST;
1871 return BLKPREP_KILL; 1761 return BLKPREP_KILL;
1872 } 1762 }
1873 1763
1874 return BLKPREP_OK; 1764 return BLKPREP_OK;
1875} 1765}
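ide_cdrom_prep_pc() rewrites 6-byte READ/WRITE CDBs into their 10-byte equivalents before they reach the drive. A userspace sketch of that repacking, completing the shuffle started above (c[8] = c[4]; c[5] = c[3]; ...) from the standard SCSI CDB layouts; cdb6_to_cdb10() is an illustrative helper and the driver's exact sequence may differ in detail:

#include <stdio.h>

#define READ_6	0x08
#define WRITE_6	0x0a
#define READ_10	0x28

/* Repack a 6-byte READ/WRITE CDB into the 10-byte form: the 21-bit LBA
 * moves into bytes 2..5, the 8-bit transfer length into byte 8. */
static int cdb6_to_cdb10(unsigned char c[10])
{
	if (c[0] != READ_6 && c[0] != WRITE_6)
		return -1;			/* not a 6-byte R/W command */

	c[8] = c[4];				/* transfer length */
	c[5] = c[3];				/* LBA bits  7:0  */
	c[4] = c[2];				/* LBA bits 15:8  */
	c[3] = c[1] & 0x1f;			/* LBA bits 20:16 */
	c[2] = 0;
	c[1] &= 0xe0;				/* keep only the LUN bits */
	c[0] += READ_10 - READ_6;		/* 0x08->0x28, 0x0a->0x2a */
	return 0;
}

int main(void)
{
	/* READ(6) of 8 blocks starting at LBA 0x012345 */
	unsigned char c[10] = { READ_6, 0x01, 0x23, 0x45, 0x08, 0x00 };
	int i;

	if (cdb6_to_cdb10(c) == 0) {
		for (i = 0; i < 10; i++)
			printf("%02x ", c[i]);
		printf("\n");	/* 28 00 00 01 23 45 00 00 08 00 */
	}
	return 0;
}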
1876 1766
@@ -1890,6 +1780,41 @@ struct cd_list_entry {
1890 unsigned int cd_flags; 1780 unsigned int cd_flags;
1891}; 1781};
1892 1782
1783#ifdef CONFIG_IDE_PROC_FS
1784static sector_t ide_cdrom_capacity(ide_drive_t *drive)
1785{
1786 unsigned long capacity, sectors_per_frame;
1787
1788 if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
1789 return 0;
1790
1791 return capacity * sectors_per_frame;
1792}
1793
1794static int proc_idecd_read_capacity(char *page, char **start, off_t off,
1795 int count, int *eof, void *data)
1796{
1797 ide_drive_t *drive = data;
1798 int len;
1799
1800 len = sprintf(page, "%llu\n", (long long)ide_cdrom_capacity(drive));
1801 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
1802}
1803
1804static ide_proc_entry_t idecd_proc[] = {
1805 { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
1806 { NULL, 0, NULL, NULL }
1807};
1808
1809static void ide_cdrom_add_settings(ide_drive_t *drive)
1810{
1811 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
1812 &drive->dsc_overlap, NULL);
1813}
1814#else
1815static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
1816#endif
1817
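ide_cdrom_capacity() above reports the size in 512-byte sectors by scaling the frame count from cdrom_read_capacity() by sectors_per_frame. A small sketch of that unit conversion, assuming 2048-byte frames; frames_to_sectors() is an illustrative helper:

#include <stdio.h>

#define SECTOR_SIZE	512
#define CD_FRAMESIZE	2048

/* Convert a capacity in CD frames into 512-byte sectors, the unit the
 * /proc "capacity" entry above reports. */
static unsigned long long frames_to_sectors(unsigned long frames)
{
	return (unsigned long long)frames * (CD_FRAMESIZE / SECTOR_SIZE);
}

int main(void)
{
	unsigned long frames = 360000;	/* roughly an 80 minute disc */

	printf("%llu sectors, %llu bytes\n",
	       frames_to_sectors(frames),
	       frames_to_sectors(frames) * SECTOR_SIZE);
	return 0;
}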
1893static const struct cd_list_entry ide_cd_quirks_list[] = { 1818static const struct cd_list_entry ide_cd_quirks_list[] = {
1894 /* Limit transfer size per interrupt. */ 1819 /* Limit transfer size per interrupt. */
1895 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1820 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES },
@@ -1947,8 +1872,7 @@ static unsigned int ide_cd_flags(struct hd_driveid *id)
1947 return 0; 1872 return 0;
1948} 1873}
1949 1874
1950static 1875static int ide_cdrom_setup(ide_drive_t *drive)
1951int ide_cdrom_setup (ide_drive_t *drive)
1952{ 1876{
1953 struct cdrom_info *cd = drive->driver_data; 1877 struct cdrom_info *cd = drive->driver_data;
1954 struct cdrom_device_info *cdi = &cd->devinfo; 1878 struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1977,21 +1901,19 @@ int ide_cdrom_setup (ide_drive_t *drive)
1977 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1901 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1978 cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD; 1902 cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD;
1979 else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD) 1903 else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD)
1980 cdi->sanyo_slot = 3; /* 3 => use CD in slot 0 */ 1904 /* 3 => use CD in slot 0 */
1905 cdi->sanyo_slot = 3;
1981 1906
1982 nslots = ide_cdrom_probe_capabilities (drive); 1907 nslots = ide_cdrom_probe_capabilities(drive);
1983 1908
1984 /* 1909 /* set correct block size */
1985 * set correct block size
1986 */
1987 blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE); 1910 blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
1988 1911
1989 if (drive->autotune == IDE_TUNE_DEFAULT || 1912 drive->dsc_overlap = (drive->next != drive);
1990 drive->autotune == IDE_TUNE_AUTO)
1991 drive->dsc_overlap = (drive->next != drive);
1992 1913
1993 if (ide_cdrom_register(drive, nslots)) { 1914 if (ide_cdrom_register(drive, nslots)) {
1994 printk (KERN_ERR "%s: ide_cdrom_setup failed to register device with the cdrom driver.\n", drive->name); 1915 printk(KERN_ERR "%s: %s failed to register device with the"
1916 " cdrom driver.\n", drive->name, __func__);
1995 cd->devinfo.handle = NULL; 1917 cd->devinfo.handle = NULL;
1996 return 1; 1918 return 1;
1997 } 1919 }
@@ -1999,19 +1921,6 @@ int ide_cdrom_setup (ide_drive_t *drive)
1999 return 0; 1921 return 0;
2000} 1922}
2001 1923
2002#ifdef CONFIG_IDE_PROC_FS
2003static
2004sector_t ide_cdrom_capacity (ide_drive_t *drive)
2005{
2006 unsigned long capacity, sectors_per_frame;
2007
2008 if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
2009 return 0;
2010
2011 return capacity * sectors_per_frame;
2012}
2013#endif
2014
2015static void ide_cd_remove(ide_drive_t *drive) 1924static void ide_cd_remove(ide_drive_t *drive)
2016{ 1925{
2017 struct cdrom_info *info = drive->driver_data; 1926 struct cdrom_info *info = drive->driver_data;
@@ -2030,7 +1939,6 @@ static void ide_cd_release(struct kref *kref)
2030 ide_drive_t *drive = info->drive; 1939 ide_drive_t *drive = info->drive;
2031 struct gendisk *g = info->disk; 1940 struct gendisk *g = info->disk;
2032 1941
2033 kfree(info->buffer);
2034 kfree(info->toc); 1942 kfree(info->toc);
2035 if (devinfo->handle == drive) 1943 if (devinfo->handle == drive)
2036 unregister_cdrom(devinfo); 1944 unregister_cdrom(devinfo);
@@ -2044,23 +1952,6 @@ static void ide_cd_release(struct kref *kref)
2044 1952
2045static int ide_cd_probe(ide_drive_t *); 1953static int ide_cd_probe(ide_drive_t *);
2046 1954
2047#ifdef CONFIG_IDE_PROC_FS
2048static int proc_idecd_read_capacity
2049 (char *page, char **start, off_t off, int count, int *eof, void *data)
2050{
2051 ide_drive_t *drive = data;
2052 int len;
2053
2054 len = sprintf(page,"%llu\n", (long long)ide_cdrom_capacity(drive));
2055 PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
2056}
2057
2058static ide_proc_entry_t idecd_proc[] = {
2059 { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
2060 { NULL, 0, NULL, NULL }
2061};
2062#endif
2063
2064static ide_driver_t ide_cdrom_driver = { 1955static ide_driver_t ide_cdrom_driver = {
2065 .gen_driver = { 1956 .gen_driver = {
2066 .owner = THIS_MODULE, 1957 .owner = THIS_MODULE,
@@ -2081,20 +1972,17 @@ static ide_driver_t ide_cdrom_driver = {
2081#endif 1972#endif
2082}; 1973};
2083 1974
2084static int idecd_open(struct inode * inode, struct file * file) 1975static int idecd_open(struct inode *inode, struct file *file)
2085{ 1976{
2086 struct gendisk *disk = inode->i_bdev->bd_disk; 1977 struct gendisk *disk = inode->i_bdev->bd_disk;
2087 struct cdrom_info *info; 1978 struct cdrom_info *info;
2088 int rc = -ENOMEM; 1979 int rc = -ENOMEM;
2089 1980
2090 if (!(info = ide_cd_get(disk))) 1981 info = ide_cd_get(disk);
1982 if (!info)
2091 return -ENXIO; 1983 return -ENXIO;
2092 1984
2093 if (!info->buffer) 1985 rc = cdrom_open(&info->devinfo, inode, file);
2094 info->buffer = kmalloc(SECTOR_BUFFER_SIZE, GFP_KERNEL|__GFP_REPEAT);
2095
2096 if (info->buffer)
2097 rc = cdrom_open(&info->devinfo, inode, file);
2098 1986
2099 if (rc < 0) 1987 if (rc < 0)
2100 ide_cd_put(info); 1988 ide_cd_put(info);
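With the per-open bounce-buffer allocation removed, idecd_open() reduces to taking a reference, handing off to cdrom_open(), and dropping the reference on failure. Reassembled from the right-hand column of the hunk above (note that the -ENOMEM initializer survives from the old allocation path even though rc is now always overwritten):

	static int idecd_open(struct inode *inode, struct file *file)
	{
		struct gendisk *disk = inode->i_bdev->bd_disk;
		struct cdrom_info *info;
		int rc = -ENOMEM;

		info = ide_cd_get(disk);	/* take a reference on the driver object */
		if (!info)
			return -ENXIO;

		rc = cdrom_open(&info->devinfo, inode, file);

		if (rc < 0)
			ide_cd_put(info);	/* undo the reference on failure */

		return rc;
	}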
@@ -2102,12 +1990,12 @@ static int idecd_open(struct inode * inode, struct file * file)
2102 return rc; 1990 return rc;
2103} 1991}
2104 1992
2105static int idecd_release(struct inode * inode, struct file * file) 1993static int idecd_release(struct inode *inode, struct file *file)
2106{ 1994{
2107 struct gendisk *disk = inode->i_bdev->bd_disk; 1995 struct gendisk *disk = inode->i_bdev->bd_disk;
2108 struct cdrom_info *info = ide_cd_g(disk); 1996 struct cdrom_info *info = ide_cd_g(disk);
2109 1997
2110 cdrom_release (&info->devinfo, file); 1998 cdrom_release(&info->devinfo, file);
2111 1999
2112 ide_cd_put(info); 2000 ide_cd_put(info);
2113 2001
@@ -2139,7 +2027,7 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
2139 struct packet_command cgc; 2027 struct packet_command cgc;
2140 char buffer[16]; 2028 char buffer[16];
2141 int stat; 2029 int stat;
2142 char spindown; 2030 char spindown;
2143 2031
2144 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); 2032 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
2145 2033
@@ -2148,12 +2036,12 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
2148 return stat; 2036 return stat;
2149 2037
2150 spindown = buffer[11] & 0x0f; 2038 spindown = buffer[11] & 0x0f;
2151 if (copy_to_user((void __user *)arg, &spindown, sizeof (char))) 2039 if (copy_to_user((void __user *)arg, &spindown, sizeof(char)))
2152 return -EFAULT; 2040 return -EFAULT;
2153 return 0; 2041 return 0;
2154} 2042}
2155 2043
2156static int idecd_ioctl (struct inode *inode, struct file *file, 2044static int idecd_ioctl(struct inode *inode, struct file *file,
2157 unsigned int cmd, unsigned long arg) 2045 unsigned int cmd, unsigned long arg)
2158{ 2046{
2159 struct block_device *bdev = inode->i_bdev; 2047 struct block_device *bdev = inode->i_bdev;
@@ -2161,13 +2049,13 @@ static int idecd_ioctl (struct inode *inode, struct file *file,
2161 int err; 2049 int err;
2162 2050
2163 switch (cmd) { 2051 switch (cmd) {
2164 case CDROMSETSPINDOWN: 2052 case CDROMSETSPINDOWN:
2165 return idecd_set_spindown(&info->devinfo, arg); 2053 return idecd_set_spindown(&info->devinfo, arg);
2166 case CDROMGETSPINDOWN: 2054 case CDROMGETSPINDOWN:
2167 return idecd_get_spindown(&info->devinfo, arg); 2055 return idecd_get_spindown(&info->devinfo, arg);
2168 default: 2056 default:
2169 break; 2057 break;
2170 } 2058 }
2171 2059
2172 err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); 2060 err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
2173 if (err == -EINVAL) 2061 if (err == -EINVAL)
@@ -2193,16 +2081,16 @@ static int idecd_revalidate_disk(struct gendisk *disk)
2193} 2081}
2194 2082
2195static struct block_device_operations idecd_ops = { 2083static struct block_device_operations idecd_ops = {
2196 .owner = THIS_MODULE, 2084 .owner = THIS_MODULE,
2197 .open = idecd_open, 2085 .open = idecd_open,
2198 .release = idecd_release, 2086 .release = idecd_release,
2199 .ioctl = idecd_ioctl, 2087 .ioctl = idecd_ioctl,
2200 .media_changed = idecd_media_changed, 2088 .media_changed = idecd_media_changed,
2201 .revalidate_disk= idecd_revalidate_disk 2089 .revalidate_disk = idecd_revalidate_disk
2202}; 2090};
2203 2091
2204/* options */ 2092/* module options */
2205static char *ignore = NULL; 2093static char *ignore;
2206 2094
2207module_param(ignore, charp, 0400); 2095module_param(ignore, charp, 0400);
2208MODULE_DESCRIPTION("ATAPI CD-ROM Driver"); 2096MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
@@ -2222,17 +2110,20 @@ static int ide_cd_probe(ide_drive_t *drive)
2222 /* skip drives that we were told to ignore */ 2110 /* skip drives that we were told to ignore */
2223 if (ignore != NULL) { 2111 if (ignore != NULL) {
2224 if (strstr(ignore, drive->name)) { 2112 if (strstr(ignore, drive->name)) {
2225 printk(KERN_INFO "ide-cd: ignoring drive %s\n", drive->name); 2113 printk(KERN_INFO "ide-cd: ignoring drive %s\n",
2114 drive->name);
2226 goto failed; 2115 goto failed;
2227 } 2116 }
2228 } 2117 }
2229 if (drive->scsi) { 2118 if (drive->scsi) {
2230 printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi emulation.\n", drive->name); 2119 printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi "
2120 "emulation.\n", drive->name);
2231 goto failed; 2121 goto failed;
2232 } 2122 }
2233 info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL); 2123 info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL);
2234 if (info == NULL) { 2124 if (info == NULL) {
2235 printk(KERN_ERR "%s: Can't allocate a cdrom structure\n", drive->name); 2125 printk(KERN_ERR "%s: Can't allocate a cdrom structure\n",
2126 drive->name);
2236 goto failed; 2127 goto failed;
2237 } 2128 }
2238 2129
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 22e3751a681e..a58801c4484d 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -119,10 +119,6 @@ struct cdrom_info {
119 119
120 struct atapi_toc *toc; 120 struct atapi_toc *toc;
121 121
122 unsigned long sector_buffered;
123 unsigned long nsectors_buffered;
124 unsigned char *buffer;
125
126 /* The result of the last successful request sense command 122 /* The result of the last successful request sense command
127 on this device. */ 123 on this device. */
128 struct request_sense sense_data; 124 struct request_sense sense_data;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 39501d130256..8e08d083fce9 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -16,8 +16,6 @@
16 16
17#define IDEDISK_VERSION "1.18" 17#define IDEDISK_VERSION "1.18"
18 18
19//#define DEBUG
20
21#include <linux/module.h> 19#include <linux/module.h>
22#include <linux/types.h> 20#include <linux/types.h>
23#include <linux/string.h> 21#include <linux/string.h>
@@ -88,7 +86,7 @@ static void ide_disk_put(struct ide_disk_obj *idkp)
88 * 86 *
89 * It is called only once for each drive. 87 * It is called only once for each drive.
90 */ 88 */
91static int lba_capacity_is_ok (struct hd_driveid *id) 89static int lba_capacity_is_ok(struct hd_driveid *id)
92{ 90{
93 unsigned long lba_sects, chs_sects, head, tail; 91 unsigned long lba_sects, chs_sects, head, tail;
94 92
@@ -176,7 +174,8 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
176 * __ide_do_rw_disk() issues READ and WRITE commands to a disk, 174 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
177 * using LBA if supported, or CHS otherwise, to address sectors. 175 * using LBA if supported, or CHS otherwise, to address sectors.
178 */ 176 */
179static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block) 177static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
178 sector_t block)
180{ 179{
181 ide_hwif_t *hwif = HWIF(drive); 180 ide_hwif_t *hwif = HWIF(drive);
182 unsigned int dma = drive->using_dma; 181 unsigned int dma = drive->using_dma;
@@ -228,7 +227,8 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
228 tf->device = (block >> 8) & 0xf; 227 tf->device = (block >> 8) & 0xf;
229 } 228 }
230 } else { 229 } else {
231 unsigned int sect,head,cyl,track; 230 unsigned int sect, head, cyl, track;
231
232 track = (int)block / drive->sect; 232 track = (int)block / drive->sect;
233 sect = (int)block % drive->sect + 1; 233 sect = (int)block % drive->sect + 1;
234 head = track % drive->head; 234 head = track % drive->head;
@@ -271,7 +271,8 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
271 * 1073741822 == 549756 MB or 48bit addressing fake drive 271 * 1073741822 == 549756 MB or 48bit addressing fake drive
272 */ 272 */
273 273
274static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector_t block) 274static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
275 sector_t block)
275{ 276{
276 ide_hwif_t *hwif = HWIF(drive); 277 ide_hwif_t *hwif = HWIF(drive);
277 278
@@ -452,7 +453,7 @@ static void idedisk_check_hpa(ide_drive_t *drive)
452 * in above order (i.e., if value of higher priority is available, 453 * in above order (i.e., if value of higher priority is available,
453 * reset will be ignored). 454 * reset will be ignored).
454 */ 455 */
455static void init_idedisk_capacity (ide_drive_t *drive) 456static void init_idedisk_capacity(ide_drive_t *drive)
456{ 457{
457 struct hd_driveid *id = drive->id; 458 struct hd_driveid *id = drive->id;
458 /* 459 /*
@@ -479,7 +480,7 @@ static void init_idedisk_capacity (ide_drive_t *drive)
479 } 480 }
480} 481}
481 482
482static sector_t idedisk_capacity (ide_drive_t *drive) 483static sector_t idedisk_capacity(ide_drive_t *drive)
483{ 484{
484 return drive->capacity64 - drive->sect0; 485 return drive->capacity64 - drive->sect0;
485} 486}
@@ -524,10 +525,11 @@ static int proc_idedisk_read_cache
524 int len; 525 int len;
525 526
526 if (drive->id_read) 527 if (drive->id_read)
527 len = sprintf(out,"%i\n", drive->id->buf_size / 2); 528 len = sprintf(out, "%i\n", drive->id->buf_size / 2);
528 else 529 else
529 len = sprintf(out,"(none)\n"); 530 len = sprintf(out, "(none)\n");
530 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 531
532 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
531} 533}
532 534
533static int proc_idedisk_read_capacity 535static int proc_idedisk_read_capacity
@@ -536,54 +538,52 @@ static int proc_idedisk_read_capacity
536 ide_drive_t*drive = (ide_drive_t *)data; 538 ide_drive_t*drive = (ide_drive_t *)data;
537 int len; 539 int len;
538 540
539 len = sprintf(page,"%llu\n", (long long)idedisk_capacity(drive)); 541 len = sprintf(page, "%llu\n", (long long)idedisk_capacity(drive));
540 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 542
543 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
541} 544}
542 545
543static int proc_idedisk_read_smart_thresholds 546static int proc_idedisk_read_smart(char *page, char **start, off_t off,
544 (char *page, char **start, off_t off, int count, int *eof, void *data) 547 int count, int *eof, void *data, u8 sub_cmd)
545{ 548{
546 ide_drive_t *drive = (ide_drive_t *)data; 549 ide_drive_t *drive = (ide_drive_t *)data;
547 int len = 0, i = 0; 550 int len = 0, i = 0;
548 551
549 if (get_smart_data(drive, page, SMART_READ_THRESHOLDS) == 0) { 552 if (get_smart_data(drive, page, sub_cmd) == 0) {
550 unsigned short *val = (unsigned short *) page; 553 unsigned short *val = (unsigned short *) page;
551 char *out = ((char *)val) + (SECTOR_WORDS * 4); 554 char *out = ((char *)val) + (SECTOR_WORDS * 4);
552 page = out; 555 page = out;
553 do { 556 do {
554 out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n'); 557 out += sprintf(out, "%04x%c", le16_to_cpu(*val),
558 (++i & 7) ? ' ' : '\n');
555 val += 1; 559 val += 1;
556 } while (i < (SECTOR_WORDS * 2)); 560 } while (i < (SECTOR_WORDS * 2));
557 len = out - page; 561 len = out - page;
558 } 562 }
559 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 563
564 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
560} 565}
561 566
562static int proc_idedisk_read_smart_values 567static int proc_idedisk_read_sv
563 (char *page, char **start, off_t off, int count, int *eof, void *data) 568 (char *page, char **start, off_t off, int count, int *eof, void *data)
564{ 569{
565 ide_drive_t *drive = (ide_drive_t *)data; 570 return proc_idedisk_read_smart(page, start, off, count, eof, data,
566 int len = 0, i = 0; 571 SMART_READ_VALUES);
572}
567 573
568 if (get_smart_data(drive, page, SMART_READ_VALUES) == 0) { 574static int proc_idedisk_read_st
569 unsigned short *val = (unsigned short *) page; 575 (char *page, char **start, off_t off, int count, int *eof, void *data)
570 char *out = ((char *)val) + (SECTOR_WORDS * 4); 576{
571 page = out; 577 return proc_idedisk_read_smart(page, start, off, count, eof, data,
572 do { 578 SMART_READ_THRESHOLDS);
573 out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n');
574 val += 1;
575 } while (i < (SECTOR_WORDS * 2));
576 len = out - page;
577 }
578 PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
579} 579}
580 580
581static ide_proc_entry_t idedisk_proc[] = { 581static ide_proc_entry_t idedisk_proc[] = {
582 { "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL }, 582 { "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL },
583 { "capacity", S_IFREG|S_IRUGO, proc_idedisk_read_capacity, NULL }, 583 { "capacity", S_IFREG|S_IRUGO, proc_idedisk_read_capacity, NULL },
584 { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL }, 584 { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
585 { "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_smart_values, NULL }, 585 { "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_sv, NULL },
586 { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL }, 586 { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_st, NULL },
587 { NULL, 0, NULL, NULL } 587 { NULL, 0, NULL, NULL }
588}; 588};
589#endif /* CONFIG_IDE_PROC_FS */ 589#endif /* CONFIG_IDE_PROC_FS */
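The two nearly identical SMART dumpers are folded into one helper that takes the SMART sub-command, leaving each /proc entry as a one-line wrapper. If a further SMART page were ever exported the same way, the pattern would extend with just another wrapper plus a table row; the sketch below is purely illustrative (the "smart_errlog" name and the sub-command constant are assumptions, not part of this patch, and whether get_smart_data() could serve other sub-commands unchanged is not shown here):

	/* Hypothetical extension, mirroring proc_idedisk_read_sv/_st above. */
	static int proc_idedisk_read_errlog
		(char *page, char **start, off_t off, int count, int *eof, void *data)
	{
		return proc_idedisk_read_smart(page, start, off, count, eof, data,
					       SMART_READ_LOG_SECTOR /* assumed */);
	}

	/* ...and one more idedisk_proc[] entry: */
	{ "smart_errlog", S_IFREG|S_IRUSR, proc_idedisk_read_errlog, NULL },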
@@ -625,12 +625,13 @@ static int set_multcount(ide_drive_t *drive, int arg)
625 if (drive->special.b.set_multmode) 625 if (drive->special.b.set_multmode)
626 return -EBUSY; 626 return -EBUSY;
627 627
628 ide_init_drive_cmd (&rq); 628 ide_init_drive_cmd(&rq);
629 rq.cmd_type = REQ_TYPE_ATA_TASKFILE; 629 rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
630 630
631 drive->mult_req = arg; 631 drive->mult_req = arg;
632 drive->special.b.set_multmode = 1; 632 drive->special.b.set_multmode = 1;
633 (void) ide_do_drive_cmd (drive, &rq, ide_wait); 633 (void)ide_do_drive_cmd(drive, &rq, ide_wait);
634
634 return (drive->mult_count == arg) ? 0 : -EIO; 635 return (drive->mult_count == arg) ? 0 : -EIO;
635} 636}
636 637
@@ -706,7 +707,7 @@ static int write_cache(ide_drive_t *drive, int arg)
706 return err; 707 return err;
707} 708}
708 709
709static int do_idedisk_flushcache (ide_drive_t *drive) 710static int do_idedisk_flushcache(ide_drive_t *drive)
710{ 711{
711 ide_task_t args; 712 ide_task_t args;
712 713
@@ -719,7 +720,7 @@ static int do_idedisk_flushcache (ide_drive_t *drive)
719 return ide_no_data_taskfile(drive, &args); 720 return ide_no_data_taskfile(drive, &args);
720} 721}
721 722
722static int set_acoustic (ide_drive_t *drive, int arg) 723static int set_acoustic(ide_drive_t *drive, int arg)
723{ 724{
724 ide_task_t args; 725 ide_task_t args;
725 726
@@ -753,7 +754,7 @@ static int set_lba_addressing(ide_drive_t *drive, int arg)
753 return 0; 754 return 0;
754 755
755 if (!idedisk_supports_lba48(drive->id)) 756 if (!idedisk_supports_lba48(drive->id))
756 return -EIO; 757 return -EIO;
757 drive->addressing = arg; 758 drive->addressing = arg;
758 return 0; 759 return 0;
759} 760}
@@ -763,23 +764,35 @@ static void idedisk_add_settings(ide_drive_t *drive)
763{ 764{
764 struct hd_driveid *id = drive->id; 765 struct hd_driveid *id = drive->id;
765 766
766 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL); 767 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1,
767 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); 768 &drive->bios_cyl, NULL);
768 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); 769 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1,
769 ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing); 770 &drive->bios_head, NULL);
770 ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount); 771 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1,
771 ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr); 772 &drive->bios_sect, NULL);
772 ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL); 773 ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1,
773 ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache); 774 &drive->addressing, set_lba_addressing);
774 ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic); 775 ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0,
775 ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL); 776 id->max_multsect, 1, 1, &drive->mult_count,
776 ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL); 777 set_multcount);
778 ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
779 &drive->nowerr, set_nowerr);
780 ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1,
781 &drive->lun, NULL);
782 ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
783 &drive->wcache, write_cache);
784 ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1,
785 &drive->acoustic, set_acoustic);
786 ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1,
787 &drive->failures, NULL);
788 ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535,
789 1, 1, &drive->max_failures, NULL);
777} 790}
778#else 791#else
779static inline void idedisk_add_settings(ide_drive_t *drive) { ; } 792static inline void idedisk_add_settings(ide_drive_t *drive) { ; }
780#endif 793#endif
781 794
782static void idedisk_setup (ide_drive_t *drive) 795static void idedisk_setup(ide_drive_t *drive)
783{ 796{
784 ide_hwif_t *hwif = drive->hwif; 797 ide_hwif_t *hwif = drive->hwif;
785 struct hd_driveid *id = drive->id; 798 struct hd_driveid *id = drive->id;
@@ -792,11 +805,10 @@ static void idedisk_setup (ide_drive_t *drive)
792 805
793 if (drive->removable) { 806 if (drive->removable) {
794 /* 807 /*
795 * Removable disks (eg. SYQUEST); ignore 'WD' drives 808 * Removable disks (eg. SYQUEST); ignore 'WD' drives
796 */ 809 */
797 if (id->model[0] != 'W' || id->model[1] != 'D') { 810 if (id->model[0] != 'W' || id->model[1] != 'D')
798 drive->doorlocking = 1; 811 drive->doorlocking = 1;
799 }
800 } 812 }
801 813
802 (void)set_lba_addressing(drive, 1); 814 (void)set_lba_addressing(drive, 1);
@@ -810,10 +822,11 @@ static void idedisk_setup (ide_drive_t *drive)
810 blk_queue_max_sectors(drive->queue, max_s); 822 blk_queue_max_sectors(drive->queue, max_s);
811 } 823 }
812 824
813 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, drive->queue->max_sectors / 2); 825 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
826 drive->queue->max_sectors / 2);
814 827
815 /* calculate drive capacity, and select LBA if possible */ 828 /* calculate drive capacity, and select LBA if possible */
816 init_idedisk_capacity (drive); 829 init_idedisk_capacity(drive);
817 830
818 /* limit drive capacity to 137GB if LBA48 cannot be used */ 831 /* limit drive capacity to 137GB if LBA48 cannot be used */
819 if (drive->addressing == 0 && drive->capacity64 > 1ULL << 28) { 832 if (drive->addressing == 0 && drive->capacity64 > 1ULL << 28) {
@@ -826,9 +839,9 @@ static void idedisk_setup (ide_drive_t *drive)
826 839
827 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && drive->addressing) { 840 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && drive->addressing) {
828 if (drive->capacity64 > 1ULL << 28) { 841 if (drive->capacity64 > 1ULL << 28) {
829 printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode will" 842 printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
830 " be used for accessing sectors > %u\n", 843 " will be used for accessing sectors "
831 drive->name, 1 << 28); 844 "> %u\n", drive->name, 1 << 28);
832 } else 845 } else
833 drive->addressing = 0; 846 drive->addressing = 0;
834 } 847 }
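For reference, the 1ULL << 28 threshold used in the two hunks above is the 28-bit LBA limit. At the standard 512-byte ATA sector size:

	2^28 sectors * 512 bytes/sector = 137,438,953,472 bytes, i.e. roughly 137 GB (decimal) or 128 GiB

which is where the "137GB" in the comment and the "sectors > %u" figure in the warning message come from.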
@@ -837,7 +850,8 @@ static void idedisk_setup (ide_drive_t *drive)
837 * if possible, give fdisk access to more of the drive, 850 * if possible, give fdisk access to more of the drive,
838 * by correcting bios_cyls: 851 * by correcting bios_cyls:
839 */ 852 */
840 capacity = idedisk_capacity (drive); 853 capacity = idedisk_capacity(drive);
854
841 if (!drive->forced_geom) { 855 if (!drive->forced_geom) {
842 856
843 if (idedisk_supports_lba48(drive->id)) { 857 if (idedisk_supports_lba48(drive->id)) {
@@ -993,7 +1007,8 @@ static int idedisk_open(struct inode *inode, struct file *filp)
993 struct ide_disk_obj *idkp; 1007 struct ide_disk_obj *idkp;
994 ide_drive_t *drive; 1008 ide_drive_t *drive;
995 1009
996 if (!(idkp = ide_disk_get(disk))) 1010 idkp = ide_disk_get(disk);
1011 if (idkp == NULL)
997 return -ENXIO; 1012 return -ENXIO;
998 1013
999 drive = idkp->drive; 1014 drive = idkp->drive;
@@ -1115,13 +1130,13 @@ static int idedisk_revalidate_disk(struct gendisk *disk)
1115} 1130}
1116 1131
1117static struct block_device_operations idedisk_ops = { 1132static struct block_device_operations idedisk_ops = {
1118 .owner = THIS_MODULE, 1133 .owner = THIS_MODULE,
1119 .open = idedisk_open, 1134 .open = idedisk_open,
1120 .release = idedisk_release, 1135 .release = idedisk_release,
1121 .ioctl = idedisk_ioctl, 1136 .ioctl = idedisk_ioctl,
1122 .getgeo = idedisk_getgeo, 1137 .getgeo = idedisk_getgeo,
1123 .media_changed = idedisk_media_changed, 1138 .media_changed = idedisk_media_changed,
1124 .revalidate_disk= idedisk_revalidate_disk 1139 .revalidate_disk = idedisk_revalidate_disk
1125}; 1140};
1126 1141
1127MODULE_DESCRIPTION("ATA DISK Driver"); 1142MODULE_DESCRIPTION("ATA DISK Driver");
@@ -1184,7 +1199,7 @@ failed:
1184 return -ENODEV; 1199 return -ENODEV;
1185} 1200}
1186 1201
1187static void __exit idedisk_exit (void) 1202static void __exit idedisk_exit(void)
1188{ 1203{
1189 driver_unregister(&idedisk_driver.gen_driver); 1204 driver_unregister(&idedisk_driver.gen_driver);
1190} 1205}
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index d61e5788d310..c352cf27b6e7 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -102,7 +102,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
102{ 102{
103 u8 stat = 0, dma_stat = 0; 103 u8 stat = 0, dma_stat = 0;
104 104
105 dma_stat = HWIF(drive)->ide_dma_end(drive); 105 dma_stat = drive->hwif->dma_ops->dma_end(drive);
106 stat = ide_read_status(drive); 106 stat = ide_read_status(drive);
107 107
108 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { 108 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
@@ -394,7 +394,7 @@ void ide_dma_off_quietly(ide_drive_t *drive)
394 drive->using_dma = 0; 394 drive->using_dma = 0;
395 ide_toggle_bounce(drive, 0); 395 ide_toggle_bounce(drive, 0);
396 396
397 drive->hwif->dma_host_set(drive, 0); 397 drive->hwif->dma_ops->dma_host_set(drive, 0);
398} 398}
399 399
400EXPORT_SYMBOL(ide_dma_off_quietly); 400EXPORT_SYMBOL(ide_dma_off_quietly);
@@ -427,7 +427,7 @@ void ide_dma_on(ide_drive_t *drive)
427 drive->using_dma = 1; 427 drive->using_dma = 1;
428 ide_toggle_bounce(drive, 1); 428 ide_toggle_bounce(drive, 1);
429 429
430 drive->hwif->dma_host_set(drive, 1); 430 drive->hwif->dma_ops->dma_host_set(drive, 1);
431} 431}
432 432
433#ifdef CONFIG_BLK_DEV_IDEDMA_SFF 433#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
@@ -482,11 +482,12 @@ int ide_dma_setup(ide_drive_t *drive)
482 482
483EXPORT_SYMBOL_GPL(ide_dma_setup); 483EXPORT_SYMBOL_GPL(ide_dma_setup);
484 484
485static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command) 485void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
486{ 486{
487 /* issue cmd to drive */ 487 /* issue cmd to drive */
488 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry); 488 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
489} 489}
490EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
490 491
491void ide_dma_start(ide_drive_t *drive) 492void ide_dma_start(ide_drive_t *drive)
492{ 493{
@@ -532,7 +533,7 @@ int __ide_dma_end (ide_drive_t *drive)
532EXPORT_SYMBOL(__ide_dma_end); 533EXPORT_SYMBOL(__ide_dma_end);
533 534
534/* returns 1 if dma irq issued, 0 otherwise */ 535/* returns 1 if dma irq issued, 0 otherwise */
535static int __ide_dma_test_irq(ide_drive_t *drive) 536int ide_dma_test_irq(ide_drive_t *drive)
536{ 537{
537 ide_hwif_t *hwif = HWIF(drive); 538 ide_hwif_t *hwif = HWIF(drive);
538 u8 dma_stat = hwif->INB(hwif->dma_status); 539 u8 dma_stat = hwif->INB(hwif->dma_status);
@@ -542,9 +543,10 @@ static int __ide_dma_test_irq(ide_drive_t *drive)
542 return 1; 543 return 1;
543 if (!drive->waiting_for_dma) 544 if (!drive->waiting_for_dma)
544 printk(KERN_WARNING "%s: (%s) called while not waiting\n", 545 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
545 drive->name, __FUNCTION__); 546 drive->name, __func__);
546 return 0; 547 return 0;
547} 548}
549EXPORT_SYMBOL_GPL(ide_dma_test_irq);
548#else 550#else
549static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } 551static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
550#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 552#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -574,6 +576,7 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
574{ 576{
575 struct hd_driveid *id = drive->id; 577 struct hd_driveid *id = drive->id;
576 ide_hwif_t *hwif = drive->hwif; 578 ide_hwif_t *hwif = drive->hwif;
579 const struct ide_port_ops *port_ops = hwif->port_ops;
577 unsigned int mask = 0; 580 unsigned int mask = 0;
578 581
579 switch(base) { 582 switch(base) {
@@ -581,8 +584,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
581 if ((id->field_valid & 4) == 0) 584 if ((id->field_valid & 4) == 0)
582 break; 585 break;
583 586
584 if (hwif->udma_filter) 587 if (port_ops && port_ops->udma_filter)
585 mask = hwif->udma_filter(drive); 588 mask = port_ops->udma_filter(drive);
586 else 589 else
587 mask = hwif->ultra_mask; 590 mask = hwif->ultra_mask;
588 mask &= id->dma_ultra; 591 mask &= id->dma_ultra;
@@ -598,8 +601,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
598 case XFER_MW_DMA_0: 601 case XFER_MW_DMA_0:
599 if ((id->field_valid & 2) == 0) 602 if ((id->field_valid & 2) == 0)
600 break; 603 break;
601 if (hwif->mdma_filter) 604 if (port_ops && port_ops->mdma_filter)
602 mask = hwif->mdma_filter(drive); 605 mask = port_ops->mdma_filter(drive);
603 else 606 else
604 mask = hwif->mwdma_mask; 607 mask = hwif->mwdma_mask;
605 mask &= id->dma_mword; 608 mask &= id->dma_mword;
@@ -703,17 +706,8 @@ static int ide_tune_dma(ide_drive_t *drive)
703 706
704 speed = ide_max_dma_mode(drive); 707 speed = ide_max_dma_mode(drive);
705 708
706 if (!speed) { 709 if (!speed)
707 /* is this really correct/needed? */ 710 return 0;
708 if ((hwif->host_flags & IDE_HFLAG_CY82C693) &&
709 ide_dma_good_drive(drive))
710 return 1;
711 else
712 return 0;
713 }
714
715 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
716 return 1;
717 711
718 if (ide_set_dma_mode(drive, speed)) 712 if (ide_set_dma_mode(drive, speed))
719 return 0; 713 return 0;
@@ -810,15 +804,15 @@ void ide_dma_timeout (ide_drive_t *drive)
810 804
811 printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); 805 printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
812 806
813 if (hwif->ide_dma_test_irq(drive)) 807 if (hwif->dma_ops->dma_test_irq(drive))
814 return; 808 return;
815 809
816 hwif->ide_dma_end(drive); 810 hwif->dma_ops->dma_end(drive);
817} 811}
818 812
819EXPORT_SYMBOL(ide_dma_timeout); 813EXPORT_SYMBOL(ide_dma_timeout);
820 814
821static void ide_release_dma_engine(ide_hwif_t *hwif) 815void ide_release_dma_engine(ide_hwif_t *hwif)
822{ 816{
823 if (hwif->dmatable_cpu) { 817 if (hwif->dmatable_cpu) {
824 struct pci_dev *pdev = to_pci_dev(hwif->dev); 818 struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -829,28 +823,7 @@ static void ide_release_dma_engine(ide_hwif_t *hwif)
829 } 823 }
830} 824}
831 825
832static int ide_release_iomio_dma(ide_hwif_t *hwif) 826int ide_allocate_dma_engine(ide_hwif_t *hwif)
833{
834 release_region(hwif->dma_base, 8);
835 if (hwif->extra_ports)
836 release_region(hwif->extra_base, hwif->extra_ports);
837 return 1;
838}
839
840/*
841 * Needed for allowing full modular support of ide-driver
842 */
843int ide_release_dma(ide_hwif_t *hwif)
844{
845 ide_release_dma_engine(hwif);
846
847 if (hwif->mmio)
848 return 1;
849 else
850 return ide_release_iomio_dma(hwif);
851}
852
853static int ide_allocate_dma_engine(ide_hwif_t *hwif)
854{ 827{
855 struct pci_dev *pdev = to_pci_dev(hwif->dev); 828 struct pci_dev *pdev = to_pci_dev(hwif->dev);
856 829
@@ -862,65 +835,25 @@ static int ide_allocate_dma_engine(ide_hwif_t *hwif)
862 return 0; 835 return 0;
863 836
864 printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n", 837 printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
865 hwif->cds->name); 838 hwif->name);
866 839
867 return 1; 840 return 1;
868} 841}
869 842EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
870static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base) 843
871{ 844static const struct ide_dma_ops sff_dma_ops = {
872 printk(KERN_INFO " %s: MMIO-DMA ", hwif->name); 845 .dma_host_set = ide_dma_host_set,
873 846 .dma_setup = ide_dma_setup,
874 return 0; 847 .dma_exec_cmd = ide_dma_exec_cmd,
875} 848 .dma_start = ide_dma_start,
876 849 .dma_end = __ide_dma_end,
877static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base) 850 .dma_test_irq = ide_dma_test_irq,
878{ 851 .dma_timeout = ide_dma_timeout,
879 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx", 852 .dma_lost_irq = ide_dma_lost_irq,
880 hwif->name, base, base + 7); 853};
881
882 if (!request_region(base, 8, hwif->name)) {
883 printk(" -- Error, ports in use.\n");
884 return 1;
885 }
886
887 if (hwif->cds->extra) {
888 hwif->extra_base = base + (hwif->channel ? 8 : 16);
889
890 if (!hwif->mate || !hwif->mate->extra_ports) {
891 if (!request_region(hwif->extra_base,
892 hwif->cds->extra, hwif->cds->name)) {
893 printk(" -- Error, extra ports in use.\n");
894 release_region(base, 8);
895 return 1;
896 }
897 hwif->extra_ports = hwif->cds->extra;
898 }
899 }
900
901 return 0;
902}
903
904static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base)
905{
906 if (hwif->mmio)
907 return ide_mapped_mmio_dma(hwif, base);
908
909 return ide_iomio_dma(hwif, base);
910}
911 854
912void ide_setup_dma(ide_hwif_t *hwif, unsigned long base) 855void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
913{ 856{
914 u8 dma_stat;
915
916 if (ide_dma_iobase(hwif, base))
917 return;
918
919 if (ide_allocate_dma_engine(hwif)) {
920 ide_release_dma(hwif);
921 return;
922 }
923
924 hwif->dma_base = base; 857 hwif->dma_base = base;
925 858
926 if (!hwif->dma_command) 859 if (!hwif->dma_command)
@@ -934,27 +867,7 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
934 if (!hwif->dma_prdtable) 867 if (!hwif->dma_prdtable)
935 hwif->dma_prdtable = hwif->dma_base + 4; 868 hwif->dma_prdtable = hwif->dma_base + 4;
936 869
937 if (!hwif->dma_host_set) 870 hwif->dma_ops = &sff_dma_ops;
938 hwif->dma_host_set = &ide_dma_host_set;
939 if (!hwif->dma_setup)
940 hwif->dma_setup = &ide_dma_setup;
941 if (!hwif->dma_exec_cmd)
942 hwif->dma_exec_cmd = &ide_dma_exec_cmd;
943 if (!hwif->dma_start)
944 hwif->dma_start = &ide_dma_start;
945 if (!hwif->ide_dma_end)
946 hwif->ide_dma_end = &__ide_dma_end;
947 if (!hwif->ide_dma_test_irq)
948 hwif->ide_dma_test_irq = &__ide_dma_test_irq;
949 if (!hwif->dma_timeout)
950 hwif->dma_timeout = &ide_dma_timeout;
951 if (!hwif->dma_lost_irq)
952 hwif->dma_lost_irq = &ide_dma_lost_irq;
953
954 dma_stat = hwif->INB(hwif->dma_status);
955 printk(KERN_CONT ", BIOS settings: %s:%s, %s:%s\n",
956 hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "PIO",
957 hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "PIO");
958} 871}
959 872
960EXPORT_SYMBOL_GPL(ide_setup_dma); 873EXPORT_SYMBOL_GPL(ide_setup_dma);
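The per-hwif DMA method pointers are gone; hosts now publish a single const struct ide_dma_ops and callers dispatch through hwif->dma_ops (see the sff_dma_ops initializer above and the updated call sites in ide-io.c and ide-iops.c below). A sketch of what a host driver overriding only a couple of methods might look like under this scheme; "foo" is a made-up driver name, and it is assumed (not shown in this hunk) that the generic helpers being reused are exported to modules:

	/* Illustrative only; field names follow the sff_dma_ops initializer above. */
	static const struct ide_dma_ops foo_dma_ops = {
		.dma_host_set	= foo_dma_host_set,	/* chip-specific enable/disable */
		.dma_setup	= ide_dma_setup,	/* generic SFF helpers reused... */
		.dma_exec_cmd	= ide_dma_exec_cmd,
		.dma_start	= ide_dma_start,
		.dma_end	= foo_dma_end,		/* ...except where the chip differs */
		.dma_test_irq	= ide_dma_test_irq,
		.dma_timeout	= ide_dma_timeout,
		.dma_lost_irq	= ide_dma_lost_irq,
	};

	/* Callers now go through the table, e.g.: */
	hwif->dma_ops->dma_start(drive);	/* was: hwif->dma_start(drive) */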
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 5f133dfb541c..489079b8ed03 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -396,7 +396,7 @@ static void idefloppy_retry_pc(ide_drive_t *drive)
396} 396}
397 397
398/* The usual interrupt handler called during a packet command. */ 398/* The usual interrupt handler called during a packet command. */
399static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive) 399static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
400{ 400{
401 idefloppy_floppy_t *floppy = drive->driver_data; 401 idefloppy_floppy_t *floppy = drive->driver_data;
402 ide_hwif_t *hwif = drive->hwif; 402 ide_hwif_t *hwif = drive->hwif;
@@ -411,7 +411,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
411 debug_log("Reached %s interrupt handler\n", __func__); 411 debug_log("Reached %s interrupt handler\n", __func__);
412 412
413 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { 413 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
414 dma_error = hwif->ide_dma_end(drive); 414 dma_error = hwif->dma_ops->dma_end(drive);
415 if (dma_error) { 415 if (dma_error) {
416 printk(KERN_ERR "%s: DMA %s error\n", drive->name, 416 printk(KERN_ERR "%s: DMA %s error\n", drive->name,
417 rq_data_dir(rq) ? "write" : "read"); 417 rq_data_dir(rq) ? "write" : "read");
@@ -465,10 +465,10 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
465 } 465 }
466 466
467 /* Get the number of bytes to transfer */ 467 /* Get the number of bytes to transfer */
468 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | 468 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
469 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); 469 hwif->INB(hwif->io_ports.lbam_addr);
470 /* on this interrupt */ 470 /* on this interrupt */
471 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 471 ireason = hwif->INB(hwif->io_ports.nsect_addr);
472 472
473 if (ireason & CD) { 473 if (ireason & CD) {
474 printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__); 474 printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__);
@@ -539,7 +539,7 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
539 "initiated yet DRQ isn't asserted\n"); 539 "initiated yet DRQ isn't asserted\n");
540 return startstop; 540 return startstop;
541 } 541 }
542 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 542 ireason = hwif->INB(hwif->io_ports.nsect_addr);
543 if ((ireason & CD) == 0 || (ireason & IO)) { 543 if ((ireason & CD) == 0 || (ireason & IO)) {
544 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while " 544 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
545 "issuing a packet command\n"); 545 "issuing a packet command\n");
@@ -586,7 +586,7 @@ static ide_startstop_t idefloppy_transfer_pc1(ide_drive_t *drive)
586 "initiated yet DRQ isn't asserted\n"); 586 "initiated yet DRQ isn't asserted\n");
587 return startstop; 587 return startstop;
588 } 588 }
589 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 589 ireason = hwif->INB(hwif->io_ports.nsect_addr);
590 if ((ireason & CD) == 0 || (ireason & IO)) { 590 if ((ireason & CD) == 0 || (ireason & IO)) {
591 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) " 591 printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
592 "while issuing a packet command\n"); 592 "while issuing a packet command\n");
@@ -663,7 +663,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
663 dma = 0; 663 dma = 0;
664 664
665 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma) 665 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
666 dma = !hwif->dma_setup(drive); 666 dma = !hwif->dma_ops->dma_setup(drive);
667 667
668 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK | 668 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
669 IDE_TFLAG_OUT_DEVICE, bcount, dma); 669 IDE_TFLAG_OUT_DEVICE, bcount, dma);
@@ -671,7 +671,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
671 if (dma) { 671 if (dma) {
672 /* Begin DMA, if necessary */ 672 /* Begin DMA, if necessary */
673 pc->flags |= PC_FLAG_DMA_IN_PROGRESS; 673 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
674 hwif->dma_start(drive); 674 hwif->dma_ops->dma_start(drive);
675 } 675 }
676 676
677 /* Can we transfer the packet when we get the interrupt or wait? */ 677 /* Can we transfer the packet when we get the interrupt or wait? */
@@ -692,7 +692,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
692 return ide_started; 692 return ide_started;
693 } else { 693 } else {
694 /* Issue the packet command */ 694 /* Issue the packet command */
695 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); 695 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
696 return (*pkt_xfer_routine) (drive); 696 return (*pkt_xfer_routine) (drive);
697 } 697 }
698} 698}
@@ -1596,13 +1596,13 @@ static int idefloppy_revalidate_disk(struct gendisk *disk)
1596} 1596}
1597 1597
1598static struct block_device_operations idefloppy_ops = { 1598static struct block_device_operations idefloppy_ops = {
1599 .owner = THIS_MODULE, 1599 .owner = THIS_MODULE,
1600 .open = idefloppy_open, 1600 .open = idefloppy_open,
1601 .release = idefloppy_release, 1601 .release = idefloppy_release,
1602 .ioctl = idefloppy_ioctl, 1602 .ioctl = idefloppy_ioctl,
1603 .getgeo = idefloppy_getgeo, 1603 .getgeo = idefloppy_getgeo,
1604 .media_changed = idefloppy_media_changed, 1604 .media_changed = idefloppy_media_changed,
1605 .revalidate_disk= idefloppy_revalidate_disk 1605 .revalidate_disk = idefloppy_revalidate_disk
1606}; 1606};
1607 1607
1608static int ide_floppy_probe(ide_drive_t *drive) 1608static int ide_floppy_probe(ide_drive_t *drive)
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 25fda0a3263f..a6073e248f45 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -33,7 +33,7 @@ static ssize_t store_add(struct class *cls, const char *buf, size_t n)
33 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3) 33 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
34 return -EINVAL; 34 return -EINVAL;
35 35
36 hwif = ide_find_port(base); 36 hwif = ide_find_port();
37 if (hwif == NULL) 37 if (hwif == NULL)
38 return -ENOENT; 38 return -ENOENT;
39 39
@@ -90,19 +90,45 @@ static int __init ide_generic_init(void)
90 int i; 90 int i;
91 91
92 for (i = 0; i < MAX_HWIFS; i++) { 92 for (i = 0; i < MAX_HWIFS; i++) {
93 ide_hwif_t *hwif = &ide_hwifs[i]; 93 ide_hwif_t *hwif;
94 unsigned long io_addr = ide_default_io_base(i); 94 unsigned long io_addr = ide_default_io_base(i);
95 hw_regs_t hw; 95 hw_regs_t hw;
96 96
97 if (hwif->chipset == ide_unknown && io_addr) { 97 idx[i] = 0xff;
98
99 if (io_addr) {
100 if (!request_region(io_addr, 8, DRV_NAME)) {
101 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
102 "not free.\n",
103 DRV_NAME, io_addr, io_addr + 7);
104 continue;
105 }
106
107 if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
108 printk(KERN_ERR "%s: I/O resource 0x%lX "
109 "not free.\n",
110 DRV_NAME, io_addr + 0x206);
111 release_region(io_addr, 8);
112 continue;
113 }
114
115 /*
116 * Skip probing if the corresponding
117 * slot is already occupied.
118 */
119 hwif = ide_find_port();
120 if (hwif == NULL || hwif->index != i) {
121 idx[i] = 0xff;
122 continue;
123 }
124
98 memset(&hw, 0, sizeof(hw)); 125 memset(&hw, 0, sizeof(hw));
99 ide_std_init_ports(&hw, io_addr, io_addr + 0x206); 126 ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
100 hw.irq = ide_default_irq(io_addr); 127 hw.irq = ide_default_irq(io_addr);
101 ide_init_port_hw(hwif, &hw); 128 ide_init_port_hw(hwif, &hw);
102 129
103 idx[i] = i; 130 idx[i] = i;
104 } else 131 }
105 idx[i] = 0xff;
106 } 132 }
107 133
108 ide_device_add_all(idx, NULL); 134 ide_device_add_all(idx, NULL);
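The generic probe loop now reserves both legacy I/O ranges before touching a slot and releases the first range if the second cannot be had. Distilled from the hunk above (messages trimmed), this is the claim/unwind idiom being added:

	if (!request_region(io_addr, 8, DRV_NAME))
		continue;				/* task-file ports busy: skip this slot */

	if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
		release_region(io_addr, 8);		/* undo the first reservation */
		continue;
	}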
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 31e5afadb7e9..3a2d8930d17f 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -218,7 +218,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
218 * we could be smarter and check for current xfer_speed 218 * we could be smarter and check for current xfer_speed
219 * in struct drive etc... 219 * in struct drive etc...
220 */ 220 */
221 if (drive->hwif->dma_host_set == NULL) 221 if (drive->hwif->dma_ops == NULL)
222 break; 222 break;
223 /* 223 /*
224 * TODO: respect ->using_dma setting 224 * TODO: respect ->using_dma setting
@@ -298,48 +298,43 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
298void ide_tf_read(ide_drive_t *drive, ide_task_t *task) 298void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
299{ 299{
300 ide_hwif_t *hwif = drive->hwif; 300 ide_hwif_t *hwif = drive->hwif;
301 struct ide_io_ports *io_ports = &hwif->io_ports;
301 struct ide_taskfile *tf = &task->tf; 302 struct ide_taskfile *tf = &task->tf;
302 303
303 if (task->tf_flags & IDE_TFLAG_IN_DATA) { 304 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
304 u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]); 305 u16 data = hwif->INW(io_ports->data_addr);
305 306
306 tf->data = data & 0xff; 307 tf->data = data & 0xff;
307 tf->hob_data = (data >> 8) & 0xff; 308 tf->hob_data = (data >> 8) & 0xff;
308 } 309 }
309 310
310 /* be sure we're looking at the low order bits */ 311 /* be sure we're looking at the low order bits */
311 hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]); 312 hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr);
312 313
313 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 314 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
314 tf->nsect = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]); 315 tf->nsect = hwif->INB(io_ports->nsect_addr);
315 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 316 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
316 tf->lbal = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]); 317 tf->lbal = hwif->INB(io_ports->lbal_addr);
317 if (task->tf_flags & IDE_TFLAG_IN_LBAM) 318 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
318 tf->lbam = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]); 319 tf->lbam = hwif->INB(io_ports->lbam_addr);
319 if (task->tf_flags & IDE_TFLAG_IN_LBAH) 320 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
320 tf->lbah = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]); 321 tf->lbah = hwif->INB(io_ports->lbah_addr);
321 if (task->tf_flags & IDE_TFLAG_IN_DEVICE) 322 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
322 tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]); 323 tf->device = hwif->INB(io_ports->device_addr);
323 324
324 if (task->tf_flags & IDE_TFLAG_LBA48) { 325 if (task->tf_flags & IDE_TFLAG_LBA48) {
325 hwif->OUTB(drive->ctl | 0x80, 326 hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr);
326 hwif->io_ports[IDE_CONTROL_OFFSET]);
327 327
328 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) 328 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
329 tf->hob_feature = 329 tf->hob_feature = hwif->INB(io_ports->feature_addr);
330 hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
331 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) 330 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
332 tf->hob_nsect = 331 tf->hob_nsect = hwif->INB(io_ports->nsect_addr);
333 hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
334 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) 332 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
335 tf->hob_lbal = 333 tf->hob_lbal = hwif->INB(io_ports->lbal_addr);
336 hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
337 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) 334 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
338 tf->hob_lbam = 335 tf->hob_lbam = hwif->INB(io_ports->lbam_addr);
339 hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
340 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) 336 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
341 tf->hob_lbah = 337 tf->hob_lbah = hwif->INB(io_ports->lbah_addr);
342 hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
343 } 338 }
344} 339}
345 340
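Taskfile register accesses switch from indexing hwif->io_ports[] with IDE_*_OFFSET constants to named members of struct ide_io_ports, cached once in a local pointer. The struct itself lives in <linux/ide.h> and is not part of this diff; the layout below is inferred from the accessors used in these hunks only (the real definition may order the fields differently and may alias read/write registers such as status/command through unions):

	/* Inferred sketch, not the authoritative definition. */
	struct ide_io_ports {
		unsigned long	data_addr;
		unsigned long	feature_addr;	/* written as the feature register */
		unsigned long	nsect_addr;
		unsigned long	lbal_addr;
		unsigned long	lbam_addr;
		unsigned long	lbah_addr;
		unsigned long	device_addr;
		unsigned long	status_addr;	/* read side */
		unsigned long	command_addr;	/* write side */
		unsigned long	ctl_addr;
	};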
@@ -454,7 +449,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
454 if (err == ABRT_ERR) { 449 if (err == ABRT_ERR) {
455 if (drive->select.b.lba && 450 if (drive->select.b.lba &&
456 /* some newer drives don't support WIN_SPECIFY */ 451 /* some newer drives don't support WIN_SPECIFY */
457 hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) == 452 hwif->INB(hwif->io_ports.command_addr) ==
458 WIN_SPECIFY) 453 WIN_SPECIFY)
459 return ide_stopped; 454 return ide_stopped;
460 } else if ((err & BAD_CRC) == BAD_CRC) { 455 } else if ((err & BAD_CRC) == BAD_CRC) {
@@ -507,8 +502,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
507 502
508 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 503 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
509 /* force an abort */ 504 /* force an abort */
510 hwif->OUTB(WIN_IDLEIMMEDIATE, 505 hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
511 hwif->io_ports[IDE_COMMAND_OFFSET]);
512 506
513 if (rq->errors >= ERROR_MAX) { 507 if (rq->errors >= ERROR_MAX) {
514 ide_kill_rq(drive, rq); 508 ide_kill_rq(drive, rq);
@@ -721,15 +715,12 @@ static ide_startstop_t do_special (ide_drive_t *drive)
721#endif 715#endif
722 if (s->b.set_tune) { 716 if (s->b.set_tune) {
723 ide_hwif_t *hwif = drive->hwif; 717 ide_hwif_t *hwif = drive->hwif;
718 const struct ide_port_ops *port_ops = hwif->port_ops;
724 u8 req_pio = drive->tune_req; 719 u8 req_pio = drive->tune_req;
725 720
726 s->b.set_tune = 0; 721 s->b.set_tune = 0;
727 722
728 if (set_pio_mode_abuse(drive->hwif, req_pio)) { 723 if (set_pio_mode_abuse(drive->hwif, req_pio)) {
729
730 if (hwif->set_pio_mode == NULL)
731 return ide_stopped;
732
733 /* 724 /*
734 * take ide_lock for drive->[no_]unmask/[no_]io_32bit 725 * take ide_lock for drive->[no_]unmask/[no_]io_32bit
735 */ 726 */
@@ -737,10 +728,10 @@ static ide_startstop_t do_special (ide_drive_t *drive)
737 unsigned long flags; 728 unsigned long flags;
738 729
739 spin_lock_irqsave(&ide_lock, flags); 730 spin_lock_irqsave(&ide_lock, flags);
740 hwif->set_pio_mode(drive, req_pio); 731 port_ops->set_pio_mode(drive, req_pio);
741 spin_unlock_irqrestore(&ide_lock, flags); 732 spin_unlock_irqrestore(&ide_lock, flags);
742 } else 733 } else
743 hwif->set_pio_mode(drive, req_pio); 734 port_ops->set_pio_mode(drive, req_pio);
744 } else { 735 } else {
745 int keep_dma = drive->using_dma; 736 int keep_dma = drive->using_dma;
746 737
@@ -1241,12 +1232,12 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1241 1232
1242 if (error < 0) { 1233 if (error < 0) {
1243 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1234 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
1244 (void)HWIF(drive)->ide_dma_end(drive); 1235 (void)hwif->dma_ops->dma_end(drive);
1245 ret = ide_error(drive, "dma timeout error", 1236 ret = ide_error(drive, "dma timeout error",
1246 ide_read_status(drive)); 1237 ide_read_status(drive));
1247 } else { 1238 } else {
1248 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1239 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1249 hwif->dma_timeout(drive); 1240 hwif->dma_ops->dma_timeout(drive);
1250 } 1241 }
1251 1242
1252 /* 1243 /*
@@ -1358,7 +1349,7 @@ void ide_timer_expiry (unsigned long data)
1358 startstop = handler(drive); 1349 startstop = handler(drive);
1359 } else if (drive_is_ready(drive)) { 1350 } else if (drive_is_ready(drive)) {
1360 if (drive->waiting_for_dma) 1351 if (drive->waiting_for_dma)
1361 hwgroup->hwif->dma_lost_irq(drive); 1352 hwif->dma_ops->dma_lost_irq(drive);
1362 (void)ide_ack_intr(hwif); 1353 (void)ide_ack_intr(hwif);
1363 printk(KERN_WARNING "%s: lost interrupt\n", drive->name); 1354 printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
1364 startstop = handler(drive); 1355 startstop = handler(drive);
@@ -1424,7 +1415,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1424 */ 1415 */
1425 do { 1416 do {
1426 if (hwif->irq == irq) { 1417 if (hwif->irq == irq) {
1427 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1418 stat = hwif->INB(hwif->io_ports.status_addr);
1428 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1419 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
1429 /* Try to not flood the console with msgs */ 1420 /* Try to not flood the console with msgs */
1430 static unsigned long last_msgtime, count; 1421 static unsigned long last_msgtime, count;
@@ -1514,7 +1505,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1514 * Whack the status register, just in case 1505 * Whack the status register, just in case
1515 * we have a leftover pending IRQ. 1506 * we have a leftover pending IRQ.
1516 */ 1507 */
1517 (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1508 (void) hwif->INB(hwif->io_ports.status_addr);
1518#endif /* CONFIG_BLK_DEV_IDEPCI */ 1509#endif /* CONFIG_BLK_DEV_IDEPCI */
1519 } 1510 }
1520 spin_unlock_irqrestore(&ide_lock, flags); 1511 spin_unlock_irqrestore(&ide_lock, flags);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 45944219eea0..5425d3038ec2 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -159,17 +159,20 @@ EXPORT_SYMBOL(default_hwif_mmiops);
159void SELECT_DRIVE (ide_drive_t *drive) 159void SELECT_DRIVE (ide_drive_t *drive)
160{ 160{
161 ide_hwif_t *hwif = drive->hwif; 161 ide_hwif_t *hwif = drive->hwif;
162 const struct ide_port_ops *port_ops = hwif->port_ops;
162 163
163 if (hwif->selectproc) 164 if (port_ops && port_ops->selectproc)
164 hwif->selectproc(drive); 165 port_ops->selectproc(drive);
165 166
166 hwif->OUTB(drive->select.all, hwif->io_ports[IDE_SELECT_OFFSET]); 167 hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
167} 168}
168 169
169void SELECT_MASK (ide_drive_t *drive, int mask) 170void SELECT_MASK (ide_drive_t *drive, int mask)
170{ 171{
171 if (HWIF(drive)->maskproc) 172 const struct ide_port_ops *port_ops = drive->hwif->port_ops;
172 HWIF(drive)->maskproc(drive, mask); 173
174 if (port_ops && port_ops->maskproc)
175 port_ops->maskproc(drive, mask);
173} 176}
174 177
175/* 178/*
@@ -191,24 +194,22 @@ static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
191 */ 194 */
192static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount) 195static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
193{ 196{
194 ide_hwif_t *hwif = HWIF(drive); 197 ide_hwif_t *hwif = drive->hwif;
195 u8 io_32bit = drive->io_32bit; 198 struct ide_io_ports *io_ports = &hwif->io_ports;
199 u8 io_32bit = drive->io_32bit;
196 200
197 if (io_32bit) { 201 if (io_32bit) {
198 if (io_32bit & 2) { 202 if (io_32bit & 2) {
199 unsigned long flags; 203 unsigned long flags;
200 204
201 local_irq_save(flags); 205 local_irq_save(flags);
202 ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]); 206 ata_vlb_sync(drive, io_ports->nsect_addr);
203 hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 207 hwif->INSL(io_ports->data_addr, buffer, wcount);
204 wcount);
205 local_irq_restore(flags); 208 local_irq_restore(flags);
206 } else 209 } else
207 hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 210 hwif->INSL(io_ports->data_addr, buffer, wcount);
208 wcount);
209 } else 211 } else
210 hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], buffer, 212 hwif->INSW(io_ports->data_addr, buffer, wcount << 1);
211 wcount << 1);
212} 213}
213 214
214/* 215/*
@@ -216,24 +217,22 @@ static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
216 */ 217 */
217static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount) 218static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
218{ 219{
219 ide_hwif_t *hwif = HWIF(drive); 220 ide_hwif_t *hwif = drive->hwif;
220 u8 io_32bit = drive->io_32bit; 221 struct ide_io_ports *io_ports = &hwif->io_ports;
222 u8 io_32bit = drive->io_32bit;
221 223
222 if (io_32bit) { 224 if (io_32bit) {
223 if (io_32bit & 2) { 225 if (io_32bit & 2) {
224 unsigned long flags; 226 unsigned long flags;
225 227
226 local_irq_save(flags); 228 local_irq_save(flags);
227 ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]); 229 ata_vlb_sync(drive, io_ports->nsect_addr);
228 hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 230 hwif->OUTSL(io_ports->data_addr, buffer, wcount);
229 wcount);
230 local_irq_restore(flags); 231 local_irq_restore(flags);
231 } else 232 } else
232 hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, 233 hwif->OUTSL(io_ports->data_addr, buffer, wcount);
233 wcount);
234 } else 234 } else
235 hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], buffer, 235 hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1);
236 wcount << 1);
237} 236}
238 237
239/* 238/*
@@ -252,14 +251,13 @@ static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
252#if defined(CONFIG_ATARI) || defined(CONFIG_Q40) 251#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
253 if (MACH_IS_ATARI || MACH_IS_Q40) { 252 if (MACH_IS_ATARI || MACH_IS_Q40) {
254 /* Atari has a byte-swapped IDE interface */ 253 /* Atari has a byte-swapped IDE interface */
255 insw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer, 254 insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
256 bytecount / 2);
257 return; 255 return;
258 } 256 }
259#endif /* CONFIG_ATARI || CONFIG_Q40 */ 257#endif /* CONFIG_ATARI || CONFIG_Q40 */
260 hwif->ata_input_data(drive, buffer, bytecount / 4); 258 hwif->ata_input_data(drive, buffer, bytecount / 4);
261 if ((bytecount & 0x03) >= 2) 259 if ((bytecount & 0x03) >= 2)
262 hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], 260 hwif->INSW(hwif->io_ports.data_addr,
263 (u8 *)buffer + (bytecount & ~0x03), 1); 261 (u8 *)buffer + (bytecount & ~0x03), 1);
264} 262}
265 263
@@ -271,14 +269,13 @@ static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
271#if defined(CONFIG_ATARI) || defined(CONFIG_Q40) 269#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
272 if (MACH_IS_ATARI || MACH_IS_Q40) { 270 if (MACH_IS_ATARI || MACH_IS_Q40) {
273 /* Atari has a byte-swapped IDE interface */ 271 /* Atari has a byte-swapped IDE interface */
274 outsw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer, 272 outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
275 bytecount / 2);
276 return; 273 return;
277 } 274 }
278#endif /* CONFIG_ATARI || CONFIG_Q40 */ 275#endif /* CONFIG_ATARI || CONFIG_Q40 */
279 hwif->ata_output_data(drive, buffer, bytecount / 4); 276 hwif->ata_output_data(drive, buffer, bytecount / 4);
280 if ((bytecount & 0x03) >= 2) 277 if ((bytecount & 0x03) >= 2)
281 hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], 278 hwif->OUTSW(hwif->io_ports.data_addr,
282 (u8 *)buffer + (bytecount & ~0x03), 1); 279 (u8 *)buffer + (bytecount & ~0x03), 1);
283} 280}
284 281
@@ -429,7 +426,7 @@ int drive_is_ready (ide_drive_t *drive)
429 u8 stat = 0; 426 u8 stat = 0;
430 427
431 if (drive->waiting_for_dma) 428 if (drive->waiting_for_dma)
432 return hwif->ide_dma_test_irq(drive); 429 return hwif->dma_ops->dma_test_irq(drive);
433 430
434#if 0 431#if 0
435 /* need to guarantee 400ns since last command was issued */ 432 /* need to guarantee 400ns since last command was issued */
@@ -442,7 +439,7 @@ int drive_is_ready (ide_drive_t *drive)
442 * an interrupt with another pci card/device. We make no assumptions 439 * an interrupt with another pci card/device. We make no assumptions
443 * about possible isa-pnp and pci-pnp issues yet. 440 * about possible isa-pnp and pci-pnp issues yet.
444 */ 441 */
445 if (hwif->io_ports[IDE_CONTROL_OFFSET]) 442 if (hwif->io_ports.ctl_addr)
446 stat = ide_read_altstatus(drive); 443 stat = ide_read_altstatus(drive);
447 else 444 else
448 /* Note: this may clear a pending IRQ!! */ 445 /* Note: this may clear a pending IRQ!! */
@@ -644,7 +641,7 @@ int ide_driveid_update(ide_drive_t *drive)
644 SELECT_MASK(drive, 1); 641 SELECT_MASK(drive, 1);
645 ide_set_irq(drive, 1); 642 ide_set_irq(drive, 1);
646 msleep(50); 643 msleep(50);
647 hwif->OUTB(WIN_IDENTIFY, hwif->io_ports[IDE_COMMAND_OFFSET]); 644 hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr);
648 timeout = jiffies + WAIT_WORSTCASE; 645 timeout = jiffies + WAIT_WORSTCASE;
649 do { 646 do {
650 if (time_after(jiffies, timeout)) { 647 if (time_after(jiffies, timeout)) {
@@ -693,6 +690,7 @@ int ide_driveid_update(ide_drive_t *drive)
693int ide_config_drive_speed(ide_drive_t *drive, u8 speed) 690int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
694{ 691{
695 ide_hwif_t *hwif = drive->hwif; 692 ide_hwif_t *hwif = drive->hwif;
693 struct ide_io_ports *io_ports = &hwif->io_ports;
696 int error = 0; 694 int error = 0;
697 u8 stat; 695 u8 stat;
698 696
@@ -700,8 +698,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
700// msleep(50); 698// msleep(50);
701 699
702#ifdef CONFIG_BLK_DEV_IDEDMA 700#ifdef CONFIG_BLK_DEV_IDEDMA
703 if (hwif->dma_host_set) /* check if host supports DMA */ 701 if (hwif->dma_ops) /* check if host supports DMA */
704 hwif->dma_host_set(drive, 0); 702 hwif->dma_ops->dma_host_set(drive, 0);
705#endif 703#endif
706 704
707 /* Skip setting PIO flow-control modes on pre-EIDE drives */ 705 /* Skip setting PIO flow-control modes on pre-EIDE drives */
@@ -731,10 +729,9 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
731 SELECT_MASK(drive, 0); 729 SELECT_MASK(drive, 0);
732 udelay(1); 730 udelay(1);
733 ide_set_irq(drive, 0); 731 ide_set_irq(drive, 0);
734 hwif->OUTB(speed, hwif->io_ports[IDE_NSECTOR_OFFSET]); 732 hwif->OUTB(speed, io_ports->nsect_addr);
735 hwif->OUTB(SETFEATURES_XFER, hwif->io_ports[IDE_FEATURE_OFFSET]); 733 hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
736 hwif->OUTBSYNC(drive, WIN_SETFEATURES, 734 hwif->OUTBSYNC(drive, WIN_SETFEATURES, io_ports->command_addr);
737 hwif->io_ports[IDE_COMMAND_OFFSET]);
738 if (drive->quirk_list == 2) 735 if (drive->quirk_list == 2)
739 ide_set_irq(drive, 1); 736 ide_set_irq(drive, 1);
740 737
@@ -759,8 +756,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
759#ifdef CONFIG_BLK_DEV_IDEDMA 756#ifdef CONFIG_BLK_DEV_IDEDMA
760 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) && 757 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
761 drive->using_dma) 758 drive->using_dma)
762 hwif->dma_host_set(drive, 1); 759 hwif->dma_ops->dma_host_set(drive, 1);
763 else if (hwif->dma_host_set) /* check if host supports DMA */ 760 else if (hwif->dma_ops) /* check if host supports DMA */
764 ide_dma_off_quietly(drive); 761 ide_dma_off_quietly(drive);
765#endif 762#endif
766 763
@@ -842,7 +839,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
842 839
843 spin_lock_irqsave(&ide_lock, flags); 840 spin_lock_irqsave(&ide_lock, flags);
844 __ide_set_handler(drive, handler, timeout, expiry); 841 __ide_set_handler(drive, handler, timeout, expiry);
845 hwif->OUTBSYNC(drive, cmd, hwif->io_ports[IDE_COMMAND_OFFSET]); 842 hwif->OUTBSYNC(drive, cmd, hwif->io_ports.command_addr);
846 /* 843 /*
847 * Drive takes 400nS to respond, we must avoid the IRQ being 844 * Drive takes 400nS to respond, we must avoid the IRQ being
848 * serviced before that. 845 * serviced before that.
@@ -905,10 +902,11 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
905{ 902{
906 ide_hwgroup_t *hwgroup = HWGROUP(drive); 903 ide_hwgroup_t *hwgroup = HWGROUP(drive);
907 ide_hwif_t *hwif = HWIF(drive); 904 ide_hwif_t *hwif = HWIF(drive);
905 const struct ide_port_ops *port_ops = hwif->port_ops;
908 u8 tmp; 906 u8 tmp;
909 907
910 if (hwif->reset_poll != NULL) { 908 if (port_ops && port_ops->reset_poll) {
911 if (hwif->reset_poll(drive)) { 909 if (port_ops->reset_poll(drive)) {
912 printk(KERN_ERR "%s: host reset_poll failure for %s.\n", 910 printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
913 hwif->name, drive->name); 911 hwif->name, drive->name);
914 return ide_stopped; 912 return ide_stopped;
@@ -974,6 +972,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
974 972
975static void pre_reset(ide_drive_t *drive) 973static void pre_reset(ide_drive_t *drive)
976{ 974{
975 const struct ide_port_ops *port_ops = drive->hwif->port_ops;
976
977 if (drive->media == ide_disk) 977 if (drive->media == ide_disk)
978 ide_disk_pre_reset(drive); 978 ide_disk_pre_reset(drive);
979 else 979 else
@@ -994,8 +994,8 @@ static void pre_reset(ide_drive_t *drive)
994 return; 994 return;
995 } 995 }
996 996
997 if (HWIF(drive)->pre_reset != NULL) 997 if (port_ops && port_ops->pre_reset)
998 HWIF(drive)->pre_reset(drive); 998 port_ops->pre_reset(drive);
999 999
1000 if (drive->current_speed != 0xff) 1000 if (drive->current_speed != 0xff)
1001 drive->desired_speed = drive->current_speed; 1001 drive->desired_speed = drive->current_speed;
@@ -1023,12 +1023,16 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1023 unsigned long flags; 1023 unsigned long flags;
1024 ide_hwif_t *hwif; 1024 ide_hwif_t *hwif;
1025 ide_hwgroup_t *hwgroup; 1025 ide_hwgroup_t *hwgroup;
1026 struct ide_io_ports *io_ports;
1027 const struct ide_port_ops *port_ops;
1026 u8 ctl; 1028 u8 ctl;
1027 1029
1028 spin_lock_irqsave(&ide_lock, flags); 1030 spin_lock_irqsave(&ide_lock, flags);
1029 hwif = HWIF(drive); 1031 hwif = HWIF(drive);
1030 hwgroup = HWGROUP(drive); 1032 hwgroup = HWGROUP(drive);
1031 1033
1034 io_ports = &hwif->io_ports;
1035
1032 /* We must not reset with running handlers */ 1036 /* We must not reset with running handlers */
1033 BUG_ON(hwgroup->handler != NULL); 1037 BUG_ON(hwgroup->handler != NULL);
1034 1038
@@ -1038,8 +1042,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1038 pre_reset(drive); 1042 pre_reset(drive);
1039 SELECT_DRIVE(drive); 1043 SELECT_DRIVE(drive);
1040 udelay (20); 1044 udelay (20);
1041 hwif->OUTBSYNC(drive, WIN_SRST, 1045 hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
1042 hwif->io_ports[IDE_COMMAND_OFFSET]);
1043 ndelay(400); 1046 ndelay(400);
1044 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1047 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1045 hwgroup->polling = 1; 1048 hwgroup->polling = 1;
@@ -1055,7 +1058,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1055 for (unit = 0; unit < MAX_DRIVES; ++unit) 1058 for (unit = 0; unit < MAX_DRIVES; ++unit)
1056 pre_reset(&hwif->drives[unit]); 1059 pre_reset(&hwif->drives[unit]);
1057 1060
1058 if (hwif->io_ports[IDE_CONTROL_OFFSET] == 0) { 1061 if (io_ports->ctl_addr == 0) {
1059 spin_unlock_irqrestore(&ide_lock, flags); 1062 spin_unlock_irqrestore(&ide_lock, flags);
1060 return ide_stopped; 1063 return ide_stopped;
1061 } 1064 }
@@ -1070,14 +1073,14 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1070 * recover from reset very quickly, saving us the first 50ms wait time. 1073 * recover from reset very quickly, saving us the first 50ms wait time.
1071 */ 1074 */
1072 /* set SRST and nIEN */ 1075 /* set SRST and nIEN */
1073 hwif->OUTBSYNC(drive, drive->ctl|6, hwif->io_ports[IDE_CONTROL_OFFSET]); 1076 hwif->OUTBSYNC(drive, drive->ctl|6, io_ports->ctl_addr);
1074 /* more than enough time */ 1077 /* more than enough time */
1075 udelay(10); 1078 udelay(10);
1076 if (drive->quirk_list == 2) 1079 if (drive->quirk_list == 2)
1077 ctl = drive->ctl; /* clear SRST and nIEN */ 1080 ctl = drive->ctl; /* clear SRST and nIEN */
1078 else 1081 else
1079 ctl = drive->ctl | 2; /* clear SRST, leave nIEN */ 1082 ctl = drive->ctl | 2; /* clear SRST, leave nIEN */
1080 hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]); 1083 hwif->OUTBSYNC(drive, ctl, io_ports->ctl_addr);
1081 /* more than enough time */ 1084 /* more than enough time */
1082 udelay(10); 1085 udelay(10);
1083 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1086 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1089,8 +1092,9 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1089 * state when the disks are reset this way. At least, the Winbond 1092 * state when the disks are reset this way. At least, the Winbond
1090 * 553 documentation says that 1093 * 553 documentation says that
1091 */ 1094 */
1092 if (hwif->resetproc) 1095 port_ops = hwif->port_ops;
1093 hwif->resetproc(drive); 1096 if (port_ops && port_ops->resetproc)
1097 port_ops->resetproc(drive);
1094 1098
1095 spin_unlock_irqrestore(&ide_lock, flags); 1099 spin_unlock_irqrestore(&ide_lock, flags);
1096 return ide_started; 1100 return ide_started;
@@ -1121,7 +1125,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1121 * about locking issues (2.5 work ?). 1125 * about locking issues (2.5 work ?).
1122 */ 1126 */
1123 mdelay(1); 1127 mdelay(1);
1124 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1128 stat = hwif->INB(hwif->io_ports.status_addr);
1125 if ((stat & BUSY_STAT) == 0) 1129 if ((stat & BUSY_STAT) == 0)
1126 return 0; 1130 return 0;
1127 /* 1131 /*
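The ide-iops.c hunks above replace direct hwif callbacks (reset_poll, pre_reset, resetproc, dma_host_set) with calls routed through the new port_ops/dma_ops tables, each call guarded against a missing table or a missing hook. Below is a minimal userspace sketch of that guarded ops-table pattern; the structure and function names are illustrative stand-ins, not the kernel's real definitions.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the kernel's per-host ops table. */
struct port_ops {
	void (*pre_reset)(int drive);
	void (*resetproc)(int drive);
};

struct port {
	const char *name;
	const struct port_ops *port_ops;	/* may be NULL for simple hosts */
};

static void sample_pre_reset(int drive)
{
	printf("pre_reset(drive %d)\n", drive);
}

static const struct port_ops sample_ops = {
	.pre_reset = sample_pre_reset,
	/* .resetproc left NULL: not every host implements every hook */
};

static void do_reset(const struct port *p, int drive)
{
	const struct port_ops *ops = p->port_ops;

	/* Guard both the table and the individual hook, as the patch does. */
	if (ops && ops->pre_reset)
		ops->pre_reset(drive);

	printf("%s: resetting drive %d\n", p->name, drive);

	if (ops && ops->resetproc)
		ops->resetproc(drive);
}

int main(void)
{
	struct port with_ops = { .name = "ide0", .port_ops = &sample_ops };
	struct port plain    = { .name = "ide1", .port_ops = NULL };

	do_reset(&with_ops, 0);
	do_reset(&plain, 1);
	return 0;
}

The double check matters because a host may register no ops table at all, or a table that fills in only some of the hooks.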
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 7031a8dcf692..6f04ea3e93a8 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -85,7 +85,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
85 mode = XFER_PIO_4; 85 mode = XFER_PIO_4;
86 } 86 }
87 87
88// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed); 88/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
89 89
90 return min(speed, mode); 90 return min(speed, mode);
91} 91}
@@ -274,16 +274,6 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
274 if (overridden) 274 if (overridden)
275 printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n", 275 printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
276 drive->name); 276 drive->name);
277
278 /*
279 * Conservative "downgrade" for all pre-ATA2 drives
280 */
281 if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_DOWNGRADE) == 0 &&
282 pio_mode && pio_mode < 4) {
283 pio_mode--;
284 printk(KERN_INFO "%s: applying conservative "
285 "PIO \"downgrade\"\n", drive->name);
286 }
287 } 277 }
288 278
289 if (pio_mode > max_mode) 279 if (pio_mode > max_mode)
@@ -298,9 +288,11 @@ EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
298void ide_set_pio(ide_drive_t *drive, u8 req_pio) 288void ide_set_pio(ide_drive_t *drive, u8 req_pio)
299{ 289{
300 ide_hwif_t *hwif = drive->hwif; 290 ide_hwif_t *hwif = drive->hwif;
291 const struct ide_port_ops *port_ops = hwif->port_ops;
301 u8 host_pio, pio; 292 u8 host_pio, pio;
302 293
303 if (hwif->set_pio_mode == NULL) 294 if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
295 (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
304 return; 296 return;
305 297
306 BUG_ON(hwif->pio_mask == 0x00); 298 BUG_ON(hwif->pio_mask == 0x00);
@@ -352,26 +344,30 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
352int ide_set_pio_mode(ide_drive_t *drive, const u8 mode) 344int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
353{ 345{
354 ide_hwif_t *hwif = drive->hwif; 346 ide_hwif_t *hwif = drive->hwif;
347 const struct ide_port_ops *port_ops = hwif->port_ops;
348
349 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
350 return 0;
355 351
356 if (hwif->set_pio_mode == NULL) 352 if (port_ops == NULL || port_ops->set_pio_mode == NULL)
357 return -1; 353 return -1;
358 354
359 /* 355 /*
360 * TODO: temporary hack for some legacy host drivers that didn't 356 * TODO: temporary hack for some legacy host drivers that didn't
361 * set transfer mode on the device in ->set_pio_mode method... 357 * set transfer mode on the device in ->set_pio_mode method...
362 */ 358 */
363 if (hwif->set_dma_mode == NULL) { 359 if (port_ops->set_dma_mode == NULL) {
364 hwif->set_pio_mode(drive, mode - XFER_PIO_0); 360 port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
365 return 0; 361 return 0;
366 } 362 }
367 363
368 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 364 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
369 if (ide_config_drive_speed(drive, mode)) 365 if (ide_config_drive_speed(drive, mode))
370 return -1; 366 return -1;
371 hwif->set_pio_mode(drive, mode - XFER_PIO_0); 367 port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
372 return 0; 368 return 0;
373 } else { 369 } else {
374 hwif->set_pio_mode(drive, mode - XFER_PIO_0); 370 port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
375 return ide_config_drive_speed(drive, mode); 371 return ide_config_drive_speed(drive, mode);
376 } 372 }
377} 373}
@@ -379,17 +375,21 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
379int ide_set_dma_mode(ide_drive_t *drive, const u8 mode) 375int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
380{ 376{
381 ide_hwif_t *hwif = drive->hwif; 377 ide_hwif_t *hwif = drive->hwif;
378 const struct ide_port_ops *port_ops = hwif->port_ops;
379
380 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
381 return 0;
382 382
383 if (hwif->set_dma_mode == NULL) 383 if (port_ops == NULL || port_ops->set_dma_mode == NULL)
384 return -1; 384 return -1;
385 385
386 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 386 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
387 if (ide_config_drive_speed(drive, mode)) 387 if (ide_config_drive_speed(drive, mode))
388 return -1; 388 return -1;
389 hwif->set_dma_mode(drive, mode); 389 port_ops->set_dma_mode(drive, mode);
390 return 0; 390 return 0;
391 } else { 391 } else {
392 hwif->set_dma_mode(drive, mode); 392 port_ops->set_dma_mode(drive, mode);
393 return ide_config_drive_speed(drive, mode); 393 return ide_config_drive_speed(drive, mode);
394 } 394 }
395} 395}
@@ -409,8 +409,10 @@ EXPORT_SYMBOL_GPL(ide_set_dma_mode);
409int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) 409int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
410{ 410{
411 ide_hwif_t *hwif = drive->hwif; 411 ide_hwif_t *hwif = drive->hwif;
412 const struct ide_port_ops *port_ops = hwif->port_ops;
412 413
413 if (hwif->set_dma_mode == NULL) 414 if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
415 (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
414 return -1; 416 return -1;
415 417
416 rate = ide_rate_filter(drive, rate); 418 rate = ide_rate_filter(drive, rate);
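In ide-lib.c the mode-setting helpers now return early when the host carries IDE_HFLAG_NO_SET_MODE, refuse to run without a port_ops->set_pio_mode/set_dma_mode hook, and keep the IDE_HFLAG_POST_SET_MODE ordering (program the drive first, then the host timings). A hedged sketch of that flag-plus-hook dispatch follows; the flag bits, struct layout and helper names here are invented for illustration.

#include <stdio.h>
#include <stddef.h>

/* Illustrative flag bits, not the kernel's real IDE_HFLAG_* values. */
#define HFLAG_NO_SET_MODE	(1u << 0)
#define HFLAG_POST_SET_MODE	(1u << 1)

struct host {
	unsigned int host_flags;
	void (*set_pio_mode)(int drive, unsigned char pio);	/* may be NULL */
};

static int config_drive_speed(int drive, unsigned char mode)
{
	printf("drive %d: programming transfer mode 0x%02x\n", drive, mode);
	return 0;	/* 0 == success */
}

static int set_pio(struct host *h, int drive, unsigned char mode)
{
	if (h->host_flags & HFLAG_NO_SET_MODE)
		return 0;		/* host forbids mode changes */

	if (h->set_pio_mode == NULL)
		return -1;		/* no hook registered */

	if (h->host_flags & HFLAG_POST_SET_MODE) {
		if (config_drive_speed(drive, mode))	/* drive first */
			return -1;
		h->set_pio_mode(drive, mode);		/* then the host */
		return 0;
	}

	h->set_pio_mode(drive, mode);			/* host first */
	return config_drive_speed(drive, mode);		/* then the drive */
}

static void host_hook(int drive, unsigned char pio)
{
	printf("drive %d: host timings set for PIO %u\n", drive, pio);
}

int main(void)
{
	struct host h = { .host_flags = HFLAG_POST_SET_MODE,
			  .set_pio_mode = host_hook };

	return set_pio(&h, 0, 2) ? 1 : 0;
}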
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 34c2ad36ce54..6a8953f68e9f 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -11,34 +11,52 @@
11 * 11 *
12 * You should have received a copy of the GNU General Public License 12 * You should have received a copy of the GNU General Public License
13 * (for example /usr/src/linux/COPYING); if not, write to the Free 13 * (for example /usr/src/linux/COPYING); if not, write to the Free
14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */ 15 */
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/pnp.h> 18#include <linux/pnp.h>
19#include <linux/ide.h> 19#include <linux/ide.h>
20 20
21#define DRV_NAME "ide-pnp"
22
21/* Add your devices here :)) */ 23/* Add your devices here :)) */
22static struct pnp_device_id idepnp_devices[] = { 24static struct pnp_device_id idepnp_devices[] = {
23 /* Generic ESDI/IDE/ATA compatible hard disk controller */ 25 /* Generic ESDI/IDE/ATA compatible hard disk controller */
24 {.id = "PNP0600", .driver_data = 0}, 26 {.id = "PNP0600", .driver_data = 0},
25 {.id = ""} 27 {.id = ""}
26}; 28};
27 29
28static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id) 30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
29{ 31{
30 hw_regs_t hw; 32 hw_regs_t hw;
31 ide_hwif_t *hwif; 33 ide_hwif_t *hwif;
34 unsigned long base, ctl;
32 35
33 if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) 36 if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
34 return -1; 37 return -1;
35 38
39 base = pnp_port_start(dev, 0);
40 ctl = pnp_port_start(dev, 1);
41
42 if (!request_region(base, 8, DRV_NAME)) {
43 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
44 DRV_NAME, base, base + 7);
45 return -EBUSY;
46 }
47
48 if (!request_region(ctl, 1, DRV_NAME)) {
49 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
50 DRV_NAME, ctl);
51 release_region(base, 8);
52 return -EBUSY;
53 }
54
36 memset(&hw, 0, sizeof(hw)); 55 memset(&hw, 0, sizeof(hw));
37 ide_std_init_ports(&hw, pnp_port_start(dev, 0), 56 ide_std_init_ports(&hw, base, ctl);
38 pnp_port_start(dev, 1));
39 hw.irq = pnp_irq(dev, 0); 57 hw.irq = pnp_irq(dev, 0);
40 58
41 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 59 hwif = ide_find_port();
42 if (hwif) { 60 if (hwif) {
43 u8 index = hwif->index; 61 u8 index = hwif->index;
44 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 62 u8 idx[4] = { index, 0xff, 0xff, 0xff };
@@ -47,24 +65,27 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
47 ide_init_port_hw(hwif, &hw); 65 ide_init_port_hw(hwif, &hw);
48 66
49 printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); 67 printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
50 pnp_set_drvdata(dev,hwif); 68 pnp_set_drvdata(dev, hwif);
51 69
52 ide_device_add(idx, NULL); 70 ide_device_add(idx, NULL);
53 71
54 return 0; 72 return 0;
55 } 73 }
56 74
75 release_region(ctl, 1);
76 release_region(base, 8);
77
57 return -1; 78 return -1;
58} 79}
59 80
60static void idepnp_remove(struct pnp_dev * dev) 81static void idepnp_remove(struct pnp_dev *dev)
61{ 82{
62 ide_hwif_t *hwif = pnp_get_drvdata(dev); 83 ide_hwif_t *hwif = pnp_get_drvdata(dev);
63 84
64 if (hwif) 85 ide_unregister(hwif);
65 ide_unregister(hwif->index); 86
66 else 87 release_region(pnp_port_start(dev, 1), 1);
67 printk(KERN_ERR "idepnp: Unable to remove device, please report.\n"); 88 release_region(pnp_port_start(dev, 0), 8);
68} 89}
69 90
70static struct pnp_driver idepnp_driver = { 91static struct pnp_driver idepnp_driver = {
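The rewritten idepnp_probe() above claims the 8-byte command block and the single control port with request_region() before touching the hardware, releases them in reverse order on every failure path, and has idepnp_remove() undo the same claims. request_region() exists only inside the kernel, so the sketch below reproduces just that acquire-in-order / release-in-reverse error handling with ordinary stdio handles; the file paths are arbitrary placeholders.

#include <stdio.h>

/*
 * Acquire two resources; if the second acquisition fails, release the
 * first before returning, and tear both down in reverse order on the
 * normal path as well.
 */
static int probe(const char *base_path, const char *ctl_path)
{
	FILE *base, *ctl;

	base = fopen(base_path, "r");
	if (!base) {
		fprintf(stderr, "probe: %s not available\n", base_path);
		return -1;
	}

	ctl = fopen(ctl_path, "r");
	if (!ctl) {
		fprintf(stderr, "probe: %s not available\n", ctl_path);
		fclose(base);		/* undo the first claim */
		return -1;
	}

	/* ... device setup would happen here ... */

	/* the idepnp_remove() analogue: release in reverse order */
	fclose(ctl);
	fclose(base);
	return 0;
}

int main(void)
{
	return probe("/etc/hostname", "/etc/hosts") ? 1 : 0;
}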
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6a196c27b0aa..862f02603f9b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -264,6 +264,7 @@ err_misc:
264static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) 264static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
265{ 265{
266 ide_hwif_t *hwif = HWIF(drive); 266 ide_hwif_t *hwif = HWIF(drive);
267 struct ide_io_ports *io_ports = &hwif->io_ports;
267 int use_altstatus = 0, rc; 268 int use_altstatus = 0, rc;
268 unsigned long timeout; 269 unsigned long timeout;
269 u8 s = 0, a = 0; 270 u8 s = 0, a = 0;
@@ -271,7 +272,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
271 /* take a deep breath */ 272 /* take a deep breath */
272 msleep(50); 273 msleep(50);
273 274
274 if (hwif->io_ports[IDE_CONTROL_OFFSET]) { 275 if (io_ports->ctl_addr) {
275 a = ide_read_altstatus(drive); 276 a = ide_read_altstatus(drive);
276 s = ide_read_status(drive); 277 s = ide_read_status(drive);
277 if ((a ^ s) & ~INDEX_STAT) 278 if ((a ^ s) & ~INDEX_STAT)
@@ -289,10 +290,10 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
289 */ 290 */
290 if ((cmd == WIN_PIDENTIFY)) 291 if ((cmd == WIN_PIDENTIFY))
291 /* disable dma & overlap */ 292 /* disable dma & overlap */
292 hwif->OUTB(0, hwif->io_ports[IDE_FEATURE_OFFSET]); 293 hwif->OUTB(0, io_ports->feature_addr);
293 294
294 /* ask drive for ID */ 295 /* ask drive for ID */
295 hwif->OUTB(cmd, hwif->io_ports[IDE_COMMAND_OFFSET]); 296 hwif->OUTB(cmd, io_ports->command_addr);
296 297
297 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 298 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
298 timeout += jiffies; 299 timeout += jiffies;
@@ -353,7 +354,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
353 * interrupts during the identify-phase that 354 * interrupts during the identify-phase that
354 * the irq handler isn't expecting. 355 * the irq handler isn't expecting.
355 */ 356 */
356 if (hwif->io_ports[IDE_CONTROL_OFFSET]) { 357 if (hwif->io_ports.ctl_addr) {
357 if (!hwif->irq) { 358 if (!hwif->irq) {
358 autoprobe = 1; 359 autoprobe = 1;
359 cookie = probe_irq_on(); 360 cookie = probe_irq_on();
@@ -393,7 +394,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
393 394
394 do { 395 do {
395 msleep(50); 396 msleep(50);
396 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 397 stat = hwif->INB(hwif->io_ports.status_addr);
397 if ((stat & BUSY_STAT) == 0) 398 if ((stat & BUSY_STAT) == 0)
398 return 0; 399 return 0;
399 } while (time_before(jiffies, timeout)); 400 } while (time_before(jiffies, timeout));
@@ -425,6 +426,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
425static int do_probe (ide_drive_t *drive, u8 cmd) 426static int do_probe (ide_drive_t *drive, u8 cmd)
426{ 427{
427 ide_hwif_t *hwif = HWIF(drive); 428 ide_hwif_t *hwif = HWIF(drive);
429 struct ide_io_ports *io_ports = &hwif->io_ports;
428 int rc; 430 int rc;
429 u8 stat; 431 u8 stat;
430 432
@@ -445,7 +447,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
445 msleep(50); 447 msleep(50);
446 SELECT_DRIVE(drive); 448 SELECT_DRIVE(drive);
447 msleep(50); 449 msleep(50);
448 if (hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]) != drive->select.all && 450 if (hwif->INB(io_ports->device_addr) != drive->select.all &&
449 !drive->present) { 451 !drive->present) {
450 if (drive->select.b.unit != 0) { 452 if (drive->select.b.unit != 0) {
451 /* exit with drive0 selected */ 453 /* exit with drive0 selected */
@@ -472,17 +474,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
472 if (stat == (BUSY_STAT | READY_STAT)) 474 if (stat == (BUSY_STAT | READY_STAT))
473 return 4; 475 return 4;
474 476
475 if ((rc == 1 && cmd == WIN_PIDENTIFY) && 477 if (rc == 1 && cmd == WIN_PIDENTIFY) {
476 ((drive->autotune == IDE_TUNE_DEFAULT) ||
477 (drive->autotune == IDE_TUNE_AUTO))) {
478 printk(KERN_ERR "%s: no response (status = 0x%02x), " 478 printk(KERN_ERR "%s: no response (status = 0x%02x), "
479 "resetting drive\n", drive->name, stat); 479 "resetting drive\n", drive->name, stat);
480 msleep(50); 480 msleep(50);
481 hwif->OUTB(drive->select.all, 481 hwif->OUTB(drive->select.all, io_ports->device_addr);
482 hwif->io_ports[IDE_SELECT_OFFSET]);
483 msleep(50); 482 msleep(50);
484 hwif->OUTB(WIN_SRST, 483 hwif->OUTB(WIN_SRST, io_ports->command_addr);
485 hwif->io_ports[IDE_COMMAND_OFFSET]);
486 (void)ide_busy_sleep(hwif); 484 (void)ide_busy_sleep(hwif);
487 rc = try_to_identify(drive, cmd); 485 rc = try_to_identify(drive, cmd);
488 } 486 }
@@ -518,7 +516,7 @@ static void enable_nest (ide_drive_t *drive)
518 printk("%s: enabling %s -- ", hwif->name, drive->id->model); 516 printk("%s: enabling %s -- ", hwif->name, drive->id->model);
519 SELECT_DRIVE(drive); 517 SELECT_DRIVE(drive);
520 msleep(50); 518 msleep(50);
521 hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports[IDE_COMMAND_OFFSET]); 519 hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
522 520
523 if (ide_busy_sleep(hwif)) { 521 if (ide_busy_sleep(hwif)) {
524 printk(KERN_CONT "failed (timeout)\n"); 522 printk(KERN_CONT "failed (timeout)\n");
@@ -644,7 +642,7 @@ static int ide_register_port(ide_hwif_t *hwif)
644 ret = device_register(&hwif->gendev); 642 ret = device_register(&hwif->gendev);
645 if (ret < 0) { 643 if (ret < 0) {
646 printk(KERN_WARNING "IDE: %s: device_register error: %d\n", 644 printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
647 __FUNCTION__, ret); 645 __func__, ret);
648 goto out; 646 goto out;
649 } 647 }
650 648
@@ -773,8 +771,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
773 771
774 BUG_ON(hwif->present); 772 BUG_ON(hwif->present);
775 773
776 if (hwif->noprobe || 774 if (hwif->drives[0].noprobe && hwif->drives[1].noprobe)
777 (hwif->drives[0].noprobe && hwif->drives[1].noprobe))
778 return -EACCES; 775 return -EACCES;
779 776
780 /* 777 /*
@@ -801,14 +798,9 @@ static int ide_probe_port(ide_hwif_t *hwif)
801 if (drive->present) 798 if (drive->present)
802 rc = 0; 799 rc = 0;
803 } 800 }
804 if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) { 801
805 printk(KERN_WARNING "%s: reset\n", hwif->name);
806 hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
807 udelay(10);
808 hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
809 (void)ide_busy_sleep(hwif);
810 }
811 local_irq_restore(flags); 802 local_irq_restore(flags);
803
812 /* 804 /*
813 * Use cached IRQ number. It might be (and is...) changed by probe 805 * Use cached IRQ number. It might be (and is...) changed by probe
814 * code above 806 * code above
@@ -821,29 +813,25 @@ static int ide_probe_port(ide_hwif_t *hwif)
821 813
822static void ide_port_tune_devices(ide_hwif_t *hwif) 814static void ide_port_tune_devices(ide_hwif_t *hwif)
823{ 815{
816 const struct ide_port_ops *port_ops = hwif->port_ops;
824 int unit; 817 int unit;
825 818
826 for (unit = 0; unit < MAX_DRIVES; unit++) { 819 for (unit = 0; unit < MAX_DRIVES; unit++) {
827 ide_drive_t *drive = &hwif->drives[unit]; 820 ide_drive_t *drive = &hwif->drives[unit];
828 821
829 if (drive->present && hwif->quirkproc) 822 if (drive->present && port_ops && port_ops->quirkproc)
830 hwif->quirkproc(drive); 823 port_ops->quirkproc(drive);
831 } 824 }
832 825
833 for (unit = 0; unit < MAX_DRIVES; ++unit) { 826 for (unit = 0; unit < MAX_DRIVES; ++unit) {
834 ide_drive_t *drive = &hwif->drives[unit]; 827 ide_drive_t *drive = &hwif->drives[unit];
835 828
836 if (drive->present) { 829 if (drive->present) {
837 if (drive->autotune == IDE_TUNE_AUTO) 830 ide_set_max_pio(drive);
838 ide_set_max_pio(drive);
839
840 if (drive->autotune != IDE_TUNE_DEFAULT &&
841 drive->autotune != IDE_TUNE_AUTO)
842 continue;
843 831
844 drive->nice1 = 1; 832 drive->nice1 = 1;
845 833
846 if (hwif->dma_host_set) 834 if (hwif->dma_ops)
847 ide_set_dma(drive); 835 ide_set_dma(drive);
848 } 836 }
849 } 837 }
@@ -994,6 +982,7 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
994 */ 982 */
995static int init_irq (ide_hwif_t *hwif) 983static int init_irq (ide_hwif_t *hwif)
996{ 984{
985 struct ide_io_ports *io_ports = &hwif->io_ports;
997 unsigned int index; 986 unsigned int index;
998 ide_hwgroup_t *hwgroup; 987 ide_hwgroup_t *hwgroup;
999 ide_hwif_t *match = NULL; 988 ide_hwif_t *match = NULL;
@@ -1077,9 +1066,9 @@ static int init_irq (ide_hwif_t *hwif)
1077 if (IDE_CHIPSET_IS_PCI(hwif->chipset)) 1066 if (IDE_CHIPSET_IS_PCI(hwif->chipset))
1078 sa = IRQF_SHARED; 1067 sa = IRQF_SHARED;
1079 1068
1080 if (hwif->io_ports[IDE_CONTROL_OFFSET]) 1069 if (io_ports->ctl_addr)
1081 /* clear nIEN */ 1070 /* clear nIEN */
1082 hwif->OUTB(0x08, hwif->io_ports[IDE_CONTROL_OFFSET]); 1071 hwif->OUTB(0x08, io_ports->ctl_addr);
1083 1072
1084 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) 1073 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
1085 goto out_unlink; 1074 goto out_unlink;
@@ -1095,12 +1084,11 @@ static int init_irq (ide_hwif_t *hwif)
1095 1084
1096#if !defined(__mc68000__) 1085#if !defined(__mc68000__)
1097 printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, 1086 printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
1098 hwif->io_ports[IDE_DATA_OFFSET], 1087 io_ports->data_addr, io_ports->status_addr,
1099 hwif->io_ports[IDE_DATA_OFFSET]+7, 1088 io_ports->ctl_addr, hwif->irq);
1100 hwif->io_ports[IDE_CONTROL_OFFSET], hwif->irq);
1101#else 1089#else
1102 printk("%s at 0x%08lx on irq %d", hwif->name, 1090 printk("%s at 0x%08lx on irq %d", hwif->name,
1103 hwif->io_ports[IDE_DATA_OFFSET], hwif->irq); 1091 io_ports->data_addr, hwif->irq);
1104#endif /* __mc68000__ */ 1092#endif /* __mc68000__ */
1105 if (match) 1093 if (match)
1106 printk(" (%sed with %s)", 1094 printk(" (%sed with %s)",
@@ -1242,8 +1230,8 @@ static int hwif_init(ide_hwif_t *hwif)
1242 int old_irq; 1230 int old_irq;
1243 1231
1244 if (!hwif->irq) { 1232 if (!hwif->irq) {
1245 if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) 1233 hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
1246 { 1234 if (!hwif->irq) {
1247 printk("%s: DISABLED, NO IRQ\n", hwif->name); 1235 printk("%s: DISABLED, NO IRQ\n", hwif->name);
1248 return 0; 1236 return 0;
1249 } 1237 }
@@ -1272,7 +1260,8 @@ static int hwif_init(ide_hwif_t *hwif)
1272 * It failed to initialise. Find the default IRQ for 1260 * It failed to initialise. Find the default IRQ for
1273 * this port and try that. 1261 * this port and try that.
1274 */ 1262 */
1275 if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) { 1263 hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
1264 if (!hwif->irq) {
1276 printk("%s: Disabled unable to get IRQ %d.\n", 1265 printk("%s: Disabled unable to get IRQ %d.\n",
1277 hwif->name, old_irq); 1266 hwif->name, old_irq);
1278 goto out; 1267 goto out;
@@ -1324,6 +1313,7 @@ static void hwif_register_devices(ide_hwif_t *hwif)
1324 1313
1325static void ide_port_init_devices(ide_hwif_t *hwif) 1314static void ide_port_init_devices(ide_hwif_t *hwif)
1326{ 1315{
1316 const struct ide_port_ops *port_ops = hwif->port_ops;
1327 int i; 1317 int i;
1328 1318
1329 for (i = 0; i < MAX_DRIVES; i++) { 1319 for (i = 0; i < MAX_DRIVES; i++) {
@@ -1335,12 +1325,10 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
1335 drive->unmask = 1; 1325 drive->unmask = 1;
1336 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) 1326 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
1337 drive->no_unmask = 1; 1327 drive->no_unmask = 1;
1338 if ((hwif->host_flags & IDE_HFLAG_NO_AUTOTUNE) == 0)
1339 drive->autotune = 1;
1340 } 1328 }
1341 1329
1342 if (hwif->port_init_devs) 1330 if (port_ops && port_ops->port_init_devs)
1343 hwif->port_init_devs(hwif); 1331 port_ops->port_init_devs(hwif);
1344} 1332}
1345 1333
1346static void ide_init_port(ide_hwif_t *hwif, unsigned int port, 1334static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1355,9 +1343,6 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1355 if (d->init_iops) 1343 if (d->init_iops)
1356 d->init_iops(hwif); 1344 d->init_iops(hwif);
1357 1345
1358 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0)
1359 ide_hwif_setup_dma(hwif, d);
1360
1361 if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) || 1346 if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) ||
1362 (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS)) 1347 (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
1363 hwif->irq = port ? 15 : 14; 1348 hwif->irq = port ? 15 : 14;
@@ -1365,16 +1350,36 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1365 hwif->host_flags = d->host_flags; 1350 hwif->host_flags = d->host_flags;
1366 hwif->pio_mask = d->pio_mask; 1351 hwif->pio_mask = d->pio_mask;
1367 1352
1368 if ((d->host_flags & IDE_HFLAG_SERIALIZE) && hwif->mate) 1353 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
1369 hwif->mate->serialized = hwif->serialized = 1; 1354 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
1355 hwif->port_ops = d->port_ops;
1356
1357 if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
1358 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) {
1359 if (hwif->mate)
1360 hwif->mate->serialized = hwif->serialized = 1;
1361 }
1370 1362
1371 hwif->swdma_mask = d->swdma_mask; 1363 hwif->swdma_mask = d->swdma_mask;
1372 hwif->mwdma_mask = d->mwdma_mask; 1364 hwif->mwdma_mask = d->mwdma_mask;
1373 hwif->ultra_mask = d->udma_mask; 1365 hwif->ultra_mask = d->udma_mask;
1374 1366
1375 /* reset DMA masks only for SFF-style DMA controllers */ 1367 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
1376 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0) 1368 int rc;
1377 hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0; 1369
1370 if (d->init_dma)
1371 rc = d->init_dma(hwif, d);
1372 else
1373 rc = ide_hwif_setup_dma(hwif, d);
1374
1375 if (rc < 0) {
1376 printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
1377 hwif->swdma_mask = 0;
1378 hwif->mwdma_mask = 0;
1379 hwif->ultra_mask = 0;
1380 } else if (d->dma_ops)
1381 hwif->dma_ops = d->dma_ops;
1382 }
1378 1383
1379 if (d->host_flags & IDE_HFLAG_RQSIZE_256) 1384 if (d->host_flags & IDE_HFLAG_RQSIZE_256)
1380 hwif->rqsize = 256; 1385 hwif->rqsize = 256;
@@ -1386,9 +1391,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1386 1391
1387static void ide_port_cable_detect(ide_hwif_t *hwif) 1392static void ide_port_cable_detect(ide_hwif_t *hwif)
1388{ 1393{
1389 if (hwif->cable_detect && (hwif->ultra_mask & 0x78)) { 1394 const struct ide_port_ops *port_ops = hwif->port_ops;
1395
1396 if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
1390 if (hwif->cbl != ATA_CBL_PATA40_SHORT) 1397 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
1391 hwif->cbl = hwif->cable_detect(hwif); 1398 hwif->cbl = port_ops->cable_detect(hwif);
1392 } 1399 }
1393} 1400}
1394 1401
@@ -1444,19 +1451,74 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
1444 return rc; 1451 return rc;
1445} 1452}
1446 1453
1454/**
1455 * ide_find_port_slot - find free ide_hwifs[] slot
1456 * @d: IDE port info
1457 *
1458 * Return the new hwif. If we are out of free slots return NULL.
1459 */
1460
1461ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
1462{
1463 ide_hwif_t *hwif;
1464 int i;
1465 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
1466
1467 /*
1468 * Claim an unassigned slot.
1469 *
1470 * Give preference to claiming other slots before claiming ide0/ide1,
1471 * just in case there's another interface yet-to-be-scanned
1472 * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
1473 *
1474 * Unless there is a bootable card that does not use the standard
1475 * ports 0x1f0/0x170 (the ide0/ide1 defaults).
1476 */
1477 if (bootable) {
1478 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
1479
1480 for (; i < MAX_HWIFS; i++) {
1481 hwif = &ide_hwifs[i];
1482 if (hwif->chipset == ide_unknown)
1483 return hwif;
1484 }
1485 } else {
1486 for (i = 2; i < MAX_HWIFS; i++) {
1487 hwif = &ide_hwifs[i];
1488 if (hwif->chipset == ide_unknown)
1489 return hwif;
1490 }
1491 for (i = 0; i < 2 && i < MAX_HWIFS; i++) {
1492 hwif = &ide_hwifs[i];
1493 if (hwif->chipset == ide_unknown)
1494 return hwif;
1495 }
1496 }
1497
1498 return NULL;
1499}
1500EXPORT_SYMBOL_GPL(ide_find_port_slot);
1501
1447int ide_device_add_all(u8 *idx, const struct ide_port_info *d) 1502int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1448{ 1503{
1449 ide_hwif_t *hwif, *mate = NULL; 1504 ide_hwif_t *hwif, *mate = NULL;
1450 int i, rc = 0; 1505 int i, rc = 0;
1451 1506
1452 for (i = 0; i < MAX_HWIFS; i++) { 1507 for (i = 0; i < MAX_HWIFS; i++) {
1453 if (d == NULL || idx[i] == 0xff) { 1508 if (idx[i] == 0xff) {
1454 mate = NULL; 1509 mate = NULL;
1455 continue; 1510 continue;
1456 } 1511 }
1457 1512
1458 hwif = &ide_hwifs[idx[i]]; 1513 hwif = &ide_hwifs[idx[i]];
1459 1514
1515 ide_port_apply_params(hwif);
1516
1517 if (d == NULL) {
1518 mate = NULL;
1519 continue;
1520 }
1521
1460 if (d->chipset != ide_etrax100 && (i & 1) && mate) { 1522 if (d->chipset != ide_etrax100 && (i & 1) && mate) {
1461 hwif->mate = mate; 1523 hwif->mate = mate;
1462 mate->mate = hwif; 1524 mate->mate = hwif;
@@ -1475,25 +1537,15 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1475 1537
1476 hwif = &ide_hwifs[idx[i]]; 1538 hwif = &ide_hwifs[idx[i]];
1477 1539
1478 if ((hwif->chipset != ide_4drives || !hwif->mate || 1540 if (ide_probe_port(hwif) == 0)
1479 !hwif->mate->present) && ide_hwif_request_regions(hwif)) { 1541 hwif->present = 1;
1480 printk(KERN_ERR "%s: ports already in use, "
1481 "skipping probe\n", hwif->name);
1482 continue;
1483 }
1484
1485 if (ide_probe_port(hwif) < 0) {
1486 ide_hwif_release_regions(hwif);
1487 continue;
1488 }
1489
1490 hwif->present = 1;
1491 1542
1492 if (hwif->chipset != ide_4drives || !hwif->mate || 1543 if (hwif->chipset != ide_4drives || !hwif->mate ||
1493 !hwif->mate->present) 1544 !hwif->mate->present)
1494 ide_register_port(hwif); 1545 ide_register_port(hwif);
1495 1546
1496 ide_port_tune_devices(hwif); 1547 if (hwif->present)
1548 ide_port_tune_devices(hwif);
1497 } 1549 }
1498 1550
1499 for (i = 0; i < MAX_HWIFS; i++) { 1551 for (i = 0; i < MAX_HWIFS; i++) {
@@ -1502,9 +1554,6 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1502 1554
1503 hwif = &ide_hwifs[idx[i]]; 1555 hwif = &ide_hwifs[idx[i]];
1504 1556
1505 if (!hwif->present)
1506 continue;
1507
1508 if (hwif_init(hwif) == 0) { 1557 if (hwif_init(hwif) == 0) {
1509 printk(KERN_INFO "%s: failed to initialize IDE " 1558 printk(KERN_INFO "%s: failed to initialize IDE "
1510 "interface\n", hwif->name); 1559 "interface\n", hwif->name);
@@ -1513,10 +1562,13 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1513 continue; 1562 continue;
1514 } 1563 }
1515 1564
1516 ide_port_setup_devices(hwif); 1565 if (hwif->present)
1566 ide_port_setup_devices(hwif);
1517 1567
1518 ide_acpi_init(hwif); 1568 ide_acpi_init(hwif);
1519 ide_acpi_port_init_devices(hwif); 1569
1570 if (hwif->present)
1571 ide_acpi_port_init_devices(hwif);
1520 } 1572 }
1521 1573
1522 for (i = 0; i < MAX_HWIFS; i++) { 1574 for (i = 0; i < MAX_HWIFS; i++) {
@@ -1525,11 +1577,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1525 1577
1526 hwif = &ide_hwifs[idx[i]]; 1578 hwif = &ide_hwifs[idx[i]];
1527 1579
1528 if (hwif->present) { 1580 if (hwif->chipset == ide_unknown)
1529 if (hwif->chipset == ide_unknown) 1581 hwif->chipset = ide_generic;
1530 hwif->chipset = ide_generic; 1582
1583 if (hwif->present)
1531 hwif_register_devices(hwif); 1584 hwif_register_devices(hwif);
1532 }
1533 } 1585 }
1534 1586
1535 for (i = 0; i < MAX_HWIFS; i++) { 1587 for (i = 0; i < MAX_HWIFS; i++) {
@@ -1538,11 +1590,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1538 1590
1539 hwif = &ide_hwifs[idx[i]]; 1591 hwif = &ide_hwifs[idx[i]];
1540 1592
1541 if (hwif->present) { 1593 ide_sysfs_register_port(hwif);
1542 ide_sysfs_register_port(hwif); 1594 ide_proc_register_port(hwif);
1543 ide_proc_register_port(hwif); 1595
1596 if (hwif->present)
1544 ide_proc_port_register_devices(hwif); 1597 ide_proc_port_register_devices(hwif);
1545 }
1546 } 1598 }
1547 1599
1548 return rc; 1600 return rc;
@@ -1563,6 +1615,7 @@ EXPORT_SYMBOL_GPL(ide_device_add);
1563 1615
1564void ide_port_scan(ide_hwif_t *hwif) 1616void ide_port_scan(ide_hwif_t *hwif)
1565{ 1617{
1618 ide_port_apply_params(hwif);
1566 ide_port_cable_detect(hwif); 1619 ide_port_cable_detect(hwif);
1567 ide_port_init_devices(hwif); 1620 ide_port_init_devices(hwif);
1568 1621
@@ -1578,3 +1631,67 @@ void ide_port_scan(ide_hwif_t *hwif)
1578 ide_proc_port_register_devices(hwif); 1631 ide_proc_port_register_devices(hwif);
1579} 1632}
1580EXPORT_SYMBOL_GPL(ide_port_scan); 1633EXPORT_SYMBOL_GPL(ide_port_scan);
1634
1635static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
1636 const struct ide_port_info *d,
1637 unsigned long config)
1638{
1639 ide_hwif_t *hwif;
1640 unsigned long base, ctl;
1641 int irq;
1642
1643 if (port_no == 0) {
1644 base = 0x1f0;
1645 ctl = 0x3f6;
1646 irq = 14;
1647 } else {
1648 base = 0x170;
1649 ctl = 0x376;
1650 irq = 15;
1651 }
1652
1653 if (!request_region(base, 8, d->name)) {
1654 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
1655 d->name, base, base + 7);
1656 return;
1657 }
1658
1659 if (!request_region(ctl, 1, d->name)) {
1660 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
1661 d->name, ctl);
1662 release_region(base, 8);
1663 return;
1664 }
1665
1666 ide_std_init_ports(hw, base, ctl);
1667 hw->irq = irq;
1668
1669 hwif = ide_find_port_slot(d);
1670 if (hwif) {
1671 ide_init_port_hw(hwif, hw);
1672 if (config)
1673 hwif->config_data = config;
1674 idx[port_no] = hwif->index;
1675 }
1676}
1677
1678int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
1679{
1680 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
1681 hw_regs_t hw[2];
1682
1683 memset(&hw, 0, sizeof(hw));
1684
1685 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
1686 ide_legacy_init_one(idx, &hw[0], 0, d, config);
1687 ide_legacy_init_one(idx, &hw[1], 1, d, config);
1688
1689 if (idx[0] == 0xff && idx[1] == 0xff &&
1690 (d->host_flags & IDE_HFLAG_SINGLE))
1691 return -ENOENT;
1692
1693 ide_device_add(idx, d);
1694
1695 return 0;
1696}
1697EXPORT_SYMBOL_GPL(ide_legacy_device_add);
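The new ide_find_port_slot() above keeps non-bootable interfaces out of slots 0 and 1 so a later scan can still land the legacy 0x1f0/0x170 ports on ide0/ide1, while bootable cards search from the front (skipping slot 0 only for IDE_HFLAG_QD_2ND_PORT). A compact userspace model of that ordering follows; the slot count and the free-slot marker are simplified stand-ins for ide_hwifs[] and chipset == ide_unknown.

#include <stdio.h>

#define MAX_SLOTS	6
#define SLOT_FREE	0	/* stands in for chipset == ide_unknown */

static int slots[MAX_SLOTS];	/* non-zero means the slot is claimed */

/* Return a free slot index, or -1; mirrors the preference order above. */
static int find_slot(int bootable, int skip_first)
{
	int i;

	if (bootable) {
		for (i = skip_first ? 1 : 0; i < MAX_SLOTS; i++)
			if (slots[i] == SLOT_FREE)
				return i;
		return -1;
	}

	/* non-bootable: leave slots 0 and 1 for the legacy ide0/ide1 ports */
	for (i = 2; i < MAX_SLOTS; i++)
		if (slots[i] == SLOT_FREE)
			return i;
	for (i = 0; i < 2 && i < MAX_SLOTS; i++)
		if (slots[i] == SLOT_FREE)
			return i;
	return -1;
}

int main(void)
{
	int a = find_slot(0, 0);		/* non-bootable card */

	if (a >= 0)
		slots[a] = 1;
	printf("non-bootable card got slot %d\n", a);		 /* 2 */
	printf("bootable card got slot %d\n", find_slot(1, 0)); /* 0 */
	return 0;
}

ide_legacy_device_add() then pairs the slot it finds with the usual per-port defaults (0x1f0/0x3f6 on IRQ 14, 0x170/0x376 on IRQ 15) set up by ide_legacy_init_one().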
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index edd7f186dc4d..7b2f3815a838 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -47,28 +47,28 @@ static int proc_ide_read_imodel
47 const char *name; 47 const char *name;
48 48
49 switch (hwif->chipset) { 49 switch (hwif->chipset) {
50 case ide_generic: name = "generic"; break; 50 case ide_generic: name = "generic"; break;
51 case ide_pci: name = "pci"; break; 51 case ide_pci: name = "pci"; break;
52 case ide_cmd640: name = "cmd640"; break; 52 case ide_cmd640: name = "cmd640"; break;
53 case ide_dtc2278: name = "dtc2278"; break; 53 case ide_dtc2278: name = "dtc2278"; break;
54 case ide_ali14xx: name = "ali14xx"; break; 54 case ide_ali14xx: name = "ali14xx"; break;
55 case ide_qd65xx: name = "qd65xx"; break; 55 case ide_qd65xx: name = "qd65xx"; break;
56 case ide_umc8672: name = "umc8672"; break; 56 case ide_umc8672: name = "umc8672"; break;
57 case ide_ht6560b: name = "ht6560b"; break; 57 case ide_ht6560b: name = "ht6560b"; break;
58 case ide_rz1000: name = "rz1000"; break; 58 case ide_rz1000: name = "rz1000"; break;
59 case ide_trm290: name = "trm290"; break; 59 case ide_trm290: name = "trm290"; break;
60 case ide_cmd646: name = "cmd646"; break; 60 case ide_cmd646: name = "cmd646"; break;
61 case ide_cy82c693: name = "cy82c693"; break; 61 case ide_cy82c693: name = "cy82c693"; break;
62 case ide_4drives: name = "4drives"; break; 62 case ide_4drives: name = "4drives"; break;
63 case ide_pmac: name = "mac-io"; break; 63 case ide_pmac: name = "mac-io"; break;
64 case ide_au1xxx: name = "au1xxx"; break; 64 case ide_au1xxx: name = "au1xxx"; break;
65 case ide_palm3710: name = "palm3710"; break; 65 case ide_palm3710: name = "palm3710"; break;
66 case ide_etrax100: name = "etrax100"; break; 66 case ide_etrax100: name = "etrax100"; break;
67 case ide_acorn: name = "acorn"; break; 67 case ide_acorn: name = "acorn"; break;
68 default: name = "(unknown)"; break; 68 default: name = "(unknown)"; break;
69 } 69 }
70 len = sprintf(page, "%s\n", name); 70 len = sprintf(page, "%s\n", name);
71 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 71 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
72} 72}
73 73
74static int proc_ide_read_mate 74static int proc_ide_read_mate
@@ -81,7 +81,7 @@ static int proc_ide_read_mate
81 len = sprintf(page, "%s\n", hwif->mate->name); 81 len = sprintf(page, "%s\n", hwif->mate->name);
82 else 82 else
83 len = sprintf(page, "(none)\n"); 83 len = sprintf(page, "(none)\n");
84 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 84 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
85} 85}
86 86
87static int proc_ide_read_channel 87static int proc_ide_read_channel
@@ -93,7 +93,7 @@ static int proc_ide_read_channel
93 page[0] = hwif->channel ? '1' : '0'; 93 page[0] = hwif->channel ? '1' : '0';
94 page[1] = '\n'; 94 page[1] = '\n';
95 len = 2; 95 len = 2;
96 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 96 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
97} 97}
98 98
99static int proc_ide_read_identify 99static int proc_ide_read_identify
@@ -120,7 +120,7 @@ static int proc_ide_read_identify
120 len = out - page; 120 len = out - page;
121 } 121 }
122 } 122 }
123 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 123 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
124} 124}
125 125
126/** 126/**
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(ide_add_setting);
197 * The caller must hold the setting semaphore. 197 * The caller must hold the setting semaphore.
198 */ 198 */
199 199
200static void __ide_remove_setting (ide_drive_t *drive, char *name) 200static void __ide_remove_setting(ide_drive_t *drive, char *name)
201{ 201{
202 ide_settings_t **p, *setting; 202 ide_settings_t **p, *setting;
203 203
@@ -205,7 +205,8 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name)
205 205
206 while ((*p) && strcmp((*p)->name, name)) 206 while ((*p) && strcmp((*p)->name, name))
207 p = &((*p)->next); 207 p = &((*p)->next);
208 if ((setting = (*p)) == NULL) 208 setting = (*p);
209 if (setting == NULL)
209 return; 210 return;
210 211
211 (*p) = setting->next; 212 (*p) = setting->next;
@@ -223,7 +224,7 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name)
223 * caller must hold ide_setting_mtx. 224 * caller must hold ide_setting_mtx.
224 */ 225 */
225 226
226static void auto_remove_settings (ide_drive_t *drive) 227static void auto_remove_settings(ide_drive_t *drive)
227{ 228{
228 ide_settings_t *setting; 229 ide_settings_t *setting;
229repeat: 230repeat:
@@ -279,16 +280,16 @@ static int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting)
279 280
280 if ((setting->rw & SETTING_READ)) { 281 if ((setting->rw & SETTING_READ)) {
281 spin_lock_irqsave(&ide_lock, flags); 282 spin_lock_irqsave(&ide_lock, flags);
282 switch(setting->data_type) { 283 switch (setting->data_type) {
283 case TYPE_BYTE: 284 case TYPE_BYTE:
284 val = *((u8 *) setting->data); 285 val = *((u8 *) setting->data);
285 break; 286 break;
286 case TYPE_SHORT: 287 case TYPE_SHORT:
287 val = *((u16 *) setting->data); 288 val = *((u16 *) setting->data);
288 break; 289 break;
289 case TYPE_INT: 290 case TYPE_INT:
290 val = *((u32 *) setting->data); 291 val = *((u32 *) setting->data);
291 break; 292 break;
292 } 293 }
293 spin_unlock_irqrestore(&ide_lock, flags); 294 spin_unlock_irqrestore(&ide_lock, flags);
294 } 295 }
@@ -326,15 +327,15 @@ static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int va
326 if (ide_spin_wait_hwgroup(drive)) 327 if (ide_spin_wait_hwgroup(drive))
327 return -EBUSY; 328 return -EBUSY;
328 switch (setting->data_type) { 329 switch (setting->data_type) {
329 case TYPE_BYTE: 330 case TYPE_BYTE:
330 *((u8 *) setting->data) = val; 331 *((u8 *) setting->data) = val;
331 break; 332 break;
332 case TYPE_SHORT: 333 case TYPE_SHORT:
333 *((u16 *) setting->data) = val; 334 *((u16 *) setting->data) = val;
334 break; 335 break;
335 case TYPE_INT: 336 case TYPE_INT:
336 *((u32 *) setting->data) = val; 337 *((u32 *) setting->data) = val;
337 break; 338 break;
338 } 339 }
339 spin_unlock_irq(&ide_lock); 340 spin_unlock_irq(&ide_lock);
340 return 0; 341 return 0;
@@ -390,7 +391,7 @@ void ide_add_generic_settings (ide_drive_t *drive)
390 391
391static void proc_ide_settings_warn(void) 392static void proc_ide_settings_warn(void)
392{ 393{
393 static int warned = 0; 394 static int warned;
394 395
395 if (warned) 396 if (warned)
396 return; 397 return;
@@ -413,11 +414,12 @@ static int proc_ide_read_settings
413 mutex_lock(&ide_setting_mtx); 414 mutex_lock(&ide_setting_mtx);
414 out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n"); 415 out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
415 out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n"); 416 out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
416 while(setting) { 417 while (setting) {
417 mul_factor = setting->mul_factor; 418 mul_factor = setting->mul_factor;
418 div_factor = setting->div_factor; 419 div_factor = setting->div_factor;
419 out += sprintf(out, "%-24s", setting->name); 420 out += sprintf(out, "%-24s", setting->name);
420 if ((rc = ide_read_setting(drive, setting)) >= 0) 421 rc = ide_read_setting(drive, setting);
422 if (rc >= 0)
421 out += sprintf(out, "%-16d", rc * mul_factor / div_factor); 423 out += sprintf(out, "%-16d", rc * mul_factor / div_factor);
422 else 424 else
423 out += sprintf(out, "%-16s", "write-only"); 425 out += sprintf(out, "%-16s", "write-only");
@@ -431,7 +433,7 @@ static int proc_ide_read_settings
431 } 433 }
432 len = out - page; 434 len = out - page;
433 mutex_unlock(&ide_setting_mtx); 435 mutex_unlock(&ide_setting_mtx);
434 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 436 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
435} 437}
436 438
437#define MAX_LEN 30 439#define MAX_LEN 30
@@ -512,8 +514,7 @@ static int proc_ide_write_settings(struct file *file, const char __user *buffer,
512 514
513 mutex_lock(&ide_setting_mtx); 515 mutex_lock(&ide_setting_mtx);
514 setting = ide_find_setting_by_name(drive, name); 516 setting = ide_find_setting_by_name(drive, name);
515 if (!setting) 517 if (!setting) {
516 {
517 mutex_unlock(&ide_setting_mtx); 518 mutex_unlock(&ide_setting_mtx);
518 goto parse_error; 519 goto parse_error;
519 } 520 }
@@ -533,8 +534,8 @@ parse_error:
533int proc_ide_read_capacity 534int proc_ide_read_capacity
534 (char *page, char **start, off_t off, int count, int *eof, void *data) 535 (char *page, char **start, off_t off, int count, int *eof, void *data)
535{ 536{
536 int len = sprintf(page,"%llu\n", (long long)0x7fffffff); 537 int len = sprintf(page, "%llu\n", (long long)0x7fffffff);
537 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 538 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
538} 539}
539 540
540EXPORT_SYMBOL_GPL(proc_ide_read_capacity); 541EXPORT_SYMBOL_GPL(proc_ide_read_capacity);
@@ -546,13 +547,13 @@ int proc_ide_read_geometry
546 char *out = page; 547 char *out = page;
547 int len; 548 int len;
548 549
549 out += sprintf(out,"physical %d/%d/%d\n", 550 out += sprintf(out, "physical %d/%d/%d\n",
550 drive->cyl, drive->head, drive->sect); 551 drive->cyl, drive->head, drive->sect);
551 out += sprintf(out,"logical %d/%d/%d\n", 552 out += sprintf(out, "logical %d/%d/%d\n",
552 drive->bios_cyl, drive->bios_head, drive->bios_sect); 553 drive->bios_cyl, drive->bios_head, drive->bios_sect);
553 554
554 len = out - page; 555 len = out - page;
555 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 556 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
556} 557}
557 558
558EXPORT_SYMBOL(proc_ide_read_geometry); 559EXPORT_SYMBOL(proc_ide_read_geometry);
@@ -566,7 +567,7 @@ static int proc_ide_read_dmodel
566 567
567 len = sprintf(page, "%.40s\n", 568 len = sprintf(page, "%.40s\n",
568 (id && id->model[0]) ? (char *)id->model : "(none)"); 569 (id && id->model[0]) ? (char *)id->model : "(none)");
569 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 570 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
570} 571}
571 572
572static int proc_ide_read_driver 573static int proc_ide_read_driver
@@ -583,7 +584,7 @@ static int proc_ide_read_driver
583 dev->driver->name, ide_drv->version); 584 dev->driver->name, ide_drv->version);
584 } else 585 } else
585 len = sprintf(page, "ide-default version 0.9.newide\n"); 586 len = sprintf(page, "ide-default version 0.9.newide\n");
586 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 587 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
587} 588}
588 589
589static int ide_replace_subdriver(ide_drive_t *drive, const char *driver) 590static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
@@ -598,14 +599,14 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
598 err = device_attach(dev); 599 err = device_attach(dev);
599 if (err < 0) 600 if (err < 0)
600 printk(KERN_WARNING "IDE: %s: device_attach error: %d\n", 601 printk(KERN_WARNING "IDE: %s: device_attach error: %d\n",
601 __FUNCTION__, err); 602 __func__, err);
602 drive->driver_req[0] = 0; 603 drive->driver_req[0] = 0;
603 if (dev->driver == NULL) { 604 if (dev->driver == NULL) {
604 err = device_attach(dev); 605 err = device_attach(dev);
605 if (err < 0) 606 if (err < 0)
606 printk(KERN_WARNING 607 printk(KERN_WARNING
607 "IDE: %s: device_attach(2) error: %d\n", 608 "IDE: %s: device_attach(2) error: %d\n",
608 __FUNCTION__, err); 609 __func__, err);
609 } 610 }
610 if (dev->driver && !strcmp(dev->driver->name, driver)) 611 if (dev->driver && !strcmp(dev->driver->name, driver))
611 ret = 0; 612 ret = 0;
@@ -639,30 +640,26 @@ static int proc_ide_read_media
639 int len; 640 int len;
640 641
641 switch (drive->media) { 642 switch (drive->media) {
642 case ide_disk: media = "disk\n"; 643 case ide_disk: media = "disk\n"; break;
643 break; 644 case ide_cdrom: media = "cdrom\n"; break;
644 case ide_cdrom: media = "cdrom\n"; 645 case ide_tape: media = "tape\n"; break;
645 break; 646 case ide_floppy: media = "floppy\n"; break;
646 case ide_tape: media = "tape\n"; 647 case ide_optical: media = "optical\n"; break;
647 break; 648 default: media = "UNKNOWN\n"; break;
648 case ide_floppy:media = "floppy\n";
649 break;
650 case ide_optical:media = "optical\n";
651 break;
652 default: media = "UNKNOWN\n";
653 break;
654 } 649 }
655 strcpy(page,media); 650 strcpy(page, media);
656 len = strlen(media); 651 len = strlen(media);
657 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 652 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
658} 653}
659 654
660static ide_proc_entry_t generic_drive_entries[] = { 655static ide_proc_entry_t generic_drive_entries[] = {
661 { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver, proc_ide_write_driver }, 656 { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver,
662 { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL }, 657 proc_ide_write_driver },
663 { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL }, 658 { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL },
664 { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL }, 659 { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL },
665 { "settings", S_IFREG|S_IRUSR|S_IWUSR,proc_ide_read_settings, proc_ide_write_settings }, 660 { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL },
661 { "settings", S_IFREG|S_IRUSR|S_IWUSR, proc_ide_read_settings,
662 proc_ide_write_settings },
666 { NULL, 0, NULL, NULL } 663 { NULL, 0, NULL, NULL }
667}; 664};
668 665
@@ -734,7 +731,6 @@ void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
734 spin_unlock_irqrestore(&ide_lock, flags); 731 spin_unlock_irqrestore(&ide_lock, flags);
735 mutex_unlock(&ide_setting_mtx); 732 mutex_unlock(&ide_setting_mtx);
736} 733}
737
738EXPORT_SYMBOL(ide_proc_unregister_driver); 734EXPORT_SYMBOL(ide_proc_unregister_driver);
739 735
740void ide_proc_port_register_devices(ide_hwif_t *hwif) 736void ide_proc_port_register_devices(ide_hwif_t *hwif)
@@ -755,7 +751,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
755 drive->proc = proc_mkdir(drive->name, parent); 751 drive->proc = proc_mkdir(drive->name, parent);
756 if (drive->proc) 752 if (drive->proc)
757 ide_add_proc_entries(drive->proc, generic_drive_entries, drive); 753 ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
758 sprintf(name,"ide%d/%s", (drive->name[2]-'a')/2, drive->name); 754 sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
759 ent = proc_symlink(drive->name, proc_ide_root, name); 755 ent = proc_symlink(drive->name, proc_ide_root, name);
760 if (!ent) return; 756 if (!ent) return;
761 } 757 }
@@ -790,15 +786,6 @@ void ide_proc_register_port(ide_hwif_t *hwif)
790 } 786 }
791} 787}
792 788
793#ifdef CONFIG_BLK_DEV_IDEPCI
794void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
795{
796 create_proc_info_entry(name, 0, proc_ide_root, get_info);
797}
798
799EXPORT_SYMBOL_GPL(ide_pci_create_host_proc);
800#endif
801
802void ide_proc_unregister_port(ide_hwif_t *hwif) 789void ide_proc_unregister_port(ide_hwif_t *hwif)
803{ 790{
804 if (hwif->proc) { 791 if (hwif->proc) {
@@ -825,7 +812,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
825 err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver); 812 err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
826 if (err < 0) 813 if (err < 0)
827 printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n", 814 printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
828 __FUNCTION__, err); 815 __func__, err);
829 return 0; 816 return 0;
830} 817}
831 818
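generic_drive_entries[], reflowed above, is a NULL-terminated table of { name, mode, read handler, write handler } rows that ide_add_proc_entries() walks when a drive's proc directory is created. A small sketch of that table-plus-terminator idiom; the struct layout and modes below only echo the initializers shown and are not the kernel's ide_proc_entry_t definition.

#include <stdio.h>
#include <stddef.h>

/* Illustrative entry type; read-only entries leave the write hook NULL. */
struct proc_entry {
	const char *name;
	unsigned int mode;
	int (*read)(char *buf, int len);
	int (*write)(const char *buf, int len);
};

static int read_media(char *buf, int len)
{
	return snprintf(buf, len, "disk\n");
}

static const struct proc_entry drive_entries[] = {
	{ "media", 0444, read_media, NULL },
	{ NULL, 0, NULL, NULL }		/* terminator, as in the table above */
};

/* Walk the table until the NULL-name terminator. */
static void register_entries(const struct proc_entry *e)
{
	for (; e->name; e++)
		printf("would create <drive>/%s (mode %o, %s)\n",
		       e->name, e->mode,
		       e->write ? "read-write" : "read-only");
}

int main(void)
{
	register_entries(drive_entries);
	return 0;
}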
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 98888da1b600..0e79efff1deb 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -102,7 +102,7 @@ static int __init ide_scan_pcibus(void)
102 if (__pci_register_driver(d, d->driver.owner, 102 if (__pci_register_driver(d, d->driver.owner,
103 d->driver.mod_name)) 103 d->driver.mod_name))
104 printk(KERN_ERR "%s: failed to register %s driver\n", 104 printk(KERN_ERR "%s: failed to register %s driver\n",
105 __FUNCTION__, d->driver.mod_name); 105 __func__, d->driver.mod_name);
106 } 106 }
107 107
108 return 0; 108 return 0;
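Both hunks above replace the GCC-specific __FUNCTION__ spelling with the C99 predefined identifier __func__; the behaviour at the call sites is unchanged. As a standalone illustration outside the patch (the function name below is invented), __func__ always expands to the name of the enclosing function:

#include <stdio.h>

static void report_probe_failure(int err)
{
	/* C99 defines __func__ as if by:
	 *   static const char __func__[] = "report_probe_failure";
	 * __FUNCTION__ was the older GCC-only spelling of the same thing.
	 */
	printf("%s: failed to register driver, error %d\n", __func__, err);
}

int main(void)
{
	report_probe_failure(-19);
	return 0;
}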
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index f43fd070f1b6..29870c415110 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -72,26 +72,6 @@ enum {
72#endif 72#endif
73 73
74/**************************** Tunable parameters *****************************/ 74/**************************** Tunable parameters *****************************/
75
76
77/*
78 * Pipelined mode parameters.
79 *
80 * We try to use the minimum number of stages which is enough to keep the tape
81 * constantly streaming. To accomplish that, we implement a feedback loop around
82 * the maximum number of stages:
83 *
84 * We start from MIN maximum stages (we will not even use MIN stages if we don't
85 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
86 * pipeline is empty, until we reach the optimum value or until we reach MAX.
87 *
88 * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
89 * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
90 */
91#define IDETAPE_MIN_PIPELINE_STAGES 1
92#define IDETAPE_MAX_PIPELINE_STAGES 400
93#define IDETAPE_INCREASE_STAGES_RATE 20
94
95/* 75/*
96 * After each failed packet command we issue a request sense command and retry 76 * After each failed packet command we issue a request sense command and retry
97 * the packet command IDETAPE_MAX_PC_RETRIES times. 77 * the packet command IDETAPE_MAX_PC_RETRIES times.
@@ -224,28 +204,17 @@ enum {
224 /* 0 When the tape position is unknown */ 204 /* 0 When the tape position is unknown */
225 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1), 205 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
226 /* Device already opened */ 206 /* Device already opened */
227 IDETAPE_FLAG_BUSY = (1 << 2), 207 IDETAPE_FLAG_BUSY = (1 << 2),
228 /* Error detected in a pipeline stage */
229 IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
230 /* Attempt to auto-detect the current user block size */ 208 /* Attempt to auto-detect the current user block size */
231 IDETAPE_FLAG_DETECT_BS = (1 << 4), 209 IDETAPE_FLAG_DETECT_BS = (1 << 3),
232 /* Currently on a filemark */ 210 /* Currently on a filemark */
233 IDETAPE_FLAG_FILEMARK = (1 << 5), 211 IDETAPE_FLAG_FILEMARK = (1 << 4),
234 /* DRQ interrupt device */ 212 /* DRQ interrupt device */
235 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6), 213 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
236 /* pipeline active */
237 IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
238 /* 0 = no tape is loaded, so we don't rewind after ejecting */ 214 /* 0 = no tape is loaded, so we don't rewind after ejecting */
239 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8), 215 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
240}; 216};
241 217
242/* A pipeline stage. */
243typedef struct idetape_stage_s {
244 struct request rq; /* The corresponding request */
245 struct idetape_bh *bh; /* The data buffers */
246 struct idetape_stage_s *next; /* Pointer to the next stage */
247} idetape_stage_t;
248
249/* 218/*
250 * Most of our global data which we need to save even as we leave the driver due 219 * Most of our global data which we need to save even as we leave the driver due
251 * to an interrupt or a timer event is stored in the struct defined below. 220 * to an interrupt or a timer event is stored in the struct defined below.
@@ -289,9 +258,7 @@ typedef struct ide_tape_obj {
289 * While polling for DSC we use postponed_rq to postpone the current 258 * While polling for DSC we use postponed_rq to postpone the current
290 * request so that ide.c will be able to service pending requests on the 259 * request so that ide.c will be able to service pending requests on the
291 * other device. Note that at most we will have only one DSC (usually 260 * other device. Note that at most we will have only one DSC (usually
292 * data transfer) request in the device request queue. Additional 261 * data transfer) request in the device request queue.
293 * requests can be queued in our internal pipeline, but they will be
294 * visible to ide.c only one at a time.
295 */ 262 */
296 struct request *postponed_rq; 263 struct request *postponed_rq;
297 /* The time in which we started polling for DSC */ 264 /* The time in which we started polling for DSC */
@@ -331,43 +298,20 @@ typedef struct ide_tape_obj {
331 * At most, there is only one ide-tape originated data transfer request 298 * At most, there is only one ide-tape originated data transfer request
332 * in the device request queue. This allows ide.c to easily service 299 * in the device request queue. This allows ide.c to easily service
333 * requests from the other device when we postpone our active request. 300 * requests from the other device when we postpone our active request.
334 * In the pipelined operation mode, we use our internal pipeline
335 * structure to hold more data requests. The data buffer size is chosen
336 * based on the tape's recommendation.
337 */ 301 */
338 /* ptr to the request which is waiting in the device request queue */ 302
339 struct request *active_data_rq;
340 /* Data buffer size chosen based on the tape's recommendation */ 303 /* Data buffer size chosen based on the tape's recommendation */
341 int stage_size; 304 int buffer_size;
342 idetape_stage_t *merge_stage; 305 /* merge buffer */
343 int merge_stage_size; 306 struct idetape_bh *merge_bh;
307 /* size of the merge buffer */
308 int merge_bh_size;
309 /* pointer to current buffer head within the merge buffer */
344 struct idetape_bh *bh; 310 struct idetape_bh *bh;
345 char *b_data; 311 char *b_data;
346 int b_count; 312 int b_count;
347 313
348 /* 314 int pages_per_buffer;
349 * Pipeline parameters.
350 *
351 * To accomplish non-pipelined mode, we simply set the following
352 * variables to zero (or NULL, where appropriate).
353 */
354 /* Number of currently used stages */
355 int nr_stages;
356 /* Number of pending stages */
357 int nr_pending_stages;
358 /* We will not allocate more than this number of stages */
359 int max_stages, min_pipeline, max_pipeline;
360 /* The first stage which will be removed from the pipeline */
361 idetape_stage_t *first_stage;
362 /* The currently active stage */
363 idetape_stage_t *active_stage;
364 /* Will be serviced after the currently active request */
365 idetape_stage_t *next_stage;
366 /* New requests will be added to the pipeline here */
367 idetape_stage_t *last_stage;
368 /* Optional free stage which we can use */
369 idetape_stage_t *cache_stage;
370 int pages_per_stage;
371 /* Wasted space in each stage */ 315 /* Wasted space in each stage */
372 int excess_bh_size; 316 int excess_bh_size;
373 317
@@ -388,45 +332,6 @@ typedef struct ide_tape_obj {
388 /* the tape is write protected (hardware or opened as read-only) */ 332 /* the tape is write protected (hardware or opened as read-only) */
389 char write_prot; 333 char write_prot;
390 334
391 /*
392 * Limit the number of times a request can be postponed, to avoid an
393 * infinite postpone deadlock.
394 */
395 int postpone_cnt;
396
397 /*
398 * Measures number of frames:
399 *
400 * 1. written/read to/from the driver pipeline (pipeline_head).
401 * 2. written/read to/from the tape buffers (idetape_bh).
402 * 3. written/read by the tape to/from the media (tape_head).
403 */
404 int pipeline_head;
405 int buffer_head;
406 int tape_head;
407 int last_tape_head;
408
409 /* Speed control at the tape buffers input/output */
410 unsigned long insert_time;
411 int insert_size;
412 int insert_speed;
413 int max_insert_speed;
414 int measure_insert_time;
415
416 /* Speed regulation negative feedback loop */
417 int speed_control;
418 int pipeline_head_speed;
419 int controlled_pipeline_head_speed;
420 int uncontrolled_pipeline_head_speed;
421 int controlled_last_pipeline_head;
422 unsigned long uncontrolled_pipeline_head_time;
423 unsigned long controlled_pipeline_head_time;
424 int controlled_previous_pipeline_head;
425 int uncontrolled_previous_pipeline_head;
426 unsigned long controlled_previous_head_time;
427 unsigned long uncontrolled_previous_head_time;
428 int restart_speed_control_req;
429
430 u32 debug_mask; 335 u32 debug_mask;
431} idetape_tape_t; 336} idetape_tape_t;
432 337
@@ -674,128 +579,36 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
674 } 579 }
675} 580}
676 581
677static void idetape_activate_next_stage(ide_drive_t *drive) 582/* Free data buffers completely. */
583static void ide_tape_kfree_buffer(idetape_tape_t *tape)
678{ 584{
679 idetape_tape_t *tape = drive->driver_data; 585 struct idetape_bh *prev_bh, *bh = tape->merge_bh;
680 idetape_stage_t *stage = tape->next_stage;
681 struct request *rq = &stage->rq;
682 586
683 debug_log(DBG_PROCS, "Enter %s\n", __func__); 587 while (bh) {
588 u32 size = bh->b_size;
684 589
685 if (stage == NULL) { 590 while (size) {
686 printk(KERN_ERR "ide-tape: bug: Trying to activate a non" 591 unsigned int order = fls(size >> PAGE_SHIFT)-1;
687 " existing stage\n");
688 return;
689 }
690 592
691 rq->rq_disk = tape->disk; 593 if (bh->b_data)
692 rq->buffer = NULL; 594 free_pages((unsigned long)bh->b_data, order);
693 rq->special = (void *)stage->bh; 595
694 tape->active_data_rq = rq; 596 size &= (order-1);
695 tape->active_stage = stage; 597 bh->b_data += (1 << order) * PAGE_SIZE;
696 tape->next_stage = stage->next;
697}
698
699/* Free a stage along with its related buffers completely. */
700static void __idetape_kfree_stage(idetape_stage_t *stage)
701{
702 struct idetape_bh *prev_bh, *bh = stage->bh;
703 int size;
704
705 while (bh != NULL) {
706 if (bh->b_data != NULL) {
707 size = (int) bh->b_size;
708 while (size > 0) {
709 free_page((unsigned long) bh->b_data);
710 size -= PAGE_SIZE;
711 bh->b_data += PAGE_SIZE;
712 }
713 } 598 }
714 prev_bh = bh; 599 prev_bh = bh;
715 bh = bh->b_reqnext; 600 bh = bh->b_reqnext;
716 kfree(prev_bh); 601 kfree(prev_bh);
717 } 602 }
718 kfree(stage); 603 kfree(tape->merge_bh);
719}
720
721static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
722{
723 __idetape_kfree_stage(stage);
724} 604}
725 605
726/*
727 * Remove tape->first_stage from the pipeline. The caller should avoid race
728 * conditions.
729 */
730static void idetape_remove_stage_head(ide_drive_t *drive)
731{
732 idetape_tape_t *tape = drive->driver_data;
733 idetape_stage_t *stage;
734
735 debug_log(DBG_PROCS, "Enter %s\n", __func__);
736
737 if (tape->first_stage == NULL) {
738 printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
739 return;
740 }
741 if (tape->active_stage == tape->first_stage) {
742 printk(KERN_ERR "ide-tape: bug: Trying to free our active "
743 "pipeline stage\n");
744 return;
745 }
746 stage = tape->first_stage;
747 tape->first_stage = stage->next;
748 idetape_kfree_stage(tape, stage);
749 tape->nr_stages--;
750 if (tape->first_stage == NULL) {
751 tape->last_stage = NULL;
752 if (tape->next_stage != NULL)
753 printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
754 " NULL\n");
755 if (tape->nr_stages)
756 printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
757 "now\n");
758 }
759}
760
761/*
762 * This will free all the pipeline stages starting from new_last_stage->next
763 * to the end of the list, and point tape->last_stage to new_last_stage.
764 */
765static void idetape_abort_pipeline(ide_drive_t *drive,
766 idetape_stage_t *new_last_stage)
767{
768 idetape_tape_t *tape = drive->driver_data;
769 idetape_stage_t *stage = new_last_stage->next;
770 idetape_stage_t *nstage;
771
772 debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
773
774 while (stage) {
775 nstage = stage->next;
776 idetape_kfree_stage(tape, stage);
777 --tape->nr_stages;
778 --tape->nr_pending_stages;
779 stage = nstage;
780 }
781 if (new_last_stage)
782 new_last_stage->next = NULL;
783 tape->last_stage = new_last_stage;
784 tape->next_stage = NULL;
785}
786
787/*
788 * Finish servicing a request and insert a pending pipeline request into the
789 * main device queue.
790 */
791static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) 606static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
792{ 607{
793 struct request *rq = HWGROUP(drive)->rq; 608 struct request *rq = HWGROUP(drive)->rq;
794 idetape_tape_t *tape = drive->driver_data; 609 idetape_tape_t *tape = drive->driver_data;
795 unsigned long flags; 610 unsigned long flags;
796 int error; 611 int error;
797 int remove_stage = 0;
798 idetape_stage_t *active_stage;
799 612
800 debug_log(DBG_PROCS, "Enter %s\n", __func__); 613 debug_log(DBG_PROCS, "Enter %s\n", __func__);
801 614
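The new ide_tape_kfree_buffer() above walks the driver's chain of idetape_bh buffer headers, which are linked through b_reqnext. A simplified user-space sketch of that traversal pattern (struct and function names here are invented; only the header objects are freed, and the data pages are assumed to be released separately, as the driver does with free_pages()):

#include <stdlib.h>

/* Simplified stand-in for the driver's struct idetape_bh: a singly linked
 * chain of buffer headers, linked through b_reqnext.
 */
struct buf_hdr {
	struct buf_hdr *b_reqnext;
	char *b_data;
};

static void free_chain(struct buf_hdr *bh)
{
	while (bh) {
		struct buf_hdr *next = bh->b_reqnext;	/* save before freeing */

		free(bh);
		bh = next;
	}
}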
@@ -815,58 +628,8 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
815 628
816 spin_lock_irqsave(&tape->lock, flags); 629 spin_lock_irqsave(&tape->lock, flags);
817 630
818 /* The request was a pipelined data transfer request */
819 if (tape->active_data_rq == rq) {
820 active_stage = tape->active_stage;
821 tape->active_stage = NULL;
822 tape->active_data_rq = NULL;
823 tape->nr_pending_stages--;
824 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
825 remove_stage = 1;
826 if (error) {
827 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
828 &tape->flags);
829 if (error == IDETAPE_ERROR_EOD)
830 idetape_abort_pipeline(drive,
831 active_stage);
832 }
833 } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
834 if (error == IDETAPE_ERROR_EOD) {
835 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
836 &tape->flags);
837 idetape_abort_pipeline(drive, active_stage);
838 }
839 }
840 if (tape->next_stage != NULL) {
841 idetape_activate_next_stage(drive);
842
843 /* Insert the next request into the request queue. */
844 (void)ide_do_drive_cmd(drive, tape->active_data_rq,
845 ide_end);
846 } else if (!error) {
847 /*
848 * This is a part of the feedback loop which tries to
849 * find the optimum number of stages. We are starting
850 * from a minimum maximum number of stages, and if we
851 * sense that the pipeline is empty, we try to increase
852 * it, until we reach the user compile time memory
853 * limit.
854 */
855 int i = (tape->max_pipeline - tape->min_pipeline) / 10;
856
857 tape->max_stages += max(i, 1);
858 tape->max_stages = max(tape->max_stages,
859 tape->min_pipeline);
860 tape->max_stages = min(tape->max_stages,
861 tape->max_pipeline);
862 }
863 }
864 ide_end_drive_cmd(drive, 0, 0); 631 ide_end_drive_cmd(drive, 0, 0);
865 632
866 if (remove_stage)
867 idetape_remove_stage_head(drive);
868 if (tape->active_data_rq == NULL)
869 clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
870 spin_unlock_irqrestore(&tape->lock, flags); 633 spin_unlock_irqrestore(&tape->lock, flags);
871 return 0; 634 return 0;
872} 635}
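The block deleted above implemented the heuristic described by the comment removed in the first ide-tape.c hunk: keep a cap on the number of pipeline stages and, whenever a request completes with the pipeline empty, raise the cap by a fixed fraction of the (MAX - MIN) range, clamped to the compile-time maximum. A minimal sketch of that feedback rule, with invented names and constants standing in for the removed IDETAPE_* parameters (illustrative only; the mechanism no longer exists in the driver after this patch):

#define MIN_STAGES	1	/* stand-in for IDETAPE_MIN_PIPELINE_STAGES */
#define MAX_STAGES	400	/* stand-in for IDETAPE_MAX_PIPELINE_STAGES */

/* Called when the pipeline was found empty after a successful request. */
static int grow_stage_limit(int cur_limit)
{
	int step = (MAX_STAGES - MIN_STAGES) / 10;	/* grow in tenths of the range */

	if (step < 1)
		step = 1;
	cur_limit += step;
	if (cur_limit < MIN_STAGES)
		cur_limit = MIN_STAGES;
	if (cur_limit > MAX_STAGES)
		cur_limit = MAX_STAGES;	/* never exceed the hard ceiling */
	return cur_limit;
}

The removed idetape_end_request() code applied this kind of adjustment to tape->max_stages before the whole pipeline was dropped.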
@@ -993,7 +756,7 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
993 stat = ide_read_status(drive); 756 stat = ide_read_status(drive);
994 757
995 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { 758 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
996 if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) { 759 if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
997 /* 760 /*
998 * A DMA error is sometimes expected. For example, 761 * A DMA error is sometimes expected. For example,
999 * if the tape is crossing a filemark during a 762 * if the tape is crossing a filemark during a
@@ -1083,10 +846,10 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
1083 return ide_do_reset(drive); 846 return ide_do_reset(drive);
1084 } 847 }
1085 /* Get the number of bytes to transfer on this interrupt. */ 848 /* Get the number of bytes to transfer on this interrupt. */
1086 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | 849 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
1087 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); 850 hwif->INB(hwif->io_ports.lbam_addr);
1088 851
1089 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 852 ireason = hwif->INB(hwif->io_ports.nsect_addr);
1090 853
1091 if (ireason & CD) { 854 if (ireason & CD) {
1092 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__); 855 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
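The hunk above only renames the register accessors (io_ports[IDE_BCOUNTH_OFFSET] and friends become the named io_ports.lbah_addr/lbam_addr fields); the arithmetic is unchanged: the ATAPI byte count is a 16-bit value whose low byte is read from the LBA-mid register and whose high byte from the LBA-high register. A tiny sketch of that reassembly (the function name is invented and the register reads are replaced by plain parameters):

#include <stdint.h>

/* Combine the two 8-bit byte-count registers into one 16-bit transfer size. */
static uint16_t atapi_byte_count(uint8_t lbam_val, uint8_t lbah_val)
{
	return (uint16_t)((lbah_val << 8) | lbam_val);	/* high byte, then low byte */
}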
@@ -1190,12 +953,12 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1190 "yet DRQ isn't asserted\n"); 953 "yet DRQ isn't asserted\n");
1191 return startstop; 954 return startstop;
1192 } 955 }
1193 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 956 ireason = hwif->INB(hwif->io_ports.nsect_addr);
1194 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { 957 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1195 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing " 958 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
1196 "a packet command, retrying\n"); 959 "a packet command, retrying\n");
1197 udelay(100); 960 udelay(100);
1198 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 961 ireason = hwif->INB(hwif->io_ports.nsect_addr);
1199 if (retries == 0) { 962 if (retries == 0) {
1200 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while " 963 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
1201 "issuing a packet command, ignoring\n"); 964 "issuing a packet command, ignoring\n");
@@ -1213,7 +976,7 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1213#ifdef CONFIG_BLK_DEV_IDEDMA 976#ifdef CONFIG_BLK_DEV_IDEDMA
1214 /* Begin DMA, if necessary */ 977 /* Begin DMA, if necessary */
1215 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) 978 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
1216 hwif->dma_start(drive); 979 hwif->dma_ops->dma_start(drive);
1217#endif 980#endif
1218 /* Send the actual packet */ 981 /* Send the actual packet */
1219 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12); 982 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
@@ -1279,7 +1042,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1279 ide_dma_off(drive); 1042 ide_dma_off(drive);
1280 } 1043 }
1281 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma) 1044 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
1282 dma_ok = !hwif->dma_setup(drive); 1045 dma_ok = !hwif->dma_ops->dma_setup(drive);
1283 1046
1284 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK | 1047 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1285 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok); 1048 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
@@ -1292,7 +1055,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1292 IDETAPE_WAIT_CMD, NULL); 1055 IDETAPE_WAIT_CMD, NULL);
1293 return ide_started; 1056 return ide_started;
1294 } else { 1057 } else {
1295 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); 1058 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
1296 return idetape_transfer_pc(drive); 1059 return idetape_transfer_pc(drive);
1297 } 1060 }
1298} 1061}
@@ -1335,69 +1098,6 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
1335 pc->idetape_callback = &idetape_pc_callback; 1098 pc->idetape_callback = &idetape_pc_callback;
1336} 1099}
1337 1100
1338static void idetape_calculate_speeds(ide_drive_t *drive)
1339{
1340 idetape_tape_t *tape = drive->driver_data;
1341
1342 if (time_after(jiffies,
1343 tape->controlled_pipeline_head_time + 120 * HZ)) {
1344 tape->controlled_previous_pipeline_head =
1345 tape->controlled_last_pipeline_head;
1346 tape->controlled_previous_head_time =
1347 tape->controlled_pipeline_head_time;
1348 tape->controlled_last_pipeline_head = tape->pipeline_head;
1349 tape->controlled_pipeline_head_time = jiffies;
1350 }
1351 if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
1352 tape->controlled_pipeline_head_speed = (tape->pipeline_head -
1353 tape->controlled_last_pipeline_head) * 32 * HZ /
1354 (jiffies - tape->controlled_pipeline_head_time);
1355 else if (time_after(jiffies, tape->controlled_previous_head_time))
1356 tape->controlled_pipeline_head_speed = (tape->pipeline_head -
1357 tape->controlled_previous_pipeline_head) * 32 *
1358 HZ / (jiffies - tape->controlled_previous_head_time);
1359
1360 if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
1361 /* -1 for read mode error recovery */
1362 if (time_after(jiffies, tape->uncontrolled_previous_head_time +
1363 10 * HZ)) {
1364 tape->uncontrolled_pipeline_head_time = jiffies;
1365 tape->uncontrolled_pipeline_head_speed =
1366 (tape->pipeline_head -
1367 tape->uncontrolled_previous_pipeline_head) *
1368 32 * HZ / (jiffies -
1369 tape->uncontrolled_previous_head_time);
1370 }
1371 } else {
1372 tape->uncontrolled_previous_head_time = jiffies;
1373 tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
1374 if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
1375 30 * HZ))
1376 tape->uncontrolled_pipeline_head_time = jiffies;
1377
1378 }
1379 tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
1380 tape->controlled_pipeline_head_speed);
1381
1382 if (tape->speed_control == 1) {
1383 if (tape->nr_pending_stages >= tape->max_stages / 2)
1384 tape->max_insert_speed = tape->pipeline_head_speed +
1385 (1100 - tape->pipeline_head_speed) * 2 *
1386 (tape->nr_pending_stages - tape->max_stages / 2)
1387 / tape->max_stages;
1388 else
1389 tape->max_insert_speed = 500 +
1390 (tape->pipeline_head_speed - 500) * 2 *
1391 tape->nr_pending_stages / tape->max_stages;
1392
1393 if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
1394 tape->max_insert_speed = 5000;
1395 } else
1396 tape->max_insert_speed = tape->speed_control;
1397
1398 tape->max_insert_speed = max(tape->max_insert_speed, 500);
1399}
1400
1401static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) 1101static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
1402{ 1102{
1403 idetape_tape_t *tape = drive->driver_data; 1103 idetape_tape_t *tape = drive->driver_data;
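Both the removed idetape_calculate_speeds() above and the average-speed code kept in the next hunk use the same idiom: divide the amount of data moved since a recorded timestamp by the elapsed jiffies, scaling by HZ to obtain a per-second rate. A minimal user-space sketch of that arithmetic (names and the tick constant are invented; the kernel uses jiffies and HZ):

#define TICKS_PER_SEC	250	/* stand-in for the kernel's HZ */

/* Rate in KiB/s given bytes moved between two tick timestamps. */
static long kib_per_sec(long bytes_moved, long start_tick, long now_tick)
{
	long elapsed = now_tick - start_tick;

	if (elapsed <= 0)
		return 0;	/* no time has passed yet; avoid dividing by zero */
	return bytes_moved * TICKS_PER_SEC / elapsed / 1024;
}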
@@ -1432,17 +1132,7 @@ static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
1432 int blocks = tape->pc->xferred / tape->blk_size; 1132 int blocks = tape->pc->xferred / tape->blk_size;
1433 1133
1434 tape->avg_size += blocks * tape->blk_size; 1134 tape->avg_size += blocks * tape->blk_size;
1435 tape->insert_size += blocks * tape->blk_size; 1135
1436 if (tape->insert_size > 1024 * 1024)
1437 tape->measure_insert_time = 1;
1438 if (tape->measure_insert_time) {
1439 tape->measure_insert_time = 0;
1440 tape->insert_time = jiffies;
1441 tape->insert_size = 0;
1442 }
1443 if (time_after(jiffies, tape->insert_time))
1444 tape->insert_speed = tape->insert_size / 1024 * HZ /
1445 (jiffies - tape->insert_time);
1446 if (time_after_eq(jiffies, tape->avg_time + HZ)) { 1136 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1447 tape->avg_speed = tape->avg_size * HZ / 1137 tape->avg_speed = tape->avg_size * HZ /
1448 (jiffies - tape->avg_time) / 1024; 1138 (jiffies - tape->avg_time) / 1024;
@@ -1475,7 +1165,7 @@ static void idetape_create_read_cmd(idetape_tape_t *tape,
1475 pc->buf = NULL; 1165 pc->buf = NULL;
1476 pc->buf_size = length * tape->blk_size; 1166 pc->buf_size = length * tape->blk_size;
1477 pc->req_xfer = pc->buf_size; 1167 pc->req_xfer = pc->buf_size;
1478 if (pc->req_xfer == tape->stage_size) 1168 if (pc->req_xfer == tape->buffer_size)
1479 pc->flags |= PC_FLAG_DMA_RECOMMENDED; 1169 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1480} 1170}
1481 1171
@@ -1495,7 +1185,7 @@ static void idetape_create_write_cmd(idetape_tape_t *tape,
1495 pc->buf = NULL; 1185 pc->buf = NULL;
1496 pc->buf_size = length * tape->blk_size; 1186 pc->buf_size = length * tape->blk_size;
1497 pc->req_xfer = pc->buf_size; 1187 pc->req_xfer = pc->buf_size;
1498 if (pc->req_xfer == tape->stage_size) 1188 if (pc->req_xfer == tape->buffer_size)
1499 pc->flags |= PC_FLAG_DMA_RECOMMENDED; 1189 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1500} 1190}
1501 1191
@@ -1547,10 +1237,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1547 drive->post_reset = 0; 1237 drive->post_reset = 0;
1548 } 1238 }
1549 1239
1550 if (time_after(jiffies, tape->insert_time))
1551 tape->insert_speed = tape->insert_size / 1024 * HZ /
1552 (jiffies - tape->insert_time);
1553 idetape_calculate_speeds(drive);
1554 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && 1240 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
1555 (stat & SEEK_STAT) == 0) { 1241 (stat & SEEK_STAT) == 0) {
1556 if (postponed_rq == NULL) { 1242 if (postponed_rq == NULL) {
@@ -1574,16 +1260,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1574 return ide_stopped; 1260 return ide_stopped;
1575 } 1261 }
1576 if (rq->cmd[0] & REQ_IDETAPE_READ) { 1262 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1577 tape->buffer_head++;
1578 tape->postpone_cnt = 0;
1579 pc = idetape_next_pc_storage(drive); 1263 pc = idetape_next_pc_storage(drive);
1580 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, 1264 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
1581 (struct idetape_bh *)rq->special); 1265 (struct idetape_bh *)rq->special);
1582 goto out; 1266 goto out;
1583 } 1267 }
1584 if (rq->cmd[0] & REQ_IDETAPE_WRITE) { 1268 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1585 tape->buffer_head++;
1586 tape->postpone_cnt = 0;
1587 pc = idetape_next_pc_storage(drive); 1269 pc = idetape_next_pc_storage(drive);
1588 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, 1270 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
1589 (struct idetape_bh *)rq->special); 1271 (struct idetape_bh *)rq->special);
@@ -1604,111 +1286,91 @@ out:
1604 return idetape_issue_pc(drive, pc); 1286 return idetape_issue_pc(drive, pc);
1605} 1287}
1606 1288
1607/* Pipeline related functions */
1608static inline int idetape_pipeline_active(idetape_tape_t *tape)
1609{
1610 int rc1, rc2;
1611
1612 rc1 = test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
1613 rc2 = (tape->active_data_rq != NULL);
1614 return rc1;
1615}
1616
1617/* 1289/*
1618 * The function below uses __get_free_page to allocate a pipeline stage, along 1290 * The function below uses __get_free_pages to allocate a data buffer of size
1619 * with all the necessary small buffers which together make a buffer of size 1291 * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
1620 * tape->stage_size (or a bit more). We attempt to combine sequential pages as
1621 * much as possible. 1292 * much as possible.
1622 * 1293 *
1623 * It returns a pointer to the new allocated stage, or NULL if we can't (or 1294 * It returns a pointer to the newly allocated buffer, or NULL in case of
1624 * don't want to) allocate a stage. 1295 * failure.
1625 *
1626 * Pipeline stages are optional and are used to increase performance. If we
1627 * can't allocate them, we'll manage without them.
1628 */ 1296 */
1629static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full, 1297static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
1630 int clear) 1298 int full, int clear)
1631{ 1299{
1632 idetape_stage_t *stage; 1300 struct idetape_bh *prev_bh, *bh, *merge_bh;
1633 struct idetape_bh *prev_bh, *bh; 1301 int pages = tape->pages_per_buffer;
1634 int pages = tape->pages_per_stage; 1302 unsigned int order, b_allocd;
1635 char *b_data = NULL; 1303 char *b_data = NULL;
1636 1304
1637 stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL); 1305 merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1638 if (!stage) 1306 bh = merge_bh;
1639 return NULL;
1640 stage->next = NULL;
1641
1642 stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1643 bh = stage->bh;
1644 if (bh == NULL) 1307 if (bh == NULL)
1645 goto abort; 1308 goto abort;
1646 bh->b_reqnext = NULL; 1309
1647 bh->b_data = (char *) __get_free_page(GFP_KERNEL); 1310 order = fls(pages) - 1;
1311 bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
1648 if (!bh->b_data) 1312 if (!bh->b_data)
1649 goto abort; 1313 goto abort;
1314 b_allocd = (1 << order) * PAGE_SIZE;
1315 pages &= (order-1);
1316
1650 if (clear) 1317 if (clear)
1651 memset(bh->b_data, 0, PAGE_SIZE); 1318 memset(bh->b_data, 0, b_allocd);
1652 bh->b_size = PAGE_SIZE; 1319 bh->b_reqnext = NULL;
1320 bh->b_size = b_allocd;
1653 atomic_set(&bh->b_count, full ? bh->b_size : 0); 1321 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1654 1322
1655 while (--pages) { 1323 while (pages) {
1656 b_data = (char *) __get_free_page(GFP_KERNEL); 1324 order = fls(pages) - 1;
1325 b_data = (char *) __get_free_pages(GFP_KERNEL, order);
1657 if (!b_data) 1326 if (!b_data)
1658 goto abort; 1327 goto abort;
1328 b_allocd = (1 << order) * PAGE_SIZE;
1329
1659 if (clear) 1330 if (clear)
1660 memset(b_data, 0, PAGE_SIZE); 1331 memset(b_data, 0, b_allocd);
1661 if (bh->b_data == b_data + PAGE_SIZE) { 1332
1662 bh->b_size += PAGE_SIZE; 1333 /* newly allocated page frames below buffer header or ...*/
1663 bh->b_data -= PAGE_SIZE; 1334 if (bh->b_data == b_data + b_allocd) {
1335 bh->b_size += b_allocd;
1336 bh->b_data -= b_allocd;
1664 if (full) 1337 if (full)
1665 atomic_add(PAGE_SIZE, &bh->b_count); 1338 atomic_add(b_allocd, &bh->b_count);
1666 continue; 1339 continue;
1667 } 1340 }
1341 /* they are above the header */
1668 if (b_data == bh->b_data + bh->b_size) { 1342 if (b_data == bh->b_data + bh->b_size) {
1669 bh->b_size += PAGE_SIZE; 1343 bh->b_size += b_allocd;
1670 if (full) 1344 if (full)
1671 atomic_add(PAGE_SIZE, &bh->b_count); 1345 atomic_add(b_allocd, &bh->b_count);
1672 continue; 1346 continue;
1673 } 1347 }
1674 prev_bh = bh; 1348 prev_bh = bh;
1675 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); 1349 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1676 if (!bh) { 1350 if (!bh) {
1677 free_page((unsigned long) b_data); 1351 free_pages((unsigned long) b_data, order);
1678 goto abort; 1352 goto abort;
1679 } 1353 }
1680 bh->b_reqnext = NULL; 1354 bh->b_reqnext = NULL;
1681 bh->b_data = b_data; 1355 bh->b_data = b_data;
1682 bh->b_size = PAGE_SIZE; 1356 bh->b_size = b_allocd;
1683 atomic_set(&bh->b_count, full ? bh->b_size : 0); 1357 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1684 prev_bh->b_reqnext = bh; 1358 prev_bh->b_reqnext = bh;
1359
1360 pages &= (order-1);
1685 } 1361 }
1362
1686 bh->b_size -= tape->excess_bh_size; 1363 bh->b_size -= tape->excess_bh_size;
1687 if (full) 1364 if (full)
1688 atomic_sub(tape->excess_bh_size, &bh->b_count); 1365 atomic_sub(tape->excess_bh_size, &bh->b_count);
1689 return stage; 1366 return merge_bh;
1690abort: 1367abort:
1691 __idetape_kfree_stage(stage); 1368 ide_tape_kfree_buffer(tape);
1692 return NULL; 1369 return NULL;
1693} 1370}
1694 1371
1695static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
1696{
1697 idetape_stage_t *cache_stage = tape->cache_stage;
1698
1699 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1700
1701 if (tape->nr_stages >= tape->max_stages)
1702 return NULL;
1703 if (cache_stage != NULL) {
1704 tape->cache_stage = NULL;
1705 return cache_stage;
1706 }
1707 return __idetape_kmalloc_stage(tape, 0, 0);
1708}
1709
1710static int idetape_copy_stage_from_user(idetape_tape_t *tape, 1372static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1711 idetape_stage_t *stage, const char __user *buf, int n) 1373 const char __user *buf, int n)
1712{ 1374{
1713 struct idetape_bh *bh = tape->bh; 1375 struct idetape_bh *bh = tape->bh;
1714 int count; 1376 int count;
@@ -1740,7 +1402,7 @@ static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1740} 1402}
1741 1403
1742static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf, 1404static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1743 idetape_stage_t *stage, int n) 1405 int n)
1744{ 1406{
1745 struct idetape_bh *bh = tape->bh; 1407 struct idetape_bh *bh = tape->bh;
1746 int count; 1408 int count;
@@ -1771,11 +1433,11 @@ static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1771 return ret; 1433 return ret;
1772} 1434}
1773 1435
1774static void idetape_init_merge_stage(idetape_tape_t *tape) 1436static void idetape_init_merge_buffer(idetape_tape_t *tape)
1775{ 1437{
1776 struct idetape_bh *bh = tape->merge_stage->bh; 1438 struct idetape_bh *bh = tape->merge_bh;
1439 tape->bh = tape->merge_bh;
1777 1440
1778 tape->bh = bh;
1779 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) 1441 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1780 atomic_set(&bh->b_count, 0); 1442 atomic_set(&bh->b_count, 0);
1781 else { 1443 else {
@@ -1784,61 +1446,6 @@ static void idetape_init_merge_stage(idetape_tape_t *tape)
1784 } 1446 }
1785} 1447}
1786 1448
1787static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
1788{
1789 struct idetape_bh *tmp;
1790
1791 tmp = stage->bh;
1792 stage->bh = tape->merge_stage->bh;
1793 tape->merge_stage->bh = tmp;
1794 idetape_init_merge_stage(tape);
1795}
1796
1797/* Add a new stage at the end of the pipeline. */
1798static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
1799{
1800 idetape_tape_t *tape = drive->driver_data;
1801 unsigned long flags;
1802
1803 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1804
1805 spin_lock_irqsave(&tape->lock, flags);
1806 stage->next = NULL;
1807 if (tape->last_stage != NULL)
1808 tape->last_stage->next = stage;
1809 else
1810 tape->first_stage = stage;
1811 tape->next_stage = stage;
1812 tape->last_stage = stage;
1813 if (tape->next_stage == NULL)
1814 tape->next_stage = tape->last_stage;
1815 tape->nr_stages++;
1816 tape->nr_pending_stages++;
1817 spin_unlock_irqrestore(&tape->lock, flags);
1818}
1819
1820/* Install a completion in a pending request and sleep until it is serviced. The
1821 * caller should ensure that the request will not be serviced before we install
1822 * the completion (usually by disabling interrupts).
1823 */
1824static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
1825{
1826 DECLARE_COMPLETION_ONSTACK(wait);
1827 idetape_tape_t *tape = drive->driver_data;
1828
1829 if (rq == NULL || !blk_special_request(rq)) {
1830 printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
1831 " request\n");
1832 return;
1833 }
1834 rq->end_io_data = &wait;
1835 rq->end_io = blk_end_sync_rq;
1836 spin_unlock_irq(&tape->lock);
1837 wait_for_completion(&wait);
1838 /* The stage and its struct request have been deallocated */
1839 spin_lock_irq(&tape->lock);
1840}
1841
1842static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive) 1449static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1843{ 1450{
1844 idetape_tape_t *tape = drive->driver_data; 1451 idetape_tape_t *tape = drive->driver_data;
@@ -1907,7 +1514,7 @@ static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
1907 * to the request list without waiting for it to be serviced! In that case, we 1514 * to the request list without waiting for it to be serviced! In that case, we
1908 * usually use idetape_queue_pc_head(). 1515 * usually use idetape_queue_pc_head().
1909 */ 1516 */
1910static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc) 1517static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1911{ 1518{
1912 struct ide_tape_obj *tape = drive->driver_data; 1519 struct ide_tape_obj *tape = drive->driver_data;
1913 struct request rq; 1520 struct request rq;
@@ -1939,7 +1546,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1939 timeout += jiffies; 1546 timeout += jiffies;
1940 while (time_before(jiffies, timeout)) { 1547 while (time_before(jiffies, timeout)) {
1941 idetape_create_test_unit_ready_cmd(&pc); 1548 idetape_create_test_unit_ready_cmd(&pc);
1942 if (!__idetape_queue_pc_tail(drive, &pc)) 1549 if (!idetape_queue_pc_tail(drive, &pc))
1943 return 0; 1550 return 0;
1944 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2) 1551 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
1945 || (tape->asc == 0x3A)) { 1552 || (tape->asc == 0x3A)) {
@@ -1948,7 +1555,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1948 return -ENOMEDIUM; 1555 return -ENOMEDIUM;
1949 idetape_create_load_unload_cmd(drive, &pc, 1556 idetape_create_load_unload_cmd(drive, &pc,
1950 IDETAPE_LU_LOAD_MASK); 1557 IDETAPE_LU_LOAD_MASK);
1951 __idetape_queue_pc_tail(drive, &pc); 1558 idetape_queue_pc_tail(drive, &pc);
1952 load_attempted = 1; 1559 load_attempted = 1;
1953 /* not about to be ready */ 1560 /* not about to be ready */
1954 } else if (!(tape->sense_key == 2 && tape->asc == 4 && 1561 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
@@ -1959,11 +1566,6 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1959 return -EIO; 1566 return -EIO;
1960} 1567}
1961 1568
1962static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1963{
1964 return __idetape_queue_pc_tail(drive, pc);
1965}
1966
1967static int idetape_flush_tape_buffers(ide_drive_t *drive) 1569static int idetape_flush_tape_buffers(ide_drive_t *drive)
1968{ 1570{
1969 struct ide_atapi_pc pc; 1571 struct ide_atapi_pc pc;
@@ -2029,50 +1631,21 @@ static int idetape_create_prevent_cmd(ide_drive_t *drive,
2029 return 1; 1631 return 1;
2030} 1632}
2031 1633
2032static int __idetape_discard_read_pipeline(ide_drive_t *drive) 1634static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
2033{ 1635{
2034 idetape_tape_t *tape = drive->driver_data; 1636 idetape_tape_t *tape = drive->driver_data;
2035 unsigned long flags;
2036 int cnt;
2037 1637
2038 if (tape->chrdev_dir != IDETAPE_DIR_READ) 1638 if (tape->chrdev_dir != IDETAPE_DIR_READ)
2039 return 0; 1639 return;
2040 1640
2041 /* Remove merge stage. */ 1641 clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
2042 cnt = tape->merge_stage_size / tape->blk_size; 1642 tape->merge_bh_size = 0;
2043 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1643 if (tape->merge_bh != NULL) {
2044 ++cnt; /* Filemarks count as 1 sector */ 1644 ide_tape_kfree_buffer(tape);
2045 tape->merge_stage_size = 0; 1645 tape->merge_bh = NULL;
2046 if (tape->merge_stage != NULL) {
2047 __idetape_kfree_stage(tape->merge_stage);
2048 tape->merge_stage = NULL;
2049 } 1646 }
2050 1647
2051 /* Clear pipeline flags. */
2052 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2053 tape->chrdev_dir = IDETAPE_DIR_NONE; 1648 tape->chrdev_dir = IDETAPE_DIR_NONE;
2054
2055 /* Remove pipeline stages. */
2056 if (tape->first_stage == NULL)
2057 return 0;
2058
2059 spin_lock_irqsave(&tape->lock, flags);
2060 tape->next_stage = NULL;
2061 if (idetape_pipeline_active(tape))
2062 idetape_wait_for_request(drive, tape->active_data_rq);
2063 spin_unlock_irqrestore(&tape->lock, flags);
2064
2065 while (tape->first_stage != NULL) {
2066 struct request *rq_ptr = &tape->first_stage->rq;
2067
2068 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
2069 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2070 ++cnt;
2071 idetape_remove_stage_head(drive);
2072 }
2073 tape->nr_pending_stages = 0;
2074 tape->max_stages = tape->min_pipeline;
2075 return cnt;
2076} 1649}
2077 1650
2078/* 1651/*
@@ -2089,7 +1662,7 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
2089 struct ide_atapi_pc pc; 1662 struct ide_atapi_pc pc;
2090 1663
2091 if (tape->chrdev_dir == IDETAPE_DIR_READ) 1664 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2092 __idetape_discard_read_pipeline(drive); 1665 __ide_tape_discard_merge_buffer(drive);
2093 idetape_wait_ready(drive, 60 * 5 * HZ); 1666 idetape_wait_ready(drive, 60 * 5 * HZ);
2094 idetape_create_locate_cmd(drive, &pc, block, partition, skip); 1667 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
2095 retval = idetape_queue_pc_tail(drive, &pc); 1668 retval = idetape_queue_pc_tail(drive, &pc);
@@ -2100,20 +1673,19 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
2100 return (idetape_queue_pc_tail(drive, &pc)); 1673 return (idetape_queue_pc_tail(drive, &pc));
2101} 1674}
2102 1675
2103static void idetape_discard_read_pipeline(ide_drive_t *drive, 1676static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
2104 int restore_position) 1677 int restore_position)
2105{ 1678{
2106 idetape_tape_t *tape = drive->driver_data; 1679 idetape_tape_t *tape = drive->driver_data;
2107 int cnt;
2108 int seek, position; 1680 int seek, position;
2109 1681
2110 cnt = __idetape_discard_read_pipeline(drive); 1682 __ide_tape_discard_merge_buffer(drive);
2111 if (restore_position) { 1683 if (restore_position) {
2112 position = idetape_read_position(drive); 1684 position = idetape_read_position(drive);
2113 seek = position > cnt ? position - cnt : 0; 1685 seek = position > 0 ? position : 0;
2114 if (idetape_position_tape(drive, seek, 0, 0)) { 1686 if (idetape_position_tape(drive, seek, 0, 0)) {
2115 printk(KERN_INFO "ide-tape: %s: position_tape failed in" 1687 printk(KERN_INFO "ide-tape: %s: position_tape failed in"
2116 " discard_pipeline()\n", tape->name); 1688 " %s\n", tape->name, __func__);
2117 return; 1689 return;
2118 } 1690 }
2119 } 1691 }
@@ -2131,12 +1703,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
2131 1703
2132 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd); 1704 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
2133 1705
2134 if (idetape_pipeline_active(tape)) {
2135 printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
2136 __func__);
2137 return (0);
2138 }
2139
2140 idetape_init_rq(&rq, cmd); 1706 idetape_init_rq(&rq, cmd);
2141 rq.rq_disk = tape->disk; 1707 rq.rq_disk = tape->disk;
2142 rq.special = (void *)bh; 1708 rq.special = (void *)bh;
@@ -2148,27 +1714,13 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
2148 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0) 1714 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
2149 return 0; 1715 return 0;
2150 1716
2151 if (tape->merge_stage) 1717 if (tape->merge_bh)
2152 idetape_init_merge_stage(tape); 1718 idetape_init_merge_buffer(tape);
2153 if (rq.errors == IDETAPE_ERROR_GENERAL) 1719 if (rq.errors == IDETAPE_ERROR_GENERAL)
2154 return -EIO; 1720 return -EIO;
2155 return (tape->blk_size * (blocks-rq.current_nr_sectors)); 1721 return (tape->blk_size * (blocks-rq.current_nr_sectors));
2156} 1722}
2157 1723
2158/* start servicing the pipeline stages, starting from tape->next_stage. */
2159static void idetape_plug_pipeline(ide_drive_t *drive)
2160{
2161 idetape_tape_t *tape = drive->driver_data;
2162
2163 if (tape->next_stage == NULL)
2164 return;
2165 if (!idetape_pipeline_active(tape)) {
2166 set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
2167 idetape_activate_next_stage(drive);
2168 (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
2169 }
2170}
2171
2172static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc) 1724static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
2173{ 1725{
2174 idetape_init_pc(pc); 1726 idetape_init_pc(pc);
@@ -2206,135 +1758,39 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
2206 pc->idetape_callback = &idetape_pc_callback; 1758 pc->idetape_callback = &idetape_pc_callback;
2207} 1759}
2208 1760
2209static void idetape_wait_first_stage(ide_drive_t *drive) 1761/* Queue up a character device originated write request. */
2210{
2211 idetape_tape_t *tape = drive->driver_data;
2212 unsigned long flags;
2213
2214 if (tape->first_stage == NULL)
2215 return;
2216 spin_lock_irqsave(&tape->lock, flags);
2217 if (tape->active_stage == tape->first_stage)
2218 idetape_wait_for_request(drive, tape->active_data_rq);
2219 spin_unlock_irqrestore(&tape->lock, flags);
2220}
2221
2222/*
2223 * Try to add a character device originated write request to our pipeline. In
2224 * case we don't succeed, we revert to non-pipelined operation mode for this
2225 * request. In order to accomplish that, we
2226 *
2227 * 1. Try to allocate a new pipeline stage.
2228 * 2. If we can't, wait for more and more requests to be serviced and try again
2229 * each time.
2230 * 3. If we still can't allocate a stage, fallback to non-pipelined operation
2231 * mode for this request.
2232 */
2233static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks) 1762static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
2234{ 1763{
2235 idetape_tape_t *tape = drive->driver_data; 1764 idetape_tape_t *tape = drive->driver_data;
2236 idetape_stage_t *new_stage;
2237 unsigned long flags;
2238 struct request *rq;
2239 1765
2240 debug_log(DBG_CHRDEV, "Enter %s\n", __func__); 1766 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2241 1767
2242 /* Attempt to allocate a new stage. Beware possible race conditions. */ 1768 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
2243 while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) { 1769 blocks, tape->merge_bh);
2244 spin_lock_irqsave(&tape->lock, flags);
2245 if (idetape_pipeline_active(tape)) {
2246 idetape_wait_for_request(drive, tape->active_data_rq);
2247 spin_unlock_irqrestore(&tape->lock, flags);
2248 } else {
2249 spin_unlock_irqrestore(&tape->lock, flags);
2250 idetape_plug_pipeline(drive);
2251 if (idetape_pipeline_active(tape))
2252 continue;
2253 /*
2254 * The machine is short on memory. Fallback to non-
2255 * pipelined operation mode for this request.
2256 */
2257 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
2258 blocks, tape->merge_stage->bh);
2259 }
2260 }
2261 rq = &new_stage->rq;
2262 idetape_init_rq(rq, REQ_IDETAPE_WRITE);
2263 /* Doesn't actually matter - We always assume sequential access */
2264 rq->sector = tape->first_frame;
2265 rq->current_nr_sectors = blocks;
2266 rq->nr_sectors = blocks;
2267
2268 idetape_switch_buffers(tape, new_stage);
2269 idetape_add_stage_tail(drive, new_stage);
2270 tape->pipeline_head++;
2271 idetape_calculate_speeds(drive);
2272
2273 /*
2274 * Estimate whether the tape has stopped writing by checking if our
2275 * write pipeline is currently empty. If we are not writing anymore,
2276 * wait for the pipeline to be almost completely full (90%) before
2277 * starting to service requests, so that we will be able to keep up with
2278 * the higher speeds of the tape.
2279 */
2280 if (!idetape_pipeline_active(tape)) {
2281 if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
2282 tape->nr_stages >= tape->max_stages -
2283 tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
2284 tape->blk_size) {
2285 tape->measure_insert_time = 1;
2286 tape->insert_time = jiffies;
2287 tape->insert_size = 0;
2288 tape->insert_speed = 0;
2289 idetape_plug_pipeline(drive);
2290 }
2291 }
2292 if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2293 /* Return a deferred error */
2294 return -EIO;
2295 return blocks;
2296}
2297
2298/*
2299 * Wait until all pending pipeline requests are serviced. Typically called on
2300 * device close.
2301 */
2302static void idetape_wait_for_pipeline(ide_drive_t *drive)
2303{
2304 idetape_tape_t *tape = drive->driver_data;
2305 unsigned long flags;
2306
2307 while (tape->next_stage || idetape_pipeline_active(tape)) {
2308 idetape_plug_pipeline(drive);
2309 spin_lock_irqsave(&tape->lock, flags);
2310 if (idetape_pipeline_active(tape))
2311 idetape_wait_for_request(drive, tape->active_data_rq);
2312 spin_unlock_irqrestore(&tape->lock, flags);
2313 }
2314} 1770}
2315 1771
2316static void idetape_empty_write_pipeline(ide_drive_t *drive) 1772static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
2317{ 1773{
2318 idetape_tape_t *tape = drive->driver_data; 1774 idetape_tape_t *tape = drive->driver_data;
2319 int blocks, min; 1775 int blocks, min;
2320 struct idetape_bh *bh; 1776 struct idetape_bh *bh;
2321 1777
2322 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 1778 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2323 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline," 1779 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
2324 " but we are not writing.\n"); 1780 " but we are not writing.\n");
2325 return; 1781 return;
2326 } 1782 }
2327 if (tape->merge_stage_size > tape->stage_size) { 1783 if (tape->merge_bh_size > tape->buffer_size) {
2328 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n"); 1784 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
2329 tape->merge_stage_size = tape->stage_size; 1785 tape->merge_bh_size = tape->buffer_size;
2330 } 1786 }
2331 if (tape->merge_stage_size) { 1787 if (tape->merge_bh_size) {
2332 blocks = tape->merge_stage_size / tape->blk_size; 1788 blocks = tape->merge_bh_size / tape->blk_size;
2333 if (tape->merge_stage_size % tape->blk_size) { 1789 if (tape->merge_bh_size % tape->blk_size) {
2334 unsigned int i; 1790 unsigned int i;
2335 1791
2336 blocks++; 1792 blocks++;
2337 i = tape->blk_size - tape->merge_stage_size % 1793 i = tape->blk_size - tape->merge_bh_size %
2338 tape->blk_size; 1794 tape->blk_size;
2339 bh = tape->bh->b_reqnext; 1795 bh = tape->bh->b_reqnext;
2340 while (bh) { 1796 while (bh) {
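ide_tape_flush_merge_buffer() above turns whatever is left in the merge buffer into whole tape blocks: when the residue is not block-aligned it rounds the block count up and zero-fills the shortfall before queueing the write (idetape_pad_zeros(), further down, pads in the same spirit). A minimal sketch of that round-up-and-pad arithmetic on a flat buffer (names are invented; the driver performs this on the idetape_bh chain instead):

#include <string.h>

/* Round a byte count held in a flat buffer up to whole blocks, zero-filling
 * the tail of the final block. Returns the number of blocks to write.
 * 'buf' must have room for the rounded-up size.
 */
static int round_up_and_pad(char *buf, int bytes, int blk_size)
{
	int blocks = bytes / blk_size;
	int remainder = bytes % blk_size;

	if (remainder) {
		memset(buf + bytes, 0, blk_size - remainder);	/* pad the tail */
		blocks++;
	}
	return blocks;
}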
@@ -2358,74 +1814,33 @@ static void idetape_empty_write_pipeline(ide_drive_t *drive)
2358 } 1814 }
2359 } 1815 }
2360 (void) idetape_add_chrdev_write_request(drive, blocks); 1816 (void) idetape_add_chrdev_write_request(drive, blocks);
2361 tape->merge_stage_size = 0; 1817 tape->merge_bh_size = 0;
2362 } 1818 }
2363 idetape_wait_for_pipeline(drive); 1819 if (tape->merge_bh != NULL) {
2364 if (tape->merge_stage != NULL) { 1820 ide_tape_kfree_buffer(tape);
2365 __idetape_kfree_stage(tape->merge_stage); 1821 tape->merge_bh = NULL;
2366 tape->merge_stage = NULL;
2367 } 1822 }
2368 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2369 tape->chrdev_dir = IDETAPE_DIR_NONE; 1823 tape->chrdev_dir = IDETAPE_DIR_NONE;
2370
2371 /*
2372 * On the next backup, perform the feedback loop again. (I don't want to
2373 * keep sense information between backups, as some systems are
2374 * constantly on, and the system load can be totally different on the
2375 * next backup).
2376 */
2377 tape->max_stages = tape->min_pipeline;
2378 if (tape->first_stage != NULL ||
2379 tape->next_stage != NULL ||
2380 tape->last_stage != NULL ||
2381 tape->nr_stages != 0) {
2382 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
2383 "first_stage %p, next_stage %p, "
2384 "last_stage %p, nr_stages %d\n",
2385 tape->first_stage, tape->next_stage,
2386 tape->last_stage, tape->nr_stages);
2387 }
2388} 1824}
2389 1825
2390static void idetape_restart_speed_control(ide_drive_t *drive) 1826static int idetape_init_read(ide_drive_t *drive)
2391{ 1827{
2392 idetape_tape_t *tape = drive->driver_data; 1828 idetape_tape_t *tape = drive->driver_data;
2393
2394 tape->restart_speed_control_req = 0;
2395 tape->pipeline_head = 0;
2396 tape->controlled_last_pipeline_head = 0;
2397 tape->controlled_previous_pipeline_head = 0;
2398 tape->uncontrolled_previous_pipeline_head = 0;
2399 tape->controlled_pipeline_head_speed = 5000;
2400 tape->pipeline_head_speed = 5000;
2401 tape->uncontrolled_pipeline_head_speed = 0;
2402 tape->controlled_pipeline_head_time =
2403 tape->uncontrolled_pipeline_head_time = jiffies;
2404 tape->controlled_previous_head_time =
2405 tape->uncontrolled_previous_head_time = jiffies;
2406}
2407
2408static int idetape_init_read(ide_drive_t *drive, int max_stages)
2409{
2410 idetape_tape_t *tape = drive->driver_data;
2411 idetape_stage_t *new_stage;
2412 struct request rq;
2413 int bytes_read; 1829 int bytes_read;
2414 u16 blocks = *(u16 *)&tape->caps[12];
2415 1830
2416 /* Initialize read operation */ 1831 /* Initialize read operation */
2417 if (tape->chrdev_dir != IDETAPE_DIR_READ) { 1832 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2418 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { 1833 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2419 idetape_empty_write_pipeline(drive); 1834 ide_tape_flush_merge_buffer(drive);
2420 idetape_flush_tape_buffers(drive); 1835 idetape_flush_tape_buffers(drive);
2421 } 1836 }
2422 if (tape->merge_stage || tape->merge_stage_size) { 1837 if (tape->merge_bh || tape->merge_bh_size) {
2423 printk(KERN_ERR "ide-tape: merge_stage_size should be" 1838 printk(KERN_ERR "ide-tape: merge_bh_size should be"
2424 " 0 now\n"); 1839 " 0 now\n");
2425 tape->merge_stage_size = 0; 1840 tape->merge_bh_size = 0;
2426 } 1841 }
2427 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); 1842 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
2428 if (!tape->merge_stage) 1843 if (!tape->merge_bh)
2429 return -ENOMEM; 1844 return -ENOMEM;
2430 tape->chrdev_dir = IDETAPE_DIR_READ; 1845 tape->chrdev_dir = IDETAPE_DIR_READ;
2431 1846
@@ -2438,54 +1853,23 @@ static int idetape_init_read(ide_drive_t *drive, int max_stages)
2438 if (drive->dsc_overlap) { 1853 if (drive->dsc_overlap) {
2439 bytes_read = idetape_queue_rw_tail(drive, 1854 bytes_read = idetape_queue_rw_tail(drive,
2440 REQ_IDETAPE_READ, 0, 1855 REQ_IDETAPE_READ, 0,
2441 tape->merge_stage->bh); 1856 tape->merge_bh);
2442 if (bytes_read < 0) { 1857 if (bytes_read < 0) {
2443 __idetape_kfree_stage(tape->merge_stage); 1858 ide_tape_kfree_buffer(tape);
2444 tape->merge_stage = NULL; 1859 tape->merge_bh = NULL;
2445 tape->chrdev_dir = IDETAPE_DIR_NONE; 1860 tape->chrdev_dir = IDETAPE_DIR_NONE;
2446 return bytes_read; 1861 return bytes_read;
2447 } 1862 }
2448 } 1863 }
2449 } 1864 }
2450 if (tape->restart_speed_control_req) 1865
2451 idetape_restart_speed_control(drive);
2452 idetape_init_rq(&rq, REQ_IDETAPE_READ);
2453 rq.sector = tape->first_frame;
2454 rq.nr_sectors = blocks;
2455 rq.current_nr_sectors = blocks;
2456 if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
2457 tape->nr_stages < max_stages) {
2458 new_stage = idetape_kmalloc_stage(tape);
2459 while (new_stage != NULL) {
2460 new_stage->rq = rq;
2461 idetape_add_stage_tail(drive, new_stage);
2462 if (tape->nr_stages >= max_stages)
2463 break;
2464 new_stage = idetape_kmalloc_stage(tape);
2465 }
2466 }
2467 if (!idetape_pipeline_active(tape)) {
2468 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
2469 tape->measure_insert_time = 1;
2470 tape->insert_time = jiffies;
2471 tape->insert_size = 0;
2472 tape->insert_speed = 0;
2473 idetape_plug_pipeline(drive);
2474 }
2475 }
2476 return 0; 1866 return 0;
2477} 1867}
2478 1868
2479/* 1869/* called from idetape_chrdev_read() to service a chrdev read request. */
2480 * Called from idetape_chrdev_read() to service a character device read request
2481 * and add read-ahead requests to our pipeline.
2482 */
2483static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) 1870static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2484{ 1871{
2485 idetape_tape_t *tape = drive->driver_data; 1872 idetape_tape_t *tape = drive->driver_data;
2486 unsigned long flags;
2487 struct request *rq_ptr;
2488 int bytes_read;
2489 1873
2490 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 1874 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2491 1875
@@ -2493,39 +1877,10 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2493 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1877 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2494 return 0; 1878 return 0;
2495 1879
2496 /* Wait for the next block to reach the head of the pipeline. */ 1880 idetape_init_read(drive);
2497 idetape_init_read(drive, tape->max_stages);
2498 if (tape->first_stage == NULL) {
2499 if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2500 return 0;
2501 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2502 tape->merge_stage->bh);
2503 }
2504 idetape_wait_first_stage(drive);
2505 rq_ptr = &tape->first_stage->rq;
2506 bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
2507 rq_ptr->current_nr_sectors);
2508 rq_ptr->nr_sectors = 0;
2509 rq_ptr->current_nr_sectors = 0;
2510 1881
2511 if (rq_ptr->errors == IDETAPE_ERROR_EOD) 1882 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2512 return 0; 1883 tape->merge_bh);
2513 else {
2514 idetape_switch_buffers(tape, tape->first_stage);
2515 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2516 set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
2517 spin_lock_irqsave(&tape->lock, flags);
2518 idetape_remove_stage_head(drive);
2519 spin_unlock_irqrestore(&tape->lock, flags);
2520 tape->pipeline_head++;
2521 idetape_calculate_speeds(drive);
2522 }
2523 if (bytes_read > blocks * tape->blk_size) {
2524 printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
2525 " than requested\n");
2526 bytes_read = blocks * tape->blk_size;
2527 }
2528 return (bytes_read);
2529} 1884}
2530 1885
2531static void idetape_pad_zeros(ide_drive_t *drive, int bcount) 1886static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
@@ -2537,8 +1892,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2537 while (bcount) { 1892 while (bcount) {
2538 unsigned int count; 1893 unsigned int count;
2539 1894
2540 bh = tape->merge_stage->bh; 1895 bh = tape->merge_bh;
2541 count = min(tape->stage_size, bcount); 1896 count = min(tape->buffer_size, bcount);
2542 bcount -= count; 1897 bcount -= count;
2543 blocks = count / tape->blk_size; 1898 blocks = count / tape->blk_size;
2544 while (count) { 1899 while (count) {
@@ -2549,31 +1904,10 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2549 bh = bh->b_reqnext; 1904 bh = bh->b_reqnext;
2550 } 1905 }
2551 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, 1906 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
2552 tape->merge_stage->bh); 1907 tape->merge_bh);
2553 } 1908 }
2554} 1909}
2555 1910
2556static int idetape_pipeline_size(ide_drive_t *drive)
2557{
2558 idetape_tape_t *tape = drive->driver_data;
2559 idetape_stage_t *stage;
2560 struct request *rq;
2561 int size = 0;
2562
2563 idetape_wait_for_pipeline(drive);
2564 stage = tape->first_stage;
2565 while (stage != NULL) {
2566 rq = &stage->rq;
2567 size += tape->blk_size * (rq->nr_sectors -
2568 rq->current_nr_sectors);
2569 if (rq->errors == IDETAPE_ERROR_FILEMARK)
2570 size += tape->blk_size;
2571 stage = stage->next;
2572 }
2573 size += tape->merge_stage_size;
2574 return size;
2575}
2576
2577/* 1911/*
2578 * Rewinds the tape to the Beginning Of the current Partition (BOP). We 1912 * Rewinds the tape to the Beginning Of the current Partition (BOP). We
2579 * currently support only one partition. 1913 * currently support only one partition.
@@ -2619,11 +1953,10 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2619 if (copy_from_user(&config, argp, sizeof(config))) 1953 if (copy_from_user(&config, argp, sizeof(config)))
2620 return -EFAULT; 1954 return -EFAULT;
2621 tape->best_dsc_rw_freq = config.dsc_rw_frequency; 1955 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
2622 tape->max_stages = config.nr_stages;
2623 break; 1956 break;
2624 case 0x0350: 1957 case 0x0350:
2625 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq; 1958 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
2626 config.nr_stages = tape->max_stages; 1959 config.nr_stages = 1;
2627 if (copy_to_user(argp, &config, sizeof(config))) 1960 if (copy_to_user(argp, &config, sizeof(config)))
2628 return -EFAULT; 1961 return -EFAULT;
2629 break; 1962 break;
@@ -2633,19 +1966,11 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2633 return 0; 1966 return 0;
2634} 1967}
2635 1968
2636/*
2637 * The function below is now a bit more complicated than just passing the
2638 * command to the tape since we may have crossed some filemarks during our
2639 * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
2640 * support MTFSFM when the filemark is in our internal pipeline even if the tape
2641 * doesn't support spacing over filemarks in the reverse direction.
2642 */
2643static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, 1969static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2644 int mt_count) 1970 int mt_count)
2645{ 1971{
2646 idetape_tape_t *tape = drive->driver_data; 1972 idetape_tape_t *tape = drive->driver_data;
2647 struct ide_atapi_pc pc; 1973 struct ide_atapi_pc pc;
2648 unsigned long flags;
2649 int retval, count = 0; 1974 int retval, count = 0;
2650 int sprev = !!(tape->caps[4] & 0x20); 1975 int sprev = !!(tape->caps[4] & 0x20);
2651 1976
@@ -2658,48 +1983,12 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2658 } 1983 }
2659 1984
2660 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1985 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2661 /* its a read-ahead buffer, scan it for crossed filemarks. */ 1986 tape->merge_bh_size = 0;
2662 tape->merge_stage_size = 0;
2663 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1987 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2664 ++count; 1988 ++count;
2665 while (tape->first_stage != NULL) { 1989 ide_tape_discard_merge_buffer(drive, 0);
2666 if (count == mt_count) {
2667 if (mt_op == MTFSFM)
2668 set_bit(IDETAPE_FLAG_FILEMARK,
2669 &tape->flags);
2670 return 0;
2671 }
2672 spin_lock_irqsave(&tape->lock, flags);
2673 if (tape->first_stage == tape->active_stage) {
2674 /*
2675 * We have reached the active stage in the read
2676 * pipeline. There is no point in allowing the
2677 * drive to continue reading any farther, so we
2678 * stop the pipeline.
2679 *
2680 * This section should be moved to a separate
2681 * subroutine because similar operations are
2682 * done in __idetape_discard_read_pipeline(),
2683 * for example.
2684 */
2685 tape->next_stage = NULL;
2686 spin_unlock_irqrestore(&tape->lock, flags);
2687 idetape_wait_first_stage(drive);
2688 tape->next_stage = tape->first_stage->next;
2689 } else
2690 spin_unlock_irqrestore(&tape->lock, flags);
2691 if (tape->first_stage->rq.errors ==
2692 IDETAPE_ERROR_FILEMARK)
2693 ++count;
2694 idetape_remove_stage_head(drive);
2695 }
2696 idetape_discard_read_pipeline(drive, 0);
2697 } 1990 }
2698 1991
2699 /*
2700 * The filemark was not found in our internal pipeline; now we can issue
2701 * the space command.
2702 */
2703 switch (mt_op) { 1992 switch (mt_op) {
2704 case MTFSF: 1993 case MTFSF:
2705 case MTBSF: 1994 case MTBSF:
@@ -2755,27 +2044,25 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2755 (count % tape->blk_size) == 0) 2044 (count % tape->blk_size) == 0)
2756 tape->user_bs_factor = count / tape->blk_size; 2045 tape->user_bs_factor = count / tape->blk_size;
2757 } 2046 }
2758 rc = idetape_init_read(drive, tape->max_stages); 2047 rc = idetape_init_read(drive);
2759 if (rc < 0) 2048 if (rc < 0)
2760 return rc; 2049 return rc;
2761 if (count == 0) 2050 if (count == 0)
2762 return (0); 2051 return (0);
2763 if (tape->merge_stage_size) { 2052 if (tape->merge_bh_size) {
2764 actually_read = min((unsigned int)(tape->merge_stage_size), 2053 actually_read = min((unsigned int)(tape->merge_bh_size),
2765 (unsigned int)count); 2054 (unsigned int)count);
2766 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, 2055 if (idetape_copy_stage_to_user(tape, buf, actually_read))
2767 actually_read))
2768 ret = -EFAULT; 2056 ret = -EFAULT;
2769 buf += actually_read; 2057 buf += actually_read;
2770 tape->merge_stage_size -= actually_read; 2058 tape->merge_bh_size -= actually_read;
2771 count -= actually_read; 2059 count -= actually_read;
2772 } 2060 }
2773 while (count >= tape->stage_size) { 2061 while (count >= tape->buffer_size) {
2774 bytes_read = idetape_add_chrdev_read_request(drive, ctl); 2062 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2775 if (bytes_read <= 0) 2063 if (bytes_read <= 0)
2776 goto finish; 2064 goto finish;
2777 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, 2065 if (idetape_copy_stage_to_user(tape, buf, bytes_read))
2778 bytes_read))
2779 ret = -EFAULT; 2066 ret = -EFAULT;
2780 buf += bytes_read; 2067 buf += bytes_read;
2781 count -= bytes_read; 2068 count -= bytes_read;
@@ -2786,11 +2073,10 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2786 if (bytes_read <= 0) 2073 if (bytes_read <= 0)
2787 goto finish; 2074 goto finish;
2788 temp = min((unsigned long)count, (unsigned long)bytes_read); 2075 temp = min((unsigned long)count, (unsigned long)bytes_read);
2789 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, 2076 if (idetape_copy_stage_to_user(tape, buf, temp))
2790 temp))
2791 ret = -EFAULT; 2077 ret = -EFAULT;
2792 actually_read += temp; 2078 actually_read += temp;
2793 tape->merge_stage_size = bytes_read-temp; 2079 tape->merge_bh_size = bytes_read-temp;
2794 } 2080 }
2795finish: 2081finish:
2796 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { 2082 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
@@ -2821,17 +2107,17 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2821 /* Initialize write operation */ 2107 /* Initialize write operation */
2822 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 2108 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2823 if (tape->chrdev_dir == IDETAPE_DIR_READ) 2109 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2824 idetape_discard_read_pipeline(drive, 1); 2110 ide_tape_discard_merge_buffer(drive, 1);
2825 if (tape->merge_stage || tape->merge_stage_size) { 2111 if (tape->merge_bh || tape->merge_bh_size) {
2826 printk(KERN_ERR "ide-tape: merge_stage_size " 2112 printk(KERN_ERR "ide-tape: merge_bh_size "
2827 "should be 0 now\n"); 2113 "should be 0 now\n");
2828 tape->merge_stage_size = 0; 2114 tape->merge_bh_size = 0;
2829 } 2115 }
2830 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); 2116 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
2831 if (!tape->merge_stage) 2117 if (!tape->merge_bh)
2832 return -ENOMEM; 2118 return -ENOMEM;
2833 tape->chrdev_dir = IDETAPE_DIR_WRITE; 2119 tape->chrdev_dir = IDETAPE_DIR_WRITE;
2834 idetape_init_merge_stage(tape); 2120 idetape_init_merge_buffer(tape);
2835 2121
2836 /* 2122 /*
2837 * Issue a write 0 command to ensure that DSC handshake is 2123 * Issue a write 0 command to ensure that DSC handshake is
@@ -2842,10 +2128,10 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2842 if (drive->dsc_overlap) { 2128 if (drive->dsc_overlap) {
2843 ssize_t retval = idetape_queue_rw_tail(drive, 2129 ssize_t retval = idetape_queue_rw_tail(drive,
2844 REQ_IDETAPE_WRITE, 0, 2130 REQ_IDETAPE_WRITE, 0,
2845 tape->merge_stage->bh); 2131 tape->merge_bh);
2846 if (retval < 0) { 2132 if (retval < 0) {
2847 __idetape_kfree_stage(tape->merge_stage); 2133 ide_tape_kfree_buffer(tape);
2848 tape->merge_stage = NULL; 2134 tape->merge_bh = NULL;
2849 tape->chrdev_dir = IDETAPE_DIR_NONE; 2135 tape->chrdev_dir = IDETAPE_DIR_NONE;
2850 return retval; 2136 return retval;
2851 } 2137 }
@@ -2853,49 +2139,44 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2853 } 2139 }
2854 if (count == 0) 2140 if (count == 0)
2855 return (0); 2141 return (0);
2856 if (tape->restart_speed_control_req) 2142 if (tape->merge_bh_size) {
2857 idetape_restart_speed_control(drive); 2143 if (tape->merge_bh_size >= tape->buffer_size) {
2858 if (tape->merge_stage_size) {
2859 if (tape->merge_stage_size >= tape->stage_size) {
2860 printk(KERN_ERR "ide-tape: bug: merge buf too big\n"); 2144 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
2861 tape->merge_stage_size = 0; 2145 tape->merge_bh_size = 0;
2862 } 2146 }
2863 actually_written = min((unsigned int) 2147 actually_written = min((unsigned int)
2864 (tape->stage_size - tape->merge_stage_size), 2148 (tape->buffer_size - tape->merge_bh_size),
2865 (unsigned int)count); 2149 (unsigned int)count);
2866 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, 2150 if (idetape_copy_stage_from_user(tape, buf, actually_written))
2867 actually_written))
2868 ret = -EFAULT; 2151 ret = -EFAULT;
2869 buf += actually_written; 2152 buf += actually_written;
2870 tape->merge_stage_size += actually_written; 2153 tape->merge_bh_size += actually_written;
2871 count -= actually_written; 2154 count -= actually_written;
2872 2155
2873 if (tape->merge_stage_size == tape->stage_size) { 2156 if (tape->merge_bh_size == tape->buffer_size) {
2874 ssize_t retval; 2157 ssize_t retval;
2875 tape->merge_stage_size = 0; 2158 tape->merge_bh_size = 0;
2876 retval = idetape_add_chrdev_write_request(drive, ctl); 2159 retval = idetape_add_chrdev_write_request(drive, ctl);
2877 if (retval <= 0) 2160 if (retval <= 0)
2878 return (retval); 2161 return (retval);
2879 } 2162 }
2880 } 2163 }
2881 while (count >= tape->stage_size) { 2164 while (count >= tape->buffer_size) {
2882 ssize_t retval; 2165 ssize_t retval;
2883 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, 2166 if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
2884 tape->stage_size))
2885 ret = -EFAULT; 2167 ret = -EFAULT;
2886 buf += tape->stage_size; 2168 buf += tape->buffer_size;
2887 count -= tape->stage_size; 2169 count -= tape->buffer_size;
2888 retval = idetape_add_chrdev_write_request(drive, ctl); 2170 retval = idetape_add_chrdev_write_request(drive, ctl);
2889 actually_written += tape->stage_size; 2171 actually_written += tape->buffer_size;
2890 if (retval <= 0) 2172 if (retval <= 0)
2891 return (retval); 2173 return (retval);
2892 } 2174 }
2893 if (count) { 2175 if (count) {
2894 actually_written += count; 2176 actually_written += count;
2895 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, 2177 if (idetape_copy_stage_from_user(tape, buf, count))
2896 count))
2897 ret = -EFAULT; 2178 ret = -EFAULT;
2898 tape->merge_stage_size += count; 2179 tape->merge_bh_size += count;
2899 } 2180 }
2900 return ret ? ret : actually_written; 2181 return ret ? ret : actually_written;
2901} 2182}
@@ -2919,8 +2200,7 @@ static int idetape_write_filemark(ide_drive_t *drive)
2919 * 2200 *
2920 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support 2201 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
2921 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also 2202 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
2922 * usually not supported (it is supported in the rare case in which we crossed 2203 * usually not supported.
2923 * the filemark during our read-ahead pipelined operation mode).
2924 * 2204 *
2925 * The following commands are currently not supported: 2205 * The following commands are currently not supported:
2926 * 2206 *
@@ -2936,7 +2216,6 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2936 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n", 2216 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
2937 mt_op, mt_count); 2217 mt_op, mt_count);
2938 2218
2939 /* Commands which need our pipelined read-ahead stages. */
2940 switch (mt_op) { 2219 switch (mt_op) {
2941 case MTFSF: 2220 case MTFSF:
2942 case MTFSFM: 2221 case MTFSFM:
@@ -2953,7 +2232,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2953 case MTWEOF: 2232 case MTWEOF:
2954 if (tape->write_prot) 2233 if (tape->write_prot)
2955 return -EACCES; 2234 return -EACCES;
2956 idetape_discard_read_pipeline(drive, 1); 2235 ide_tape_discard_merge_buffer(drive, 1);
2957 for (i = 0; i < mt_count; i++) { 2236 for (i = 0; i < mt_count; i++) {
2958 retval = idetape_write_filemark(drive); 2237 retval = idetape_write_filemark(drive);
2959 if (retval) 2238 if (retval)
@@ -2961,12 +2240,12 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2961 } 2240 }
2962 return 0; 2241 return 0;
2963 case MTREW: 2242 case MTREW:
2964 idetape_discard_read_pipeline(drive, 0); 2243 ide_tape_discard_merge_buffer(drive, 0);
2965 if (idetape_rewind_tape(drive)) 2244 if (idetape_rewind_tape(drive))
2966 return -EIO; 2245 return -EIO;
2967 return 0; 2246 return 0;
2968 case MTLOAD: 2247 case MTLOAD:
2969 idetape_discard_read_pipeline(drive, 0); 2248 ide_tape_discard_merge_buffer(drive, 0);
2970 idetape_create_load_unload_cmd(drive, &pc, 2249 idetape_create_load_unload_cmd(drive, &pc,
2971 IDETAPE_LU_LOAD_MASK); 2250 IDETAPE_LU_LOAD_MASK);
2972 return idetape_queue_pc_tail(drive, &pc); 2251 return idetape_queue_pc_tail(drive, &pc);
@@ -2981,7 +2260,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2981 if (!idetape_queue_pc_tail(drive, &pc)) 2260 if (!idetape_queue_pc_tail(drive, &pc))
2982 tape->door_locked = DOOR_UNLOCKED; 2261 tape->door_locked = DOOR_UNLOCKED;
2983 } 2262 }
2984 idetape_discard_read_pipeline(drive, 0); 2263 ide_tape_discard_merge_buffer(drive, 0);
2985 idetape_create_load_unload_cmd(drive, &pc, 2264 idetape_create_load_unload_cmd(drive, &pc,
2986 !IDETAPE_LU_LOAD_MASK); 2265 !IDETAPE_LU_LOAD_MASK);
2987 retval = idetape_queue_pc_tail(drive, &pc); 2266 retval = idetape_queue_pc_tail(drive, &pc);
@@ -2989,10 +2268,10 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2989 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 2268 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
2990 return retval; 2269 return retval;
2991 case MTNOP: 2270 case MTNOP:
2992 idetape_discard_read_pipeline(drive, 0); 2271 ide_tape_discard_merge_buffer(drive, 0);
2993 return idetape_flush_tape_buffers(drive); 2272 return idetape_flush_tape_buffers(drive);
2994 case MTRETEN: 2273 case MTRETEN:
2995 idetape_discard_read_pipeline(drive, 0); 2274 ide_tape_discard_merge_buffer(drive, 0);
2996 idetape_create_load_unload_cmd(drive, &pc, 2275 idetape_create_load_unload_cmd(drive, &pc,
2997 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK); 2276 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
2998 return idetape_queue_pc_tail(drive, &pc); 2277 return idetape_queue_pc_tail(drive, &pc);
@@ -3014,11 +2293,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
3014 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2293 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
3015 return 0; 2294 return 0;
3016 case MTSEEK: 2295 case MTSEEK:
3017 idetape_discard_read_pipeline(drive, 0); 2296 ide_tape_discard_merge_buffer(drive, 0);
3018 return idetape_position_tape(drive, 2297 return idetape_position_tape(drive,
3019 mt_count * tape->user_bs_factor, tape->partition, 0); 2298 mt_count * tape->user_bs_factor, tape->partition, 0);
3020 case MTSETPART: 2299 case MTSETPART:
3021 idetape_discard_read_pipeline(drive, 0); 2300 ide_tape_discard_merge_buffer(drive, 0);
3022 return idetape_position_tape(drive, 0, mt_count, 0); 2301 return idetape_position_tape(drive, 0, mt_count, 0);
3023 case MTFSR: 2302 case MTFSR:
3024 case MTBSR: 2303 case MTBSR:
@@ -3063,13 +2342,12 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
3063 2342
3064 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd); 2343 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
3065 2344
3066 tape->restart_speed_control_req = 1;
3067 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { 2345 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
3068 idetape_empty_write_pipeline(drive); 2346 ide_tape_flush_merge_buffer(drive);
3069 idetape_flush_tape_buffers(drive); 2347 idetape_flush_tape_buffers(drive);
3070 } 2348 }
3071 if (cmd == MTIOCGET || cmd == MTIOCPOS) { 2349 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
3072 block_offset = idetape_pipeline_size(drive) / 2350 block_offset = tape->merge_bh_size /
3073 (tape->blk_size * tape->user_bs_factor); 2351 (tape->blk_size * tape->user_bs_factor);
3074 position = idetape_read_position(drive); 2352 position = idetape_read_position(drive);
3075 if (position < 0) 2353 if (position < 0)
@@ -3101,7 +2379,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
3101 return 0; 2379 return 0;
3102 default: 2380 default:
3103 if (tape->chrdev_dir == IDETAPE_DIR_READ) 2381 if (tape->chrdev_dir == IDETAPE_DIR_READ)
3104 idetape_discard_read_pipeline(drive, 1); 2382 ide_tape_discard_merge_buffer(drive, 1);
3105 return idetape_blkdev_ioctl(drive, cmd, arg); 2383 return idetape_blkdev_ioctl(drive, cmd, arg);
3106 } 2384 }
3107} 2385}
@@ -3175,9 +2453,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
3175 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) 2453 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
3176 (void)idetape_rewind_tape(drive); 2454 (void)idetape_rewind_tape(drive);
3177 2455
3178 if (tape->chrdev_dir != IDETAPE_DIR_READ)
3179 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
3180
3181 /* Read block size and write protect status from drive. */ 2456 /* Read block size and write protect status from drive. */
3182 ide_tape_get_bsize_from_bdesc(drive); 2457 ide_tape_get_bsize_from_bdesc(drive);
3183 2458
@@ -3206,8 +2481,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
3206 } 2481 }
3207 } 2482 }
3208 } 2483 }
3209 idetape_restart_speed_control(drive);
3210 tape->restart_speed_control_req = 0;
3211 return 0; 2484 return 0;
3212 2485
3213out_put_tape: 2486out_put_tape:
@@ -3219,13 +2492,13 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
3219{ 2492{
3220 idetape_tape_t *tape = drive->driver_data; 2493 idetape_tape_t *tape = drive->driver_data;
3221 2494
3222 idetape_empty_write_pipeline(drive); 2495 ide_tape_flush_merge_buffer(drive);
3223 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0); 2496 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
3224 if (tape->merge_stage != NULL) { 2497 if (tape->merge_bh != NULL) {
3225 idetape_pad_zeros(drive, tape->blk_size * 2498 idetape_pad_zeros(drive, tape->blk_size *
3226 (tape->user_bs_factor - 1)); 2499 (tape->user_bs_factor - 1));
3227 __idetape_kfree_stage(tape->merge_stage); 2500 ide_tape_kfree_buffer(tape);
3228 tape->merge_stage = NULL; 2501 tape->merge_bh = NULL;
3229 } 2502 }
3230 idetape_write_filemark(drive); 2503 idetape_write_filemark(drive);
3231 idetape_flush_tape_buffers(drive); 2504 idetape_flush_tape_buffers(drive);
@@ -3248,14 +2521,9 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
3248 idetape_write_release(drive, minor); 2521 idetape_write_release(drive, minor);
3249 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 2522 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
3250 if (minor < 128) 2523 if (minor < 128)
3251 idetape_discard_read_pipeline(drive, 1); 2524 ide_tape_discard_merge_buffer(drive, 1);
3252 else
3253 idetape_wait_for_pipeline(drive);
3254 }
3255 if (tape->cache_stage != NULL) {
3256 __idetape_kfree_stage(tape->cache_stage);
3257 tape->cache_stage = NULL;
3258 } 2525 }
2526
3259 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) 2527 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
3260 (void) idetape_rewind_tape(drive); 2528 (void) idetape_rewind_tape(drive);
3261 if (tape->chrdev_dir == IDETAPE_DIR_NONE) { 2529 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
@@ -3392,33 +2660,15 @@ static void idetape_add_settings(ide_drive_t *drive)
3392 2660
3393 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff, 2661 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3394 1, 2, (u16 *)&tape->caps[16], NULL); 2662 1, 2, (u16 *)&tape->caps[16], NULL);
3395 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
3396 tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
3397 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
3398 tape->stage_size / 1024, 1, &tape->max_stages, NULL);
3399 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
3400 tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
3401 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
3402 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
3403 NULL);
3404 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
3405 0xffff, tape->stage_size / 1024, 1,
3406 &tape->nr_pending_stages, NULL);
3407 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff, 2663 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3408 1, 1, (u16 *)&tape->caps[14], NULL); 2664 1, 1, (u16 *)&tape->caps[14], NULL);
3409 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 2665 ide_add_setting(drive, "buffer_size", SETTING_READ, TYPE_INT, 0, 0xffff,
3410 1024, &tape->stage_size, NULL); 2666 1, 1024, &tape->buffer_size, NULL);
3411 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, 2667 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
3412 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq, 2668 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
3413 NULL); 2669 NULL);
3414 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 2670 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
3415 1, &drive->dsc_overlap, NULL); 2671 1, &drive->dsc_overlap, NULL);
3416 ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
3417 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
3418 NULL);
3419 ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
3420 0, 0xffff, 1, 1,
3421 &tape->uncontrolled_pipeline_head_speed, NULL);
3422 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 2672 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
3423 1, 1, &tape->avg_speed, NULL); 2673 1, 1, &tape->avg_speed, NULL);
3424 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 2674 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
@@ -3441,11 +2691,10 @@ static inline void idetape_add_settings(ide_drive_t *drive) { ; }
3441 */ 2691 */
3442static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) 2692static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3443{ 2693{
3444 unsigned long t1, tmid, tn, t; 2694 unsigned long t;
3445 int speed; 2695 int speed;
3446 int stage_size; 2696 int buffer_size;
3447 u8 gcw[2]; 2697 u8 gcw[2];
3448 struct sysinfo si;
3449 u16 *ctl = (u16 *)&tape->caps[12]; 2698 u16 *ctl = (u16 *)&tape->caps[12];
3450 2699
3451 spin_lock_init(&tape->lock); 2700 spin_lock_init(&tape->lock);
@@ -3464,65 +2713,33 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3464 tape->name[2] = '0' + minor; 2713 tape->name[2] = '0' + minor;
3465 tape->chrdev_dir = IDETAPE_DIR_NONE; 2714 tape->chrdev_dir = IDETAPE_DIR_NONE;
3466 tape->pc = tape->pc_stack; 2715 tape->pc = tape->pc_stack;
3467 tape->max_insert_speed = 10000;
3468 tape->speed_control = 1;
3469 *((unsigned short *) &gcw) = drive->id->config; 2716 *((unsigned short *) &gcw) = drive->id->config;
3470 2717
3471 /* Command packet DRQ type */ 2718 /* Command packet DRQ type */
3472 if (((gcw[0] & 0x60) >> 5) == 1) 2719 if (((gcw[0] & 0x60) >> 5) == 1)
3473 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); 2720 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
3474 2721
3475 tape->min_pipeline = 10;
3476 tape->max_pipeline = 10;
3477 tape->max_stages = 10;
3478
3479 idetape_get_inquiry_results(drive); 2722 idetape_get_inquiry_results(drive);
3480 idetape_get_mode_sense_results(drive); 2723 idetape_get_mode_sense_results(drive);
3481 ide_tape_get_bsize_from_bdesc(drive); 2724 ide_tape_get_bsize_from_bdesc(drive);
3482 tape->user_bs_factor = 1; 2725 tape->user_bs_factor = 1;
3483 tape->stage_size = *ctl * tape->blk_size; 2726 tape->buffer_size = *ctl * tape->blk_size;
3484 while (tape->stage_size > 0xffff) { 2727 while (tape->buffer_size > 0xffff) {
3485 printk(KERN_NOTICE "ide-tape: decreasing stage size\n"); 2728 printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
3486 *ctl /= 2; 2729 *ctl /= 2;
3487 tape->stage_size = *ctl * tape->blk_size; 2730 tape->buffer_size = *ctl * tape->blk_size;
3488 } 2731 }
3489 stage_size = tape->stage_size; 2732 buffer_size = tape->buffer_size;
3490 tape->pages_per_stage = stage_size / PAGE_SIZE; 2733 tape->pages_per_buffer = buffer_size / PAGE_SIZE;
3491 if (stage_size % PAGE_SIZE) { 2734 if (buffer_size % PAGE_SIZE) {
3492 tape->pages_per_stage++; 2735 tape->pages_per_buffer++;
3493 tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE; 2736 tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
3494 } 2737 }
3495 2738
3496 /* Select the "best" DSC read/write polling freq and pipeline size. */ 2739 /* select the "best" DSC read/write polling freq */
3497 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]); 2740 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
3498 2741
3499 tape->max_stages = speed * 1000 * 10 / tape->stage_size; 2742 t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
3500
3501 /* Limit memory use for pipeline to 10% of physical memory */
3502 si_meminfo(&si);
3503 if (tape->max_stages * tape->stage_size >
3504 si.totalram * si.mem_unit / 10)
3505 tape->max_stages =
3506 si.totalram * si.mem_unit / (10 * tape->stage_size);
3507
3508 tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
3509 tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
3510 tape->max_pipeline =
3511 min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
3512 if (tape->max_stages == 0) {
3513 tape->max_stages = 1;
3514 tape->min_pipeline = 1;
3515 tape->max_pipeline = 1;
3516 }
3517
3518 t1 = (tape->stage_size * HZ) / (speed * 1000);
3519 tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
3520 tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
3521
3522 if (tape->max_stages)
3523 t = tn;
3524 else
3525 t = t1;
3526 2743
3527 /* 2744 /*
3528 * Ensure that the number we got makes sense; limit it within 2745 * Ensure that the number we got makes sense; limit it within
@@ -3532,11 +2749,10 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3532 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), 2749 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
3533 IDETAPE_DSC_RW_MIN); 2750 IDETAPE_DSC_RW_MIN);
3534 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " 2751 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
3535 "%dkB pipeline, %lums tDSC%s\n", 2752 "%lums tDSC%s\n",
3536 drive->name, tape->name, *(u16 *)&tape->caps[14], 2753 drive->name, tape->name, *(u16 *)&tape->caps[14],
3537 (*(u16 *)&tape->caps[16] * 512) / tape->stage_size, 2754 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
3538 tape->stage_size / 1024, 2755 tape->buffer_size / 1024,
3539 tape->max_stages * tape->stage_size / 1024,
3540 tape->best_dsc_rw_freq * 1000 / HZ, 2756 tape->best_dsc_rw_freq * 1000 / HZ,
3541 drive->using_dma ? ", DMA":""); 2757 drive->using_dma ? ", DMA":"");
3542 2758
@@ -3560,7 +2776,7 @@ static void ide_tape_release(struct kref *kref)
3560 ide_drive_t *drive = tape->drive; 2776 ide_drive_t *drive = tape->drive;
3561 struct gendisk *g = tape->disk; 2777 struct gendisk *g = tape->disk;
3562 2778
3563 BUG_ON(tape->first_stage != NULL || tape->merge_stage_size); 2779 BUG_ON(tape->merge_bh_size);
3564 2780
3565 drive->dsc_overlap = 0; 2781 drive->dsc_overlap = 0;
3566 drive->driver_data = NULL; 2782 drive->driver_data = NULL;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 155cc904f4eb..9f9ad9fb6b89 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -36,6 +36,7 @@
36void ide_tf_load(ide_drive_t *drive, ide_task_t *task) 36void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
37{ 37{
38 ide_hwif_t *hwif = drive->hwif; 38 ide_hwif_t *hwif = drive->hwif;
39 struct ide_io_ports *io_ports = &hwif->io_ports;
39 struct ide_taskfile *tf = &task->tf; 40 struct ide_taskfile *tf = &task->tf;
40 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; 41 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
41 42
@@ -59,34 +60,33 @@ void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
59 SELECT_MASK(drive, 0); 60 SELECT_MASK(drive, 0);
60 61
61 if (task->tf_flags & IDE_TFLAG_OUT_DATA) 62 if (task->tf_flags & IDE_TFLAG_OUT_DATA)
62 hwif->OUTW((tf->hob_data << 8) | tf->data, 63 hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr);
63 hwif->io_ports[IDE_DATA_OFFSET]);
64 64
65 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) 65 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
66 hwif->OUTB(tf->hob_feature, hwif->io_ports[IDE_FEATURE_OFFSET]); 66 hwif->OUTB(tf->hob_feature, io_ports->feature_addr);
67 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) 67 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
68 hwif->OUTB(tf->hob_nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]); 68 hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr);
69 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) 69 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
70 hwif->OUTB(tf->hob_lbal, hwif->io_ports[IDE_SECTOR_OFFSET]); 70 hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr);
71 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) 71 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
72 hwif->OUTB(tf->hob_lbam, hwif->io_ports[IDE_LCYL_OFFSET]); 72 hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr);
73 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) 73 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
74 hwif->OUTB(tf->hob_lbah, hwif->io_ports[IDE_HCYL_OFFSET]); 74 hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr);
75 75
76 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE) 76 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
77 hwif->OUTB(tf->feature, hwif->io_ports[IDE_FEATURE_OFFSET]); 77 hwif->OUTB(tf->feature, io_ports->feature_addr);
78 if (task->tf_flags & IDE_TFLAG_OUT_NSECT) 78 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
79 hwif->OUTB(tf->nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]); 79 hwif->OUTB(tf->nsect, io_ports->nsect_addr);
80 if (task->tf_flags & IDE_TFLAG_OUT_LBAL) 80 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
81 hwif->OUTB(tf->lbal, hwif->io_ports[IDE_SECTOR_OFFSET]); 81 hwif->OUTB(tf->lbal, io_ports->lbal_addr);
82 if (task->tf_flags & IDE_TFLAG_OUT_LBAM) 82 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
83 hwif->OUTB(tf->lbam, hwif->io_ports[IDE_LCYL_OFFSET]); 83 hwif->OUTB(tf->lbam, io_ports->lbam_addr);
84 if (task->tf_flags & IDE_TFLAG_OUT_LBAH) 84 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
85 hwif->OUTB(tf->lbah, hwif->io_ports[IDE_HCYL_OFFSET]); 85 hwif->OUTB(tf->lbah, io_ports->lbah_addr);
86 86
87 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) 87 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
88 hwif->OUTB((tf->device & HIHI) | drive->select.all, 88 hwif->OUTB((tf->device & HIHI) | drive->select.all,
89 hwif->io_ports[IDE_SELECT_OFFSET]); 89 io_ports->device_addr);
90} 90}
91 91
92int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) 92int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
@@ -135,6 +135,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
135 ide_hwif_t *hwif = HWIF(drive); 135 ide_hwif_t *hwif = HWIF(drive);
136 struct ide_taskfile *tf = &task->tf; 136 struct ide_taskfile *tf = &task->tf;
137 ide_handler_t *handler = NULL; 137 ide_handler_t *handler = NULL;
138 const struct ide_dma_ops *dma_ops = hwif->dma_ops;
138 139
139 if (task->data_phase == TASKFILE_MULTI_IN || 140 if (task->data_phase == TASKFILE_MULTI_IN ||
140 task->data_phase == TASKFILE_MULTI_OUT) { 141 task->data_phase == TASKFILE_MULTI_OUT) {
@@ -154,8 +155,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
154 switch (task->data_phase) { 155 switch (task->data_phase) {
155 case TASKFILE_MULTI_OUT: 156 case TASKFILE_MULTI_OUT:
156 case TASKFILE_OUT: 157 case TASKFILE_OUT:
157 hwif->OUTBSYNC(drive, tf->command, 158 hwif->OUTBSYNC(drive, tf->command, hwif->io_ports.command_addr);
158 hwif->io_ports[IDE_COMMAND_OFFSET]);
159 ndelay(400); /* FIXME */ 159 ndelay(400); /* FIXME */
160 return pre_task_out_intr(drive, task->rq); 160 return pre_task_out_intr(drive, task->rq);
161 case TASKFILE_MULTI_IN: 161 case TASKFILE_MULTI_IN:
@@ -178,10 +178,10 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
178 return ide_started; 178 return ide_started;
179 default: 179 default:
180 if (task_dma_ok(task) == 0 || drive->using_dma == 0 || 180 if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
181 hwif->dma_setup(drive)) 181 dma_ops->dma_setup(drive))
182 return ide_stopped; 182 return ide_stopped;
183 hwif->dma_exec_cmd(drive, tf->command); 183 dma_ops->dma_exec_cmd(drive, tf->command);
184 hwif->dma_start(drive); 184 dma_ops->dma_start(drive);
185 return ide_started; 185 return ide_started;
186 } 186 }
187} 187}
@@ -455,7 +455,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
455 455
456 /* Error? */ 456 /* Error? */
457 if (stat & ERR_STAT) 457 if (stat & ERR_STAT)
458 return task_error(drive, rq, __FUNCTION__, stat); 458 return task_error(drive, rq, __func__, stat);
459 459
460 /* Didn't want any data? Odd. */ 460 /* Didn't want any data? Odd. */
461 if (!(stat & DRQ_STAT)) 461 if (!(stat & DRQ_STAT))
@@ -467,7 +467,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
467 if (!hwif->nleft) { 467 if (!hwif->nleft) {
468 stat = wait_drive_not_busy(drive); 468 stat = wait_drive_not_busy(drive);
469 if (!OK_STAT(stat, 0, BAD_STAT)) 469 if (!OK_STAT(stat, 0, BAD_STAT))
470 return task_error(drive, rq, __FUNCTION__, stat); 470 return task_error(drive, rq, __func__, stat);
471 task_end_request(drive, rq, stat); 471 task_end_request(drive, rq, stat);
472 return ide_stopped; 472 return ide_stopped;
473 } 473 }
@@ -488,11 +488,11 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
488 u8 stat = ide_read_status(drive); 488 u8 stat = ide_read_status(drive);
489 489
490 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) 490 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
491 return task_error(drive, rq, __FUNCTION__, stat); 491 return task_error(drive, rq, __func__, stat);
492 492
493 /* Deal with unexpected ATA data phase. */ 493 /* Deal with unexpected ATA data phase. */
494 if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft) 494 if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
495 return task_error(drive, rq, __FUNCTION__, stat); 495 return task_error(drive, rq, __func__, stat);
496 496
497 if (!hwif->nleft) { 497 if (!hwif->nleft) {
498 task_end_request(drive, rq, stat); 498 task_end_request(drive, rq, stat);
@@ -675,7 +675,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
675 /* (hs): give up if multcount is not set */ 675 /* (hs): give up if multcount is not set */
676 printk(KERN_ERR "%s: %s Multimode Write " \ 676 printk(KERN_ERR "%s: %s Multimode Write " \
677 "multcount is not set\n", 677 "multcount is not set\n",
678 drive->name, __FUNCTION__); 678 drive->name, __func__);
679 err = -EPERM; 679 err = -EPERM;
680 goto abort; 680 goto abort;
681 } 681 }
@@ -692,7 +692,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
692 /* (hs): give up if multcount is not set */ 692 /* (hs): give up if multcount is not set */
693 printk(KERN_ERR "%s: %s Multimode Read failure " \ 693 printk(KERN_ERR "%s: %s Multimode Read failure " \
694 "multcount is not set\n", 694 "multcount is not set\n",
695 drive->name, __FUNCTION__); 695 drive->name, __func__);
696 err = -EPERM; 696 err = -EPERM;
697 goto abort; 697 goto abort;
698 } 698 }
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 917c72dcd33d..999584c03d97 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -94,19 +94,8 @@ DEFINE_MUTEX(ide_cfg_mtx);
94 94
95int noautodma = 0; 95int noautodma = 0;
96 96
97#ifdef CONFIG_BLK_DEV_IDEACPI
98int ide_noacpi = 0;
99int ide_noacpitfs = 1;
100int ide_noacpionboot = 1;
101#endif
102
103/*
104 * This is declared extern in ide.h, for access by other IDE modules:
105 */
106ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ 97ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
107 98
108EXPORT_SYMBOL(ide_hwifs);
109
110static void ide_port_init_devices_data(ide_hwif_t *); 99static void ide_port_init_devices_data(ide_hwif_t *);
111 100
112/* 101/*
@@ -232,117 +221,6 @@ static int ide_system_bus_speed(void)
232 return pci_dev_present(pci_default) ? 33 : 50; 221 return pci_dev_present(pci_default) ? 33 : 50;
233} 222}
234 223
235ide_hwif_t * ide_find_port(unsigned long base)
236{
237 ide_hwif_t *hwif;
238 int i;
239
240 for (i = 0; i < MAX_HWIFS; i++) {
241 hwif = &ide_hwifs[i];
242 if (hwif->io_ports[IDE_DATA_OFFSET] == base)
243 goto found;
244 }
245
246 for (i = 0; i < MAX_HWIFS; i++) {
247 hwif = &ide_hwifs[i];
248 if (hwif->chipset == ide_unknown)
249 goto found;
250 }
251
252 hwif = NULL;
253found:
254 return hwif;
255}
256
257EXPORT_SYMBOL_GPL(ide_find_port);
258
259static struct resource* hwif_request_region(ide_hwif_t *hwif,
260 unsigned long addr, int num)
261{
262 struct resource *res = request_region(addr, num, hwif->name);
263
264 if (!res)
265 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
266 hwif->name, addr, addr+num-1);
267 return res;
268}
269
270/**
271 * ide_hwif_request_regions - request resources for IDE
272 * @hwif: interface to use
273 *
274 * Requests all the needed resources for an interface.
275 * Right now core IDE code does this work which is deeply wrong.
276 * MMIO leaves it to the controller driver,
277 * PIO will migrate this way over time.
278 */
279
280int ide_hwif_request_regions(ide_hwif_t *hwif)
281{
282 unsigned long addr;
283 unsigned int i;
284
285 if (hwif->mmio)
286 return 0;
287 addr = hwif->io_ports[IDE_CONTROL_OFFSET];
288 if (addr && !hwif_request_region(hwif, addr, 1))
289 goto control_region_busy;
290 hwif->straight8 = 0;
291 addr = hwif->io_ports[IDE_DATA_OFFSET];
292 if ((addr | 7) == hwif->io_ports[IDE_STATUS_OFFSET]) {
293 if (!hwif_request_region(hwif, addr, 8))
294 goto data_region_busy;
295 hwif->straight8 = 1;
296 return 0;
297 }
298 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
299 addr = hwif->io_ports[i];
300 if (!hwif_request_region(hwif, addr, 1)) {
301 while (--i)
302 release_region(addr, 1);
303 goto data_region_busy;
304 }
305 }
306 return 0;
307
308data_region_busy:
309 addr = hwif->io_ports[IDE_CONTROL_OFFSET];
310 if (addr)
311 release_region(addr, 1);
312control_region_busy:
313 /* If any errors are return, we drop the hwif interface. */
314 return -EBUSY;
315}
316
317/**
318 * ide_hwif_release_regions - free IDE resources
319 *
320 * Note that we only release the standard ports,
321 * and do not even try to handle any extra ports
322 * allocated for weird IDE interface chipsets.
323 *
324 * Note also that we don't yet handle mmio resources here. More
325 * importantly our caller should be doing this so we need to
326 * restructure this as a helper function for drivers.
327 */
328
329void ide_hwif_release_regions(ide_hwif_t *hwif)
330{
331 u32 i = 0;
332
333 if (hwif->mmio)
334 return;
335 if (hwif->io_ports[IDE_CONTROL_OFFSET])
336 release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1);
337 if (hwif->straight8) {
338 release_region(hwif->io_ports[IDE_DATA_OFFSET], 8);
339 return;
340 }
341 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
342 if (hwif->io_ports[i])
343 release_region(hwif->io_ports[i], 1);
344}
345
346void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) 224void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
347{ 225{
348 ide_hwgroup_t *hwgroup = hwif->hwgroup; 226 ide_hwgroup_t *hwgroup = hwif->hwgroup;
@@ -409,7 +287,7 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
409 287
410/** 288/**
411 * ide_unregister - free an IDE interface 289 * ide_unregister - free an IDE interface
412 * @index: index of interface (will change soon to a pointer) 290 * @hwif: IDE interface
413 * 291 *
414 * Perform the final unregister of an IDE interface. At the moment 292 * Perform the final unregister of an IDE interface. At the moment
415 * we don't refcount interfaces so this will also get split up. 293 * we don't refcount interfaces so this will also get split up.
@@ -429,19 +307,16 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
429 * This is raving bonkers. 307 * This is raving bonkers.
430 */ 308 */
431 309
432void ide_unregister(unsigned int index) 310void ide_unregister(ide_hwif_t *hwif)
433{ 311{
434 ide_hwif_t *hwif, *g; 312 ide_hwif_t *g;
435 ide_hwgroup_t *hwgroup; 313 ide_hwgroup_t *hwgroup;
436 int irq_count = 0; 314 int irq_count = 0;
437 315
438 BUG_ON(index >= MAX_HWIFS);
439
440 BUG_ON(in_interrupt()); 316 BUG_ON(in_interrupt());
441 BUG_ON(irqs_disabled()); 317 BUG_ON(irqs_disabled());
442 mutex_lock(&ide_cfg_mtx); 318 mutex_lock(&ide_cfg_mtx);
443 spin_lock_irq(&ide_lock); 319 spin_lock_irq(&ide_lock);
444 hwif = &ide_hwifs[index];
445 if (!hwif->present) 320 if (!hwif->present)
446 goto abort; 321 goto abort;
447 __ide_port_unregister_devices(hwif); 322 __ide_port_unregister_devices(hwif);
@@ -479,12 +354,10 @@ void ide_unregister(unsigned int index)
479 spin_lock_irq(&ide_lock); 354 spin_lock_irq(&ide_lock);
480 355
481 if (hwif->dma_base) 356 if (hwif->dma_base)
482 (void)ide_release_dma(hwif); 357 ide_release_dma_engine(hwif);
483
484 ide_hwif_release_regions(hwif);
485 358
486 /* restore hwif data to pristine status */ 359 /* restore hwif data to pristine status */
487 ide_init_port_data(hwif, index); 360 ide_init_port_data(hwif, hwif->index);
488 361
489abort: 362abort:
490 spin_unlock_irq(&ide_lock); 363 spin_unlock_irq(&ide_lock);
@@ -495,9 +368,8 @@ EXPORT_SYMBOL(ide_unregister);
495 368
496void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) 369void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
497{ 370{
498 memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); 371 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
499 hwif->irq = hw->irq; 372 hwif->irq = hw->irq;
500 hwif->noprobe = 0;
501 hwif->chipset = hw->chipset; 373 hwif->chipset = hw->chipset;
502 hwif->gendev.parent = hw->dev; 374 hwif->gendev.parent = hw->dev;
503 hwif->ack_intr = hw->ack_intr; 375 hwif->ack_intr = hw->ack_intr;
@@ -588,7 +460,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
588 if (!drive->id || !(drive->id->capability & 1)) 460 if (!drive->id || !(drive->id->capability & 1))
589 goto out; 461 goto out;
590 462
591 if (hwif->dma_host_set == NULL) 463 if (hwif->dma_ops == NULL)
592 goto out; 464 goto out;
593 465
594 err = -EBUSY; 466 err = -EBUSY;
@@ -627,11 +499,14 @@ out:
627int set_pio_mode(ide_drive_t *drive, int arg) 499int set_pio_mode(ide_drive_t *drive, int arg)
628{ 500{
629 struct request rq; 501 struct request rq;
502 ide_hwif_t *hwif = drive->hwif;
503 const struct ide_port_ops *port_ops = hwif->port_ops;
630 504
631 if (arg < 0 || arg > 255) 505 if (arg < 0 || arg > 255)
632 return -EINVAL; 506 return -EINVAL;
633 507
634 if (drive->hwif->set_pio_mode == NULL) 508 if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
509 (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
635 return -ENOSYS; 510 return -ENOSYS;
636 511
637 if (drive->special.b.set_tune) 512 if (drive->special.b.set_tune)
@@ -953,16 +828,6 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
953 return 0; /* zero = nothing matched */ 828 return 0; /* zero = nothing matched */
954} 829}
955 830
956extern int probe_ali14xx;
957extern int probe_umc8672;
958extern int probe_dtc2278;
959extern int probe_ht6560b;
960extern int probe_qd65xx;
961extern int cmd640_vlb;
962extern int probe_4drives;
963
964static int __initdata is_chipset_set;
965
966/* 831/*
967 * ide_setup() gets called VERY EARLY during initialization, 832 * ide_setup() gets called VERY EARLY during initialization,
968 * to handle kernel "command line" strings beginning with "hdx=" or "ide". 833 * to handle kernel "command line" strings beginning with "hdx=" or "ide".
@@ -971,14 +836,12 @@ static int __initdata is_chipset_set;
971 */ 836 */
972static int __init ide_setup(char *s) 837static int __init ide_setup(char *s)
973{ 838{
974 int i, vals[3];
975 ide_hwif_t *hwif; 839 ide_hwif_t *hwif;
976 ide_drive_t *drive; 840 ide_drive_t *drive;
977 unsigned int hw, unit; 841 unsigned int hw, unit;
842 int vals[3];
978 const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1); 843 const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
979 const char max_hwif = '0' + (MAX_HWIFS - 1);
980 844
981
982 if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */ 845 if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */
983 return 0; /* driver and not us */ 846 return 0; /* driver and not us */
984 847
@@ -994,7 +857,7 @@ static int __init ide_setup(char *s)
994 857
995 printk(" : Enabled support for IDE doublers\n"); 858 printk(" : Enabled support for IDE doublers\n");
996 ide_doubler = 1; 859 ide_doubler = 1;
997 return 1; 860 goto obsolete_option;
998 } 861 }
999#endif /* CONFIG_BLK_DEV_IDEDOUBLER */ 862#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
1000 863
@@ -1008,17 +871,17 @@ static int __init ide_setup(char *s)
1008 if (!strcmp(s, "ide=noacpi")) { 871 if (!strcmp(s, "ide=noacpi")) {
1009 //printk(" : Disable IDE ACPI support.\n"); 872 //printk(" : Disable IDE ACPI support.\n");
1010 ide_noacpi = 1; 873 ide_noacpi = 1;
1011 return 1; 874 goto obsolete_option;
1012 } 875 }
1013 if (!strcmp(s, "ide=acpigtf")) { 876 if (!strcmp(s, "ide=acpigtf")) {
1014 //printk(" : Enable IDE ACPI _GTF support.\n"); 877 //printk(" : Enable IDE ACPI _GTF support.\n");
1015 ide_noacpitfs = 0; 878 ide_acpigtf = 1;
1016 return 1; 879 goto obsolete_option;
1017 } 880 }
1018 if (!strcmp(s, "ide=acpionboot")) { 881 if (!strcmp(s, "ide=acpionboot")) {
1019 //printk(" : Call IDE ACPI methods on boot.\n"); 882 //printk(" : Call IDE ACPI methods on boot.\n");
1020 ide_noacpionboot = 0; 883 ide_acpionboot = 1;
1021 return 1; 884 goto obsolete_option;
1022 } 885 }
1023#endif /* CONFIG_BLK_DEV_IDEACPI */ 886#endif /* CONFIG_BLK_DEV_IDEACPI */
1024 887
@@ -1028,7 +891,7 @@ static int __init ide_setup(char *s)
1028 if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) { 891 if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
1029 const char *hd_words[] = { 892 const char *hd_words[] = {
1030 "none", "noprobe", "nowerr", "cdrom", "nodma", 893 "none", "noprobe", "nowerr", "cdrom", "nodma",
1031 "autotune", "noautotune", "-8", "-9", "-10", 894 "-6", "-7", "-8", "-9", "-10",
1032 "noflush", "remap", "remap63", "scsi", NULL }; 895 "noflush", "remap", "remap63", "scsi", NULL };
1033 unit = s[2] - 'a'; 896 unit = s[2] - 'a';
1034 hw = unit / MAX_DRIVES; 897 hw = unit / MAX_DRIVES;
@@ -1043,30 +906,22 @@ static int __init ide_setup(char *s)
1043 case -1: /* "none" */ 906 case -1: /* "none" */
1044 case -2: /* "noprobe" */ 907 case -2: /* "noprobe" */
1045 drive->noprobe = 1; 908 drive->noprobe = 1;
1046 goto done; 909 goto obsolete_option;
1047 case -3: /* "nowerr" */ 910 case -3: /* "nowerr" */
1048 drive->bad_wstat = BAD_R_STAT; 911 drive->bad_wstat = BAD_R_STAT;
1049 hwif->noprobe = 0; 912 goto obsolete_option;
1050 goto done;
1051 case -4: /* "cdrom" */ 913 case -4: /* "cdrom" */
1052 drive->present = 1; 914 drive->present = 1;
1053 drive->media = ide_cdrom; 915 drive->media = ide_cdrom;
1054 /* an ATAPI device ignores DRDY */ 916 /* an ATAPI device ignores DRDY */
1055 drive->ready_stat = 0; 917 drive->ready_stat = 0;
1056 hwif->noprobe = 0; 918 goto obsolete_option;
1057 goto done;
1058 case -5: /* nodma */ 919 case -5: /* nodma */
1059 drive->nodma = 1; 920 drive->nodma = 1;
1060 goto done;
1061 case -6: /* "autotune" */
1062 drive->autotune = IDE_TUNE_AUTO;
1063 goto obsolete_option;
1064 case -7: /* "noautotune" */
1065 drive->autotune = IDE_TUNE_NOAUTO;
1066 goto obsolete_option; 921 goto obsolete_option;
1067 case -11: /* noflush */ 922 case -11: /* noflush */
1068 drive->noflush = 1; 923 drive->noflush = 1;
1069 goto done; 924 goto obsolete_option;
1070 case -12: /* "remap" */ 925 case -12: /* "remap" */
1071 drive->remap_0_to_1 = 1; 926 drive->remap_0_to_1 = 1;
1072 goto obsolete_option; 927 goto obsolete_option;
@@ -1084,8 +939,7 @@ static int __init ide_setup(char *s)
1084 drive->sect = drive->bios_sect = vals[2]; 939 drive->sect = drive->bios_sect = vals[2];
1085 drive->present = 1; 940 drive->present = 1;
1086 drive->forced_geom = 1; 941 drive->forced_geom = 1;
1087 hwif->noprobe = 0; 942 goto obsolete_option;
1088 goto done;
1089 default: 943 default:
1090 goto bad_option; 944 goto bad_option;
1091 } 945 }
@@ -1103,126 +957,15 @@ static int __init ide_setup(char *s)
1103 idebus_parameter = vals[0]; 957 idebus_parameter = vals[0];
1104 } else 958 } else
1105 printk(" -- BAD BUS SPEED! Expected value from 20 to 66"); 959 printk(" -- BAD BUS SPEED! Expected value from 20 to 66");
1106 goto done; 960 goto obsolete_option;
1107 } 961 }
1108 /*
1109 * Look for interface options: "idex="
1110 */
1111 if (s[3] >= '0' && s[3] <= max_hwif) {
1112 /*
1113 * Be VERY CAREFUL changing this: note hardcoded indexes below
1114 * (-8, -9, -10) are reserved to ease the hardcoding.
1115 */
1116 static const char *ide_words[] = {
1117 "minus1", "serialize", "minus3", "minus4",
1118 "reset", "minus6", "ata66", "minus8", "minus9",
1119 "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
1120 "dtc2278", "umc8672", "ali14xx", NULL };
1121
1122 hw = s[3] - '0';
1123 hwif = &ide_hwifs[hw];
1124 i = match_parm(&s[4], ide_words, vals, 3);
1125 962
1126 /*
1127 * Cryptic check to ensure chipset not already set for hwif.
1128 * Note: we can't depend on hwif->chipset here.
1129 */
1130 if (i >= -18 && i <= -11) {
1131 /* chipset already specified */
1132 if (is_chipset_set)
1133 goto bad_option;
1134 /* these drivers are for "ide0=" only */
1135 if (hw != 0)
1136 goto bad_hwif;
1137 is_chipset_set = 1;
1138 printk("\n");
1139 }
1140
1141 switch (i) {
1142#ifdef CONFIG_BLK_DEV_ALI14XX
1143 case -17: /* "ali14xx" */
1144 probe_ali14xx = 1;
1145 goto obsolete_option;
1146#endif
1147#ifdef CONFIG_BLK_DEV_UMC8672
1148 case -16: /* "umc8672" */
1149 probe_umc8672 = 1;
1150 goto obsolete_option;
1151#endif
1152#ifdef CONFIG_BLK_DEV_DTC2278
1153 case -15: /* "dtc2278" */
1154 probe_dtc2278 = 1;
1155 goto obsolete_option;
1156#endif
1157#ifdef CONFIG_BLK_DEV_CMD640
1158 case -14: /* "cmd640_vlb" */
1159 cmd640_vlb = 1;
1160 goto obsolete_option;
1161#endif
1162#ifdef CONFIG_BLK_DEV_HT6560B
1163 case -13: /* "ht6560b" */
1164 probe_ht6560b = 1;
1165 goto obsolete_option;
1166#endif
1167#ifdef CONFIG_BLK_DEV_QD65XX
1168 case -12: /* "qd65xx" */
1169 probe_qd65xx = 1;
1170 goto obsolete_option;
1171#endif
1172#ifdef CONFIG_BLK_DEV_4DRIVES
1173 case -11: /* "four" drives on one set of ports */
1174 probe_4drives = 1;
1175 goto obsolete_option;
1176#endif
1177 case -10: /* minus10 */
1178 case -9: /* minus9 */
1179 case -8: /* minus8 */
1180 case -6:
1181 case -4:
1182 case -3:
1183 goto bad_option;
1184 case -7: /* ata66 */
1185#ifdef CONFIG_BLK_DEV_IDEPCI
1186 /*
1187 * Use ATA_CBL_PATA40_SHORT so drive side
1188 * cable detection is also overriden.
1189 */
1190 hwif->cbl = ATA_CBL_PATA40_SHORT;
1191 goto obsolete_option;
1192#else
1193 goto bad_hwif;
1194#endif
1195 case -5: /* "reset" */
1196 hwif->reset = 1;
1197 goto obsolete_option;
1198 case -2: /* "serialize" */
1199 hwif->mate = &ide_hwifs[hw^1];
1200 hwif->mate->mate = hwif;
1201 hwif->serialized = hwif->mate->serialized = 1;
1202 goto obsolete_option;
1203
1204 case -1:
1205 case 0:
1206 case 1:
1207 case 2:
1208 case 3:
1209 goto bad_option;
1210 default:
1211 printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
1212 return 1;
1213 }
1214 }
1215bad_option: 963bad_option:
1216 printk(" -- BAD OPTION\n"); 964 printk(" -- BAD OPTION\n");
1217 return 1; 965 return 1;
1218obsolete_option: 966obsolete_option:
1219 printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n"); 967 printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n");
1220 return 1; 968 return 1;
1221bad_hwif:
1222 printk("-- NOT SUPPORTED ON ide%d", hw);
1223done:
1224 printk("\n");
1225 return 1;
1226} 969}
1227 970
1228EXPORT_SYMBOL(ide_lock); 971EXPORT_SYMBOL(ide_lock);
@@ -1358,6 +1101,185 @@ static void ide_port_class_release(struct device *portdev)
1358 put_device(&hwif->gendev); 1101 put_device(&hwif->gendev);
1359} 1102}
1360 1103
1104int ide_vlb_clk;
1105EXPORT_SYMBOL_GPL(ide_vlb_clk);
1106
1107module_param_named(vlb_clock, ide_vlb_clk, int, 0);
1108MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
1109
1110int ide_pci_clk;
1111EXPORT_SYMBOL_GPL(ide_pci_clk);
1112
1113module_param_named(pci_clock, ide_pci_clk, int, 0);
1114MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
1115
1116static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
1117{
1118 int a, b, i, j = 1;
1119 unsigned int *dev_param_mask = (unsigned int *)kp->arg;
1120
1121 if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
1122 sscanf(s, "%d.%d", &a, &b) != 2)
1123 return -EINVAL;
1124
1125 i = a * MAX_DRIVES + b;
1126
1127 if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
1128 return -EINVAL;
1129
1130 if (j)
1131 *dev_param_mask |= (1 << i);
1132 else
1133 *dev_param_mask &= (1 << i);
1134
1135 return 0;
1136}
1137
1138static unsigned int ide_nodma;
1139
1140module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
1141MODULE_PARM_DESC(nodma, "disallow DMA for a device");
1142
1143static unsigned int ide_noflush;
1144
1145module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
1146MODULE_PARM_DESC(noflush, "disable flush requests for a device");
1147
1148static unsigned int ide_noprobe;
1149
1150module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
1151MODULE_PARM_DESC(noprobe, "skip probing for a device");
1152
1153static unsigned int ide_nowerr;
1154
1155module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
1156MODULE_PARM_DESC(nowerr, "ignore the WRERR_STAT bit for a device");
1157
1158static unsigned int ide_cdroms;
1159
1160module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
1161MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
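
All five per-device masks above share the "hwif.unit[:flag]" selector parsed by ide_set_dev_param_mask(): the index is hwif * MAX_DRIVES + unit, and the optional ":flag" chooses whether the bit is set or cleared. A hedged user-space sketch of the same arithmetic (MAX_DRIVES is assumed to be 2 and the hdX letter mapping is shown only for illustration):

#include <stdio.h>

#define MAX_DRIVES 2

int main(void)
{
	unsigned int mask = 0;
	int a, b, j = 1;

	if (sscanf("0.1", "%d.%d:%d", &a, &b, &j) >= 2) {
		int i = a * MAX_DRIVES + b;	/* same index math as the parser */

		mask |= 1u << i;
		printf("hd%c -> bit %d, mask %#x\n", 'a' + i, i, mask);
	}
	return 0;
}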
1162
1163struct chs_geom {
1164 unsigned int cyl;
1165 u8 head;
1166 u8 sect;
1167};
1168
1169static unsigned int ide_disks;
1170static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
1171
1172static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
1173{
1174 int a, b, c = 0, h = 0, s = 0, i, j = 1;
1175
1176 if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
1177 sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
1178 return -EINVAL;
1179
1180 i = a * MAX_DRIVES + b;
1181
1182 if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
1183 return -EINVAL;
1184
1185 if (c > INT_MAX || h > 255 || s > 255)
1186 return -EINVAL;
1187
1188 if (j)
1189 ide_disks |= (1 << i);
1190 else
1191		ide_disks &= ~(1 << i);
1192
1193 ide_disks_chs[i].cyl = c;
1194 ide_disks_chs[i].head = h;
1195 ide_disks_chs[i].sect = s;
1196
1197 return 0;
1198}
1199
1200module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
1201MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
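
The geometry variant accepts "hwif.unit:cyl,head,sect" (a bare "hwif.unit:flag" form is also recognised). A small stand-alone sketch of the same sscanf pattern, using a made-up sample geometry and assuming MAX_DRIVES is 2:

#include <stdio.h>

#define MAX_DRIVES 2

int main(void)
{
	int a, b, c, h, s;

	if (sscanf("0.0:1024,16,63", "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) == 5) {
		int i = a * MAX_DRIVES + b;

		printf("hd%c forced to CHS %d/%d/%d\n", 'a' + i, c, h, s);
	}
	return 0;
}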
1202
1203static void ide_dev_apply_params(ide_drive_t *drive)
1204{
1205 int i = drive->hwif->index * MAX_DRIVES + drive->select.b.unit;
1206
1207 if (ide_nodma & (1 << i)) {
1208 printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
1209 drive->nodma = 1;
1210 }
1211 if (ide_noflush & (1 << i)) {
1212 printk(KERN_INFO "ide: disabling flush requests for %s\n",
1213 drive->name);
1214 drive->noflush = 1;
1215 }
1216 if (ide_noprobe & (1 << i)) {
1217 printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
1218 drive->noprobe = 1;
1219 }
1220 if (ide_nowerr & (1 << i)) {
1221 printk(KERN_INFO "ide: ignoring the WRERR_STAT bit for %s\n",
1222 drive->name);
1223 drive->bad_wstat = BAD_R_STAT;
1224 }
1225 if (ide_cdroms & (1 << i)) {
1226 printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
1227 drive->present = 1;
1228 drive->media = ide_cdrom;
1229 /* an ATAPI device ignores DRDY */
1230 drive->ready_stat = 0;
1231 }
1232 if (ide_disks & (1 << i)) {
1233 drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
1234 drive->head = drive->bios_head = ide_disks_chs[i].head;
1235 drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
1236 drive->forced_geom = 1;
1237 printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
1238 drive->name,
1239 drive->cyl, drive->head, drive->sect);
1240 drive->present = 1;
1241 drive->media = ide_disk;
1242 drive->ready_stat = READY_STAT;
1243 }
1244}
1245
1246static unsigned int ide_ignore_cable;
1247
1248static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
1249{
1250 int i, j = 1;
1251
1252 if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
1253 return -EINVAL;
1254
1255 if (i >= MAX_HWIFS || j < 0 || j > 1)
1256 return -EINVAL;
1257
1258 if (j)
1259 ide_ignore_cable |= (1 << i);
1260 else
1261		ide_ignore_cable &= ~(1 << i);
1262
1263 return 0;
1264}
1265
1266module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
1267MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
1268
1269void ide_port_apply_params(ide_hwif_t *hwif)
1270{
1271 int i;
1272
1273 if (ide_ignore_cable & (1 << hwif->index)) {
1274 printk(KERN_INFO "ide: ignoring cable detection for %s\n",
1275 hwif->name);
1276 hwif->cbl = ATA_CBL_PATA40_SHORT;
1277 }
1278
1279 for (i = 0; i < MAX_DRIVES; i++)
1280 ide_dev_apply_params(&hwif->drives[i]);
1281}
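
ide_port_apply_params() is where these masks finally bite at probe time; for the port-level ignore_cable mask the effect is the same ATA_CBL_PATA40_SHORT override that the obsolete "ideX=ata66" option (removed above) used to apply. A minimal sketch, with the ATA_CBL_PATA40_SHORT value assumed only to keep the example self-contained:

#include <stdio.h>

#define ATA_CBL_PATA40_SHORT 3	/* illustrative value, not taken from ata.h */

int main(void)
{
	unsigned int ignore_cable = 1u << 0;	/* as set by "ignore_cable=0" */
	int index = 0, cbl = 0;

	if (ignore_cable & (1u << index))
		cbl = ATA_CBL_PATA40_SHORT;	/* drive-side detection ignored */
	printf("ide%d: cbl forced to %d\n", index, cbl);
	return 0;
}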
1282
1361/* 1283/*
1362 * This gets invoked once during initialization, to set *everything* up 1284
1363 */ 1285 */
@@ -1424,11 +1346,6 @@ int __init init_module (void)
1424 1346
1425void __exit cleanup_module (void) 1347void __exit cleanup_module (void)
1426{ 1348{
1427 int index;
1428
1429 for (index = 0; index < MAX_HWIFS; ++index)
1430 ide_unregister(index);
1431
1432 proc_ide_destroy(); 1349 proc_ide_destroy();
1433 1350
1434 class_destroy(ide_port_class); 1351 class_destroy(ide_port_class);
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index bc8b1f8de614..90c65cf97448 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -49,6 +49,8 @@
49 49
50#include <asm/io.h> 50#include <asm/io.h>
51 51
52#define DRV_NAME "ali14xx"
53
52/* port addresses for auto-detection */ 54/* port addresses for auto-detection */
53#define ALI_NUM_PORTS 4 55#define ALI_NUM_PORTS 4
54static const int ports[ALI_NUM_PORTS] __initdata = 56static const int ports[ALI_NUM_PORTS] __initdata =
@@ -86,7 +88,7 @@ static u8 regOff; /* output to base port to close registers */
86/* 88/*
87 * Read a controller register. 89 * Read a controller register.
88 */ 90 */
89static inline u8 inReg (u8 reg) 91static inline u8 inReg(u8 reg)
90{ 92{
91 outb_p(reg, regPort); 93 outb_p(reg, regPort);
92 return inb(dataPort); 94 return inb(dataPort);
@@ -95,7 +97,7 @@ static inline u8 inReg (u8 reg)
95/* 97/*
96 * Write a controller register. 98 * Write a controller register.
97 */ 99 */
98static void outReg (u8 data, u8 reg) 100static void outReg(u8 data, u8 reg)
99{ 101{
100 outb_p(reg, regPort); 102 outb_p(reg, regPort);
101 outb_p(data, dataPort); 103 outb_p(data, dataPort);
@@ -114,7 +116,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
114 int time1, time2; 116 int time1, time2;
115 u8 param1, param2, param3, param4; 117 u8 param1, param2, param3, param4;
116 unsigned long flags; 118 unsigned long flags;
117 int bus_speed = system_bus_clock(); 119 int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
118 120
119 /* calculate timing, according to PIO mode */ 121 /* calculate timing, according to PIO mode */
120 time1 = ide_pio_cycle_time(drive, pio); 122 time1 = ide_pio_cycle_time(drive, pio);
@@ -143,7 +145,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
143/* 145/*
144 * Auto-detect the IDE controller port. 146 * Auto-detect the IDE controller port.
145 */ 147 */
146static int __init findPort (void) 148static int __init findPort(void)
147{ 149{
148 int i; 150 int i;
149 u8 t; 151 u8 t;
@@ -175,7 +177,8 @@ static int __init findPort (void)
175/* 177/*
176 * Initialize controller registers with default values. 178 * Initialize controller registers with default values.
177 */ 179 */
178static int __init initRegisters (void) { 180static int __init initRegisters(void)
181{
179 const RegInitializer *p; 182 const RegInitializer *p;
180 u8 t; 183 u8 t;
181 unsigned long flags; 184 unsigned long flags;
@@ -191,17 +194,20 @@ static int __init initRegisters (void) {
191 return t; 194 return t;
192} 195}
193 196
197static const struct ide_port_ops ali14xx_port_ops = {
198 .set_pio_mode = ali14xx_set_pio_mode,
199};
200
194static const struct ide_port_info ali14xx_port_info = { 201static const struct ide_port_info ali14xx_port_info = {
202 .name = DRV_NAME,
195 .chipset = ide_ali14xx, 203 .chipset = ide_ali14xx,
196 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE, 204 .port_ops = &ali14xx_port_ops,
205 .host_flags = IDE_HFLAG_NO_DMA,
197 .pio_mask = ATA_PIO4, 206 .pio_mask = ATA_PIO4,
198}; 207};
199 208
200static int __init ali14xx_probe(void) 209static int __init ali14xx_probe(void)
201{ 210{
202 static u8 idx[4] = { 0, 1, 0xff, 0xff };
203 hw_regs_t hw[2];
204
205 printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n", 211 printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n",
206 basePort, regOn); 212 basePort, regOn);
207 213
@@ -211,26 +217,10 @@ static int __init ali14xx_probe(void)
211 return 1; 217 return 1;
212 } 218 }
213 219
214 memset(&hw, 0, sizeof(hw)); 220 return ide_legacy_device_add(&ali14xx_port_info, 0);
215
216 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
217 hw[0].irq = 14;
218
219 ide_std_init_ports(&hw[1], 0x170, 0x376);
220 hw[1].irq = 15;
221
222 ide_init_port_hw(&ide_hwifs[0], &hw[0]);
223 ide_init_port_hw(&ide_hwifs[1], &hw[1]);
224
225 ide_hwifs[0].set_pio_mode = &ali14xx_set_pio_mode;
226 ide_hwifs[1].set_pio_mode = &ali14xx_set_pio_mode;
227
228 ide_device_add(idx, &ali14xx_port_info);
229
230 return 0;
231} 221}
232 222
233int probe_ali14xx = 0; 223static int probe_ali14xx;
234 224
235module_param_named(probe, probe_ali14xx, bool, 0); 225module_param_named(probe, probe_ali14xx, bool, 0);
236MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); 226MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index fdd3791e465f..5c730e4dd735 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -102,7 +102,7 @@ static int buddha_ack_intr(ide_hwif_t *hwif)
102{ 102{
103 unsigned char ch; 103 unsigned char ch;
104 104
105 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 105 ch = z_readb(hwif->io_ports.irq_addr);
106 if (!(ch & 0x80)) 106 if (!(ch & 0x80))
107 return 0; 107 return 0;
108 return 1; 108 return 1;
@@ -112,9 +112,9 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
112{ 112{
113 unsigned char ch; 113 unsigned char ch;
114 114
115 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 115 ch = z_readb(hwif->io_ports.irq_addr);
116 /* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */ 116 /* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */
117 z_writeb(0, hwif->io_ports[IDE_IRQ_OFFSET]); 117 z_writeb(0, hwif->io_ports.irq_addr);
118 if (!(ch & 0x80)) 118 if (!(ch & 0x80))
119 return 0; 119 return 0;
120 return 1; 120 return 1;
@@ -128,13 +128,13 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
128 128
129 memset(hw, 0, sizeof(*hw)); 129 memset(hw, 0, sizeof(*hw));
130 130
131 hw->io_ports[IDE_DATA_OFFSET] = base; 131 hw->io_ports.data_addr = base;
132 132
133 for (i = 1; i < 8; i++) 133 for (i = 1; i < 8; i++)
134 hw->io_ports[i] = base + 2 + i * 4; 134 hw->io_ports_array[i] = base + 2 + i * 4;
135 135
136 hw->io_ports[IDE_CONTROL_OFFSET] = ctl; 136 hw->io_ports.ctl_addr = ctl;
137 hw->io_ports[IDE_IRQ_OFFSET] = irq_port; 137 hw->io_ports.irq_addr = irq_port;
138 138
139 hw->irq = IRQ_AMIGA_PORTS; 139 hw->irq = IRQ_AMIGA_PORTS;
140 hw->ack_intr = ack_intr; 140 hw->ack_intr = ack_intr;
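
This and the following legacy-host hunks (gayle, falconide, macide, q40ide, ide_platform) replace the old io_ports[IDE_*_OFFSET] array with named register fields, while io_ports_array[] remains available for index-based loops. A hedged sketch of the idea; the exact field names in the middle of the task-file block and the IDE_NR_PORTS value are assumed here for illustration:

#include <stdio.h>

struct ide_io_ports {
	unsigned long data_addr;	/* index 0 */
	unsigned long error_addr;	/* 1 */
	unsigned long nsect_addr;	/* 2 */
	unsigned long lbal_addr;	/* 3 */
	unsigned long lbam_addr;	/* 4 */
	unsigned long lbah_addr;	/* 5 */
	unsigned long device_addr;	/* 6 */
	unsigned long status_addr;	/* 7 */
	unsigned long ctl_addr;		/* 8 */
	unsigned long irq_addr;		/* 9 */
};

union hw_ports {
	struct ide_io_ports io_ports;		/* named access */
	unsigned long io_ports_array[10];	/* aliased index access */
};

int main(void)
{
	union hw_ports hw = { .io_ports_array = { 0 } };
	unsigned long base = 0xdd0000;	/* hypothetical MMIO base */
	int i;

	hw.io_ports.data_addr = base;
	for (i = 1; i < 8; i++)		/* same loop shape as buddha_setup_ports() */
		hw.io_ports_array[i] = base + 2 + i * 4;

	printf("status_addr = %#lx\n", hw.io_ports.status_addr);
	return 0;
}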
@@ -221,15 +221,13 @@ fail_base2:
221 221
222 buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr); 222 buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr);
223 223
224 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 224 hwif = ide_find_port();
225 if (hwif) { 225 if (hwif) {
226 u8 index = hwif->index; 226 u8 index = hwif->index;
227 227
228 ide_init_port_data(hwif, index); 228 ide_init_port_data(hwif, index);
229 ide_init_port_hw(hwif, &hw); 229 ide_init_port_hw(hwif, &hw);
230 230
231 hwif->mmio = 1;
232
233 idx[i] = index; 231 idx[i] = index;
234 } 232 }
235 } 233 }
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 5f69cd2ea6f7..af791a02a120 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -16,6 +16,8 @@
16 16
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19#define DRV_NAME "dtc2278"
20
19/* 21/*
20 * Changing this #undef to #define may solve start up problems in some systems. 22 * Changing this #undef to #define may solve start up problems in some systems.
21 */ 23 */
@@ -86,30 +88,26 @@ static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio)
86 } 88 }
87} 89}
88 90
91static const struct ide_port_ops dtc2278_port_ops = {
92 .set_pio_mode = dtc2278_set_pio_mode,
93};
94
89static const struct ide_port_info dtc2278_port_info __initdata = { 95static const struct ide_port_info dtc2278_port_info __initdata = {
96 .name = DRV_NAME,
90 .chipset = ide_dtc2278, 97 .chipset = ide_dtc2278,
98 .port_ops = &dtc2278_port_ops,
91 .host_flags = IDE_HFLAG_SERIALIZE | 99 .host_flags = IDE_HFLAG_SERIALIZE |
92 IDE_HFLAG_NO_UNMASK_IRQS | 100 IDE_HFLAG_NO_UNMASK_IRQS |
93 IDE_HFLAG_IO_32BIT | 101 IDE_HFLAG_IO_32BIT |
94 /* disallow ->io_32bit changes */ 102 /* disallow ->io_32bit changes */
95 IDE_HFLAG_NO_IO_32BIT | 103 IDE_HFLAG_NO_IO_32BIT |
96 IDE_HFLAG_NO_DMA | 104 IDE_HFLAG_NO_DMA,
97 IDE_HFLAG_NO_AUTOTUNE,
98 .pio_mask = ATA_PIO4, 105 .pio_mask = ATA_PIO4,
99}; 106};
100 107
101static int __init dtc2278_probe(void) 108static int __init dtc2278_probe(void)
102{ 109{
103 unsigned long flags; 110 unsigned long flags;
104 ide_hwif_t *hwif, *mate;
105 static u8 idx[4] = { 0, 1, 0xff, 0xff };
106 hw_regs_t hw[2];
107
108 hwif = &ide_hwifs[0];
109 mate = &ide_hwifs[1];
110
111 if (hwif->chipset != ide_unknown || mate->chipset != ide_unknown)
112 return 1;
113 111
114 local_irq_save(flags); 112 local_irq_save(flags);
115 /* 113 /*
@@ -129,25 +127,10 @@ static int __init dtc2278_probe(void)
129#endif 127#endif
130 local_irq_restore(flags); 128 local_irq_restore(flags);
131 129
132 memset(&hw, 0, sizeof(hw)); 130 return ide_legacy_device_add(&dtc2278_port_info, 0);
133
134 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
135 hw[0].irq = 14;
136
137 ide_std_init_ports(&hw[1], 0x170, 0x376);
138 hw[1].irq = 15;
139
140 ide_init_port_hw(hwif, &hw[0]);
141 ide_init_port_hw(mate, &hw[1]);
142
143 hwif->set_pio_mode = &dtc2278_set_pio_mode;
144
145 ide_device_add(idx, &dtc2278_port_info);
146
147 return 0;
148} 131}
149 132
150int probe_dtc2278 = 0; 133static int probe_dtc2278;
151 134
152module_param_named(probe, probe_dtc2278, bool, 0); 135module_param_named(probe, probe_dtc2278, bool, 0);
153MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets"); 136MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index e950afa5939c..56cdaa0eeea5 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -22,6 +22,7 @@
22#include <asm/atariints.h> 22#include <asm/atariints.h>
23#include <asm/atari_stdma.h> 23#include <asm/atari_stdma.h>
24 24
25#define DRV_NAME "falconide"
25 26
26 /* 27 /*
27 * Base of the IDE interface 28 * Base of the IDE interface
@@ -49,12 +50,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
49 50
50 memset(hw, 0, sizeof(*hw)); 51 memset(hw, 0, sizeof(*hw));
51 52
52 hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE; 53 hw->io_ports.data_addr = ATA_HD_BASE;
53 54
54 for (i = 1; i < 8; i++) 55 for (i = 1; i < 8; i++)
55 hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4; 56 hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4;
56 57
57 hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL; 58 hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL;
58 59
59 hw->irq = IRQ_MFP_IDE; 60 hw->irq = IRQ_MFP_IDE;
60 hw->ack_intr = NULL; 61 hw->ack_intr = NULL;
@@ -74,9 +75,14 @@ static int __init falconide_init(void)
74 75
75 printk(KERN_INFO "ide: Falcon IDE controller\n"); 76 printk(KERN_INFO "ide: Falcon IDE controller\n");
76 77
78 if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) {
79 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
80 return -EBUSY;
81 }
82
77 falconide_setup_ports(&hw); 83 falconide_setup_ports(&hw);
78 84
79 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 85 hwif = ide_find_port();
80 if (hwif) { 86 if (hwif) {
81 u8 index = hwif->index; 87 u8 index = hwif->index;
82 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 88 u8 idx[4] = { index, 0xff, 0xff, 0xff };
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index e3b4638cc883..a9c2593a898c 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -63,6 +63,8 @@
63#define GAYLE_HAS_CONTROL_REG (!ide_doubler) 63#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
64#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000) 64#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
65int ide_doubler = 0; /* support IDE doublers? */ 65int ide_doubler = 0; /* support IDE doublers? */
66module_param_named(doubler, ide_doubler, bool, 0);
67MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
66#endif /* CONFIG_BLK_DEV_IDEDOUBLER */ 68#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
67 69
68 70
@@ -74,7 +76,7 @@ static int gayle_ack_intr_a4000(ide_hwif_t *hwif)
74{ 76{
75 unsigned char ch; 77 unsigned char ch;
76 78
77 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 79 ch = z_readb(hwif->io_ports.irq_addr);
78 if (!(ch & GAYLE_IRQ_IDE)) 80 if (!(ch & GAYLE_IRQ_IDE))
79 return 0; 81 return 0;
80 return 1; 82 return 1;
@@ -84,11 +86,11 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
84{ 86{
85 unsigned char ch; 87 unsigned char ch;
86 88
87 ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); 89 ch = z_readb(hwif->io_ports.irq_addr);
88 if (!(ch & GAYLE_IRQ_IDE)) 90 if (!(ch & GAYLE_IRQ_IDE))
89 return 0; 91 return 0;
90 (void)z_readb(hwif->io_ports[IDE_STATUS_OFFSET]); 92 (void)z_readb(hwif->io_ports.status_addr);
91 z_writeb(0x7c, hwif->io_ports[IDE_IRQ_OFFSET]); 93 z_writeb(0x7c, hwif->io_ports.irq_addr);
92 return 1; 94 return 1;
93} 95}
94 96
@@ -100,13 +102,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
100 102
101 memset(hw, 0, sizeof(*hw)); 103 memset(hw, 0, sizeof(*hw));
102 104
103 hw->io_ports[IDE_DATA_OFFSET] = base; 105 hw->io_ports.data_addr = base;
104 106
105 for (i = 1; i < 8; i++) 107 for (i = 1; i < 8; i++)
106 hw->io_ports[i] = base + 2 + i * 4; 108 hw->io_ports_array[i] = base + 2 + i * 4;
107 109
108 hw->io_ports[IDE_CONTROL_OFFSET] = ctl; 110 hw->io_ports.ctl_addr = ctl;
109 hw->io_ports[IDE_IRQ_OFFSET] = irq_port; 111 hw->io_ports.irq_addr = irq_port;
110 112
111 hw->irq = IRQ_AMIGA_PORTS; 113 hw->irq = IRQ_AMIGA_PORTS;
112 hw->ack_intr = ack_intr; 114 hw->ack_intr = ack_intr;
@@ -175,15 +177,13 @@ found:
175 177
176 gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr); 178 gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr);
177 179
178 hwif = ide_find_port(base); 180 hwif = ide_find_port();
179 if (hwif) { 181 if (hwif) {
180 u8 index = hwif->index; 182 u8 index = hwif->index;
181 183
182 ide_init_port_data(hwif, index); 184 ide_init_port_data(hwif, index);
183 ide_init_port_hw(hwif, &hw); 185 ide_init_port_hw(hwif, &hw);
184 186
185 hwif->mmio = 1;
186
187 idx[i] = index; 187 idx[i] = index;
188 } else 188 } else
189 release_mem_region(res_start, res_n); 189 release_mem_region(res_start, res_n);
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 0b0d86731927..abdedf56643e 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -122,12 +122,12 @@ static int hd_error;
122 * This struct defines the HD's and their types. 122 * This struct defines the HD's and their types.
123 */ 123 */
124struct hd_i_struct { 124struct hd_i_struct {
125 unsigned int head,sect,cyl,wpcom,lzone,ctl; 125 unsigned int head, sect, cyl, wpcom, lzone, ctl;
126 int unit; 126 int unit;
127 int recalibrate; 127 int recalibrate;
128 int special_op; 128 int special_op;
129}; 129};
130 130
131#ifdef HD_TYPE 131#ifdef HD_TYPE
132static struct hd_i_struct hd_info[] = { HD_TYPE }; 132static struct hd_i_struct hd_info[] = { HD_TYPE };
133static int NR_HD = ARRAY_SIZE(hd_info); 133static int NR_HD = ARRAY_SIZE(hd_info);
@@ -168,7 +168,7 @@ unsigned long read_timer(void)
168 168
169 spin_lock_irqsave(&i8253_lock, flags); 169 spin_lock_irqsave(&i8253_lock, flags);
170 t = jiffies * 11932; 170 t = jiffies * 11932;
171 outb_p(0, 0x43); 171 outb_p(0, 0x43);
172 i = inb_p(0x40); 172 i = inb_p(0x40);
173 i |= inb(0x40) << 8; 173 i |= inb(0x40) << 8;
174 spin_unlock_irqrestore(&i8253_lock, flags); 174 spin_unlock_irqrestore(&i8253_lock, flags);
@@ -183,7 +183,7 @@ static void __init hd_setup(char *str, int *ints)
183 if (ints[0] != 3) 183 if (ints[0] != 3)
184 return; 184 return;
185 if (hd_info[0].head != 0) 185 if (hd_info[0].head != 0)
186 hdind=1; 186 hdind = 1;
187 hd_info[hdind].head = ints[2]; 187 hd_info[hdind].head = ints[2];
188 hd_info[hdind].sect = ints[3]; 188 hd_info[hdind].sect = ints[3];
189 hd_info[hdind].cyl = ints[1]; 189 hd_info[hdind].cyl = ints[1];
@@ -193,7 +193,7 @@ static void __init hd_setup(char *str, int *ints)
193 NR_HD = hdind+1; 193 NR_HD = hdind+1;
194} 194}
195 195
196static void dump_status (const char *msg, unsigned int stat) 196static void dump_status(const char *msg, unsigned int stat)
197{ 197{
198 char *name = "hd?"; 198 char *name = "hd?";
199 if (CURRENT) 199 if (CURRENT)
@@ -291,7 +291,6 @@ static int controller_ready(unsigned int drive, unsigned int head)
291 return 0; 291 return 0;
292} 292}
293 293
294
295static void hd_out(struct hd_i_struct *disk, 294static void hd_out(struct hd_i_struct *disk,
296 unsigned int nsect, 295 unsigned int nsect,
297 unsigned int sect, 296 unsigned int sect,
@@ -313,15 +312,15 @@ static void hd_out(struct hd_i_struct *disk,
313 return; 312 return;
314 } 313 }
315 SET_HANDLER(intr_addr); 314 SET_HANDLER(intr_addr);
316 outb_p(disk->ctl,HD_CMD); 315 outb_p(disk->ctl, HD_CMD);
317 port=HD_DATA; 316 port = HD_DATA;
318 outb_p(disk->wpcom>>2,++port); 317 outb_p(disk->wpcom >> 2, ++port);
319 outb_p(nsect,++port); 318 outb_p(nsect, ++port);
320 outb_p(sect,++port); 319 outb_p(sect, ++port);
321 outb_p(cyl,++port); 320 outb_p(cyl, ++port);
322 outb_p(cyl>>8,++port); 321 outb_p(cyl >> 8, ++port);
323 outb_p(0xA0|(disk->unit<<4)|head,++port); 322 outb_p(0xA0 | (disk->unit << 4) | head, ++port);
324 outb_p(cmd,++port); 323 outb_p(cmd, ++port);
325} 324}
326 325
327static void hd_request (void); 326static void hd_request (void);
@@ -344,14 +343,14 @@ static void reset_controller(void)
344{ 343{
345 int i; 344 int i;
346 345
347 outb_p(4,HD_CMD); 346 outb_p(4, HD_CMD);
348 for(i = 0; i < 1000; i++) barrier(); 347 for (i = 0; i < 1000; i++) barrier();
349 outb_p(hd_info[0].ctl & 0x0f,HD_CMD); 348 outb_p(hd_info[0].ctl & 0x0f, HD_CMD);
350 for(i = 0; i < 1000; i++) barrier(); 349 for (i = 0; i < 1000; i++) barrier();
351 if (drive_busy()) 350 if (drive_busy())
352 printk("hd: controller still busy\n"); 351 printk("hd: controller still busy\n");
353 else if ((hd_error = inb(HD_ERROR)) != 1) 352 else if ((hd_error = inb(HD_ERROR)) != 1)
354 printk("hd: controller reset failed: %02x\n",hd_error); 353 printk("hd: controller reset failed: %02x\n", hd_error);
355} 354}
356 355
357static void reset_hd(void) 356static void reset_hd(void)
@@ -371,8 +370,8 @@ repeat:
371 if (++i < NR_HD) { 370 if (++i < NR_HD) {
372 struct hd_i_struct *disk = &hd_info[i]; 371 struct hd_i_struct *disk = &hd_info[i];
373 disk->special_op = disk->recalibrate = 1; 372 disk->special_op = disk->recalibrate = 1;
374 hd_out(disk,disk->sect,disk->sect,disk->head-1, 373 hd_out(disk, disk->sect, disk->sect, disk->head-1,
375 disk->cyl,WIN_SPECIFY,&reset_hd); 374 disk->cyl, WIN_SPECIFY, &reset_hd);
376 if (reset) 375 if (reset)
377 goto repeat; 376 goto repeat;
378 } else 377 } else
@@ -393,7 +392,7 @@ static void unexpected_hd_interrupt(void)
393 unsigned int stat = inb_p(HD_STATUS); 392 unsigned int stat = inb_p(HD_STATUS);
394 393
395 if (stat & (BUSY_STAT|DRQ_STAT|ECC_STAT|ERR_STAT)) { 394 if (stat & (BUSY_STAT|DRQ_STAT|ECC_STAT|ERR_STAT)) {
396 dump_status ("unexpected interrupt", stat); 395 dump_status("unexpected interrupt", stat);
397 SET_TIMER; 396 SET_TIMER;
398 } 397 }
399} 398}
@@ -453,7 +452,7 @@ static void read_intr(void)
453 return; 452 return;
454ok_to_read: 453ok_to_read:
455 req = CURRENT; 454 req = CURRENT;
456 insw(HD_DATA,req->buffer,256); 455 insw(HD_DATA, req->buffer, 256);
457 req->sector++; 456 req->sector++;
458 req->buffer += 512; 457 req->buffer += 512;
459 req->errors = 0; 458 req->errors = 0;
@@ -507,7 +506,7 @@ ok_to_write:
507 end_request(req, 1); 506 end_request(req, 1);
508 if (i > 0) { 507 if (i > 0) {
509 SET_HANDLER(&write_intr); 508 SET_HANDLER(&write_intr);
510 outsw(HD_DATA,req->buffer,256); 509 outsw(HD_DATA, req->buffer, 256);
511 local_irq_enable(); 510 local_irq_enable();
512 } else { 511 } else {
513#if (HD_DELAY > 0) 512#if (HD_DELAY > 0)
@@ -560,11 +559,11 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
560{ 559{
561 if (disk->recalibrate) { 560 if (disk->recalibrate) {
562 disk->recalibrate = 0; 561 disk->recalibrate = 0;
563 hd_out(disk,disk->sect,0,0,0,WIN_RESTORE,&recal_intr); 562 hd_out(disk, disk->sect, 0, 0, 0, WIN_RESTORE, &recal_intr);
564 return reset; 563 return reset;
565 } 564 }
566 if (disk->head > 16) { 565 if (disk->head > 16) {
567 printk ("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); 566 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
568 end_request(req, 0); 567 end_request(req, 0);
569 } 568 }
570 disk->special_op = 0; 569 disk->special_op = 0;
@@ -633,19 +632,21 @@ repeat:
633 if (blk_fs_request(req)) { 632 if (blk_fs_request(req)) {
634 switch (rq_data_dir(req)) { 633 switch (rq_data_dir(req)) {
635 case READ: 634 case READ:
636 hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr); 635 hd_out(disk, nsect, sec, head, cyl, WIN_READ,
636 &read_intr);
637 if (reset) 637 if (reset)
638 goto repeat; 638 goto repeat;
639 break; 639 break;
640 case WRITE: 640 case WRITE:
641 hd_out(disk,nsect,sec,head,cyl,WIN_WRITE,&write_intr); 641 hd_out(disk, nsect, sec, head, cyl, WIN_WRITE,
642 &write_intr);
642 if (reset) 643 if (reset)
643 goto repeat; 644 goto repeat;
644 if (wait_DRQ()) { 645 if (wait_DRQ()) {
645 bad_rw_intr(); 646 bad_rw_intr();
646 goto repeat; 647 goto repeat;
647 } 648 }
648 outsw(HD_DATA,req->buffer,256); 649 outsw(HD_DATA, req->buffer, 256);
649 break; 650 break;
650 default: 651 default:
651 printk("unknown hd-command\n"); 652 printk("unknown hd-command\n");
@@ -655,7 +656,7 @@ repeat:
655 } 656 }
656} 657}
657 658
658static void do_hd_request (struct request_queue * q) 659static void do_hd_request(struct request_queue *q)
659{ 660{
660 disable_irq(HD_IRQ); 661 disable_irq(HD_IRQ);
661 hd_request(); 662 hd_request();
@@ -708,12 +709,12 @@ static int __init hd_init(void)
708{ 709{
709 int drive; 710 int drive;
710 711
711 if (register_blkdev(MAJOR_NR,"hd")) 712 if (register_blkdev(MAJOR_NR, "hd"))
712 return -1; 713 return -1;
713 714
714 hd_queue = blk_init_queue(do_hd_request, &hd_lock); 715 hd_queue = blk_init_queue(do_hd_request, &hd_lock);
715 if (!hd_queue) { 716 if (!hd_queue) {
716 unregister_blkdev(MAJOR_NR,"hd"); 717 unregister_blkdev(MAJOR_NR, "hd");
717 return -ENOMEM; 718 return -ENOMEM;
718 } 719 }
719 720
@@ -742,7 +743,7 @@ static int __init hd_init(void)
742 goto out; 743 goto out;
743 } 744 }
744 745
745 for (drive=0 ; drive < NR_HD ; drive++) { 746 for (drive = 0 ; drive < NR_HD ; drive++) {
746 struct gendisk *disk = alloc_disk(64); 747 struct gendisk *disk = alloc_disk(64);
747 struct hd_i_struct *p = &hd_info[drive]; 748 struct hd_i_struct *p = &hd_info[drive];
748 if (!disk) 749 if (!disk)
@@ -756,7 +757,7 @@ static int __init hd_init(void)
756 disk->queue = hd_queue; 757 disk->queue = hd_queue;
757 p->unit = drive; 758 p->unit = drive;
758 hd_gendisk[drive] = disk; 759 hd_gendisk[drive] = disk;
759 printk ("%s: %luMB, CHS=%d/%d/%d\n", 760 printk("%s: %luMB, CHS=%d/%d/%d\n",
760 disk->disk_name, (unsigned long)get_capacity(disk)/2048, 761 disk->disk_name, (unsigned long)get_capacity(disk)/2048,
761 p->cyl, p->head, p->sect); 762 p->cyl, p->head, p->sect);
762 } 763 }
@@ -776,7 +777,7 @@ static int __init hd_init(void)
776 } 777 }
777 778
778 /* Let them fly */ 779 /* Let them fly */
779 for(drive=0; drive < NR_HD; drive++) 780 for (drive = 0; drive < NR_HD; drive++)
780 add_disk(hd_gendisk[drive]); 781 add_disk(hd_gendisk[drive]);
781 782
782 return 0; 783 return 0;
@@ -791,7 +792,7 @@ out1:
791 NR_HD = 0; 792 NR_HD = 0;
792out: 793out:
793 del_timer(&device_timer); 794 del_timer(&device_timer);
794 unregister_blkdev(MAJOR_NR,"hd"); 795 unregister_blkdev(MAJOR_NR, "hd");
795 blk_cleanup_queue(hd_queue); 796 blk_cleanup_queue(hd_queue);
796 return -1; 797 return -1;
797Enomem: 798Enomem:
@@ -800,7 +801,8 @@ Enomem:
800 goto out; 801 goto out;
801} 802}
802 803
803static int __init parse_hd_setup (char *line) { 804static int __init parse_hd_setup(char *line)
805{
804 int ints[6]; 806 int ints[6];
805 807
806 (void) get_options(line, ARRAY_SIZE(ints), ints); 808 (void) get_options(line, ARRAY_SIZE(ints), ints);
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 88fe9070c9c3..4fe516df9f74 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -35,6 +35,7 @@
35 * Try: http://www.maf.iki.fi/~maf/ht6560b/ 35 * Try: http://www.maf.iki.fi/~maf/ht6560b/
36 */ 36 */
37 37
38#define DRV_NAME "ht6560b"
38#define HT6560B_VERSION "v0.08" 39#define HT6560B_VERSION "v0.08"
39 40
40#include <linux/module.h> 41#include <linux/module.h>
@@ -156,8 +157,8 @@ static void ht6560b_selectproc (ide_drive_t *drive)
156 /* 157 /*
157 * Set timing for this drive: 158 * Set timing for this drive:
158 */ 159 */
159 outb(timing, hwif->io_ports[IDE_SELECT_OFFSET]); 160 outb(timing, hwif->io_ports.device_addr);
160 (void)inb(hwif->io_ports[IDE_STATUS_OFFSET]); 161 (void)inb(hwif->io_ports.status_addr);
161#ifdef DEBUG 162#ifdef DEBUG
162 printk("ht6560b: %s: select=%#x timing=%#x\n", 163 printk("ht6560b: %s: select=%#x timing=%#x\n",
163 drive->name, select, timing); 164 drive->name, select, timing);
@@ -211,8 +212,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
211{ 212{
212 int active_time, recovery_time; 213 int active_time, recovery_time;
213 int active_cycles, recovery_cycles; 214 int active_cycles, recovery_cycles;
214 int bus_speed = system_bus_clock(); 215 int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
215 216
216 if (pio) { 217 if (pio) {
217 unsigned int cycle_time; 218 unsigned int cycle_time;
218 219
@@ -322,66 +323,44 @@ static void __init ht6560b_port_init_devs(ide_hwif_t *hwif)
322 hwif->drives[1].drive_data = t; 323 hwif->drives[1].drive_data = t;
323} 324}
324 325
325int probe_ht6560b = 0; 326static int probe_ht6560b;
326 327
327module_param_named(probe, probe_ht6560b, bool, 0); 328module_param_named(probe, probe_ht6560b, bool, 0);
328MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); 329MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
329 330
331static const struct ide_port_ops ht6560b_port_ops = {
332 .port_init_devs = ht6560b_port_init_devs,
333 .set_pio_mode = ht6560b_set_pio_mode,
334 .selectproc = ht6560b_selectproc,
335};
336
330static const struct ide_port_info ht6560b_port_info __initdata = { 337static const struct ide_port_info ht6560b_port_info __initdata = {
338 .name = DRV_NAME,
331 .chipset = ide_ht6560b, 339 .chipset = ide_ht6560b,
340 .port_ops = &ht6560b_port_ops,
332 .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */ 341 .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */
333 IDE_HFLAG_NO_DMA | 342 IDE_HFLAG_NO_DMA |
334 IDE_HFLAG_NO_AUTOTUNE |
335 IDE_HFLAG_ABUSE_PREFETCH, 343 IDE_HFLAG_ABUSE_PREFETCH,
336 .pio_mask = ATA_PIO4, 344 .pio_mask = ATA_PIO4,
337}; 345};
338 346
339static int __init ht6560b_init(void) 347static int __init ht6560b_init(void)
340{ 348{
341 ide_hwif_t *hwif, *mate;
342 static u8 idx[4] = { 0, 1, 0xff, 0xff };
343 hw_regs_t hw[2];
344
345 if (probe_ht6560b == 0) 349 if (probe_ht6560b == 0)
346 return -ENODEV; 350 return -ENODEV;
347 351
348 hwif = &ide_hwifs[0]; 352 if (!request_region(HT_CONFIG_PORT, 1, DRV_NAME)) {
349 mate = &ide_hwifs[1];
350
351 if (!request_region(HT_CONFIG_PORT, 1, hwif->name)) {
352 printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n", 353 printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n",
353 __FUNCTION__); 354 __func__);
354 return -ENODEV; 355 return -ENODEV;
355 } 356 }
356 357
357 if (!try_to_init_ht6560b()) { 358 if (!try_to_init_ht6560b()) {
358 printk(KERN_NOTICE "%s: HBA not found\n", __FUNCTION__); 359 printk(KERN_NOTICE "%s: HBA not found\n", __func__);
359 goto release_region; 360 goto release_region;
360 } 361 }
361 362
362 memset(&hw, 0, sizeof(hw)); 363 return ide_legacy_device_add(&ht6560b_port_info, 0);
363
364 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
365 hw[0].irq = 14;
366
367 ide_std_init_ports(&hw[1], 0x170, 0x376);
368 hw[1].irq = 15;
369
370 ide_init_port_hw(hwif, &hw[0]);
371 ide_init_port_hw(mate, &hw[1]);
372
373 hwif->selectproc = &ht6560b_selectproc;
374 hwif->set_pio_mode = &ht6560b_set_pio_mode;
375
376 mate->selectproc = &ht6560b_selectproc;
377 mate->set_pio_mode = &ht6560b_set_pio_mode;
378
379 hwif->port_init_devs = ht6560b_port_init_devs;
380 mate->port_init_devs = ht6560b_port_init_devs;
381
382 ide_device_add(idx, &ht6560b_port_info);
383
384 return 0;
385 364
386release_region: 365release_region:
387 release_region(HT_CONFIG_PORT, 1); 366 release_region(HT_CONFIG_PORT, 1);
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index ecd7f3553554..ecae916a3385 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -4,7 +4,9 @@
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/ide.h> 5#include <linux/ide.h>
6 6
7int probe_4drives = 0; 7#define DRV_NAME "ide-4drives"
8
9static int probe_4drives;
8 10
9module_param_named(probe, probe_4drives, bool, 0); 11module_param_named(probe, probe_4drives, bool, 0);
10MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); 12MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
@@ -12,31 +14,51 @@ MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
12static int __init ide_4drives_init(void) 14static int __init ide_4drives_init(void)
13{ 15{
14 ide_hwif_t *hwif, *mate; 16 ide_hwif_t *hwif, *mate;
15 u8 idx[4] = { 0, 1, 0xff, 0xff }; 17 unsigned long base = 0x1f0, ctl = 0x3f6;
18 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
16 hw_regs_t hw; 19 hw_regs_t hw;
17 20
18 if (probe_4drives == 0) 21 if (probe_4drives == 0)
19 return -ENODEV; 22 return -ENODEV;
20 23
21 hwif = &ide_hwifs[0]; 24 if (!request_region(base, 8, DRV_NAME)) {
22 mate = &ide_hwifs[1]; 25 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
26 DRV_NAME, base, base + 7);
27 return -EBUSY;
28 }
29
30 if (!request_region(ctl, 1, DRV_NAME)) {
31 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
32 DRV_NAME, ctl);
33 release_region(base, 8);
34 return -EBUSY;
35 }
23 36
24 memset(&hw, 0, sizeof(hw)); 37 memset(&hw, 0, sizeof(hw));
25 38
26 ide_std_init_ports(&hw, 0x1f0, 0x3f6); 39 ide_std_init_ports(&hw, base, ctl);
27 hw.irq = 14; 40 hw.irq = 14;
28 hw.chipset = ide_4drives; 41 hw.chipset = ide_4drives;
29 42
30 ide_init_port_hw(hwif, &hw); 43 hwif = ide_find_port();
31 ide_init_port_hw(mate, &hw); 44 if (hwif) {
32 45 ide_init_port_hw(hwif, &hw);
33 mate->drives[0].select.all ^= 0x20; 46 idx[0] = hwif->index;
34 mate->drives[1].select.all ^= 0x20; 47 }
35 48
36 hwif->mate = mate; 49 mate = ide_find_port();
37 mate->mate = hwif; 50 if (mate) {
38 51 ide_init_port_hw(mate, &hw);
39 hwif->serialized = mate->serialized = 1; 52 mate->drives[0].select.all ^= 0x20;
53 mate->drives[1].select.all ^= 0x20;
54 idx[1] = mate->index;
55
56 if (hwif) {
57 hwif->mate = mate;
58 mate->mate = hwif;
59 hwif->serialized = mate->serialized = 1;
60 }
61 }
40 62
41 ide_device_add(idx, NULL); 63 ide_device_add(idx, NULL);
42 64
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 9a23b94f2939..aa2ea3deac85 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -51,6 +51,8 @@
51#include <pcmcia/cisreg.h> 51#include <pcmcia/cisreg.h>
52#include <pcmcia/ciscode.h> 52#include <pcmcia/ciscode.h>
53 53
54#define DRV_NAME "ide-cs"
55
54/*====================================================================*/ 56/*====================================================================*/
55 57
56/* Module parameters */ 58/* Module parameters */
@@ -72,16 +74,11 @@ static char *version =
72 74
73/*====================================================================*/ 75/*====================================================================*/
74 76
75static const char ide_major[] = {
76 IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR,
77 IDE4_MAJOR, IDE5_MAJOR
78};
79
80typedef struct ide_info_t { 77typedef struct ide_info_t {
81 struct pcmcia_device *p_dev; 78 struct pcmcia_device *p_dev;
79 ide_hwif_t *hwif;
82 int ndev; 80 int ndev;
83 dev_node_t node; 81 dev_node_t node;
84 int hd;
85} ide_info_t; 82} ide_info_t;
86 83
87static void ide_release(struct pcmcia_device *); 84static void ide_release(struct pcmcia_device *);
@@ -136,45 +133,71 @@ static int ide_probe(struct pcmcia_device *link)
136 133
137static void ide_detach(struct pcmcia_device *link) 134static void ide_detach(struct pcmcia_device *link)
138{ 135{
136 ide_info_t *info = link->priv;
137 ide_hwif_t *hwif = info->hwif;
138
139 DEBUG(0, "ide_detach(0x%p)\n", link); 139 DEBUG(0, "ide_detach(0x%p)\n", link);
140 140
141 ide_release(link); 141 ide_release(link);
142 142
143 kfree(link->priv); 143 release_region(hwif->io_ports.ctl_addr, 1);
144 release_region(hwif->io_ports.data_addr, 8);
145
146 kfree(info);
144} /* ide_detach */ 147} /* ide_detach */
145 148
146static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle) 149static const struct ide_port_ops idecs_port_ops = {
150 .quirkproc = ide_undecoded_slave,
151};
152
153static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
154 unsigned long irq, struct pcmcia_device *handle)
147{ 155{
148 ide_hwif_t *hwif; 156 ide_hwif_t *hwif;
149 hw_regs_t hw; 157 hw_regs_t hw;
150 int i; 158 int i;
151 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 159 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
152 160
161 if (!request_region(io, 8, DRV_NAME)) {
162 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
163 DRV_NAME, io, io + 7);
164 return NULL;
165 }
166
167 if (!request_region(ctl, 1, DRV_NAME)) {
168 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
169 DRV_NAME, ctl);
170 release_region(io, 8);
171 return NULL;
172 }
173
153 memset(&hw, 0, sizeof(hw)); 174 memset(&hw, 0, sizeof(hw));
154 ide_std_init_ports(&hw, io, ctl); 175 ide_std_init_ports(&hw, io, ctl);
155 hw.irq = irq; 176 hw.irq = irq;
156 hw.chipset = ide_pci; 177 hw.chipset = ide_pci;
157 hw.dev = &handle->dev; 178 hw.dev = &handle->dev;
158 179
159 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 180 hwif = ide_find_port();
160 if (hwif == NULL) 181 if (hwif == NULL)
161 return -1; 182 goto out_release;
162 183
163 i = hwif->index; 184 i = hwif->index;
164 185
165 if (hwif->present) 186 ide_init_port_data(hwif, i);
166 ide_unregister(i);
167 else
168 ide_init_port_data(hwif, i);
169
170 ide_init_port_hw(hwif, &hw); 187 ide_init_port_hw(hwif, &hw);
171 hwif->quirkproc = &ide_undecoded_slave; 188 hwif->port_ops = &idecs_port_ops;
172 189
173 idx[0] = i; 190 idx[0] = i;
174 191
175 ide_device_add(idx, NULL); 192 ide_device_add(idx, NULL);
176 193
177 return hwif->present ? i : -1; 194 if (hwif->present)
195 return hwif;
196
197out_release:
198 release_region(ctl, 1);
199 release_region(io, 8);
200 return NULL;
178} 201}
179 202
180/*====================================================================== 203/*======================================================================
@@ -199,8 +222,9 @@ static int ide_config(struct pcmcia_device *link)
199 cistpl_cftable_entry_t dflt; 222 cistpl_cftable_entry_t dflt;
200 } *stk = NULL; 223 } *stk = NULL;
201 cistpl_cftable_entry_t *cfg; 224 cistpl_cftable_entry_t *cfg;
202 int i, pass, last_ret = 0, last_fn = 0, hd, is_kme = 0; 225 int i, pass, last_ret = 0, last_fn = 0, is_kme = 0;
203 unsigned long io_base, ctl_base; 226 unsigned long io_base, ctl_base;
227 ide_hwif_t *hwif;
204 228
205 DEBUG(0, "ide_config(0x%p)\n", link); 229 DEBUG(0, "ide_config(0x%p)\n", link);
206 230
@@ -296,14 +320,15 @@ static int ide_config(struct pcmcia_device *link)
296 outb(0x81, ctl_base+1); 320 outb(0x81, ctl_base+1);
297 321
298 /* retry registration in case device is still spinning up */ 322 /* retry registration in case device is still spinning up */
299 for (hd = -1, i = 0; i < 10; i++) { 323 for (i = 0; i < 10; i++) {
300 hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); 324 hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
301 if (hd >= 0) break; 325 if (hwif)
326 break;
302 if (link->io.NumPorts1 == 0x20) { 327 if (link->io.NumPorts1 == 0x20) {
303 outb(0x02, ctl_base + 0x10); 328 outb(0x02, ctl_base + 0x10);
304 hd = idecs_register(io_base + 0x10, ctl_base + 0x10, 329 hwif = idecs_register(io_base + 0x10, ctl_base + 0x10,
305 link->irq.AssignedIRQ, link); 330 link->irq.AssignedIRQ, link);
306 if (hd >= 0) { 331 if (hwif) {
307 io_base += 0x10; 332 io_base += 0x10;
308 ctl_base += 0x10; 333 ctl_base += 0x10;
309 break; 334 break;
@@ -312,7 +337,7 @@ static int ide_config(struct pcmcia_device *link)
312 msleep(100); 337 msleep(100);
313 } 338 }
314 339
315 if (hd < 0) { 340 if (hwif == NULL) {
316 printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx" 341 printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx"
317 ", irq %u failed\n", io_base, ctl_base, 342 ", irq %u failed\n", io_base, ctl_base,
318 link->irq.AssignedIRQ); 343 link->irq.AssignedIRQ);
@@ -320,10 +345,10 @@ static int ide_config(struct pcmcia_device *link)
320 } 345 }
321 346
322 info->ndev = 1; 347 info->ndev = 1;
323 sprintf(info->node.dev_name, "hd%c", 'a' + (hd * 2)); 348 sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2);
324 info->node.major = ide_major[hd]; 349 info->node.major = hwif->major;
325 info->node.minor = 0; 350 info->node.minor = 0;
326 info->hd = hd; 351 info->hwif = hwif;
327 link->dev_node = &info->node; 352 link->dev_node = &info->node;
328 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", 353 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
329 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); 354 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -354,13 +379,14 @@ failed:
354void ide_release(struct pcmcia_device *link) 379void ide_release(struct pcmcia_device *link)
355{ 380{
356 ide_info_t *info = link->priv; 381 ide_info_t *info = link->priv;
382 ide_hwif_t *hwif = info->hwif;
357 383
358 DEBUG(0, "ide_release(0x%p)\n", link); 384 DEBUG(0, "ide_release(0x%p)\n", link);
359 385
360 if (info->ndev) { 386 if (info->ndev) {
361 /* FIXME: if this fails we need to queue the cleanup somehow 387 /* FIXME: if this fails we need to queue the cleanup somehow
362 -- need to investigate the required PCMCIA magic */ 388 -- need to investigate the required PCMCIA magic */
363 ide_unregister(info->hd); 389 ide_unregister(hwif);
364 } 390 }
365 info->ndev = 0; 391 info->ndev = 0;
366 392
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 361b1bb544bf..8279dc7ca4c0 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -30,14 +30,14 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
30 unsigned long port = (unsigned long)base; 30 unsigned long port = (unsigned long)base;
31 int i; 31 int i;
32 32
33 hw->io_ports[IDE_DATA_OFFSET] = port; 33 hw->io_ports.data_addr = port;
34 34
35 port += (1 << pdata->ioport_shift); 35 port += (1 << pdata->ioport_shift);
36 for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET; 36 for (i = 1; i <= 7;
37 i++, port += (1 << pdata->ioport_shift)) 37 i++, port += (1 << pdata->ioport_shift))
38 hw->io_ports[i] = port; 38 hw->io_ports_array[i] = port;
39 39
40 hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; 40 hw->io_ports.ctl_addr = (unsigned long)ctrl;
41 41
42 hw->irq = irq; 42 hw->irq = irq;
43 43
@@ -89,7 +89,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
89 res_alt->start, res_alt->end - res_alt->start + 1); 89 res_alt->start, res_alt->end - res_alt->start + 1);
90 } 90 }
91 91
92 hwif = ide_find_port((unsigned long)base); 92 hwif = ide_find_port();
93 if (!hwif) { 93 if (!hwif) {
94 ret = -ENODEV; 94 ret = -ENODEV;
95 goto out; 95 goto out;
@@ -101,10 +101,8 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
101 101
102 ide_init_port_hw(hwif, &hw); 102 ide_init_port_hw(hwif, &hw);
103 103
104 if (mmio) { 104 if (mmio)
105 hwif->mmio = 1;
106 default_hwif_mmiops(hwif); 105 default_hwif_mmiops(hwif);
107 }
108 106
109 idx[0] = hwif->index; 107 idx[0] = hwif->index;
110 108
@@ -122,7 +120,7 @@ static int __devexit plat_ide_remove(struct platform_device *pdev)
122{ 120{
123 ide_hwif_t *hwif = pdev->dev.driver_data; 121 ide_hwif_t *hwif = pdev->dev.driver_data;
124 122
125 ide_unregister(hwif->index); 123 ide_unregister(hwif);
126 124
127 return 0; 125 return 0;
128} 126}
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index eaf5dbe58bc2..1f527bbf8d96 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -72,9 +72,9 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
72 memset(hw, 0, sizeof(*hw)); 72 memset(hw, 0, sizeof(*hw));
73 73
74 for (i = 0; i < 8; i++) 74 for (i = 0; i < 8; i++)
75 hw->io_ports[i] = base + i * 4; 75 hw->io_ports_array[i] = base + i * 4;
76 76
77 hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL; 77 hw->io_ports.ctl_addr = base + IDE_CONTROL;
78 78
79 hw->irq = irq; 79 hw->irq = irq;
80 hw->ack_intr = ack_intr; 80 hw->ack_intr = ack_intr;
@@ -120,7 +120,7 @@ static int __init macide_init(void)
120 120
121 macide_setup_ports(&hw, base, irq, ack_intr); 121 macide_setup_ports(&hw, base, irq, ack_intr);
122 122
123 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 123 hwif = ide_find_port();
124 if (hwif) { 124 if (hwif) {
125 u8 index = hwif->index; 125 u8 index = hwif->index;
126 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 126 u8 idx[4] = { index, 0xff, 0xff, 0xff };
@@ -128,8 +128,6 @@ static int __init macide_init(void)
128 ide_init_port_data(hwif, index); 128 ide_init_port_data(hwif, index);
129 ide_init_port_hw(hwif, &hw); 129 ide_init_port_hw(hwif, &hw);
130 130
131 hwif->mmio = 1;
132
133 ide_device_add(idx, NULL); 131 ide_device_add(idx, NULL);
134 } 132 }
135 133
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 2da28759686e..a3573d40b4b7 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -80,10 +80,10 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
80 for (i = 0; i < IDE_NR_PORTS; i++) { 80 for (i = 0; i < IDE_NR_PORTS; i++) {
81 /* BIG FAT WARNING: 81 /* BIG FAT WARNING:
82 assumption: only DATA port is ever used in 16 bit mode */ 82 assumption: only DATA port is ever used in 16 bit mode */
83 if ( i==0 ) 83 if (i == 0)
84 hw->io_ports[i] = Q40_ISA_IO_W(base + offsets[i]); 84 hw->io_ports_array[i] = Q40_ISA_IO_W(base + offsets[i]);
85 else 85 else
86 hw->io_ports[i] = Q40_ISA_IO_B(base + offsets[i]); 86 hw->io_ports_array[i] = Q40_ISA_IO_B(base + offsets[i]);
87 } 87 }
88 88
89 hw->irq = irq; 89 hw->irq = irq;
@@ -137,11 +137,10 @@ static int __init q40ide_init(void)
137// m68kide_iops, 137// m68kide_iops,
138 q40ide_default_irq(pcide_bases[i])); 138 q40ide_default_irq(pcide_bases[i]));
139 139
140 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 140 hwif = ide_find_port();
141 if (hwif) { 141 if (hwif) {
142 ide_init_port_data(hwif, hwif->index); 142 ide_init_port_data(hwif, hwif->index);
143 ide_init_port_hw(hwif, &hw); 143 ide_init_port_hw(hwif, &hw);
144 hwif->mmio = 1;
145 144
146 idx[i] = hwif->index; 145 idx[i] = hwif->index;
147 } 146 }
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 7016bdf4fcc1..6424af154325 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -11,11 +11,7 @@
11 * 11 *
12 * QDI QD6500/QD6580 EIDE controller fast support 12 * QDI QD6500/QD6580 EIDE controller fast support
13 * 13 *
14 * Please set local bus speed using kernel parameter idebus
15 * for example, "idebus=33" stands for 33Mhz VLbus
16 * To activate controller support, use "ide0=qd65xx" 14 * To activate controller support, use "ide0=qd65xx"
17 * To enable tuning, use "hda=autotune hdb=autotune"
18 * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune"
19 */ 15 */
20 16
21/* 17/*
@@ -37,6 +33,8 @@
37#include <asm/system.h> 33#include <asm/system.h>
38#include <asm/io.h> 34#include <asm/io.h>
39 35
36#define DRV_NAME "qd65xx"
37
40#include "qd65xx.h" 38#include "qd65xx.h"
41 39
42/* 40/*
@@ -88,12 +86,12 @@
88static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */ 86static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */
89 87
90/* 88/*
91 * qd_select: 89 * qd65xx_select:
92 * 90 *
93 * This routine is invoked from ide.c to prepare for access to a given drive. 91 * This routine is invoked to prepare for access to a given drive.
94 */ 92 */
95 93
96static void qd_select (ide_drive_t *drive) 94static void qd65xx_select(ide_drive_t *drive)
97{ 95{
98 u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) | 96 u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) |
99 (QD_TIMREG(drive) & 0x02); 97 (QD_TIMREG(drive) & 0x02);
@@ -112,17 +110,18 @@ static void qd_select (ide_drive_t *drive)
112 110
113static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time) 111static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
114{ 112{
115 u8 active_cycle,recovery_cycle; 113 int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
114 u8 act_cyc, rec_cyc;
116 115
117 if (system_bus_clock()<=33) { 116 if (clk <= 33) {
118 active_cycle = 9 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 9); 117 act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
119 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 0, 15); 118 rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
120 } else { 119 } else {
121 active_cycle = 8 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 1, 8); 120 act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8);
122 recovery_cycle = 18 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 3, 18); 121 rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
123 } 122 }
124 123
125 return((recovery_cycle<<4) | 0x08 | active_cycle); 124 return (rec_cyc << 4) | 0x08 | act_cyc;
126} 125}
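
As a worked example of the rewritten helper: with a 33 MHz VLB clock, the driver's worst-case 175 ns active / 415 ns recovery pair gives act_cyc = 3 and rec_cyc = 1, i.e. a QD6500 timing byte of 0x1b. A stand-alone sketch, assuming IDE_IN() from qd65xx.h simply clamps its first argument into [min, max]:

#include <stdio.h>

static int clamp_val(int v, int lo, int hi)	/* stand-in for IDE_IN() */
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int clk = 33, active_time = 175, recovery_time = 415;
	int act_cyc = 9 - clamp_val(active_time * clk / 1000 + 1, 2, 9);
	int rec_cyc = 15 - clamp_val(recovery_time * clk / 1000 + 1, 0, 15);

	printf("timing byte: %#x\n", (rec_cyc << 4) | 0x08 | act_cyc);	/* 0x1b */
	return 0;
}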
127 126
128/* 127/*
@@ -133,10 +132,13 @@ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery
133 132
134static u8 qd6580_compute_timing (int active_time, int recovery_time) 133static u8 qd6580_compute_timing (int active_time, int recovery_time)
135{ 134{
136 u8 active_cycle = 17 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 17); 135 int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
137 u8 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 2, 15); 136 u8 act_cyc, rec_cyc;
137
138 act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17);
139 rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
138 140
139 return((recovery_cycle<<4) | active_cycle); 141 return (rec_cyc << 4) | act_cyc;
140} 142}
141 143
142/* 144/*
@@ -168,36 +170,15 @@ static int qd_find_disk_type (ide_drive_t *drive,
168} 170}
169 171
170/* 172/*
171 * qd_timing_ok:
172 *
173 * check whether timings don't conflict
174 */
175
176static int qd_timing_ok (ide_drive_t drives[])
177{
178 return (IDE_IMPLY(drives[0].present && drives[1].present,
179 IDE_IMPLY(QD_TIMREG(drives) == QD_TIMREG(drives+1),
180 QD_TIMING(drives) == QD_TIMING(drives+1))));
181 /* if same timing register, must be same timing */
182}
183
184/*
185 * qd_set_timing: 173 * qd_set_timing:
186 * 174 *
187 * records the timing, and enables selectproc as needed 175 * records the timing
188 */ 176 */
189 177
190static void qd_set_timing (ide_drive_t *drive, u8 timing) 178static void qd_set_timing (ide_drive_t *drive, u8 timing)
191{ 179{
192 ide_hwif_t *hwif = HWIF(drive);
193
194 drive->drive_data &= 0xff00; 180 drive->drive_data &= 0xff00;
195 drive->drive_data |= timing; 181 drive->drive_data |= timing;
196 if (qd_timing_ok(hwif->drives)) {
197 qd_select(drive); /* selects once */
198 hwif->selectproc = NULL;
199 } else
200 hwif->selectproc = &qd_select;
201 182
202 printk(KERN_DEBUG "%s: %#x\n", drive->name, timing); 183 printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
203} 184}
@@ -225,10 +206,11 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
225 206
226static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) 207static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio)
227{ 208{
228 int base = HWIF(drive)->select_data; 209 ide_hwif_t *hwif = drive->hwif;
229 unsigned int cycle_time; 210 unsigned int cycle_time;
230 int active_time = 175; 211 int active_time = 175;
231 int recovery_time = 415; /* worst case values from the dos driver */ 212 int recovery_time = 415; /* worst case values from the dos driver */
213 u8 base = (hwif->config_data & 0xff00) >> 8;
232 214
233 if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) { 215 if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
234 cycle_time = ide_pio_cycle_time(drive, pio); 216 cycle_time = ide_pio_cycle_time(drive, pio);
@@ -299,21 +281,10 @@ static int __init qd_testreg(int port)
299 return (readreg != QD_TESTVAL); 281 return (readreg != QD_TESTVAL);
300} 282}
301 283
302/*
303 * qd_setup:
304 *
305 * called to setup an ata channel : adjusts attributes & links for tuning
306 */
307
308static void __init qd_setup(ide_hwif_t *hwif, int base, int config)
309{
310 hwif->select_data = base;
311 hwif->config_data = config;
312}
313
314static void __init qd6500_port_init_devs(ide_hwif_t *hwif) 284static void __init qd6500_port_init_devs(ide_hwif_t *hwif)
315{ 285{
316 u8 base = hwif->select_data, config = QD_CONFIG(hwif); 286 u8 base = (hwif->config_data & 0xff00) >> 8;
287 u8 config = QD_CONFIG(hwif);
317 288
318 hwif->drives[0].drive_data = QD6500_DEF_DATA; 289 hwif->drives[0].drive_data = QD6500_DEF_DATA;
319 hwif->drives[1].drive_data = QD6500_DEF_DATA; 290 hwif->drives[1].drive_data = QD6500_DEF_DATA;
@@ -322,9 +293,10 @@ static void __init qd6500_port_init_devs(ide_hwif_t *hwif)
322static void __init qd6580_port_init_devs(ide_hwif_t *hwif) 293static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
323{ 294{
324 u16 t1, t2; 295 u16 t1, t2;
325 u8 base = hwif->select_data, config = QD_CONFIG(hwif); 296 u8 base = (hwif->config_data & 0xff00) >> 8;
297 u8 config = QD_CONFIG(hwif);
326 298
327 if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) { 299 if (hwif->host_flags & IDE_HFLAG_SINGLE) {
328 t1 = QD6580_DEF_DATA; 300 t1 = QD6580_DEF_DATA;
329 t2 = QD6580_DEF_DATA2; 301 t2 = QD6580_DEF_DATA2;
330 } else 302 } else
@@ -334,11 +306,23 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
334 hwif->drives[1].drive_data = t2; 306 hwif->drives[1].drive_data = t2;
335} 307}
336 308
309static const struct ide_port_ops qd6500_port_ops = {
310 .port_init_devs = qd6500_port_init_devs,
311 .set_pio_mode = qd6500_set_pio_mode,
312 .selectproc = qd65xx_select,
313};
314
315static const struct ide_port_ops qd6580_port_ops = {
316 .port_init_devs = qd6580_port_init_devs,
317 .set_pio_mode = qd6580_set_pio_mode,
318 .selectproc = qd65xx_select,
319};
320
337static const struct ide_port_info qd65xx_port_info __initdata = { 321static const struct ide_port_info qd65xx_port_info __initdata = {
322 .name = DRV_NAME,
338 .chipset = ide_qd65xx, 323 .chipset = ide_qd65xx,
339 .host_flags = IDE_HFLAG_IO_32BIT | 324 .host_flags = IDE_HFLAG_IO_32BIT |
340 IDE_HFLAG_NO_DMA | 325 IDE_HFLAG_NO_DMA,
341 IDE_HFLAG_NO_AUTOTUNE,
342 .pio_mask = ATA_PIO4, 326 .pio_mask = ATA_PIO4,
343}; 327};
344 328
@@ -351,65 +335,41 @@ static const struct ide_port_info qd65xx_port_info __initdata = {
351 335
352static int __init qd_probe(int base) 336static int __init qd_probe(int base)
353{ 337{
354 ide_hwif_t *hwif; 338 int rc;
355 u8 config, unit; 339 u8 config, unit, control;
356 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 340 struct ide_port_info d = qd65xx_port_info;
357 hw_regs_t hw[2];
358 341
359 config = inb(QD_CONFIG_PORT); 342 config = inb(QD_CONFIG_PORT);
360 343
361 if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) ) 344 if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) )
362 return 1; 345 return -ENODEV;
363 346
364 unit = ! (config & QD_CONFIG_IDE_BASEPORT); 347 unit = ! (config & QD_CONFIG_IDE_BASEPORT);
365 348
366 memset(&hw, 0, sizeof(hw)); 349 if (unit)
350 d.host_flags |= IDE_HFLAG_QD_2ND_PORT;
367 351
368 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); 352 switch (config & 0xf0) {
369 hw[0].irq = 14; 353 case QD_CONFIG_QD6500:
354 if (qd_testreg(base))
355 return -ENODEV; /* bad register */
370 356
371 ide_std_init_ports(&hw[1], 0x170, 0x376);
372 hw[1].irq = 15;
373
374 if ((config & 0xf0) == QD_CONFIG_QD6500) {
375
376 if (qd_testreg(base)) return 1; /* bad register */
377
378 /* qd6500 found */
379
380 hwif = &ide_hwifs[unit];
381 printk(KERN_NOTICE "%s: qd6500 at %#x\n", hwif->name, base);
382 printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
383 config, QD_ID3);
384
385 if (config & QD_CONFIG_DISABLED) { 357 if (config & QD_CONFIG_DISABLED) {
386 printk(KERN_WARNING "qd6500 is disabled !\n"); 358 printk(KERN_WARNING "qd6500 is disabled !\n");
387 return 1; 359 return -ENODEV;
388 } 360 }
389 361
390 ide_init_port_hw(hwif, &hw[unit]); 362 printk(KERN_NOTICE "qd6500 at %#x\n", base);
391 363 printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
392 qd_setup(hwif, base, config); 364 config, QD_ID3);
393
394 hwif->port_init_devs = qd6500_port_init_devs;
395 hwif->set_pio_mode = &qd6500_set_pio_mode;
396
397 idx[unit] = unit;
398
399 ide_device_add(idx, &qd65xx_port_info);
400
401 return 1;
402 }
403
404 if (((config & 0xf0) == QD_CONFIG_QD6580_A) ||
405 ((config & 0xf0) == QD_CONFIG_QD6580_B)) {
406
407 u8 control;
408
409 if (qd_testreg(base) || qd_testreg(base+0x02)) return 1;
410 /* bad registers */
411 365
412 /* qd6580 found */ 366 d.port_ops = &qd6500_port_ops;
367 d.host_flags |= IDE_HFLAG_SINGLE;
368 break;
369 case QD_CONFIG_QD6580_A:
370 case QD_CONFIG_QD6580_B:
371 if (qd_testreg(base) || qd_testreg(base + 0x02))
372 return -ENODEV; /* bad registers */
413 373
414 control = inb(QD_CONTROL_PORT); 374 control = inb(QD_CONTROL_PORT);
415 375
@@ -419,74 +379,44 @@ static int __init qd_probe(int base)
419 379
420 outb(QD_DEF_CONTR, QD_CONTROL_PORT); 380 outb(QD_DEF_CONTR, QD_CONTROL_PORT);
421 381
422 if (control & QD_CONTR_SEC_DISABLED) { 382 d.port_ops = &qd6580_port_ops;
423 /* secondary disabled */ 383 if (control & QD_CONTR_SEC_DISABLED)
424 384 d.host_flags |= IDE_HFLAG_SINGLE;
425 hwif = &ide_hwifs[unit];
426 printk(KERN_INFO "%s: qd6580: single IDE board\n",
427 hwif->name);
428
429 ide_init_port_hw(hwif, &hw[unit]);
430
431 qd_setup(hwif, base, config | (control << 8));
432
433 hwif->port_init_devs = qd6580_port_init_devs;
434 hwif->set_pio_mode = &qd6580_set_pio_mode;
435
436 idx[unit] = unit;
437 385
438 ide_device_add(idx, &qd65xx_port_info); 386 printk(KERN_INFO "qd6580: %s IDE board\n",
439 387 (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual");
440 return 1; 388 break;
441 } else { 389 default:
442 ide_hwif_t *mate; 390 return -ENODEV;
443 391 }
444 hwif = &ide_hwifs[0];
445 mate = &ide_hwifs[1];
446 /* secondary enabled */
447 printk(KERN_INFO "%s&%s: qd6580: dual IDE board\n",
448 hwif->name, mate->name);
449
450 ide_init_port_hw(hwif, &hw[0]);
451 ide_init_port_hw(mate, &hw[1]);
452
453 qd_setup(hwif, base, config | (control << 8));
454
455 hwif->port_init_devs = qd6580_port_init_devs;
456 hwif->set_pio_mode = &qd6580_set_pio_mode;
457
458 qd_setup(mate, base, config | (control << 8));
459
460 mate->port_init_devs = qd6580_port_init_devs;
461 mate->set_pio_mode = &qd6580_set_pio_mode;
462 392
463 idx[0] = 0; 393 rc = ide_legacy_device_add(&d, (base << 8) | config);
464 idx[1] = 1;
465 394
466 ide_device_add(idx, &qd65xx_port_info); 395 if (d.host_flags & IDE_HFLAG_SINGLE)
396 return (rc == 0) ? 1 : rc;
467 397
468 return 0; /* no other qd65xx possible */ 398 return rc;
469 }
470 }
471 /* no qd65xx found */
472 return 1;
473} 399}
474 400
475int probe_qd65xx = 0; 401static int probe_qd65xx;
476 402
477module_param_named(probe, probe_qd65xx, bool, 0); 403module_param_named(probe, probe_qd65xx, bool, 0);
478MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); 404MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
479 405
480static int __init qd65xx_init(void) 406static int __init qd65xx_init(void)
481{ 407{
408 int rc1, rc2 = -ENODEV;
409
482 if (probe_qd65xx == 0) 410 if (probe_qd65xx == 0)
483 return -ENODEV; 411 return -ENODEV;
484 412
485 if (qd_probe(0x30)) 413 rc1 = qd_probe(0x30);
486 qd_probe(0xb0); 414 if (rc1)
487 if (ide_hwifs[0].chipset != ide_qd65xx && 415 rc2 = qd_probe(0xb0);
488 ide_hwifs[1].chipset != ide_qd65xx) 416
417 if (rc1 < 0 && rc2 < 0)
489 return -ENODEV; 418 return -ENODEV;
419
490 return 0; 420 return 0;
491} 421}
492 422
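
The qd65xx hunks above capture the pattern repeated throughout this merge: hook functions that used to be assigned onto each ide_hwif_t (hwif->set_pio_mode, hwif->port_init_devs, hwif->selectproc) are gathered into a shared const struct ide_port_ops, referenced from the driver's ide_port_info, and the open-coded ide_init_port_hw()/ide_device_add() sequence is replaced by ide_legacy_device_add(). The standalone sketch below only illustrates that ops-table refactoring in general terms; the port/port_ops types and demo_* names are invented for the example and are not the kernel's definitions.

#include <stdio.h>

/*
 * Illustrative stand-ins for the ide_hwif_t / ide_port_ops split; these
 * types and names are invented for the sketch, not kernel definitions.
 */
struct port;

struct port_ops {                        /* shared, read-only method table */
	void (*set_pio_mode)(struct port *p, unsigned char pio);
	void (*port_init_devs)(struct port *p);
};

struct port {                            /* one instance per interface */
	const char *name;
	const struct port_ops *ops;      /* replaces per-instance pointers */
};

static void demo_set_pio(struct port *p, unsigned char pio)
{
	printf("%s: PIO mode %d\n", p->name, (int)pio);
}

static void demo_init_devs(struct port *p)
{
	printf("%s: seeding default drive timings\n", p->name);
}

/* One const table serves every port instead of copying hooks per instance. */
static const struct port_ops demo_ops = {
	.set_pio_mode   = demo_set_pio,
	.port_init_devs = demo_init_devs,
};

int main(void)
{
	struct port primary   = { .name = "ide0", .ops = &demo_ops };
	struct port secondary = { .name = "ide1", .ops = &demo_ops };

	primary.ops->port_init_devs(&primary);
	secondary.ops->set_pio_mode(&secondary, 4);
	return 0;
}

Keeping one const table shared by every interface means the method pointers live in read-only data and exist in a single copy, which is the same design the hunks above wire in through the .port_ops field of each ide_port_info.
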
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/legacy/qd65xx.h
index 28dd50a15d55..c83dea85e621 100644
--- a/drivers/ide/legacy/qd65xx.h
+++ b/drivers/ide/legacy/qd65xx.h
@@ -30,7 +30,6 @@
30#define QD_ID3 ((config & QD_CONFIG_ID3)!=0) 30#define QD_ID3 ((config & QD_CONFIG_ID3)!=0)
31 31
32#define QD_CONFIG(hwif) ((hwif)->config_data & 0x00ff) 32#define QD_CONFIG(hwif) ((hwif)->config_data & 0x00ff)
33#define QD_CONTROL(hwif) (((hwif)->config_data & 0xff00) >> 8)
34 33
35#define QD_TIMING(drive) (byte)(((drive)->drive_data) & 0x00ff) 34#define QD_TIMING(drive) (byte)(((drive)->drive_data) & 0x00ff)
36#define QD_TIMREG(drive) (byte)((((drive)->drive_data) & 0xff00) >> 8) 35#define QD_TIMREG(drive) (byte)((((drive)->drive_data) & 0xff00) >> 8)
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index bc1944811b99..b54a14a57755 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -19,7 +19,7 @@
19 */ 19 */
20 20
21/* 21/*
22 * VLB Controller Support from 22 * VLB Controller Support from
23 * Wolfram Podien 23 * Wolfram Podien
24 * Rohoefe 3 24 * Rohoefe 3
25 * D28832 Achim 25 * D28832 Achim
@@ -32,7 +32,7 @@
32 * #define UMC_DRIVE0 11 32 * #define UMC_DRIVE0 11
33 * in the beginning of the driver, which sets the speed of drive 0 to 11 (there 33 * in the beginning of the driver, which sets the speed of drive 0 to 11 (there
34 * are some lines present). 0 - 11 are allowed speed values. These values are 34 * are some lines present). 0 - 11 are allowed speed values. These values are
35 * the results from the DOS speed test program supplied from UMC. 11 is the 35 * the results from the DOS speed test program supplied from UMC. 11 is the
36 * highest speed (about PIO mode 3) 36 * highest speed (about PIO mode 3)
37 */ 37 */
38#define REALLY_SLOW_IO /* some systems can safely undef this */ 38#define REALLY_SLOW_IO /* some systems can safely undef this */
@@ -51,6 +51,8 @@
51 51
52#include <asm/io.h> 52#include <asm/io.h>
53 53
54#define DRV_NAME "umc8672"
55
54/* 56/*
55 * Default speeds. These can be changed with "auto-tune" and/or hdparm. 57 * Default speeds. These can be changed with "auto-tune" and/or hdparm.
56 */ 58 */
@@ -60,115 +62,103 @@
60#define UMC_DRIVE3 1 /* In case of crash reduce speed */ 62#define UMC_DRIVE3 1 /* In case of crash reduce speed */
61 63
62static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3}; 64static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3};
63static const u8 pio_to_umc [5] = {0,3,7,10,11}; /* rough guesses */ 65static const u8 pio_to_umc [5] = {0, 3, 7, 10, 11}; /* rough guesses */
64 66
65/* 0 1 2 3 4 5 6 7 8 9 10 11 */ 67/* 0 1 2 3 4 5 6 7 8 9 10 11 */
66static const u8 speedtab [3][12] = { 68static const u8 speedtab [3][12] = {
67 {0xf, 0xb, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }, 69 {0x0f, 0x0b, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
68 {0x3, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }, 70 {0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
69 {0xff,0xcb,0xc0,0x58,0x36,0x33,0x23,0x22,0x21,0x11,0x10,0x0}}; 71 {0xff, 0xcb, 0xc0, 0x58, 0x36, 0x33, 0x23, 0x22, 0x21, 0x11, 0x10, 0x0}
72};
70 73
71static void out_umc (char port,char wert) 74static void out_umc(char port, char wert)
72{ 75{
73 outb_p(port,0x108); 76 outb_p(port, 0x108);
74 outb_p(wert,0x109); 77 outb_p(wert, 0x109);
75} 78}
76 79
77static inline u8 in_umc (char port) 80static inline u8 in_umc(char port)
78{ 81{
79 outb_p(port,0x108); 82 outb_p(port, 0x108);
80 return inb_p(0x109); 83 return inb_p(0x109);
81} 84}
82 85
83static void umc_set_speeds (u8 speeds[]) 86static void umc_set_speeds(u8 speeds[])
84{ 87{
85 int i, tmp; 88 int i, tmp;
86 89
87 outb_p(0x5A,0x108); /* enable umc */ 90 outb_p(0x5A, 0x108); /* enable umc */
88 91
89 out_umc (0xd7,(speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4))); 92 out_umc(0xd7, (speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4)));
90 out_umc (0xd6,(speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4))); 93 out_umc(0xd6, (speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4)));
91 tmp = 0; 94 tmp = 0;
92 for (i = 3; i >= 0; i--) { 95 for (i = 3; i >= 0; i--)
93 tmp = (tmp << 2) | speedtab[1][speeds[i]]; 96 tmp = (tmp << 2) | speedtab[1][speeds[i]];
97 out_umc(0xdc, tmp);
98 for (i = 0; i < 4; i++) {
99 out_umc(0xd0 + i, speedtab[2][speeds[i]]);
100 out_umc(0xd8 + i, speedtab[2][speeds[i]]);
94 } 101 }
95 out_umc (0xdc,tmp); 102 outb_p(0xa5, 0x108); /* disable umc */
96 for (i = 0;i < 4; i++) {
97 out_umc (0xd0+i,speedtab[2][speeds[i]]);
98 out_umc (0xd8+i,speedtab[2][speeds[i]]);
99 }
100 outb_p(0xa5,0x108); /* disable umc */
101 103
102 printk ("umc8672: drive speeds [0 to 11]: %d %d %d %d\n", 104 printk("umc8672: drive speeds [0 to 11]: %d %d %d %d\n",
103 speeds[0], speeds[1], speeds[2], speeds[3]); 105 speeds[0], speeds[1], speeds[2], speeds[3]);
104} 106}
105 107
106static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio) 108static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
107{ 109{
110 ide_hwif_t *hwif = drive->hwif;
108 unsigned long flags; 111 unsigned long flags;
109 ide_hwgroup_t *hwgroup = ide_hwifs[HWIF(drive)->index^1].hwgroup;
110 112
111 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", 113 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
112 drive->name, pio, pio_to_umc[pio]); 114 drive->name, pio, pio_to_umc[pio]);
113 spin_lock_irqsave(&ide_lock, flags); 115 spin_lock_irqsave(&ide_lock, flags);
114 if (hwgroup && hwgroup->handler != NULL) { 116 if (hwif->mate && hwif->mate->hwgroup->handler) {
115 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n"); 117 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
116 } else { 118 } else {
117 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio]; 119 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
118 umc_set_speeds (current_speeds); 120 umc_set_speeds(current_speeds);
119 } 121 }
120 spin_unlock_irqrestore(&ide_lock, flags); 122 spin_unlock_irqrestore(&ide_lock, flags);
121} 123}
122 124
125static const struct ide_port_ops umc8672_port_ops = {
126 .set_pio_mode = umc_set_pio_mode,
127};
128
123static const struct ide_port_info umc8672_port_info __initdata = { 129static const struct ide_port_info umc8672_port_info __initdata = {
130 .name = DRV_NAME,
124 .chipset = ide_umc8672, 131 .chipset = ide_umc8672,
125 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE, 132 .port_ops = &umc8672_port_ops,
133 .host_flags = IDE_HFLAG_NO_DMA,
126 .pio_mask = ATA_PIO4, 134 .pio_mask = ATA_PIO4,
127}; 135};
128 136
129static int __init umc8672_probe(void) 137static int __init umc8672_probe(void)
130{ 138{
131 unsigned long flags; 139 unsigned long flags;
132 static u8 idx[4] = { 0, 1, 0xff, 0xff };
133 hw_regs_t hw[2];
134 140
135 if (!request_region(0x108, 2, "umc8672")) { 141 if (!request_region(0x108, 2, "umc8672")) {
136 printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n"); 142 printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
137 return 1; 143 return 1;
138 } 144 }
139 local_irq_save(flags); 145 local_irq_save(flags);
140 outb_p(0x5A,0x108); /* enable umc */ 146 outb_p(0x5A, 0x108); /* enable umc */
141 if (in_umc (0xd5) != 0xa0) { 147 if (in_umc (0xd5) != 0xa0) {
142 local_irq_restore(flags); 148 local_irq_restore(flags);
143 printk(KERN_ERR "umc8672: not found\n"); 149 printk(KERN_ERR "umc8672: not found\n");
144 release_region(0x108, 2); 150 release_region(0x108, 2);
145 return 1; 151 return 1;
146 } 152 }
147 outb_p(0xa5,0x108); /* disable umc */ 153 outb_p(0xa5, 0x108); /* disable umc */
148 154
149 umc_set_speeds (current_speeds); 155 umc_set_speeds(current_speeds);
150 local_irq_restore(flags); 156 local_irq_restore(flags);
151 157
152 memset(&hw, 0, sizeof(hw)); 158 return ide_legacy_device_add(&umc8672_port_info, 0);
153
154 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
155 hw[0].irq = 14;
156
157 ide_std_init_ports(&hw[1], 0x170, 0x376);
158 hw[1].irq = 15;
159
160 ide_init_port_hw(&ide_hwifs[0], &hw[0]);
161 ide_init_port_hw(&ide_hwifs[1], &hw[1]);
162
163 ide_hwifs[0].set_pio_mode = &umc_set_pio_mode;
164 ide_hwifs[1].set_pio_mode = &umc_set_pio_mode;
165
166 ide_device_add(idx, &umc8672_port_info);
167
168 return 0;
169} 159}
170 160
171int probe_umc8672 = 0; 161static int probe_umc8672;
172 162
173module_param_named(probe, probe_umc8672, bool, 0); 163module_param_named(probe, probe_umc8672, bool, 0);
174MODULE_PARM_DESC(probe, "probe for UMC8672 chipset"); 164MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 9b628248f2f4..296b9c674bae 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -47,7 +47,6 @@
47#define IDE_AU1XXX_BURSTMODE 1 47#define IDE_AU1XXX_BURSTMODE 1
48 48
49static _auide_hwif auide_hwif; 49static _auide_hwif auide_hwif;
50static int dbdma_init_done;
51 50
52static int auide_ddma_init(_auide_hwif *auide); 51static int auide_ddma_init(_auide_hwif *auide);
53 52
@@ -61,7 +60,7 @@ void auide_insw(unsigned long port, void *addr, u32 count)
61 60
62 if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, 61 if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
63 DDMA_FLAGS_NOIE)) { 62 DDMA_FLAGS_NOIE)) {
64 printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); 63 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
65 return; 64 return;
66 } 65 }
67 ctp = *((chan_tab_t **)ahwif->rx_chan); 66 ctp = *((chan_tab_t **)ahwif->rx_chan);
@@ -79,7 +78,7 @@ void auide_outsw(unsigned long port, void *addr, u32 count)
79 78
80 if(!put_source_flags(ahwif->tx_chan, (void*)addr, 79 if(!put_source_flags(ahwif->tx_chan, (void*)addr,
81 count << 1, DDMA_FLAGS_NOIE)) { 80 count << 1, DDMA_FLAGS_NOIE)) {
82 printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); 81 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
83 return; 82 return;
84 } 83 }
85 ctp = *((chan_tab_t **)ahwif->tx_chan); 84 ctp = *((chan_tab_t **)ahwif->tx_chan);
@@ -250,7 +249,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
250 (void*) sg_virt(sg), 249 (void*) sg_virt(sg),
251 tc, flags)) { 250 tc, flags)) {
252 printk(KERN_ERR "%s failed %d\n", 251 printk(KERN_ERR "%s failed %d\n",
253 __FUNCTION__, __LINE__); 252 __func__, __LINE__);
254 } 253 }
255 } else 254 } else
256 { 255 {
@@ -258,7 +257,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
258 (void*) sg_virt(sg), 257 (void*) sg_virt(sg),
259 tc, flags)) { 258 tc, flags)) {
260 printk(KERN_ERR "%s failed %d\n", 259 printk(KERN_ERR "%s failed %d\n",
261 __FUNCTION__, __LINE__); 260 __func__, __LINE__);
262 } 261 }
263 } 262 }
264 263
@@ -315,35 +314,6 @@ static int auide_dma_setup(ide_drive_t *drive)
315 return 0; 314 return 0;
316} 315}
317 316
318static u8 auide_mdma_filter(ide_drive_t *drive)
319{
320 /*
321 * FIXME: ->white_list and ->black_list are based on completely bogus
322 * ->ide_dma_check implementation which didn't set neither the host
323 * controller timings nor the device for the desired transfer mode.
324 *
325 * They should be either removed or 0x00 MWDMA mask should be
326 * returned for devices on the ->black_list.
327 */
328
329 if (dbdma_init_done == 0) {
330 auide_hwif.white_list = ide_in_drive_list(drive->id,
331 dma_white_list);
332 auide_hwif.black_list = ide_in_drive_list(drive->id,
333 dma_black_list);
334 auide_hwif.drive = drive;
335 auide_ddma_init(&auide_hwif);
336 dbdma_init_done = 1;
337 }
338
339 /* Is the drive in our DMA black list? */
340 if (auide_hwif.black_list)
341 printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
342 drive->name, drive->id->model);
343
344 return drive->hwif->mwdma_mask;
345}
346
347static int auide_dma_test_irq(ide_drive_t *drive) 317static int auide_dma_test_irq(ide_drive_t *drive)
348{ 318{
349 if (drive->waiting_for_dma == 0) 319 if (drive->waiting_for_dma == 0)
@@ -396,41 +366,41 @@ static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 de
396 dev->dev_devwidth = devwidth; 366 dev->dev_devwidth = devwidth;
397 dev->dev_flags = flags; 367 dev->dev_flags = flags;
398} 368}
399
400#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
401 369
370#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
402static void auide_dma_timeout(ide_drive_t *drive) 371static void auide_dma_timeout(ide_drive_t *drive)
403{ 372{
404 ide_hwif_t *hwif = HWIF(drive); 373 ide_hwif_t *hwif = HWIF(drive);
405 374
406 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); 375 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
407 376
408 if (hwif->ide_dma_test_irq(drive)) 377 if (auide_dma_test_irq(drive))
409 return; 378 return;
410 379
411 hwif->ide_dma_end(drive); 380 auide_dma_end(drive);
412} 381}
413
414 382
415static int auide_ddma_init(_auide_hwif *auide) { 383static const struct ide_dma_ops au1xxx_dma_ops = {
416 384 .dma_host_set = auide_dma_host_set,
385 .dma_setup = auide_dma_setup,
386 .dma_exec_cmd = auide_dma_exec_cmd,
387 .dma_start = auide_dma_start,
388 .dma_end = auide_dma_end,
389 .dma_test_irq = auide_dma_test_irq,
390 .dma_lost_irq = auide_dma_lost_irq,
391 .dma_timeout = auide_dma_timeout,
392};
393
394static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
395{
396 _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
417 dbdev_tab_t source_dev_tab, target_dev_tab; 397 dbdev_tab_t source_dev_tab, target_dev_tab;
418 u32 dev_id, tsize, devwidth, flags; 398 u32 dev_id, tsize, devwidth, flags;
419 ide_hwif_t *hwif = auide->hwif;
420 399
421 dev_id = AU1XXX_ATA_DDMA_REQ; 400 dev_id = AU1XXX_ATA_DDMA_REQ;
422 401
423 if (auide->white_list || auide->black_list) { 402 tsize = 8; /* 1 */
424 tsize = 8; 403 devwidth = 32; /* 16 */
425 devwidth = 32;
426 }
427 else {
428 tsize = 1;
429 devwidth = 16;
430
431 printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
432 printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");
433 }
434 404
435#ifdef IDE_AU1XXX_BURSTMODE 405#ifdef IDE_AU1XXX_BURSTMODE
436 flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE; 406 flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
@@ -482,9 +452,9 @@ static int auide_ddma_init(_auide_hwif *auide) {
482 return 0; 452 return 0;
483} 453}
484#else 454#else
485 455static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
486static int auide_ddma_init( _auide_hwif *auide )
487{ 456{
457 _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
488 dbdev_tab_t source_dev_tab; 458 dbdev_tab_t source_dev_tab;
489 int flags; 459 int flags;
490 460
@@ -532,20 +502,28 @@ static int auide_ddma_init( _auide_hwif *auide )
532static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) 502static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
533{ 503{
534 int i; 504 int i;
535 unsigned long *ata_regs = hw->io_ports; 505 unsigned long *ata_regs = hw->io_ports_array;
536 506
537 /* FIXME? */ 507 /* FIXME? */
538 for (i = 0; i < IDE_CONTROL_OFFSET; i++) { 508 for (i = 0; i < 8; i++)
539 *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET); 509 *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
540 }
541 510
542 /* set the Alternative Status register */ 511 /* set the Alternative Status register */
543 *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET); 512 *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
544} 513}
545 514
515static const struct ide_port_ops au1xxx_port_ops = {
516 .set_pio_mode = au1xxx_set_pio_mode,
517 .set_dma_mode = auide_set_dma_mode,
518};
519
546static const struct ide_port_info au1xxx_port_info = { 520static const struct ide_port_info au1xxx_port_info = {
521 .init_dma = auide_ddma_init,
522 .port_ops = &au1xxx_port_ops,
523#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
524 .dma_ops = &au1xxx_dma_ops,
525#endif
547 .host_flags = IDE_HFLAG_POST_SET_MODE | 526 .host_flags = IDE_HFLAG_POST_SET_MODE |
548 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
549 IDE_HFLAG_NO_IO_32BIT | 527 IDE_HFLAG_NO_IO_32BIT |
550 IDE_HFLAG_UNMASK_IRQS, 528 IDE_HFLAG_UNMASK_IRQS,
551 .pio_mask = ATA_PIO4, 529 .pio_mask = ATA_PIO4,
@@ -599,9 +577,11 @@ static int au_ide_probe(struct device *dev)
599 goto out; 577 goto out;
600 } 578 }
601 579
602 /* FIXME: This might possibly break PCMCIA IDE devices */ 580 hwif = ide_find_port();
603 581 if (hwif == NULL) {
604 hwif = &ide_hwifs[pdev->id]; 582 ret = -ENOENT;
583 goto out;
584 }
605 585
606 memset(&hw, 0, sizeof(hw)); 586 memset(&hw, 0, sizeof(hw));
607 auide_setup_ports(&hw, ahwif); 587 auide_setup_ports(&hw, ahwif);
@@ -613,8 +593,6 @@ static int au_ide_probe(struct device *dev)
613 593
614 hwif->dev = dev; 594 hwif->dev = dev;
615 595
616 hwif->mmio = 1;
617
618 /* If the user has selected DDMA assisted copies, 596 /* If the user has selected DDMA assisted copies,
619 then set up a few local I/O function entry points 597 then set up a few local I/O function entry points
620 */ 598 */
@@ -623,34 +601,12 @@ static int au_ide_probe(struct device *dev)
623 hwif->INSW = auide_insw; 601 hwif->INSW = auide_insw;
624 hwif->OUTSW = auide_outsw; 602 hwif->OUTSW = auide_outsw;
625#endif 603#endif
626
627 hwif->set_pio_mode = &au1xxx_set_pio_mode;
628 hwif->set_dma_mode = &auide_set_dma_mode;
629
630#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
631 hwif->dma_timeout = &auide_dma_timeout;
632
633 hwif->mdma_filter = &auide_mdma_filter;
634
635 hwif->dma_host_set = &auide_dma_host_set;
636 hwif->dma_exec_cmd = &auide_dma_exec_cmd;
637 hwif->dma_start = &auide_dma_start;
638 hwif->ide_dma_end = &auide_dma_end;
639 hwif->dma_setup = &auide_dma_setup;
640 hwif->ide_dma_test_irq = &auide_dma_test_irq;
641 hwif->dma_lost_irq = &auide_dma_lost_irq;
642#endif
643 hwif->select_data = 0; /* no chipset-specific code */ 604 hwif->select_data = 0; /* no chipset-specific code */
644 hwif->config_data = 0; /* no chipset-specific code */ 605 hwif->config_data = 0; /* no chipset-specific code */
645 606
646 auide_hwif.hwif = hwif; 607 auide_hwif.hwif = hwif;
647 hwif->hwif_data = &auide_hwif; 608 hwif->hwif_data = &auide_hwif;
648 609
649#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
650 auide_ddma_init(&auide_hwif);
651 dbdma_init_done = 1;
652#endif
653
654 idx[0] = hwif->index; 610 idx[0] = hwif->index;
655 611
656 ide_device_add(idx, &au1xxx_port_info); 612 ide_device_add(idx, &au1xxx_port_info);
@@ -670,7 +626,7 @@ static int au_ide_remove(struct device *dev)
670 ide_hwif_t *hwif = dev_get_drvdata(dev); 626 ide_hwif_t *hwif = dev_get_drvdata(dev);
671 _auide_hwif *ahwif = &auide_hwif; 627 _auide_hwif *ahwif = &auide_hwif;
672 628
673 ide_unregister(hwif->index); 629 ide_unregister(hwif);
674 630
675 iounmap((void *)ahwif->regbase); 631 iounmap((void *)ahwif->regbase);
676 632
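
The au1xxx-ide.c hunks group the DMA callbacks into a const struct ide_dma_ops that is referenced from au1xxx_port_info only when CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA is enabled, with engine setup moved into the init_dma hook. Below is a rough standalone sketch of that compile-time-selected callback table; HAVE_DMA, dma_ops, port_info and the demo_* functions are made-up stand-ins, not kernel symbols.

#include <stdio.h>

#define HAVE_DMA 1   /* stand-in for the MDMA2_DBDMA config option */

struct dma_ops {
	int  (*dma_setup)(void);
	void (*dma_start)(void);
};

#if HAVE_DMA
static int  demo_dma_setup(void) { puts("DMA descriptors prepared"); return 0; }
static void demo_dma_start(void) { puts("DMA transfer started"); }

static const struct dma_ops demo_dma_ops = {
	.dma_setup = demo_dma_setup,
	.dma_start = demo_dma_start,
};
#endif

struct port_info {
	const char *name;
	const struct dma_ops *dma_ops;   /* NULL means PIO only */
};

static const struct port_info demo_port = {
	.name = "au1xxx-demo",
#if HAVE_DMA
	.dma_ops = &demo_dma_ops,        /* wired in only when DMA is built in */
#endif
};

int main(void)
{
	if (demo_port.dma_ops && demo_port.dma_ops->dma_setup() == 0)
		demo_port.dma_ops->dma_start();
	else
		printf("%s: falling back to PIO\n", demo_port.name);
	return 0;
}
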
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 956259fc09ba..68947626e4aa 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -76,17 +76,12 @@ static int __devinit swarm_ide_probe(struct device *dev)
76 if (!SIBYTE_HAVE_IDE) 76 if (!SIBYTE_HAVE_IDE)
77 return -ENODEV; 77 return -ENODEV;
78 78
79 /* Find an empty slot. */ 79 hwif = ide_find_port();
80 for (i = 0; i < MAX_HWIFS; i++) 80 if (hwif == NULL) {
81 if (!ide_hwifs[i].io_ports[IDE_DATA_OFFSET])
82 break;
83 if (i >= MAX_HWIFS) {
84 printk(KERN_ERR DRV_NAME ": no free slot for interface\n"); 81 printk(KERN_ERR DRV_NAME ": no free slot for interface\n");
85 return -ENOMEM; 82 return -ENOMEM;
86 } 83 }
87 84
88 hwif = ide_hwifs + i;
89
90 base = ioremap(A_IO_EXT_BASE, 0x800); 85 base = ioremap(A_IO_EXT_BASE, 0x800);
91 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); 86 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
92 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); 87 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
@@ -115,15 +110,13 @@ static int __devinit swarm_ide_probe(struct device *dev)
115 110
116 /* Setup MMIO ops. */ 111 /* Setup MMIO ops. */
117 default_hwif_mmiops(hwif); 112 default_hwif_mmiops(hwif);
118 /* Prevent resource map manipulation. */ 113
119 hwif->mmio = 1;
120 hwif->chipset = ide_generic; 114 hwif->chipset = ide_generic;
121 hwif->noprobe = 0;
122 115
123 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) 116 for (i = 0; i <= 7; i++)
124 hwif->io_ports[i] = 117 hwif->io_ports_array[i] =
125 (unsigned long)(base + ((0x1f0 + i) << 5)); 118 (unsigned long)(base + ((0x1f0 + i) << 5));
126 hwif->io_ports[IDE_CONTROL_OFFSET] = 119 hwif->io_ports.ctl_addr =
127 (unsigned long)(base + (0x3f6 << 5)); 120 (unsigned long)(base + (0x3f6 << 5));
128 hwif->irq = K_INT_GB_IDE; 121 hwif->irq = K_INT_GB_IDE;
129 122
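
Both au1xxx-ide.c and swarm.c drop the open-coded scan of the global ide_hwifs[] array in favour of ide_find_port(), which hands back a free interface slot or NULL when none is available, so each probe routine just checks the return value. The snippet below sketches that idea in plain C under the assumption of a small fixed-size slot array; demo_find_port() and struct demo_port are illustrative names, not the kernel helper.

#include <stdio.h>

#define MAX_PORTS 4

struct demo_port {
	int in_use;
	unsigned long io_base;
};

static struct demo_port ports[MAX_PORTS];

/*
 * Return the first unused slot, or NULL when every slot is taken --
 * the caller checks the result instead of scanning the array itself.
 */
static struct demo_port *demo_find_port(void)
{
	int i;

	for (i = 0; i < MAX_PORTS; i++)
		if (!ports[i].in_use)
			return &ports[i];
	return NULL;
}

int main(void)
{
	struct demo_port *p = demo_find_port();

	if (p == NULL) {
		fprintf(stderr, "no free slot for interface\n");
		return 1;
	}
	p->in_use = 1;
	p->io_base = 0x1f0;
	printf("claimed slot %ld at 0x%lx\n", (long)(p - ports), p->io_base);
	return 0;
}
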
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index cfb3265bc1a8..7f46c224b7c4 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -135,12 +135,12 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
135 135
136static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio) 136static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
137{ 137{
138 drive->hwif->set_dma_mode(drive, pio + XFER_PIO_0); 138 drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
139} 139}
140 140
141static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name) 141static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name)
142{ 142{
143 int bus_speed = system_bus_clock(); 143 int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
144 144
145 if (bus_speed <= 33) 145 if (bus_speed <= 33)
146 pci_set_drvdata(dev, (void *) aec6xxx_33_base); 146 pci_set_drvdata(dev, (void *) aec6xxx_33_base);
@@ -175,27 +175,23 @@ static u8 __devinit atp86x_cable_detect(ide_hwif_t *hwif)
175 return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; 175 return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
176} 176}
177 177
178static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif) 178static const struct ide_port_ops atp850_port_ops = {
179{ 179 .set_pio_mode = aec_set_pio_mode,
180 struct pci_dev *dev = to_pci_dev(hwif->dev); 180 .set_dma_mode = aec6210_set_mode,
181 181};
182 hwif->set_pio_mode = &aec_set_pio_mode;
183
184 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
185 hwif->set_dma_mode = &aec6210_set_mode;
186 else {
187 hwif->set_dma_mode = &aec6260_set_mode;
188 182
189 hwif->cable_detect = atp86x_cable_detect; 183static const struct ide_port_ops atp86x_port_ops = {
190 } 184 .set_pio_mode = aec_set_pio_mode,
191} 185 .set_dma_mode = aec6260_set_mode,
186 .cable_detect = atp86x_cable_detect,
187};
192 188
193static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { 189static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
194 { /* 0 */ 190 { /* 0 */
195 .name = "AEC6210", 191 .name = "AEC6210",
196 .init_chipset = init_chipset_aec62xx, 192 .init_chipset = init_chipset_aec62xx,
197 .init_hwif = init_hwif_aec62xx,
198 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 193 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
194 .port_ops = &atp850_port_ops,
199 .host_flags = IDE_HFLAG_SERIALIZE | 195 .host_flags = IDE_HFLAG_SERIALIZE |
200 IDE_HFLAG_NO_ATAPI_DMA | 196 IDE_HFLAG_NO_ATAPI_DMA |
201 IDE_HFLAG_NO_DSC | 197 IDE_HFLAG_NO_DSC |
@@ -207,7 +203,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
207 },{ /* 1 */ 203 },{ /* 1 */
208 .name = "AEC6260", 204 .name = "AEC6260",
209 .init_chipset = init_chipset_aec62xx, 205 .init_chipset = init_chipset_aec62xx,
210 .init_hwif = init_hwif_aec62xx, 206 .port_ops = &atp86x_port_ops,
211 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | 207 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
212 IDE_HFLAG_ABUSE_SET_DMA_MODE | 208 IDE_HFLAG_ABUSE_SET_DMA_MODE |
213 IDE_HFLAG_OFF_BOARD, 209 IDE_HFLAG_OFF_BOARD,
@@ -217,17 +213,18 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
217 },{ /* 2 */ 213 },{ /* 2 */
218 .name = "AEC6260R", 214 .name = "AEC6260R",
219 .init_chipset = init_chipset_aec62xx, 215 .init_chipset = init_chipset_aec62xx,
220 .init_hwif = init_hwif_aec62xx,
221 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 216 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
217 .port_ops = &atp86x_port_ops,
222 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 218 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
223 IDE_HFLAG_ABUSE_SET_DMA_MODE, 219 IDE_HFLAG_ABUSE_SET_DMA_MODE |
220 IDE_HFLAG_NON_BOOTABLE,
224 .pio_mask = ATA_PIO4, 221 .pio_mask = ATA_PIO4,
225 .mwdma_mask = ATA_MWDMA2, 222 .mwdma_mask = ATA_MWDMA2,
226 .udma_mask = ATA_UDMA4, 223 .udma_mask = ATA_UDMA4,
227 },{ /* 3 */ 224 },{ /* 3 */
228 .name = "AEC6280", 225 .name = "AEC6280",
229 .init_chipset = init_chipset_aec62xx, 226 .init_chipset = init_chipset_aec62xx,
230 .init_hwif = init_hwif_aec62xx, 227 .port_ops = &atp86x_port_ops,
231 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 228 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
232 IDE_HFLAG_ABUSE_SET_DMA_MODE | 229 IDE_HFLAG_ABUSE_SET_DMA_MODE |
233 IDE_HFLAG_OFF_BOARD, 230 IDE_HFLAG_OFF_BOARD,
@@ -237,8 +234,8 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
237 },{ /* 4 */ 234 },{ /* 4 */
238 .name = "AEC6280R", 235 .name = "AEC6280R",
239 .init_chipset = init_chipset_aec62xx, 236 .init_chipset = init_chipset_aec62xx,
240 .init_hwif = init_hwif_aec62xx,
241 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 237 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
238 .port_ops = &atp86x_port_ops,
242 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 239 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
243 IDE_HFLAG_ABUSE_SET_DMA_MODE | 240 IDE_HFLAG_ABUSE_SET_DMA_MODE |
244 IDE_HFLAG_OFF_BOARD, 241 IDE_HFLAG_OFF_BOARD,
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index b3b6f514ce2d..b36a22b8c213 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -38,8 +38,6 @@
38 38
39#include <asm/io.h> 39#include <asm/io.h>
40 40
41#define DISPLAY_ALI_TIMINGS
42
43/* 41/*
44 * ALi devices are not plug in. Otherwise these static values would 42 * ALi devices are not plug in. Otherwise these static values would
45 * need to go. They ought to go away anyway 43 * need to go. They ought to go away anyway
@@ -49,236 +47,6 @@ static u8 m5229_revision;
49static u8 chip_is_1543c_e; 47static u8 chip_is_1543c_e;
50static struct pci_dev *isa_dev; 48static struct pci_dev *isa_dev;
51 49
52#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
53#include <linux/stat.h>
54#include <linux/proc_fs.h>
55
56static u8 ali_proc = 0;
57
58static struct pci_dev *bmide_dev;
59
60static char *fifo[4] = {
61 "FIFO Off",
62 "FIFO On ",
63 "DMA mode",
64 "PIO mode" };
65
66static char *udmaT[8] = {
67 "1.5T",
68 " 2T",
69 "2.5T",
70 " 3T",
71 "3.5T",
72 " 4T",
73 " 6T",
74 " 8T"
75};
76
77static char *channel_status[8] = {
78 "OK ",
79 "busy ",
80 "DRQ ",
81 "DRQ busy ",
82 "error ",
83 "error busy ",
84 "error DRQ ",
85 "error DRQ busy"
86};
87
88/**
89 * ali_get_info - generate proc file for ALi IDE
90 * @buffer: buffer to fill
91 * @addr: address of user start in buffer
92 * @offset: offset into 'file'
93 * @count: buffer count
94 *
95 * Walks the Ali devices and outputs summary data on the tuning and
96 * anything else that will help with debugging
97 */
98
99static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
100{
101 unsigned long bibma;
102 u8 reg53h, reg5xh, reg5yh, reg5xh1, reg5yh1, c0, c1, rev, tmp;
103 char *q, *p = buffer;
104
105 /* fetch rev. */
106 pci_read_config_byte(bmide_dev, 0x08, &rev);
107 if (rev >= 0xc1) /* M1543C or newer */
108 udmaT[7] = " ???";
109 else
110 fifo[3] = " ??? ";
111
112 /* first fetch bibma: */
113
114 bibma = pci_resource_start(bmide_dev, 4);
115
116 /*
117 * at that point bibma+0x2 et bibma+0xa are byte
118 * registers to investigate:
119 */
120 c0 = inb(bibma + 0x02);
121 c1 = inb(bibma + 0x0a);
122
123 p += sprintf(p,
124 "\n Ali M15x3 Chipset.\n");
125 p += sprintf(p,
126 " ------------------\n");
127 pci_read_config_byte(bmide_dev, 0x78, &reg53h);
128 p += sprintf(p, "PCI Clock: %d.\n", reg53h);
129
130 pci_read_config_byte(bmide_dev, 0x53, &reg53h);
131 p += sprintf(p,
132 "CD_ROM FIFO:%s, CD_ROM DMA:%s\n",
133 (reg53h & 0x02) ? "Yes" : "No ",
134 (reg53h & 0x01) ? "Yes" : "No " );
135 pci_read_config_byte(bmide_dev, 0x74, &reg53h);
136 p += sprintf(p,
137 "FIFO Status: contains %d Words, runs%s%s\n\n",
138 (reg53h & 0x3f),
139 (reg53h & 0x40) ? " OVERWR" : "",
140 (reg53h & 0x80) ? " OVERRD." : "." );
141
142 p += sprintf(p,
143 "-------------------primary channel"
144 "-------------------secondary channel"
145 "---------\n\n");
146
147 pci_read_config_byte(bmide_dev, 0x09, &reg53h);
148 p += sprintf(p,
149 "channel status: %s"
150 " %s\n",
151 (reg53h & 0x20) ? "On " : "Off",
152 (reg53h & 0x10) ? "On " : "Off" );
153
154 p += sprintf(p,
155 "both channels togth: %s"
156 " %s\n",
157 (c0&0x80) ? "No " : "Yes",
158 (c1&0x80) ? "No " : "Yes" );
159
160 pci_read_config_byte(bmide_dev, 0x76, &reg53h);
161 p += sprintf(p,
162 "Channel state: %s %s\n",
163 channel_status[reg53h & 0x07],
164 channel_status[(reg53h & 0x70) >> 4] );
165
166 pci_read_config_byte(bmide_dev, 0x58, &reg5xh);
167 pci_read_config_byte(bmide_dev, 0x5c, &reg5yh);
168 p += sprintf(p,
169 "Add. Setup Timing: %dT"
170 " %dT\n",
171 (reg5xh & 0x07) ? (reg5xh & 0x07) : 8,
172 (reg5yh & 0x07) ? (reg5yh & 0x07) : 8 );
173
174 pci_read_config_byte(bmide_dev, 0x59, &reg5xh);
175 pci_read_config_byte(bmide_dev, 0x5d, &reg5yh);
176 p += sprintf(p,
177 "Command Act. Count: %dT"
178 " %dT\n"
179 "Command Rec. Count: %dT"
180 " %dT\n\n",
181 (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
182 (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
183 (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
184 (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16 );
185
186 p += sprintf(p,
187 "----------------drive0-----------drive1"
188 "------------drive0-----------drive1------\n\n");
189 p += sprintf(p,
190 "DMA enabled: %s %s"
191 " %s %s\n",
192 (c0&0x20) ? "Yes" : "No ",
193 (c0&0x40) ? "Yes" : "No ",
194 (c1&0x20) ? "Yes" : "No ",
195 (c1&0x40) ? "Yes" : "No " );
196
197 pci_read_config_byte(bmide_dev, 0x54, &reg5xh);
198 pci_read_config_byte(bmide_dev, 0x55, &reg5yh);
199 q = "FIFO threshold: %2d Words %2d Words"
200 " %2d Words %2d Words\n";
201 if (rev < 0xc1) {
202 if ((rev == 0x20) &&
203 (pci_read_config_byte(bmide_dev, 0x4f, &tmp), (tmp &= 0x20))) {
204 p += sprintf(p, q, 8, 8, 8, 8);
205 } else {
206 p += sprintf(p, q,
207 (reg5xh & 0x03) + 12,
208 ((reg5xh & 0x30)>>4) + 12,
209 (reg5yh & 0x03) + 12,
210 ((reg5yh & 0x30)>>4) + 12 );
211 }
212 } else {
213 int t1 = (tmp = (reg5xh & 0x03)) ? (tmp << 3) : 4;
214 int t2 = (tmp = ((reg5xh & 0x30)>>4)) ? (tmp << 3) : 4;
215 int t3 = (tmp = (reg5yh & 0x03)) ? (tmp << 3) : 4;
216 int t4 = (tmp = ((reg5yh & 0x30)>>4)) ? (tmp << 3) : 4;
217 p += sprintf(p, q, t1, t2, t3, t4);
218 }
219
220#if 0
221 p += sprintf(p,
222 "FIFO threshold: %2d Words %2d Words"
223 " %2d Words %2d Words\n",
224 (reg5xh & 0x03) + 12,
225 ((reg5xh & 0x30)>>4) + 12,
226 (reg5yh & 0x03) + 12,
227 ((reg5yh & 0x30)>>4) + 12 );
228#endif
229
230 p += sprintf(p,
231 "FIFO mode: %s %s %s %s\n",
232 fifo[((reg5xh & 0x0c) >> 2)],
233 fifo[((reg5xh & 0xc0) >> 6)],
234 fifo[((reg5yh & 0x0c) >> 2)],
235 fifo[((reg5yh & 0xc0) >> 6)] );
236
237 pci_read_config_byte(bmide_dev, 0x5a, &reg5xh);
238 pci_read_config_byte(bmide_dev, 0x5b, &reg5xh1);
239 pci_read_config_byte(bmide_dev, 0x5e, &reg5yh);
240 pci_read_config_byte(bmide_dev, 0x5f, &reg5yh1);
241
242 p += sprintf(p,/*
243 "------------------drive0-----------drive1"
244 "------------drive0-----------drive1------\n")*/
245 "Dt RW act. Cnt %2dT %2dT"
246 " %2dT %2dT\n"
247 "Dt RW rec. Cnt %2dT %2dT"
248 " %2dT %2dT\n\n",
249 (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
250 (reg5xh1 & 0x70) ? ((reg5xh1 & 0x70) >> 4) : 8,
251 (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
252 (reg5yh1 & 0x70) ? ((reg5yh1 & 0x70) >> 4) : 8,
253 (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
254 (reg5xh1 & 0x0f) ? (reg5xh1 & 0x0f) : 16,
255 (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16,
256 (reg5yh1 & 0x0f) ? (reg5yh1 & 0x0f) : 16 );
257
258 p += sprintf(p,
259 "-----------------------------------UDMA Timings"
260 "--------------------------------\n\n");
261
262 pci_read_config_byte(bmide_dev, 0x56, &reg5xh);
263 pci_read_config_byte(bmide_dev, 0x57, &reg5yh);
264 p += sprintf(p,
265 "UDMA: %s %s"
266 " %s %s\n"
267 "UDMA timings: %s %s"
268 " %s %s\n\n",
269 (reg5xh & 0x08) ? "OK" : "No",
270 (reg5xh & 0x80) ? "OK" : "No",
271 (reg5yh & 0x08) ? "OK" : "No",
272 (reg5yh & 0x80) ? "OK" : "No",
273 udmaT[(reg5xh & 0x07)],
274 udmaT[(reg5xh & 0x70) >> 4],
275 udmaT[reg5yh & 0x07],
276 udmaT[(reg5yh & 0x70) >> 4] );
277
278 return p-buffer; /* => must be less than 4k! */
279}
280#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
281
282/** 50/**
283 * ali_set_pio_mode - set host controller for PIO mode 51 * ali_set_pio_mode - set host controller for PIO mode
284 * @drive: drive 52 * @drive: drive
@@ -294,7 +62,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
294 int s_time, a_time, c_time; 62 int s_time, a_time, c_time;
295 u8 s_clc, a_clc, r_clc; 63 u8 s_clc, a_clc, r_clc;
296 unsigned long flags; 64 unsigned long flags;
297 int bus_speed = system_bus_clock(); 65 int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
298 int port = hwif->channel ? 0x5c : 0x58; 66 int port = hwif->channel ? 0x5c : 0x58;
299 int portFIFO = hwif->channel ? 0x55 : 0x54; 67 int portFIFO = hwif->channel ? 0x55 : 0x54;
300 u8 cd_dma_fifo = 0; 68 u8 cd_dma_fifo = 0;
@@ -465,14 +233,6 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
465 233
466 isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 234 isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
467 235
468#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
469 if (!ali_proc) {
470 ali_proc = 1;
471 bmide_dev = dev;
472 ide_pci_create_host_proc("ali", ali_get_info);
473 }
474#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
475
476 local_irq_save(flags); 236 local_irq_save(flags);
477 237
478 if (m5229_revision < 0xC2) { 238 if (m5229_revision < 0xC2) {
@@ -610,7 +370,7 @@ static int ali_cable_override(struct pci_dev *pdev)
610} 370}
611 371
612/** 372/**
613 * ata66_ali15x3 - check for UDMA 66 support 373 * ali_cable_detect - cable detection
614 * @hwif: IDE interface 374 * @hwif: IDE interface
615 * 375 *
616 * This checks if the controller and the cable are capable 376 * This checks if the controller and the cable are capable
@@ -620,7 +380,7 @@ static int ali_cable_override(struct pci_dev *pdev)
 620 * FIXME: frobs bits that are not defined on newer ALi devices 380 * FIXME: frobs bits that are not defined on newer ALi devices
621 */ 381 */
622 382
623static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif) 383static u8 __devinit ali_cable_detect(ide_hwif_t *hwif)
624{ 384{
625 struct pci_dev *dev = to_pci_dev(hwif->dev); 385 struct pci_dev *dev = to_pci_dev(hwif->dev);
626 unsigned long flags; 386 unsigned long flags;
@@ -652,27 +412,7 @@ static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
652 return cbl; 412 return cbl;
653} 413}
654 414
655/** 415#ifndef CONFIG_SPARC64
656 * init_hwif_common_ali15x3 - Set up ALI IDE hardware
657 * @hwif: IDE interface
658 *
659 * Initialize the IDE structure side of the ALi 15x3 driver.
660 */
661
662static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
663{
664 hwif->set_pio_mode = &ali_set_pio_mode;
665 hwif->set_dma_mode = &ali_set_dma_mode;
666 hwif->udma_filter = &ali_udma_filter;
667
668 hwif->cable_detect = ata66_ali15x3;
669
670 if (hwif->dma_base == 0)
671 return;
672
673 hwif->dma_setup = &ali15x3_dma_setup;
674}
675
676/** 416/**
677 * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff 417 * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
678 * @hwif: interface to configure 418 * @hwif: interface to configure
@@ -722,35 +462,66 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
722 if(irq >= 0) 462 if(irq >= 0)
723 hwif->irq = irq; 463 hwif->irq = irq;
724 } 464 }
725
726 init_hwif_common_ali15x3(hwif);
727} 465}
466#endif
728 467
729/** 468/**
730 * init_dma_ali15x3 - set up DMA on ALi15x3 469 * init_dma_ali15x3 - set up DMA on ALi15x3
731 * @hwif: IDE interface 470 * @hwif: IDE interface
732 * @dmabase: DMA interface base PCI address 471 * @d: IDE port info
733 * 472 *
734 * Set up the DMA functionality on the ALi 15x3. For the ALi 473 * Set up the DMA functionality on the ALi 15x3.
735 * controllers this is generic so we can let the generic code do
736 * the actual work.
737 */ 474 */
738 475
739static void __devinit init_dma_ali15x3 (ide_hwif_t *hwif, unsigned long dmabase) 476static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
477 const struct ide_port_info *d)
740{ 478{
741 if (m5229_revision < 0x20) 479 struct pci_dev *dev = to_pci_dev(hwif->dev);
742 return; 480 unsigned long base = ide_pci_dma_base(hwif, d);
481
482 if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
483 return -1;
484
743 if (!hwif->channel) 485 if (!hwif->channel)
744 outb(inb(dmabase + 2) & 0x60, dmabase + 2); 486 outb(inb(base + 2) & 0x60, base + 2);
745 ide_setup_dma(hwif, dmabase); 487
488 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
489 hwif->name, base, base + 7);
490
491 if (ide_allocate_dma_engine(hwif))
492 return -1;
493
494 ide_setup_dma(hwif, base);
495
496 return 0;
746} 497}
747 498
499static const struct ide_port_ops ali_port_ops = {
500 .set_pio_mode = ali_set_pio_mode,
501 .set_dma_mode = ali_set_dma_mode,
502 .udma_filter = ali_udma_filter,
503 .cable_detect = ali_cable_detect,
504};
505
506static const struct ide_dma_ops ali_dma_ops = {
507 .dma_host_set = ide_dma_host_set,
508 .dma_setup = ali15x3_dma_setup,
509 .dma_exec_cmd = ide_dma_exec_cmd,
510 .dma_start = ide_dma_start,
511 .dma_end = __ide_dma_end,
512 .dma_test_irq = ide_dma_test_irq,
513 .dma_lost_irq = ide_dma_lost_irq,
514 .dma_timeout = ide_dma_timeout,
515};
516
748static const struct ide_port_info ali15x3_chipset __devinitdata = { 517static const struct ide_port_info ali15x3_chipset __devinitdata = {
749 .name = "ALI15X3", 518 .name = "ALI15X3",
750 .init_chipset = init_chipset_ali15x3, 519 .init_chipset = init_chipset_ali15x3,
520#ifndef CONFIG_SPARC64
751 .init_hwif = init_hwif_ali15x3, 521 .init_hwif = init_hwif_ali15x3,
522#endif
752 .init_dma = init_dma_ali15x3, 523 .init_dma = init_dma_ali15x3,
753 .host_flags = IDE_HFLAG_BOOTABLE, 524 .port_ops = &ali_port_ops,
754 .pio_mask = ATA_PIO5, 525 .pio_mask = ATA_PIO5,
755 .swdma_mask = ATA_SWDMA2, 526 .swdma_mask = ATA_SWDMA2,
756 .mwdma_mask = ATA_MWDMA2, 527 .mwdma_mask = ATA_MWDMA2,
@@ -793,14 +564,17 @@ static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_dev
793 d.udma_mask = ATA_UDMA5; 564 d.udma_mask = ATA_UDMA5;
794 else 565 else
795 d.udma_mask = ATA_UDMA6; 566 d.udma_mask = ATA_UDMA6;
567
568 d.dma_ops = &ali_dma_ops;
569 } else {
570 d.host_flags |= IDE_HFLAG_NO_DMA;
571
572 d.mwdma_mask = d.swdma_mask = 0;
796 } 573 }
797 574
798 if (idx == 0) 575 if (idx == 0)
799 d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX; 576 d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
800 577
801#if defined(CONFIG_SPARC64)
802 d.init_hwif = init_hwif_common_ali15x3;
803#endif /* CONFIG_SPARC64 */
804 return ide_setup_pci_device(dev, &d); 578 return ide_setup_pci_device(dev, &d);
805} 579}
806 580
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 2ef890ce8097..efcf54338be7 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -179,7 +179,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev,
179 * Determine the system bus clock. 179 * Determine the system bus clock.
180 */ 180 */
181 181
182 amd_clock = system_bus_clock() * 1000; 182 amd_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
183 183
184 switch (amd_clock) { 184 switch (amd_clock) {
185 case 33000: amd_clock = 33333; break; 185 case 33000: amd_clock = 33333; break;
@@ -210,21 +210,20 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
210 210
211 if (hwif->irq == 0) /* 0 is bogus but will do for now */ 211 if (hwif->irq == 0) /* 0 is bogus but will do for now */
212 hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel); 212 hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
213
214 hwif->set_pio_mode = &amd_set_pio_mode;
215 hwif->set_dma_mode = &amd_set_drive;
216
217 hwif->cable_detect = amd_cable_detect;
218} 213}
219 214
215static const struct ide_port_ops amd_port_ops = {
216 .set_pio_mode = amd_set_pio_mode,
217 .set_dma_mode = amd_set_drive,
218 .cable_detect = amd_cable_detect,
219};
220
220#define IDE_HFLAGS_AMD \ 221#define IDE_HFLAGS_AMD \
221 (IDE_HFLAG_PIO_NO_BLACKLIST | \ 222 (IDE_HFLAG_PIO_NO_BLACKLIST | \
222 IDE_HFLAG_PIO_NO_DOWNGRADE | \
223 IDE_HFLAG_ABUSE_SET_DMA_MODE | \ 223 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
224 IDE_HFLAG_POST_SET_MODE | \ 224 IDE_HFLAG_POST_SET_MODE | \
225 IDE_HFLAG_IO_32BIT | \ 225 IDE_HFLAG_IO_32BIT | \
226 IDE_HFLAG_UNMASK_IRQS | \ 226 IDE_HFLAG_UNMASK_IRQS)
227 IDE_HFLAG_BOOTABLE)
228 227
229#define DECLARE_AMD_DEV(name_str, swdma, udma) \ 228#define DECLARE_AMD_DEV(name_str, swdma, udma) \
230 { \ 229 { \
@@ -232,6 +231,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
232 .init_chipset = init_chipset_amd74xx, \ 231 .init_chipset = init_chipset_amd74xx, \
233 .init_hwif = init_hwif_amd74xx, \ 232 .init_hwif = init_hwif_amd74xx, \
234 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \ 233 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
234 .port_ops = &amd_port_ops, \
235 .host_flags = IDE_HFLAGS_AMD, \ 235 .host_flags = IDE_HFLAGS_AMD, \
236 .pio_mask = ATA_PIO5, \ 236 .pio_mask = ATA_PIO5, \
237 .swdma_mask = swdma, \ 237 .swdma_mask = swdma, \
@@ -245,6 +245,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
245 .init_chipset = init_chipset_amd74xx, \ 245 .init_chipset = init_chipset_amd74xx, \
246 .init_hwif = init_hwif_amd74xx, \ 246 .init_hwif = init_hwif_amd74xx, \
247 .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \ 247 .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
248 .port_ops = &amd_port_ops, \
248 .host_flags = IDE_HFLAGS_AMD, \ 249 .host_flags = IDE_HFLAGS_AMD, \
249 .pio_mask = ATA_PIO5, \ 250 .pio_mask = ATA_PIO5, \
250 .swdma_mask = ATA_SWDMA2, \ 251 .swdma_mask = ATA_SWDMA2, \
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 7e037c880cb0..8b637181681a 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -130,37 +130,26 @@ static u8 __devinit atiixp_cable_detect(ide_hwif_t *hwif)
130 return ATA_CBL_PATA40; 130 return ATA_CBL_PATA40;
131} 131}
132 132
133/** 133static const struct ide_port_ops atiixp_port_ops = {
134 * init_hwif_atiixp - fill in the hwif for the ATIIXP 134 .set_pio_mode = atiixp_set_pio_mode,
135 * @hwif: IDE interface 135 .set_dma_mode = atiixp_set_dma_mode,
136 * 136 .cable_detect = atiixp_cable_detect,
137 * Set up the ide_hwif_t for the ATIIXP interface according to the 137};
138 * capabilities of the hardware.
139 */
140
141static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
142{
143 hwif->set_pio_mode = &atiixp_set_pio_mode;
144 hwif->set_dma_mode = &atiixp_set_dma_mode;
145
146 hwif->cable_detect = atiixp_cable_detect;
147}
148 138
149static const struct ide_port_info atiixp_pci_info[] __devinitdata = { 139static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
150 { /* 0 */ 140 { /* 0 */
151 .name = "ATIIXP", 141 .name = "ATIIXP",
152 .init_hwif = init_hwif_atiixp,
153 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}}, 142 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
154 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE, 143 .port_ops = &atiixp_port_ops,
144 .host_flags = IDE_HFLAG_LEGACY_IRQS,
155 .pio_mask = ATA_PIO4, 145 .pio_mask = ATA_PIO4,
156 .mwdma_mask = ATA_MWDMA2, 146 .mwdma_mask = ATA_MWDMA2,
157 .udma_mask = ATA_UDMA5, 147 .udma_mask = ATA_UDMA5,
158 },{ /* 1 */ 148 },{ /* 1 */
159 .name = "SB600_PATA", 149 .name = "SB600_PATA",
160 .init_hwif = init_hwif_atiixp,
161 .enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}}, 150 .enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
162 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS | 151 .port_ops = &atiixp_port_ops,
163 IDE_HFLAG_BOOTABLE, 152 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS,
164 .pio_mask = ATA_PIO4, 153 .pio_mask = ATA_PIO4,
165 .mwdma_mask = ATA_MWDMA2, 154 .mwdma_mask = ATA_MWDMA2,
166 .udma_mask = ATA_UDMA5, 155 .udma_mask = ATA_UDMA5,
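
In the atiixp hunks, as in the other PCI drivers in this merge, per-port behaviour is increasingly described by OR-ing IDE_HFLAG_* bits into host_flags rather than by extra init-time hooks, and other code then tests individual bits (for example IDE_HFLAG_SINGLE in the qd65xx hunks earlier). The short example below shows the bitmask-flags idiom on its own; the HFLAG_* constants and demo_port_info type are invented for illustration and do not match the kernel's IDE_HFLAG_* values.

#include <stdio.h>

/* Illustrative flag bits; the kernel's IDE_HFLAG_* values differ. */
#define HFLAG_SINGLE       (1 << 0)   /* port has only one channel */
#define HFLAG_LEGACY_IRQS  (1 << 1)   /* use IRQ 14/15 defaults */
#define HFLAG_NO_DMA       (1 << 2)   /* PIO only */

struct demo_port_info {
	const char *name;
	unsigned long host_flags;
};

int main(void)
{
	struct demo_port_info d = {
		.name = "demo-pata",
		.host_flags = HFLAG_SINGLE | HFLAG_LEGACY_IRQS,
	};

	/* Probe code can add properties discovered at runtime... */
	d.host_flags |= HFLAG_NO_DMA;

	/* ...and later code tests single bits instead of separate booleans. */
	if (d.host_flags & HFLAG_SINGLE)
		printf("%s: registering a single channel\n", d.name);
	if (d.host_flags & HFLAG_NO_DMA)
		printf("%s: DMA disabled, PIO only\n", d.name);
	return 0;
}
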
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index a1cfe033a55f..aaf38109eaec 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -4,7 +4,7 @@
4 4
5/* 5/*
6 * Original authors: abramov@cecmow.enet.dec.com (Igor Abramov) 6 * Original authors: abramov@cecmow.enet.dec.com (Igor Abramov)
7 * mlord@pobox.com (Mark Lord) 7 * mlord@pobox.com (Mark Lord)
8 * 8 *
9 * See linux/MAINTAINERS for address of current maintainer. 9 * See linux/MAINTAINERS for address of current maintainer.
10 * 10 *
@@ -98,7 +98,7 @@
98 98
99#define CMD640_PREFETCH_MASKS 1 99#define CMD640_PREFETCH_MASKS 1
100 100
101//#define CMD640_DUMP_REGS 101/*#define CMD640_DUMP_REGS */
102 102
103#include <linux/types.h> 103#include <linux/types.h>
104#include <linux/kernel.h> 104#include <linux/kernel.h>
@@ -109,10 +109,9 @@
109 109
110#include <asm/io.h> 110#include <asm/io.h>
111 111
112/* 112#define DRV_NAME "cmd640"
113 * This flag is set in ide.c by the parameter: ide0=cmd640_vlb 113
114 */ 114static int cmd640_vlb;
115int cmd640_vlb = 0;
116 115
117/* 116/*
118 * CMD640 specific registers definition. 117 * CMD640 specific registers definition.
@@ -185,7 +184,6 @@ static DEFINE_SPINLOCK(cmd640_lock);
185 * These are initialized to point at the devices we control 184 * These are initialized to point at the devices we control
186 */ 185 */
187static ide_hwif_t *cmd_hwif0, *cmd_hwif1; 186static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
188static ide_drive_t *cmd_drives[4];
189 187
190/* 188/*
191 * Interface to access cmd640x registers 189 * Interface to access cmd640x registers
@@ -207,13 +205,13 @@ static unsigned int cmd640_chip_version;
207 205
208/* PCI method 1 access */ 206/* PCI method 1 access */
209 207
210static void put_cmd640_reg_pci1 (u16 reg, u8 val) 208static void put_cmd640_reg_pci1(u16 reg, u8 val)
211{ 209{
212 outl_p((reg & 0xfc) | cmd640_key, 0xcf8); 210 outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
213 outb_p(val, (reg & 3) | 0xcfc); 211 outb_p(val, (reg & 3) | 0xcfc);
214} 212}
215 213
216static u8 get_cmd640_reg_pci1 (u16 reg) 214static u8 get_cmd640_reg_pci1(u16 reg)
217{ 215{
218 outl_p((reg & 0xfc) | cmd640_key, 0xcf8); 216 outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
219 return inb_p((reg & 3) | 0xcfc); 217 return inb_p((reg & 3) | 0xcfc);
@@ -221,14 +219,14 @@ static u8 get_cmd640_reg_pci1 (u16 reg)
221 219
222/* PCI method 2 access (from CMD datasheet) */ 220/* PCI method 2 access (from CMD datasheet) */
223 221
224static void put_cmd640_reg_pci2 (u16 reg, u8 val) 222static void put_cmd640_reg_pci2(u16 reg, u8 val)
225{ 223{
226 outb_p(0x10, 0xcf8); 224 outb_p(0x10, 0xcf8);
227 outb_p(val, cmd640_key + reg); 225 outb_p(val, cmd640_key + reg);
228 outb_p(0, 0xcf8); 226 outb_p(0, 0xcf8);
229} 227}
230 228
231static u8 get_cmd640_reg_pci2 (u16 reg) 229static u8 get_cmd640_reg_pci2(u16 reg)
232{ 230{
233 u8 b; 231 u8 b;
234 232
@@ -240,13 +238,13 @@ static u8 get_cmd640_reg_pci2 (u16 reg)
240 238
241/* VLB access */ 239/* VLB access */
242 240
243static void put_cmd640_reg_vlb (u16 reg, u8 val) 241static void put_cmd640_reg_vlb(u16 reg, u8 val)
244{ 242{
245 outb_p(reg, cmd640_key); 243 outb_p(reg, cmd640_key);
246 outb_p(val, cmd640_key + 4); 244 outb_p(val, cmd640_key + 4);
247} 245}
248 246
249static u8 get_cmd640_reg_vlb (u16 reg) 247static u8 get_cmd640_reg_vlb(u16 reg)
250{ 248{
251 outb_p(reg, cmd640_key); 249 outb_p(reg, cmd640_key);
252 return inb_p(cmd640_key + 4); 250 return inb_p(cmd640_key + 4);
@@ -268,11 +266,11 @@ static void put_cmd640_reg(u16 reg, u8 val)
268 unsigned long flags; 266 unsigned long flags;
269 267
270 spin_lock_irqsave(&cmd640_lock, flags); 268 spin_lock_irqsave(&cmd640_lock, flags);
271 __put_cmd640_reg(reg,val); 269 __put_cmd640_reg(reg, val);
272 spin_unlock_irqrestore(&cmd640_lock, flags); 270 spin_unlock_irqrestore(&cmd640_lock, flags);
273} 271}
274 272
275static int __init match_pci_cmd640_device (void) 273static int __init match_pci_cmd640_device(void)
276{ 274{
277 const u8 ven_dev[4] = {0x95, 0x10, 0x40, 0x06}; 275 const u8 ven_dev[4] = {0x95, 0x10, 0x40, 0x06};
278 unsigned int i; 276 unsigned int i;
@@ -292,7 +290,7 @@ static int __init match_pci_cmd640_device (void)
292/* 290/*
293 * Probe for CMD640x -- pci method 1 291 * Probe for CMD640x -- pci method 1
294 */ 292 */
295static int __init probe_for_cmd640_pci1 (void) 293static int __init probe_for_cmd640_pci1(void)
296{ 294{
297 __get_cmd640_reg = get_cmd640_reg_pci1; 295 __get_cmd640_reg = get_cmd640_reg_pci1;
298 __put_cmd640_reg = put_cmd640_reg_pci1; 296 __put_cmd640_reg = put_cmd640_reg_pci1;
@@ -308,7 +306,7 @@ static int __init probe_for_cmd640_pci1 (void)
308/* 306/*
309 * Probe for CMD640x -- pci method 2 307 * Probe for CMD640x -- pci method 2
310 */ 308 */
311static int __init probe_for_cmd640_pci2 (void) 309static int __init probe_for_cmd640_pci2(void)
312{ 310{
313 __get_cmd640_reg = get_cmd640_reg_pci2; 311 __get_cmd640_reg = get_cmd640_reg_pci2;
314 __put_cmd640_reg = put_cmd640_reg_pci2; 312 __put_cmd640_reg = put_cmd640_reg_pci2;
@@ -322,7 +320,7 @@ static int __init probe_for_cmd640_pci2 (void)
322/* 320/*
323 * Probe for CMD640x -- vlb 321 * Probe for CMD640x -- vlb
324 */ 322 */
325static int __init probe_for_cmd640_vlb (void) 323static int __init probe_for_cmd640_vlb(void)
326{ 324{
327 u8 b; 325 u8 b;
328 326
@@ -343,18 +341,18 @@ static int __init probe_for_cmd640_vlb (void)
343 * Returns 1 if an IDE interface/drive exists at 0x170, 341 * Returns 1 if an IDE interface/drive exists at 0x170,
344 * Returns 0 otherwise. 342 * Returns 0 otherwise.
345 */ 343 */
346static int __init secondary_port_responding (void) 344static int __init secondary_port_responding(void)
347{ 345{
348 unsigned long flags; 346 unsigned long flags;
349 347
350 spin_lock_irqsave(&cmd640_lock, flags); 348 spin_lock_irqsave(&cmd640_lock, flags);
351 349
352 outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET); /* select drive0 */ 350 outb_p(0x0a, 0x176); /* select drive0 */
353 udelay(100); 351 udelay(100);
354 if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) { 352 if ((inb_p(0x176) & 0x1f) != 0x0a) {
355 outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */ 353 outb_p(0x1a, 0x176); /* select drive1 */
356 udelay(100); 354 udelay(100);
357 if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) { 355 if ((inb_p(0x176) & 0x1f) != 0x1a) {
358 spin_unlock_irqrestore(&cmd640_lock, flags); 356 spin_unlock_irqrestore(&cmd640_lock, flags);
359 return 0; /* nothing responded */ 357 return 0; /* nothing responded */
360 } 358 }
@@ -367,7 +365,7 @@ static int __init secondary_port_responding (void)
367/* 365/*
368 * Dump out all cmd640 registers. May be called from ide.c 366 * Dump out all cmd640 registers. May be called from ide.c
369 */ 367 */
370static void cmd640_dump_regs (void) 368static void cmd640_dump_regs(void)
371{ 369{
372 unsigned int reg = cmd640_vlb ? 0x50 : 0x00; 370 unsigned int reg = cmd640_vlb ? 0x50 : 0x00;
373 371
@@ -382,13 +380,13 @@ static void cmd640_dump_regs (void)
382} 380}
383#endif 381#endif
384 382
383#ifndef CONFIG_BLK_DEV_CMD640_ENHANCED
385/* 384/*
386 * Check whether prefetch is on for a drive, 385 * Check whether prefetch is on for a drive,
387 * and initialize the unmask flags for safe operation. 386 * and initialize the unmask flags for safe operation.
388 */ 387 */
389static void __init check_prefetch (unsigned int index) 388static void __init check_prefetch(ide_drive_t *drive, unsigned int index)
390{ 389{
391 ide_drive_t *drive = cmd_drives[index];
392 u8 b = get_cmd640_reg(prefetch_regs[index]); 390 u8 b = get_cmd640_reg(prefetch_regs[index]);
393 391
394 if (b & prefetch_masks[index]) { /* is prefetch off? */ 392 if (b & prefetch_masks[index]) { /* is prefetch off? */
@@ -403,29 +401,12 @@ static void __init check_prefetch (unsigned int index)
403 drive->no_io_32bit = 0; 401 drive->no_io_32bit = 0;
404 } 402 }
405} 403}
406 404#else
407/*
408 * Figure out which devices we control
409 */
410static void __init setup_device_ptrs (void)
411{
412 cmd_hwif0 = &ide_hwifs[0];
413 cmd_hwif1 = &ide_hwifs[1];
414
415 cmd_drives[0] = &cmd_hwif0->drives[0];
416 cmd_drives[1] = &cmd_hwif0->drives[1];
417 cmd_drives[2] = &cmd_hwif1->drives[0];
418 cmd_drives[3] = &cmd_hwif1->drives[1];
419}
420
421#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
422
423/* 405/*
424 * Sets prefetch mode for a drive. 406 * Sets prefetch mode for a drive.
425 */ 407 */
426static void set_prefetch_mode (unsigned int index, int mode) 408static void set_prefetch_mode(ide_drive_t *drive, unsigned int index, int mode)
427{ 409{
428 ide_drive_t *drive = cmd_drives[index];
429 unsigned long flags; 410 unsigned long flags;
430 int reg = prefetch_regs[index]; 411 int reg = prefetch_regs[index];
431 u8 b; 412 u8 b;
@@ -452,7 +433,7 @@ static void set_prefetch_mode (unsigned int index, int mode)
452/* 433/*
453 * Dump out current drive clocks settings 434 * Dump out current drive clocks settings
454 */ 435 */
455static void display_clocks (unsigned int index) 436static void display_clocks(unsigned int index)
456{ 437{
457 u8 active_count, recovery_count; 438 u8 active_count, recovery_count;
458 439
@@ -471,44 +452,16 @@ static void display_clocks (unsigned int index)
471 * Pack active and recovery counts into single byte representation 452 * Pack active and recovery counts into single byte representation
472 * used by controller 453 * used by controller
473 */ 454 */
474static inline u8 pack_nibbles (u8 upper, u8 lower) 455static inline u8 pack_nibbles(u8 upper, u8 lower)
475{ 456{
476 return ((upper & 0x0f) << 4) | (lower & 0x0f); 457 return ((upper & 0x0f) << 4) | (lower & 0x0f);
477} 458}
478 459
479/* 460/*
480 * This routine retrieves the initial drive timings from the chipset.
481 */
482static void __init retrieve_drive_counts (unsigned int index)
483{
484 u8 b;
485
486 /*
487 * Get the internal setup timing, and convert to clock count
488 */
489 b = get_cmd640_reg(arttim_regs[index]) & ~0x3f;
490 switch (b) {
491 case 0x00: b = 4; break;
492 case 0x80: b = 3; break;
493 case 0x40: b = 2; break;
494 default: b = 5; break;
495 }
496 setup_counts[index] = b;
497
498 /*
499 * Get the active/recovery counts
500 */
501 b = get_cmd640_reg(drwtim_regs[index]);
502 active_counts[index] = (b >> 4) ? (b >> 4) : 0x10;
503 recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10;
504}
505
506
507/*
508 * This routine writes the prepared setup/active/recovery counts 461 * This routine writes the prepared setup/active/recovery counts
509 * for a drive into the cmd640 chipset registers to active them. 462 * for a drive into the cmd640 chipset registers to active them.
510 */ 463 */
511static void program_drive_counts (unsigned int index) 464static void program_drive_counts(ide_drive_t *drive, unsigned int index)
512{ 465{
513 unsigned long flags; 466 unsigned long flags;
514 u8 setup_count = setup_counts[index]; 467 u8 setup_count = setup_counts[index];
@@ -522,8 +475,11 @@ static void program_drive_counts (unsigned int index)
522 * so we merge the timings, using the slowest value for each timing. 475 * so we merge the timings, using the slowest value for each timing.
523 */ 476 */
524 if (index > 1) { 477 if (index > 1) {
525 unsigned int mate; 478 ide_hwif_t *hwif = drive->hwif;
526 if (cmd_drives[mate = index ^ 1]->present) { 479 ide_drive_t *peer = &hwif->drives[!drive->select.b.unit];
480 unsigned int mate = index ^ 1;
481
482 if (peer->present) {
527 if (setup_count < setup_counts[mate]) 483 if (setup_count < setup_counts[mate])
528 setup_count = setup_counts[mate]; 484 setup_count = setup_counts[mate];
529 if (active_count < active_counts[mate]) 485 if (active_count < active_counts[mate])
@@ -537,11 +493,11 @@ static void program_drive_counts (unsigned int index)
537 * Convert setup_count to internal chipset representation 493 * Convert setup_count to internal chipset representation
538 */ 494 */
539 switch (setup_count) { 495 switch (setup_count) {
540 case 4: setup_count = 0x00; break; 496 case 4: setup_count = 0x00; break;
541 case 3: setup_count = 0x80; break; 497 case 3: setup_count = 0x80; break;
542 case 1: 498 case 1:
543 case 2: setup_count = 0x40; break; 499 case 2: setup_count = 0x40; break;
544 default: setup_count = 0xc0; /* case 5 */ 500 default: setup_count = 0xc0; /* case 5 */
545 } 501 }
546 502
547 /* 503 /*
@@ -562,11 +518,19 @@ static void program_drive_counts (unsigned int index)
562/* 518/*
563 * Set a specific pio_mode for a drive 519 * Set a specific pio_mode for a drive
564 */ 520 */
565static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle_time) 521static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
522 u8 pio_mode, unsigned int cycle_time)
566{ 523{
567 int setup_time, active_time, recovery_time, clock_time; 524 int setup_time, active_time, recovery_time, clock_time;
568 u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count; 525 u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
569 int bus_speed = system_bus_clock(); 526 int bus_speed;
527
528 if (cmd640_vlb && ide_vlb_clk)
529 bus_speed = ide_vlb_clk;
530 else if (!cmd640_vlb && ide_pci_clk)
531 bus_speed = ide_pci_clk;
532 else
533 bus_speed = system_bus_clock();
570 534
571 if (pio_mode > 5) 535 if (pio_mode > 5)
572 pio_mode = 5; 536 pio_mode = 5;
@@ -574,15 +538,15 @@ static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle
574 active_time = ide_pio_timings[pio_mode].active_time; 538 active_time = ide_pio_timings[pio_mode].active_time;
575 recovery_time = cycle_time - (setup_time + active_time); 539 recovery_time = cycle_time - (setup_time + active_time);
576 clock_time = 1000 / bus_speed; 540 clock_time = 1000 / bus_speed;
577 cycle_count = (cycle_time + clock_time - 1) / clock_time; 541 cycle_count = DIV_ROUND_UP(cycle_time, clock_time);
578 542
579 setup_count = (setup_time + clock_time - 1) / clock_time; 543 setup_count = DIV_ROUND_UP(setup_time, clock_time);
580 544
581 active_count = (active_time + clock_time - 1) / clock_time; 545 active_count = DIV_ROUND_UP(active_time, clock_time);
582 if (active_count < 2) 546 if (active_count < 2)
583 active_count = 2; /* minimum allowed by cmd640 */ 547 active_count = 2; /* minimum allowed by cmd640 */
584 548
585 recovery_count = (recovery_time + clock_time - 1) / clock_time; 549 recovery_count = DIV_ROUND_UP(recovery_time, clock_time);
586 recovery_count2 = cycle_count - (setup_count + active_count); 550 recovery_count2 = cycle_count - (setup_count + active_count);
587 if (recovery_count2 > recovery_count) 551 if (recovery_count2 > recovery_count)
588 recovery_count = recovery_count2; 552 recovery_count = recovery_count2;
@@ -611,7 +575,7 @@ static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle
611 * 1) this is the wrong place to do it (proper is do_special() in ide.c) 575 * 1) this is the wrong place to do it (proper is do_special() in ide.c)
612 * 2) in practice this is rarely, if ever, necessary 576 * 2) in practice this is rarely, if ever, necessary
613 */ 577 */
614 program_drive_counts (index); 578 program_drive_counts(drive, index);
615} 579}
616 580
617static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio) 581static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -619,32 +583,26 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
619 unsigned int index = 0, cycle_time; 583 unsigned int index = 0, cycle_time;
620 u8 b; 584 u8 b;
621 585
622 while (drive != cmd_drives[index]) {
623 if (++index > 3) {
624 printk(KERN_ERR "%s: bad news in %s\n",
625 drive->name, __FUNCTION__);
626 return;
627 }
628 }
629 switch (pio) { 586 switch (pio) {
630 case 6: /* set fast-devsel off */ 587 case 6: /* set fast-devsel off */
631 case 7: /* set fast-devsel on */ 588 case 7: /* set fast-devsel on */
632 b = get_cmd640_reg(CNTRL) & ~0x27; 589 b = get_cmd640_reg(CNTRL) & ~0x27;
633 if (pio & 1) 590 if (pio & 1)
634 b |= 0x27; 591 b |= 0x27;
635 put_cmd640_reg(CNTRL, b); 592 put_cmd640_reg(CNTRL, b);
636 printk("%s: %sabled cmd640 fast host timing (devsel)\n", drive->name, (pio & 1) ? "en" : "dis"); 593 printk("%s: %sabled cmd640 fast host timing (devsel)\n",
637 return; 594 drive->name, (pio & 1) ? "en" : "dis");
638 595 return;
639 case 8: /* set prefetch off */ 596 case 8: /* set prefetch off */
640 case 9: /* set prefetch on */ 597 case 9: /* set prefetch on */
641 set_prefetch_mode(index, pio & 1); 598 set_prefetch_mode(drive, index, pio & 1);
642 printk("%s: %sabled cmd640 prefetch\n", drive->name, (pio & 1) ? "en" : "dis"); 599 printk("%s: %sabled cmd640 prefetch\n",
643 return; 600 drive->name, (pio & 1) ? "en" : "dis");
601 return;
644 } 602 }
645 603
646 cycle_time = ide_pio_cycle_time(drive, pio); 604 cycle_time = ide_pio_cycle_time(drive, pio);
647 cmd640_set_mode(index, pio, cycle_time); 605 cmd640_set_mode(drive, index, pio, cycle_time);
648 606
649 printk("%s: selected cmd640 PIO mode%d (%dns)", 607 printk("%s: selected cmd640 PIO mode%d (%dns)",
650 drive->name, pio, cycle_time); 608 drive->name, pio, cycle_time);
@@ -652,6 +610,9 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
652 display_clocks(index); 610 display_clocks(index);
653} 611}
654 612
613static const struct ide_port_ops cmd640_port_ops = {
614 .set_pio_mode = cmd640_set_pio_mode,
615};
655#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ 616#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
656 617
657static int pci_conf1(void) 618static int pci_conf1(void)
@@ -693,14 +654,32 @@ static const struct ide_port_info cmd640_port_info __initdata = {
693 .chipset = ide_cmd640, 654 .chipset = ide_cmd640,
694 .host_flags = IDE_HFLAG_SERIALIZE | 655 .host_flags = IDE_HFLAG_SERIALIZE |
695 IDE_HFLAG_NO_DMA | 656 IDE_HFLAG_NO_DMA |
696 IDE_HFLAG_NO_AUTOTUNE |
697 IDE_HFLAG_ABUSE_PREFETCH | 657 IDE_HFLAG_ABUSE_PREFETCH |
698 IDE_HFLAG_ABUSE_FAST_DEVSEL, 658 IDE_HFLAG_ABUSE_FAST_DEVSEL,
699#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED 659#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
660 .port_ops = &cmd640_port_ops,
700 .pio_mask = ATA_PIO5, 661 .pio_mask = ATA_PIO5,
701#endif 662#endif
702}; 663};
703 664
665static int cmd640x_init_one(unsigned long base, unsigned long ctl)
666{
667 if (!request_region(base, 8, DRV_NAME)) {
668 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
669 DRV_NAME, base, base + 7);
670 return -EBUSY;
671 }
672
673 if (!request_region(ctl, 1, DRV_NAME)) {
674 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
675 DRV_NAME, ctl);
676 release_region(base, 8);
677 return -EBUSY;
678 }
679
680 return 0;
681}
682
704/* 683/*
705 * Probe for a cmd640 chipset, and initialize it if found. 684 * Probe for a cmd640 chipset, and initialize it if found.
706 */ 685 */
@@ -709,7 +688,7 @@ static int __init cmd640x_init(void)
709#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED 688#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
710 int second_port_toggled = 0; 689 int second_port_toggled = 0;
711#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ 690#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
712 int second_port_cmd640 = 0; 691 int second_port_cmd640 = 0, rc;
713 const char *bus_type, *port2; 692 const char *bus_type, *port2;
714 unsigned int index; 693 unsigned int index;
715 u8 b, cfr; 694 u8 b, cfr;
@@ -749,10 +728,21 @@ static int __init cmd640x_init(void)
749 cfr = get_cmd640_reg(CFR); 728 cfr = get_cmd640_reg(CFR);
750 cmd640_chip_version = cfr & CFR_DEVREV; 729 cmd640_chip_version = cfr & CFR_DEVREV;
751 if (cmd640_chip_version == 0) { 730 if (cmd640_chip_version == 0) {
752 printk ("ide: bad cmd640 revision: %d\n", cmd640_chip_version); 731 printk("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
753 return 0; 732 return 0;
754 } 733 }
755 734
735 rc = cmd640x_init_one(0x1f0, 0x3f6);
736 if (rc)
737 return rc;
738
739 rc = cmd640x_init_one(0x170, 0x376);
740 if (rc) {
741 release_region(0x3f6, 1);
742 release_region(0x1f0, 8);
743 return rc;
744 }
745
756 memset(&hw, 0, sizeof(hw)); 746 memset(&hw, 0, sizeof(hw));
757 747
758 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); 748 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
@@ -764,17 +754,15 @@ static int __init cmd640x_init(void)
764 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" 754 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
765 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); 755 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
766 756
757 cmd_hwif0 = ide_find_port();
758
767 /* 759 /*
768 * Initialize data for primary port 760 * Initialize data for primary port
769 */ 761 */
770 setup_device_ptrs (); 762 if (cmd_hwif0) {
771 763 ide_init_port_hw(cmd_hwif0, &hw[0]);
772 ide_init_port_hw(cmd_hwif0, &hw[0]); 764 idx[0] = cmd_hwif0->index;
773#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED 765 }
774 cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
775#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
776
777 idx[0] = cmd_hwif0->index;
778 766
779 /* 767 /*
780 * Ensure compatibility by always using the slowest timings 768 * Ensure compatibility by always using the slowest timings
@@ -786,10 +774,13 @@ static int __init cmd640x_init(void)
786 put_cmd640_reg(CMDTIM, 0); 774 put_cmd640_reg(CMDTIM, 0);
787 put_cmd640_reg(BRST, 0x40); 775 put_cmd640_reg(BRST, 0x40);
788 776
777 cmd_hwif1 = ide_find_port();
778
789 /* 779 /*
790 * Try to enable the secondary interface, if not already enabled 780 * Try to enable the secondary interface, if not already enabled
791 */ 781 */
792 if (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) { 782 if (cmd_hwif1 &&
783 cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) {
793 port2 = "not probed"; 784 port2 = "not probed";
794 } else { 785 } else {
795 b = get_cmd640_reg(CNTRL); 786 b = get_cmd640_reg(CNTRL);
@@ -820,15 +811,11 @@ static int __init cmd640x_init(void)
820 /* 811 /*
821 * Initialize data for secondary cmd640 port, if enabled 812 * Initialize data for secondary cmd640 port, if enabled
822 */ 813 */
823 if (second_port_cmd640) { 814 if (second_port_cmd640 && cmd_hwif1) {
824 ide_init_port_hw(cmd_hwif1, &hw[1]); 815 ide_init_port_hw(cmd_hwif1, &hw[1]);
825#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
826 cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
827#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
828
829 idx[1] = cmd_hwif1->index; 816 idx[1] = cmd_hwif1->index;
830 } 817 }
831 printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name, 818 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
832 second_port_cmd640 ? "" : "not ", port2); 819 second_port_cmd640 ? "" : "not ", port2);
833 820
834 /* 821 /*
@@ -836,35 +823,34 @@ static int __init cmd640x_init(void)
836 * Do not unnecessarily disturb any prior BIOS setup of these. 823 * Do not unnecessarily disturb any prior BIOS setup of these.
837 */ 824 */
838 for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) { 825 for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) {
839 ide_drive_t *drive = cmd_drives[index]; 826 ide_drive_t *drive;
840#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED 827
841 if (drive->autotune || ((index > 1) && second_port_toggled)) { 828 if (index > 1) {
842 /* 829 if (cmd_hwif1 == NULL)
843 * Reset timing to the slowest speed and turn off prefetch. 830 continue;
844 * This way, the drive identify code has a better chance. 831 drive = &cmd_hwif1->drives[index & 1];
845 */ 832 } else {
846 setup_counts [index] = 4; /* max possible */ 833 if (cmd_hwif0 == NULL)
847 active_counts [index] = 16; /* max possible */ 834 continue;
848 recovery_counts [index] = 16; /* max possible */ 835 drive = &cmd_hwif0->drives[index & 1];
849 program_drive_counts (index);
850 set_prefetch_mode (index, 0);
851 printk("cmd640: drive%d timings/prefetch cleared\n", index);
852 } else {
853 /*
854 * Record timings/prefetch without changing them.
855 * This preserves any prior BIOS setup.
856 */
857 retrieve_drive_counts (index);
858 check_prefetch (index);
859 printk("cmd640: drive%d timings/prefetch(%s) preserved",
860 index, drive->no_io_32bit ? "off" : "on");
861 display_clocks(index);
862 } 836 }
837
838#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
839 /*
840 * Reset timing to the slowest speed and turn off prefetch.
841 * This way, the drive identify code has a better chance.
842 */
843 setup_counts [index] = 4; /* max possible */
844 active_counts [index] = 16; /* max possible */
845 recovery_counts [index] = 16; /* max possible */
846 program_drive_counts(drive, index);
847 set_prefetch_mode(drive, index, 0);
848 printk("cmd640: drive%d timings/prefetch cleared\n", index);
863#else 849#else
864 /* 850 /*
865 * Set the drive unmask flags to match the prefetch setting 851 * Set the drive unmask flags to match the prefetch setting
866 */ 852 */
867 check_prefetch (index); 853 check_prefetch(drive, index);
868 printk("cmd640: drive%d timings/prefetch(%s) preserved\n", 854 printk("cmd640: drive%d timings/prefetch(%s) preserved\n",
869 index, drive->no_io_32bit ? "off" : "on"); 855 index, drive->no_io_32bit ? "off" : "on");
870#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ 856#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
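The cmd640_set_mode() hunk above replaces the open-coded ceiling divisions with DIV_ROUND_UP() and takes the bus clock from ide_vlb_clk or ide_pci_clk before falling back to system_bus_clock(). A minimal standalone sketch of that timing math, with an assumed 33 MHz clock and an illustrative active time (the values and the userspace wrapper below are examples, not kernel code):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

	int main(void)
	{
		int bus_speed   = 33;                /* MHz, e.g. what ide_pci_clk would report */
		int clock_time  = 1000 / bus_speed;  /* ns per bus clock, as in the driver */
		int active_time = 120;               /* ns, stand-in for ide_pio_timings[pio].active_time */

		/* same rounding the patch switches to via DIV_ROUND_UP() */
		int active_count = DIV_ROUND_UP(active_time, clock_time);

		if (active_count < 2)
			active_count = 2;            /* minimum allowed by the cmd640 */

		printf("clock_time=%d ns, active_count=%d\n", clock_time, active_count);
		return 0;
	}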
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index edabe6299efd..08674711d089 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -68,8 +68,8 @@ static u8 quantize_timing(int timing, int quant)
68 */ 68 */
69static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time) 69static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
70{ 70{
71 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 71 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
72 int clock_time = 1000 / system_bus_clock(); 72 int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock());
73 u8 cycle_count, active_count, recovery_count, drwtim; 73 u8 cycle_count, active_count, recovery_count, drwtim;
74 static const u8 recovery_values[] = 74 static const u8 recovery_values[] =
75 {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0}; 75 {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
@@ -128,7 +128,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
128 ide_pio_timings[pio].active_time); 128 ide_pio_timings[pio].active_time);
129 129
130 setup_count = quantize_timing(ide_pio_timings[pio].setup_time, 130 setup_count = quantize_timing(ide_pio_timings[pio].setup_time,
131 1000 / system_bus_clock()); 131 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock()));
132 132
133 /* 133 /*
134 * The primary channel has individual address setup timing registers 134 * The primary channel has individual address setup timing registers
@@ -223,7 +223,7 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
223 (void) pci_write_config_byte(dev, pciU, regU); 223 (void) pci_write_config_byte(dev, pciU, regU);
224} 224}
225 225
226static int cmd648_ide_dma_end (ide_drive_t *drive) 226static int cmd648_dma_end(ide_drive_t *drive)
227{ 227{
228 ide_hwif_t *hwif = HWIF(drive); 228 ide_hwif_t *hwif = HWIF(drive);
229 unsigned long base = hwif->dma_base - (hwif->channel * 8); 229 unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -239,7 +239,7 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
239 return err; 239 return err;
240} 240}
241 241
242static int cmd64x_ide_dma_end (ide_drive_t *drive) 242static int cmd64x_dma_end(ide_drive_t *drive)
243{ 243{
244 ide_hwif_t *hwif = HWIF(drive); 244 ide_hwif_t *hwif = HWIF(drive);
245 struct pci_dev *dev = to_pci_dev(hwif->dev); 245 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -256,7 +256,7 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
256 return err; 256 return err;
257} 257}
258 258
259static int cmd648_ide_dma_test_irq (ide_drive_t *drive) 259static int cmd648_dma_test_irq(ide_drive_t *drive)
260{ 260{
261 ide_hwif_t *hwif = HWIF(drive); 261 ide_hwif_t *hwif = HWIF(drive);
262 unsigned long base = hwif->dma_base - (hwif->channel * 8); 262 unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -279,7 +279,7 @@ static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
279 return 0; 279 return 0;
280} 280}
281 281
282static int cmd64x_ide_dma_test_irq (ide_drive_t *drive) 282static int cmd64x_dma_test_irq(ide_drive_t *drive)
283{ 283{
284 ide_hwif_t *hwif = HWIF(drive); 284 ide_hwif_t *hwif = HWIF(drive);
285 struct pci_dev *dev = to_pci_dev(hwif->dev); 285 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -310,7 +310,7 @@ static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
310 * event order for DMA transfers. 310 * event order for DMA transfers.
311 */ 311 */
312 312
313static int cmd646_1_ide_dma_end (ide_drive_t *drive) 313static int cmd646_1_dma_end(ide_drive_t *drive)
314{ 314{
315 ide_hwif_t *hwif = HWIF(drive); 315 ide_hwif_t *hwif = HWIF(drive);
316 u8 dma_stat = 0, dma_cmd = 0; 316 u8 dma_stat = 0, dma_cmd = 0;
@@ -370,7 +370,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
370 return 0; 370 return 0;
371} 371}
372 372
373static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif) 373static u8 __devinit cmd64x_cable_detect(ide_hwif_t *hwif)
374{ 374{
375 struct pci_dev *dev = to_pci_dev(hwif->dev); 375 struct pci_dev *dev = to_pci_dev(hwif->dev);
376 u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01; 376 u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
@@ -385,91 +385,85 @@ static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
385 } 385 }
386} 386}
387 387
388static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif) 388static const struct ide_port_ops cmd64x_port_ops = {
389{ 389 .set_pio_mode = cmd64x_set_pio_mode,
390 struct pci_dev *dev = to_pci_dev(hwif->dev); 390 .set_dma_mode = cmd64x_set_dma_mode,
391 391 .cable_detect = cmd64x_cable_detect,
392 hwif->set_pio_mode = &cmd64x_set_pio_mode; 392};
393 hwif->set_dma_mode = &cmd64x_set_dma_mode;
394
395 hwif->cable_detect = ata66_cmd64x;
396 393
397 if (!hwif->dma_base) 394static const struct ide_dma_ops cmd64x_dma_ops = {
398 return; 395 .dma_host_set = ide_dma_host_set,
396 .dma_setup = ide_dma_setup,
397 .dma_exec_cmd = ide_dma_exec_cmd,
398 .dma_start = ide_dma_start,
399 .dma_end = cmd64x_dma_end,
400 .dma_test_irq = cmd64x_dma_test_irq,
401 .dma_lost_irq = ide_dma_lost_irq,
402 .dma_timeout = ide_dma_timeout,
403};
399 404
400 /* 405static const struct ide_dma_ops cmd646_rev1_dma_ops = {
401 * UltraDMA only supported on PCI646U and PCI646U2, which 406 .dma_host_set = ide_dma_host_set,
402 * correspond to revisions 0x03, 0x05 and 0x07 respectively. 407 .dma_setup = ide_dma_setup,
403 * Actually, although the CMD tech support people won't 408 .dma_exec_cmd = ide_dma_exec_cmd,
404 * tell me the details, the 0x03 revision cannot support 409 .dma_start = ide_dma_start,
405 * UDMA correctly without hardware modifications, and even 410 .dma_end = cmd646_1_dma_end,
406 * then it only works with Quantum disks due to some 411 .dma_test_irq = ide_dma_test_irq,
407 * hold time assumptions in the 646U part which are fixed 412 .dma_lost_irq = ide_dma_lost_irq,
408 * in the 646U2. 413 .dma_timeout = ide_dma_timeout,
409 * 414};
410 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
411 */
412 if (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 5)
413 hwif->ultra_mask = 0x00;
414 415
415 switch (dev->device) { 416static const struct ide_dma_ops cmd648_dma_ops = {
416 case PCI_DEVICE_ID_CMD_648: 417 .dma_host_set = ide_dma_host_set,
417 case PCI_DEVICE_ID_CMD_649: 418 .dma_setup = ide_dma_setup,
418 alt_irq_bits: 419 .dma_exec_cmd = ide_dma_exec_cmd,
419 hwif->ide_dma_end = &cmd648_ide_dma_end; 420 .dma_start = ide_dma_start,
420 hwif->ide_dma_test_irq = &cmd648_ide_dma_test_irq; 421 .dma_end = cmd648_dma_end,
421 break; 422 .dma_test_irq = cmd648_dma_test_irq,
422 case PCI_DEVICE_ID_CMD_646: 423 .dma_lost_irq = ide_dma_lost_irq,
423 if (dev->revision == 0x01) { 424 .dma_timeout = ide_dma_timeout,
424 hwif->ide_dma_end = &cmd646_1_ide_dma_end; 425};
425 break;
426 } else if (dev->revision >= 0x03)
427 goto alt_irq_bits;
428 /* fall thru */
429 default:
430 hwif->ide_dma_end = &cmd64x_ide_dma_end;
431 hwif->ide_dma_test_irq = &cmd64x_ide_dma_test_irq;
432 break;
433 }
434}
435 426
436static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { 427static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
437 { /* 0 */ 428 { /* 0 */
438 .name = "CMD643", 429 .name = "CMD643",
439 .init_chipset = init_chipset_cmd64x, 430 .init_chipset = init_chipset_cmd64x,
440 .init_hwif = init_hwif_cmd64x,
441 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}}, 431 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
432 .port_ops = &cmd64x_port_ops,
433 .dma_ops = &cmd64x_dma_ops,
442 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | 434 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
443 IDE_HFLAG_ABUSE_PREFETCH | 435 IDE_HFLAG_ABUSE_PREFETCH,
444 IDE_HFLAG_BOOTABLE,
445 .pio_mask = ATA_PIO5, 436 .pio_mask = ATA_PIO5,
446 .mwdma_mask = ATA_MWDMA2, 437 .mwdma_mask = ATA_MWDMA2,
447 .udma_mask = 0x00, /* no udma */ 438 .udma_mask = 0x00, /* no udma */
448 },{ /* 1 */ 439 },{ /* 1 */
449 .name = "CMD646", 440 .name = "CMD646",
450 .init_chipset = init_chipset_cmd64x, 441 .init_chipset = init_chipset_cmd64x,
451 .init_hwif = init_hwif_cmd64x,
452 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 442 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
453 .chipset = ide_cmd646, 443 .chipset = ide_cmd646,
454 .host_flags = IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE, 444 .port_ops = &cmd64x_port_ops,
445 .dma_ops = &cmd648_dma_ops,
446 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
455 .pio_mask = ATA_PIO5, 447 .pio_mask = ATA_PIO5,
456 .mwdma_mask = ATA_MWDMA2, 448 .mwdma_mask = ATA_MWDMA2,
457 .udma_mask = ATA_UDMA2, 449 .udma_mask = ATA_UDMA2,
458 },{ /* 2 */ 450 },{ /* 2 */
459 .name = "CMD648", 451 .name = "CMD648",
460 .init_chipset = init_chipset_cmd64x, 452 .init_chipset = init_chipset_cmd64x,
461 .init_hwif = init_hwif_cmd64x,
462 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 453 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
463 .host_flags = IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE, 454 .port_ops = &cmd64x_port_ops,
455 .dma_ops = &cmd648_dma_ops,
456 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
464 .pio_mask = ATA_PIO5, 457 .pio_mask = ATA_PIO5,
465 .mwdma_mask = ATA_MWDMA2, 458 .mwdma_mask = ATA_MWDMA2,
466 .udma_mask = ATA_UDMA4, 459 .udma_mask = ATA_UDMA4,
467 },{ /* 3 */ 460 },{ /* 3 */
468 .name = "CMD649", 461 .name = "CMD649",
469 .init_chipset = init_chipset_cmd64x, 462 .init_chipset = init_chipset_cmd64x,
470 .init_hwif = init_hwif_cmd64x,
471 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 463 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
472 .host_flags = IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE, 464 .port_ops = &cmd64x_port_ops,
465 .dma_ops = &cmd648_dma_ops,
466 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
473 .pio_mask = ATA_PIO5, 467 .pio_mask = ATA_PIO5,
474 .mwdma_mask = ATA_MWDMA2, 468 .mwdma_mask = ATA_MWDMA2,
475 .udma_mask = ATA_UDMA5, 469 .udma_mask = ATA_UDMA5,
@@ -483,12 +477,35 @@ static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_devic
483 477
484 d = cmd64x_chipsets[idx]; 478 d = cmd64x_chipsets[idx];
485 479
486 /* 480 if (idx == 1) {
487 * The original PCI0646 didn't have the primary channel enable bit, 481 /*
488 * it appeared starting with PCI0646U (i.e. revision ID 3). 482 * UltraDMA only supported on PCI646U and PCI646U2, which
489 */ 483 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
490 if (idx == 1 && dev->revision < 3) 484 * Actually, although the CMD tech support people won't
491 d.enablebits[0].reg = 0; 485 * tell me the details, the 0x03 revision cannot support
486 * UDMA correctly without hardware modifications, and even
487 * then it only works with Quantum disks due to some
488 * hold time assumptions in the 646U part which are fixed
489 * in the 646U2.
490 *
491 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
492 */
493 if (dev->revision < 5) {
494 d.udma_mask = 0x00;
495 /*
496 * The original PCI0646 didn't have the primary
497 * channel enable bit, it appeared starting with
498 * PCI0646U (i.e. revision ID 3).
499 */
500 if (dev->revision < 3) {
501 d.enablebits[0].reg = 0;
502 if (dev->revision == 1)
503 d.dma_ops = &cmd646_rev1_dma_ops;
504 else
505 d.dma_ops = &cmd64x_dma_ops;
506 }
507 }
508 }
492 509
493 return ide_setup_pci_device(dev, &d); 510 return ide_setup_pci_device(dev, &d);
494} 511}
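As in the other files in this series, cmd64x.c drops its init_hwif() hook in favour of const ide_port_ops and ide_dma_ops tables referenced from ide_port_info, with the PCI0646 revision quirks folded into cmd64x_init_one(). A sketch of that hook-table pattern with stand-in types (the real kernel structs carry more members than shown):

	#include <stdio.h>

	/* Stand-in types -- not the kernel's struct ide_port_ops/ide_port_info. */
	struct my_port_ops {
		void (*set_pio_mode)(int unit, unsigned char pio);
		void (*set_dma_mode)(int unit, unsigned char speed);
	};

	struct my_port_info {
		const char *name;
		const struct my_port_ops *port_ops;
	};

	static void foo_set_pio_mode(int unit, unsigned char pio)   { (void)unit; (void)pio; }
	static void foo_set_dma_mode(int unit, unsigned char speed) { (void)unit; (void)speed; }

	/* One shared, read-only table replaces per-hwif pointer assignments. */
	static const struct my_port_ops foo_port_ops = {
		.set_pio_mode = foo_set_pio_mode,
		.set_dma_mode = foo_set_dma_mode,
	};

	static const struct my_port_info foo_chipset = {
		.name     = "FOO",
		.port_ops = &foo_port_ops,
	};

	int main(void)
	{
		foo_chipset.port_ops->set_pio_mode(0, 4);
		printf("%s registered\n", foo_chipset.name);
		return 0;
	}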
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 1c163e4ef03f..17669a434438 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -103,27 +103,32 @@ static void cs5520_dma_host_set(ide_drive_t *drive, int on)
103 ide_dma_host_set(drive, on); 103 ide_dma_host_set(drive, on);
104} 104}
105 105
106static void __devinit init_hwif_cs5520(ide_hwif_t *hwif) 106static const struct ide_port_ops cs5520_port_ops = {
107{ 107 .set_pio_mode = cs5520_set_pio_mode,
108 hwif->set_pio_mode = &cs5520_set_pio_mode; 108 .set_dma_mode = cs5520_set_dma_mode,
109 hwif->set_dma_mode = &cs5520_set_dma_mode; 109};
110
111 if (hwif->dma_base == 0)
112 return;
113 110
114 hwif->dma_host_set = &cs5520_dma_host_set; 111static const struct ide_dma_ops cs5520_dma_ops = {
115} 112 .dma_host_set = cs5520_dma_host_set,
113 .dma_setup = ide_dma_setup,
114 .dma_exec_cmd = ide_dma_exec_cmd,
115 .dma_start = ide_dma_start,
116 .dma_end = __ide_dma_end,
117 .dma_test_irq = ide_dma_test_irq,
118 .dma_lost_irq = ide_dma_lost_irq,
119 .dma_timeout = ide_dma_timeout,
120};
116 121
117#define DECLARE_CS_DEV(name_str) \ 122#define DECLARE_CS_DEV(name_str) \
118 { \ 123 { \
119 .name = name_str, \ 124 .name = name_str, \
120 .init_hwif = init_hwif_cs5520, \ 125 .port_ops = &cs5520_port_ops, \
126 .dma_ops = &cs5520_dma_ops, \
121 .host_flags = IDE_HFLAG_ISA_PORTS | \ 127 .host_flags = IDE_HFLAG_ISA_PORTS | \
122 IDE_HFLAG_CS5520 | \ 128 IDE_HFLAG_CS5520 | \
123 IDE_HFLAG_VDMA | \ 129 IDE_HFLAG_VDMA | \
124 IDE_HFLAG_NO_ATAPI_DMA | \ 130 IDE_HFLAG_NO_ATAPI_DMA | \
125 IDE_HFLAG_ABUSE_SET_DMA_MODE |\ 131 IDE_HFLAG_ABUSE_SET_DMA_MODE, \
126 IDE_HFLAG_BOOTABLE, \
127 .pio_mask = ATA_PIO4, \ 132 .pio_mask = ATA_PIO4, \
128 } 133 }
129 134
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 941a1344820b..f5534c1ff349 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -228,29 +228,27 @@ static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
228 unsigned long basereg; 228 unsigned long basereg;
229 u32 d0_timings; 229 u32 d0_timings;
230 230
231 hwif->set_pio_mode = &cs5530_set_pio_mode;
232 hwif->set_dma_mode = &cs5530_set_dma_mode;
233
234 basereg = CS5530_BASEREG(hwif); 231 basereg = CS5530_BASEREG(hwif);
235 d0_timings = inl(basereg + 0); 232 d0_timings = inl(basereg + 0);
236 if (CS5530_BAD_PIO(d0_timings)) 233 if (CS5530_BAD_PIO(d0_timings))
237 outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0); 234 outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0);
238 if (CS5530_BAD_PIO(inl(basereg + 8))) 235 if (CS5530_BAD_PIO(inl(basereg + 8)))
239 outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8); 236 outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8);
240
241 if (hwif->dma_base == 0)
242 return;
243
244 hwif->udma_filter = cs5530_udma_filter;
245} 237}
246 238
239static const struct ide_port_ops cs5530_port_ops = {
240 .set_pio_mode = cs5530_set_pio_mode,
241 .set_dma_mode = cs5530_set_dma_mode,
242 .udma_filter = cs5530_udma_filter,
243};
244
247static const struct ide_port_info cs5530_chipset __devinitdata = { 245static const struct ide_port_info cs5530_chipset __devinitdata = {
248 .name = "CS5530", 246 .name = "CS5530",
249 .init_chipset = init_chipset_cs5530, 247 .init_chipset = init_chipset_cs5530,
250 .init_hwif = init_hwif_cs5530, 248 .init_hwif = init_hwif_cs5530,
249 .port_ops = &cs5530_port_ops,
251 .host_flags = IDE_HFLAG_SERIALIZE | 250 .host_flags = IDE_HFLAG_SERIALIZE |
252 IDE_HFLAG_POST_SET_MODE | 251 IDE_HFLAG_POST_SET_MODE,
253 IDE_HFLAG_BOOTABLE,
254 .pio_mask = ATA_PIO4, 252 .pio_mask = ATA_PIO4,
255 .mwdma_mask = ATA_MWDMA2, 253 .mwdma_mask = ATA_MWDMA2,
256 .udma_mask = ATA_UDMA2, 254 .udma_mask = ATA_UDMA2,
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index d7b5ea992e94..99fe91a191b8 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -166,27 +166,17 @@ static u8 __devinit cs5535_cable_detect(ide_hwif_t *hwif)
166 return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 166 return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
167} 167}
168 168
169/**** 169static const struct ide_port_ops cs5535_port_ops = {
170 * init_hwif_cs5535 - Initialize one ide cannel 170 .set_pio_mode = cs5535_set_pio_mode,
171 * @hwif: Channel descriptor 171 .set_dma_mode = cs5535_set_dma_mode,
172 * 172 .cable_detect = cs5535_cable_detect,
173 * This gets invoked by the IDE driver once for each channel. It 173};
174 * performs channel-specific pre-initialization before drive probing.
175 *
176 */
177static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
178{
179 hwif->set_pio_mode = &cs5535_set_pio_mode;
180 hwif->set_dma_mode = &cs5535_set_dma_mode;
181
182 hwif->cable_detect = cs5535_cable_detect;
183}
184 174
185static const struct ide_port_info cs5535_chipset __devinitdata = { 175static const struct ide_port_info cs5535_chipset __devinitdata = {
186 .name = "CS5535", 176 .name = "CS5535",
187 .init_hwif = init_hwif_cs5535, 177 .port_ops = &cs5535_port_ops,
188 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE | 178 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
189 IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_BOOTABLE, 179 IDE_HFLAG_ABUSE_SET_DMA_MODE,
190 .pio_mask = ATA_PIO4, 180 .pio_mask = ATA_PIO4,
191 .mwdma_mask = ATA_MWDMA2, 181 .mwdma_mask = ATA_MWDMA2,
192 .udma_mask = ATA_UDMA4, 182 .udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 724cbacf4e5b..77cc22c2ad45 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards. 7 * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
8 * Writing the driver was quite simple, since most of the job is 8 * Writing the driver was quite simple, since most of the job is
9 * done by the generic pci-ide support. 9 * done by the generic pci-ide support.
10 * The hard part was finding the CY82C693's datasheet on Cypress's 10 * The hard part was finding the CY82C693's datasheet on Cypress's
11 * web page :-(. But Altavista solved this problem :-). 11 * web page :-(. But Altavista solved this problem :-).
12 * 12 *
@@ -15,12 +15,10 @@
15 * - I recently got a 16.8G IBM DTTA, so I was able to test it with 15 * - I recently got a 16.8G IBM DTTA, so I was able to test it with
16 * a large and fast disk - the results look great, so I'd say the 16 * a large and fast disk - the results look great, so I'd say the
17 * driver is working fine :-) 17 * driver is working fine :-)
18 * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA 18 * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
19 * - this is my first linux driver, so there's probably a lot of room 19 * - this is my first linux driver, so there's probably a lot of room
20 * for optimizations and bug fixing, so feel free to do it. 20 * for optimizations and bug fixing, so feel free to do it.
21 * - use idebus=xx parameter to set PCI bus speed - needed to calc 21 * - if using PIO mode it's a good idea to set the PIO mode and
22 * timings for PIO modes (default will be 40)
23 * - if using PIO mode it's a good idea to set the PIO mode and
24 * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda 22 * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
25 * - I had some problems with my IBM DHEA with PIO modes < 2 23 * - I had some problems with my IBM DHEA with PIO modes < 2
26 * (lost interrupts) ????? 24 * (lost interrupts) ?????
@@ -110,11 +108,11 @@ typedef struct pio_clocks_s {
110 * calc clocks using bus_speed 108 * calc clocks using bus_speed
111 * returns (rounded up) time in bus clocks for time in ns 109 * returns (rounded up) time in bus clocks for time in ns
112 */ 110 */
113static int calc_clk (int time, int bus_speed) 111static int calc_clk(int time, int bus_speed)
114{ 112{
115 int clocks; 113 int clocks;
116 114
117 clocks = (time*bus_speed+999)/1000 -1; 115 clocks = (time*bus_speed+999)/1000 - 1;
118 116
119 if (clocks < 0) 117 if (clocks < 0)
120 clocks = 0; 118 clocks = 0;
@@ -132,11 +130,11 @@ static int calc_clk (int time, int bus_speed)
132 * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used 130 * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used
133 * for mode 3 and 4 drives 8 and 16-bit timings are the same 131 * for mode 3 and 4 drives 8 and 16-bit timings are the same
134 * 132 *
135 */ 133 */
136static void compute_clocks (u8 pio, pio_clocks_t *p_pclk) 134static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
137{ 135{
138 int clk1, clk2; 136 int clk1, clk2;
139 int bus_speed = system_bus_clock(); /* get speed of PCI bus */ 137 int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
140 138
141 /* we don't check against CY82C693's min and max speed, 139 /* we don't check against CY82C693's min and max speed,
142 * so you can play with the idebus=xx parameter 140 * so you can play with the idebus=xx parameter
@@ -158,7 +156,7 @@ static void compute_clocks (u8 pio, pio_clocks_t *p_pclk)
158 clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */ 156 clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */
159 157
160 /* note: we use the same values for 16bit IOR and IOW 158 /* note: we use the same values for 16bit IOR and IOW
161 * those are all the same, since I don't have other 159 * those are all the same, since I don't have other
162 * timings than those from ide-lib.c 160 * timings than those from ide-lib.c
163 */ 161 */
164 162
@@ -186,7 +184,7 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
186 outb(index, CY82_INDEX_PORT); 184 outb(index, CY82_INDEX_PORT);
187 data = inb(CY82_DATA_PORT); 185 data = inb(CY82_DATA_PORT);
188 186
189 printk (KERN_INFO "%s (ch=%d, dev=%d): DMA mode is %d (single=%d)\n", 187 printk(KERN_INFO "%s (ch=%d, dev=%d): DMA mode is %d (single=%d)\n",
190 drive->name, HWIF(drive)->channel, drive->select.b.unit, 188 drive->name, HWIF(drive)->channel, drive->select.b.unit,
191 (data&0x3), ((data>>2)&1)); 189 (data&0x3), ((data>>2)&1));
192#endif /* CY82C693_DEBUG_LOGS */ 190#endif /* CY82C693_DEBUG_LOGS */
@@ -202,7 +200,7 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
202 mode & 3, single); 200 mode & 3, single);
203#endif /* CY82C693_DEBUG_INFO */ 201#endif /* CY82C693_DEBUG_INFO */
204 202
205 /* 203 /*
206 * note: below we set the value for Bus Master IDE TimeOut Register 204 * note: below we set the value for Bus Master IDE TimeOut Register
207 * I'm not absolutly sure what this does, but it solved my problem 205 * I'm not absolutly sure what this does, but it solved my problem
208 * with IDE DMA and sound, so I now can play sound and work with 206 * with IDE DMA and sound, so I now can play sound and work with
@@ -216,8 +214,8 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
216 outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT); 214 outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
217 outb(data, CY82_DATA_PORT); 215 outb(data, CY82_DATA_PORT);
218 216
219#if CY82C693_DEBUG_INFO 217#if CY82C693_DEBUG_INFO
220 printk (KERN_INFO "%s: Set IDE Bus Master TimeOut Register to 0x%X\n", 218 printk(KERN_INFO "%s: Set IDE Bus Master TimeOut Register to 0x%X\n",
221 drive->name, data); 219 drive->name, data);
222#endif /* CY82C693_DEBUG_INFO */ 220#endif /* CY82C693_DEBUG_INFO */
223} 221}
@@ -242,14 +240,14 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
242 240
243#if CY82C693_DEBUG_LOGS 241#if CY82C693_DEBUG_LOGS
244 /* for debug let's show the register values */ 242 /* for debug let's show the register values */
245 243
246 if (drive->select.b.unit == 0) { 244 if (drive->select.b.unit == 0) {
247 /* 245 /*
248 * get master drive registers 246 * get master drive registers
249 * address setup control register 247 * address setup control register
250 * is 32 bit !!! 248 * is 32 bit !!!
251 */ 249 */
252 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 250 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
253 addrCtrl &= 0x0F; 251 addrCtrl &= 0x0F;
254 252
255 /* now let's get the remaining registers */ 253 /* now let's get the remaining registers */
@@ -261,7 +259,7 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
261 * set slave drive registers 259 * set slave drive registers
262 * address setup control register 260 * address setup control register
263 * is 32 bit !!! 261 * is 32 bit !!!
264 */ 262 */
265 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 263 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
266 264
267 addrCtrl &= 0xF0; 265 addrCtrl &= 0xF0;
@@ -288,9 +286,9 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
288 * set master drive 286 * set master drive
289 * address setup control register 287 * address setup control register
290 * is 32 bit !!! 288 * is 32 bit !!!
291 */ 289 */
292 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 290 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
293 291
294 addrCtrl &= (~0xF); 292 addrCtrl &= (~0xF);
295 addrCtrl |= (unsigned int)pclk.address_time; 293 addrCtrl |= (unsigned int)pclk.address_time;
296 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); 294 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
@@ -299,14 +297,14 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
299 pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r); 297 pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r);
300 pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w); 298 pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w);
301 pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8); 299 pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8);
302 300
303 addrCtrl &= 0xF; 301 addrCtrl &= 0xF;
304 } else { 302 } else {
305 /* 303 /*
306 * set slave drive 304 * set slave drive
307 * address setup control register 305 * address setup control register
308 * is 32 bit !!! 306 * is 32 bit !!!
309 */ 307 */
310 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 308 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
311 309
312 addrCtrl &= (~0xF0); 310 addrCtrl &= (~0xF0);
@@ -320,7 +318,7 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
320 318
321 addrCtrl >>= 4; 319 addrCtrl >>= 4;
322 addrCtrl &= 0xF; 320 addrCtrl &= 0xF;
323 } 321 }
324 322
325#if CY82C693_DEBUG_INFO 323#if CY82C693_DEBUG_INFO
326 printk(KERN_INFO "%s (ch=%d, dev=%d): set PIO timing to " 324 printk(KERN_INFO "%s (ch=%d, dev=%d): set PIO timing to "
@@ -340,41 +338,41 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
340 338
341#ifdef CY82C693_SETDMA_CLOCK 339#ifdef CY82C693_SETDMA_CLOCK
342 u8 data = 0; 340 u8 data = 0;
343#endif /* CY82C693_SETDMA_CLOCK */ 341#endif /* CY82C693_SETDMA_CLOCK */
344 342
345 /* write info about this verion of the driver */ 343 /* write info about this verion of the driver */
346 printk(KERN_INFO CY82_VERSION "\n"); 344 printk(KERN_INFO CY82_VERSION "\n");
347 345
348#ifdef CY82C693_SETDMA_CLOCK 346#ifdef CY82C693_SETDMA_CLOCK
349 /* okay let's set the DMA clock speed */ 347 /* okay let's set the DMA clock speed */
350 348
351 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT); 349 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
352 data = inb(CY82_DATA_PORT); 350 data = inb(CY82_DATA_PORT);
353 351
354#if CY82C693_DEBUG_INFO 352#if CY82C693_DEBUG_INFO
355 printk(KERN_INFO "%s: Peripheral Configuration Register: 0x%X\n", 353 printk(KERN_INFO "%s: Peripheral Configuration Register: 0x%X\n",
356 name, data); 354 name, data);
357#endif /* CY82C693_DEBUG_INFO */ 355#endif /* CY82C693_DEBUG_INFO */
358 356
359 /* 357 /*
360 * for some reason sometimes the DMA controller 358 * for some reason sometimes the DMA controller
361 * speed is set to ATCLK/2 ???? - we fix this here 359 * speed is set to ATCLK/2 ???? - we fix this here
362 * 360 *
363 * note: i don't know what causes this strange behaviour, 361 * note: i don't know what causes this strange behaviour,
364 * but even changing the dma speed doesn't solve it :-( 362 * but even changing the dma speed doesn't solve it :-(
365 * the ide performance is still only half the normal speed 363 * the ide performance is still only half the normal speed
366 * 364 *
367 * if anybody knows what goes wrong with my machine, please 365 * if anybody knows what goes wrong with my machine, please
368 * let me know - ASK 366 * let me know - ASK
369 */ 367 */
370 368
371 data |= 0x03; 369 data |= 0x03;
372 370
373 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT); 371 outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
374 outb(data, CY82_DATA_PORT); 372 outb(data, CY82_DATA_PORT);
375 373
376#if CY82C693_DEBUG_INFO 374#if CY82C693_DEBUG_INFO
377 printk (KERN_INFO "%s: New Peripheral Configuration Register: 0x%X\n", 375 printk(KERN_INFO "%s: New Peripheral Configuration Register: 0x%X\n",
378 name, data); 376 name, data);
379#endif /* CY82C693_DEBUG_INFO */ 377#endif /* CY82C693_DEBUG_INFO */
380 378
@@ -382,15 +380,6 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
382 return 0; 380 return 0;
383} 381}
384 382
385/*
386 * the init function - called for each ide channel once
387 */
388static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
389{
390 hwif->set_pio_mode = &cy82c693_set_pio_mode;
391 hwif->set_dma_mode = &cy82c693_set_dma_mode;
392}
393
394static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) 383static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
395{ 384{
396 static ide_hwif_t *primary; 385 static ide_hwif_t *primary;
@@ -404,14 +393,18 @@ static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
404 } 393 }
405} 394}
406 395
396static const struct ide_port_ops cy82c693_port_ops = {
397 .set_pio_mode = cy82c693_set_pio_mode,
398 .set_dma_mode = cy82c693_set_dma_mode,
399};
400
407static const struct ide_port_info cy82c693_chipset __devinitdata = { 401static const struct ide_port_info cy82c693_chipset __devinitdata = {
408 .name = "CY82C693", 402 .name = "CY82C693",
409 .init_chipset = init_chipset_cy82c693, 403 .init_chipset = init_chipset_cy82c693,
410 .init_iops = init_iops_cy82c693, 404 .init_iops = init_iops_cy82c693,
411 .init_hwif = init_hwif_cy82c693, 405 .port_ops = &cy82c693_port_ops,
412 .chipset = ide_cy82c693, 406 .chipset = ide_cy82c693,
413 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_CY82C693 | 407 .host_flags = IDE_HFLAG_SINGLE,
414 IDE_HFLAG_BOOTABLE,
415 .pio_mask = ATA_PIO4, 408 .pio_mask = ATA_PIO4,
416 .swdma_mask = ATA_SWDMA2, 409 .swdma_mask = ATA_SWDMA2,
417 .mwdma_mask = ATA_MWDMA2, 410 .mwdma_mask = ATA_MWDMA2,
@@ -424,7 +417,7 @@ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_dev
424 417
425 /* CY82C693 is more than only a IDE controller. 418 /* CY82C693 is more than only a IDE controller.
426 Function 1 is primary IDE channel, function 2 - secondary. */ 419 Function 1 is primary IDE channel, function 2 - secondary. */
427 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && 420 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
428 PCI_FUNC(dev->devfn) == 1) { 421 PCI_FUNC(dev->devfn) == 1) {
429 dev2 = pci_get_slot(dev->bus, dev->devfn + 1); 422 dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
430 ret = ide_setup_pci_devices(dev, dev2, &cy82c693_chipset); 423 ret = ide_setup_pci_devices(dev, dev2, &cy82c693_chipset);
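calc_clk() above rounds a nanosecond timing up to bus clocks and subtracts one, and with this patch the bus speed is taken from ide_pci_clk when it is set. A standalone copy of that arithmetic, using an assumed 33 MHz clock as the worked example:

	#include <stdio.h>

	/* Same rounding as the driver's calc_clk(): time in ns, bus_speed in MHz. */
	static int calc_clk(int time, int bus_speed)
	{
		int clocks = (time * bus_speed + 999) / 1000 - 1;

		if (clocks < 0)
			clocks = 0;

		return clocks;
	}

	int main(void)
	{
		/* (70 * 33 + 999) / 1000 - 1 = 2 */
		printf("70 ns @ 33 MHz -> calc_clk() = %d\n", calc_clk(70, 33));
		return 0;
	}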
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 961698d655eb..b9e457996d0e 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -43,6 +43,10 @@ static const u8 setup[] = {
43 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13, 43 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
44}; 44};
45 45
46static const struct ide_port_ops delkin_cb_port_ops = {
47 .quirkproc = ide_undecoded_slave,
48};
49
46static int __devinit 50static int __devinit
47delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) 51delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
48{ 52{
@@ -71,26 +75,21 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
71 if (setup[i]) 75 if (setup[i])
72 outb(setup[i], base + i); 76 outb(setup[i], base + i);
73 } 77 }
74 pci_release_regions(dev); /* IDE layer handles regions itself */
75 78
76 memset(&hw, 0, sizeof(hw)); 79 memset(&hw, 0, sizeof(hw));
77 ide_std_init_ports(&hw, base + 0x10, base + 0x1e); 80 ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
78 hw.irq = dev->irq; 81 hw.irq = dev->irq;
79 hw.chipset = ide_pci; /* this enables IRQ sharing */ 82 hw.chipset = ide_pci; /* this enables IRQ sharing */
80 83
81 hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); 84 hwif = ide_find_port();
82 if (hwif == NULL) 85 if (hwif == NULL)
83 goto out_disable; 86 goto out_disable;
84 87
85 i = hwif->index; 88 i = hwif->index;
86 89
87 if (hwif->present) 90 ide_init_port_data(hwif, i);
88 ide_unregister(i);
89 else
90 ide_init_port_data(hwif, i);
91
92 ide_init_port_hw(hwif, &hw); 91 ide_init_port_hw(hwif, &hw);
93 hwif->quirkproc = &ide_undecoded_slave; 92 hwif->port_ops = &delkin_cb_port_ops;
94 93
95 idx[0] = i; 94 idx[0] = i;
96 95
@@ -110,6 +109,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
110 109
111out_disable: 110out_disable:
112 printk(KERN_ERR "delkin_cb: no IDE devices found\n"); 111 printk(KERN_ERR "delkin_cb: no IDE devices found\n");
112 pci_release_regions(dev);
113 pci_disable_device(dev); 113 pci_disable_device(dev);
114 return -ENODEV; 114 return -ENODEV;
115} 115}
@@ -119,9 +119,9 @@ delkin_cb_remove (struct pci_dev *dev)
119{ 119{
120 ide_hwif_t *hwif = pci_get_drvdata(dev); 120 ide_hwif_t *hwif = pci_get_drvdata(dev);
121 121
122 if (hwif) 122 ide_unregister(hwif);
123 ide_unregister(hwif->index);
124 123
124 pci_release_regions(dev);
125 pci_disable_device(dev); 125 pci_disable_device(dev);
126} 126}
127 127
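The delkin_cb changes keep the PCI I/O regions claimed for the life of the interface and release them only on the error path or in remove(), mirroring the paired request_region()/release_region() unwind added to cmd640.c above. A minimal sketch of that acquire-then-unwind shape, with claim()/release() as stand-ins for the kernel resource calls:

	#include <stdio.h>

	static int claim(const char *what)
	{
		printf("claim %s\n", what);
		return 1;			/* pretend the resource was free */
	}

	static void release(const char *what)
	{
		printf("release %s\n", what);
	}

	static int init_ports(void)
	{
		if (!claim("primary I/O range"))
			return -1;

		if (!claim("secondary I/O range")) {
			release("primary I/O range");	/* undo the earlier claim */
			return -1;
		}

		return 0;	/* both ranges stay held until the remove path */
	}

	int main(void)
	{
		return init_ports() ? 1 : 0;
	}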
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 7fd83a9d4dee..041720e22762 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -38,8 +38,7 @@ MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE st
38 { \ 38 { \
39 .name = name_str, \ 39 .name = name_str, \
40 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \ 40 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
41 extra_flags | \ 41 extra_flags, \
42 IDE_HFLAG_BOOTABLE, \
43 .swdma_mask = ATA_SWDMA2, \ 42 .swdma_mask = ATA_SWDMA2, \
44 .mwdma_mask = ATA_MWDMA2, \ 43 .mwdma_mask = ATA_MWDMA2, \
45 .udma_mask = ATA_UDMA6, \ 44 .udma_mask = ATA_UDMA6, \
@@ -50,9 +49,8 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
50 49
51 { /* 1 */ 50 { /* 1 */
52 .name = "NS87410", 51 .name = "NS87410",
53 .enablebits = {{0x43,0x08,0x08}, {0x47,0x08,0x08}}, 52 .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
54 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 53 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
55 IDE_HFLAG_BOOTABLE,
56 .swdma_mask = ATA_SWDMA2, 54 .swdma_mask = ATA_SWDMA2,
57 .mwdma_mask = ATA_MWDMA2, 55 .mwdma_mask = ATA_MWDMA2,
58 .udma_mask = ATA_UDMA6, 56 .udma_mask = ATA_UDMA6,
@@ -99,7 +97,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
99 * Called when the PCI registration layer (or the IDE initialization) 97 * Called when the PCI registration layer (or the IDE initialization)
100 * finds a device matching our IDE device tables. 98 * finds a device matching our IDE device tables.
101 */ 99 */
102 100
103static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id) 101static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
104{ 102{
105 const struct ide_port_info *d = &generic_chipsets[id->driver_data]; 103 const struct ide_port_info *d = &generic_chipsets[id->driver_data];
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 9f01da46b016..84c36c117194 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -115,11 +115,10 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
115 return dev->irq; 115 return dev->irq;
116} 116}
117 117
118static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif) 118static const struct ide_port_ops hpt34x_port_ops = {
119{ 119 .set_pio_mode = hpt34x_set_pio_mode,
120 hwif->set_pio_mode = &hpt34x_set_pio_mode; 120 .set_dma_mode = hpt34x_set_mode,
121 hwif->set_dma_mode = &hpt34x_set_mode; 121};
122}
123 122
124#define IDE_HFLAGS_HPT34X \ 123#define IDE_HFLAGS_HPT34X \
125 (IDE_HFLAG_NO_ATAPI_DMA | \ 124 (IDE_HFLAG_NO_ATAPI_DMA | \
@@ -131,16 +130,14 @@ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
131 { /* 0 */ 130 { /* 0 */
132 .name = "HPT343", 131 .name = "HPT343",
133 .init_chipset = init_chipset_hpt34x, 132 .init_chipset = init_chipset_hpt34x,
134 .init_hwif = init_hwif_hpt34x, 133 .port_ops = &hpt34x_port_ops,
135 .extra = 16, 134 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_NON_BOOTABLE,
136 .host_flags = IDE_HFLAGS_HPT34X,
137 .pio_mask = ATA_PIO5, 135 .pio_mask = ATA_PIO5,
138 }, 136 },
139 { /* 1 */ 137 { /* 1 */
140 .name = "HPT345", 138 .name = "HPT345",
141 .init_chipset = init_chipset_hpt34x, 139 .init_chipset = init_chipset_hpt34x,
142 .init_hwif = init_hwif_hpt34x, 140 .port_ops = &hpt34x_port_ops,
143 .extra = 16,
144 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD, 141 .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
145 .pio_mask = ATA_PIO5, 142 .pio_mask = ATA_PIO5,
146#ifdef CONFIG_HPT34X_AUTODMA 143#ifdef CONFIG_HPT34X_AUTODMA
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 82d0e318a1fe..c929dadaaaff 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -760,7 +760,7 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
760 } 760 }
761 } else 761 } else
762 outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), 762 outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
763 hwif->io_ports[IDE_CONTROL_OFFSET]); 763 hwif->io_ports.ctl_addr);
764} 764}
765 765
766/* 766/*
@@ -776,7 +776,7 @@ static void hpt366_dma_lost_irq(ide_drive_t *drive)
776 pci_read_config_byte(dev, 0x52, &mcr3); 776 pci_read_config_byte(dev, 0x52, &mcr3);
777 pci_read_config_byte(dev, 0x5a, &scr1); 777 pci_read_config_byte(dev, 0x5a, &scr1);
778 printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n", 778 printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
779 drive->name, __FUNCTION__, mcr1, mcr3, scr1); 779 drive->name, __func__, mcr1, mcr3, scr1);
780 if (scr1 & 0x10) 780 if (scr1 & 0x10)
781 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); 781 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
782 ide_dma_lost_irq(drive); 782 ide_dma_lost_irq(drive);
@@ -808,7 +808,7 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
808 hpt370_clear_engine(drive); 808 hpt370_clear_engine(drive);
809} 809}
810 810
811static void hpt370_ide_dma_start(ide_drive_t *drive) 811static void hpt370_dma_start(ide_drive_t *drive)
812{ 812{
813#ifdef HPT_RESET_STATE_ENGINE 813#ifdef HPT_RESET_STATE_ENGINE
814 hpt370_clear_engine(drive); 814 hpt370_clear_engine(drive);
@@ -816,7 +816,7 @@ static void hpt370_ide_dma_start(ide_drive_t *drive)
816 ide_dma_start(drive); 816 ide_dma_start(drive);
817} 817}
818 818
819static int hpt370_ide_dma_end(ide_drive_t *drive) 819static int hpt370_dma_end(ide_drive_t *drive)
820{ 820{
821 ide_hwif_t *hwif = HWIF(drive); 821 ide_hwif_t *hwif = HWIF(drive);
822 u8 dma_stat = inb(hwif->dma_status); 822 u8 dma_stat = inb(hwif->dma_status);
@@ -838,7 +838,7 @@ static void hpt370_dma_timeout(ide_drive_t *drive)
838} 838}
839 839
840/* returns 1 if DMA IRQ issued, 0 otherwise */ 840/* returns 1 if DMA IRQ issued, 0 otherwise */
841static int hpt374_ide_dma_test_irq(ide_drive_t *drive) 841static int hpt374_dma_test_irq(ide_drive_t *drive)
842{ 842{
843 ide_hwif_t *hwif = HWIF(drive); 843 ide_hwif_t *hwif = HWIF(drive);
844 struct pci_dev *dev = to_pci_dev(hwif->dev); 844 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -858,11 +858,11 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
858 858
859 if (!drive->waiting_for_dma) 859 if (!drive->waiting_for_dma)
860 printk(KERN_WARNING "%s: (%s) called while not waiting\n", 860 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
861 drive->name, __FUNCTION__); 861 drive->name, __func__);
862 return 0; 862 return 0;
863} 863}
864 864
865static int hpt374_ide_dma_end(ide_drive_t *drive) 865static int hpt374_dma_end(ide_drive_t *drive)
866{ 866{
867 ide_hwif_t *hwif = HWIF(drive); 867 ide_hwif_t *hwif = HWIF(drive);
868 struct pci_dev *dev = to_pci_dev(hwif->dev); 868 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -1271,17 +1271,6 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1271 /* Cache the channel's MISC. control registers' offset */ 1271 /* Cache the channel's MISC. control registers' offset */
1272 hwif->select_data = hwif->channel ? 0x54 : 0x50; 1272 hwif->select_data = hwif->channel ? 0x54 : 0x50;
1273 1273
1274 hwif->set_pio_mode = &hpt3xx_set_pio_mode;
1275 hwif->set_dma_mode = &hpt3xx_set_mode;
1276
1277 hwif->quirkproc = &hpt3xx_quirkproc;
1278 hwif->maskproc = &hpt3xx_maskproc;
1279
1280 hwif->udma_filter = &hpt3xx_udma_filter;
1281 hwif->mdma_filter = &hpt3xx_mdma_filter;
1282
1283 hwif->cable_detect = hpt3xx_cable_detect;
1284
1285 /* 1274 /*
1286 * HPT3xxN chips have some complications: 1275 * HPT3xxN chips have some complications:
1287 * 1276 *
@@ -1323,29 +1312,19 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1323 1312
1324 if (new_mcr != old_mcr) 1313 if (new_mcr != old_mcr)
1325 pci_write_config_byte(dev, hwif->select_data + 1, new_mcr); 1314 pci_write_config_byte(dev, hwif->select_data + 1, new_mcr);
1326
1327 if (hwif->dma_base == 0)
1328 return;
1329
1330 if (chip_type >= HPT374) {
1331 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
1332 hwif->ide_dma_end = &hpt374_ide_dma_end;
1333 } else if (chip_type >= HPT370) {
1334 hwif->dma_start = &hpt370_ide_dma_start;
1335 hwif->ide_dma_end = &hpt370_ide_dma_end;
1336 hwif->dma_timeout = &hpt370_dma_timeout;
1337 } else
1338 hwif->dma_lost_irq = &hpt366_dma_lost_irq;
1339} 1315}
1340 1316
1341static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase) 1317static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1318 const struct ide_port_info *d)
1342{ 1319{
1343 struct pci_dev *dev = to_pci_dev(hwif->dev); 1320 struct pci_dev *dev = to_pci_dev(hwif->dev);
1344 u8 masterdma = 0, slavedma = 0; 1321 unsigned long flags, base = ide_pci_dma_base(hwif, d);
1345 u8 dma_new = 0, dma_old = 0; 1322 u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
1346 unsigned long flags;
1347 1323
1348 dma_old = inb(dmabase + 2); 1324 if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
1325 return -1;
1326
1327 dma_old = inb(base + 2);
1349 1328
1350 local_irq_save(flags); 1329 local_irq_save(flags);
1351 1330
@@ -1356,11 +1335,21 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1356 if (masterdma & 0x30) dma_new |= 0x20; 1335 if (masterdma & 0x30) dma_new |= 0x20;
1357 if ( slavedma & 0x30) dma_new |= 0x40; 1336 if ( slavedma & 0x30) dma_new |= 0x40;
1358 if (dma_new != dma_old) 1337 if (dma_new != dma_old)
1359 outb(dma_new, dmabase + 2); 1338 outb(dma_new, base + 2);
1360 1339
1361 local_irq_restore(flags); 1340 local_irq_restore(flags);
1362 1341
1363 ide_setup_dma(hwif, dmabase); 1342 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
1343 hwif->name, base, base + 7);
1344
1345 hwif->extra_base = base + (hwif->channel ? 8 : 16);
1346
1347 if (ide_allocate_dma_engine(hwif))
1348 return -1;
1349
1350 ide_setup_dma(hwif, base);
1351
1352 return 0;
1364} 1353}
1365 1354
1366static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2) 1355static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
@@ -1416,6 +1405,49 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
1416 IDE_HFLAG_ABUSE_SET_DMA_MODE | \ 1405 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
1417 IDE_HFLAG_OFF_BOARD) 1406 IDE_HFLAG_OFF_BOARD)
1418 1407
1408static const struct ide_port_ops hpt3xx_port_ops = {
1409 .set_pio_mode = hpt3xx_set_pio_mode,
1410 .set_dma_mode = hpt3xx_set_mode,
1411 .quirkproc = hpt3xx_quirkproc,
1412 .maskproc = hpt3xx_maskproc,
1413 .mdma_filter = hpt3xx_mdma_filter,
1414 .udma_filter = hpt3xx_udma_filter,
1415 .cable_detect = hpt3xx_cable_detect,
1416};
1417
1418static const struct ide_dma_ops hpt37x_dma_ops = {
1419 .dma_host_set = ide_dma_host_set,
1420 .dma_setup = ide_dma_setup,
1421 .dma_exec_cmd = ide_dma_exec_cmd,
1422 .dma_start = ide_dma_start,
1423 .dma_end = hpt374_dma_end,
1424 .dma_test_irq = hpt374_dma_test_irq,
1425 .dma_lost_irq = ide_dma_lost_irq,
1426 .dma_timeout = ide_dma_timeout,
1427};
1428
1429static const struct ide_dma_ops hpt370_dma_ops = {
1430 .dma_host_set = ide_dma_host_set,
1431 .dma_setup = ide_dma_setup,
1432 .dma_exec_cmd = ide_dma_exec_cmd,
1433 .dma_start = hpt370_dma_start,
1434 .dma_end = hpt370_dma_end,
1435 .dma_test_irq = ide_dma_test_irq,
1436 .dma_lost_irq = ide_dma_lost_irq,
1437 .dma_timeout = hpt370_dma_timeout,
1438};
1439
1440static const struct ide_dma_ops hpt36x_dma_ops = {
1441 .dma_host_set = ide_dma_host_set,
1442 .dma_setup = ide_dma_setup,
1443 .dma_exec_cmd = ide_dma_exec_cmd,
1444 .dma_start = ide_dma_start,
1445 .dma_end = __ide_dma_end,
1446 .dma_test_irq = ide_dma_test_irq,
1447 .dma_lost_irq = hpt366_dma_lost_irq,
1448 .dma_timeout = ide_dma_timeout,
1449};
1450
1419static const struct ide_port_info hpt366_chipsets[] __devinitdata = { 1451static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1420 { /* 0 */ 1452 { /* 0 */
1421 .name = "HPT36x", 1453 .name = "HPT36x",
@@ -1429,7 +1461,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1429 * Bit 4 is for the primary channel, bit 5 for the secondary. 1461 * Bit 4 is for the primary channel, bit 5 for the secondary.
1430 */ 1462 */
1431 .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}}, 1463 .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
1432 .extra = 240, 1464 .port_ops = &hpt3xx_port_ops,
1465 .dma_ops = &hpt36x_dma_ops,
1433 .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE, 1466 .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
1434 .pio_mask = ATA_PIO4, 1467 .pio_mask = ATA_PIO4,
1435 .mwdma_mask = ATA_MWDMA2, 1468 .mwdma_mask = ATA_MWDMA2,
@@ -1439,7 +1472,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1439 .init_hwif = init_hwif_hpt366, 1472 .init_hwif = init_hwif_hpt366,
1440 .init_dma = init_dma_hpt366, 1473 .init_dma = init_dma_hpt366,
1441 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1474 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1442 .extra = 240, 1475 .port_ops = &hpt3xx_port_ops,
1476 .dma_ops = &hpt37x_dma_ops,
1443 .host_flags = IDE_HFLAGS_HPT3XX, 1477 .host_flags = IDE_HFLAGS_HPT3XX,
1444 .pio_mask = ATA_PIO4, 1478 .pio_mask = ATA_PIO4,
1445 .mwdma_mask = ATA_MWDMA2, 1479 .mwdma_mask = ATA_MWDMA2,
@@ -1449,7 +1483,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1449 .init_hwif = init_hwif_hpt366, 1483 .init_hwif = init_hwif_hpt366,
1450 .init_dma = init_dma_hpt366, 1484 .init_dma = init_dma_hpt366,
1451 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1485 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1452 .extra = 240, 1486 .port_ops = &hpt3xx_port_ops,
1487 .dma_ops = &hpt37x_dma_ops,
1453 .host_flags = IDE_HFLAGS_HPT3XX, 1488 .host_flags = IDE_HFLAGS_HPT3XX,
1454 .pio_mask = ATA_PIO4, 1489 .pio_mask = ATA_PIO4,
1455 .mwdma_mask = ATA_MWDMA2, 1490 .mwdma_mask = ATA_MWDMA2,
@@ -1459,7 +1494,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1459 .init_hwif = init_hwif_hpt366, 1494 .init_hwif = init_hwif_hpt366,
1460 .init_dma = init_dma_hpt366, 1495 .init_dma = init_dma_hpt366,
1461 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1496 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1462 .extra = 240, 1497 .port_ops = &hpt3xx_port_ops,
1498 .dma_ops = &hpt37x_dma_ops,
1463 .host_flags = IDE_HFLAGS_HPT3XX, 1499 .host_flags = IDE_HFLAGS_HPT3XX,
1464 .pio_mask = ATA_PIO4, 1500 .pio_mask = ATA_PIO4,
1465 .mwdma_mask = ATA_MWDMA2, 1501 .mwdma_mask = ATA_MWDMA2,
@@ -1470,7 +1506,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1470 .init_dma = init_dma_hpt366, 1506 .init_dma = init_dma_hpt366,
1471 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1507 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1472 .udma_mask = ATA_UDMA5, 1508 .udma_mask = ATA_UDMA5,
1473 .extra = 240, 1509 .port_ops = &hpt3xx_port_ops,
1510 .dma_ops = &hpt37x_dma_ops,
1474 .host_flags = IDE_HFLAGS_HPT3XX, 1511 .host_flags = IDE_HFLAGS_HPT3XX,
1475 .pio_mask = ATA_PIO4, 1512 .pio_mask = ATA_PIO4,
1476 .mwdma_mask = ATA_MWDMA2, 1513 .mwdma_mask = ATA_MWDMA2,
@@ -1480,7 +1517,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
1480 .init_hwif = init_hwif_hpt366, 1517 .init_hwif = init_hwif_hpt366,
1481 .init_dma = init_dma_hpt366, 1518 .init_dma = init_dma_hpt366,
1482 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1519 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1483 .extra = 240, 1520 .port_ops = &hpt3xx_port_ops,
1521 .dma_ops = &hpt37x_dma_ops,
1484 .host_flags = IDE_HFLAGS_HPT3XX, 1522 .host_flags = IDE_HFLAGS_HPT3XX,
1485 .pio_mask = ATA_PIO4, 1523 .pio_mask = ATA_PIO4,
1486 .mwdma_mask = ATA_MWDMA2, 1524 .mwdma_mask = ATA_MWDMA2,
@@ -1543,6 +1581,10 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1543 d.name = info->chip_name; 1581 d.name = info->chip_name;
1544 d.udma_mask = info->udma_mask; 1582 d.udma_mask = info->udma_mask;
1545 1583
1584 /* fixup ->dma_ops for HPT370/HPT370A */
1585 if (info == &hpt370 || info == &hpt370a)
1586 d.dma_ops = &hpt370_dma_ops;
1587
1546 pci_set_drvdata(dev, (void *)info); 1588 pci_set_drvdata(dev, (void *)info);
1547 1589
1548 if (info == &hpt36x || info == &hpt374) 1590 if (info == &hpt36x || info == &hpt374)
@@ -1557,7 +1599,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1557 hpt374_init(dev, dev2); 1599 hpt374_init(dev, dev2);
1558 else { 1600 else {
1559 if (hpt36x_init(dev, dev2)) 1601 if (hpt36x_init(dev, dev2))
1560 d.host_flags |= IDE_HFLAG_BOOTABLE; 1602 d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE;
1561 } 1603 }
1562 1604
1563 ret = ide_setup_pci_devices(dev, dev2, &d); 1605 ret = ide_setup_pci_devices(dev, dev2, &d);
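
The hpt366.c hunks above show the shape repeated throughout this series: function pointers that used to be written into each hwif at init_hwif() time become const ide_port_ops / ide_dma_ops tables that the port descriptor points at, so the hooks turn into shared, read-only data instead of per-port writable state. A minimal sketch of that pattern follows, using simplified stand-in types rather than the kernel's real structures (demo_port_ops, demo_port_info and the demo_* callbacks are hypothetical names, not kernel symbols):

#include <stdio.h>

/* simplified stand-ins for ide_port_ops / ide_port_info */
struct demo_port_ops {
	void (*set_pio_mode)(int drive, unsigned char pio);
	unsigned char (*cable_detect)(int port);
};

struct demo_port_info {
	const char *name;
	const struct demo_port_ops *port_ops;	/* shared, read-only hook table */
};

static void demo_set_pio_mode(int drive, unsigned char pio)
{
	printf("drive %d: PIO%u\n", drive, pio);
}

static unsigned char demo_cable_detect(int port)
{
	return 80;	/* pretend an 80-wire cable was found */
}

/* one const table serves every port of this chip type */
static const struct demo_port_ops demo_ops = {
	.set_pio_mode	= demo_set_pio_mode,
	.cable_detect	= demo_cable_detect,
};

static const struct demo_port_info demo_chip = {
	.name		= "DEMO100",
	.port_ops	= &demo_ops,
};

int main(void)
{
	/* callers go through the table instead of per-port pointers */
	demo_chip.port_ops->set_pio_mode(0, 4);
	printf("%s: %u-wire cable\n", demo_chip.name,
	       demo_chip.port_ops->cable_detect(0));
	return 0;
}

One table can be shared by every port of a chip family and placed in read-only data, which is what the hpt3xx_port_ops and hpt36x/hpt370/hpt37x dma_ops tables above achieve.
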
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index e3427eaab430..9053c8771e6e 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -35,7 +35,7 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
35 static DEFINE_SPINLOCK(tune_lock); 35 static DEFINE_SPINLOCK(tune_lock);
36 int control = 0; 36 int control = 0;
37 37
38 static const u8 timings[][2]= { 38 static const u8 timings[][2] = {
39 { 0, 0 }, 39 { 0, 0 },
40 { 0, 0 }, 40 { 0, 0 },
41 { 1, 0 }, 41 { 1, 0 },
@@ -105,11 +105,10 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
105 105
106 if (!(reg48 & u_flag)) 106 if (!(reg48 & u_flag))
107 pci_write_config_byte(dev, 0x48, reg48 | u_flag); 107 pci_write_config_byte(dev, 0x48, reg48 | u_flag);
108 if (speed >= XFER_UDMA_5) { 108 if (speed >= XFER_UDMA_5)
109 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); 109 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
110 } else { 110 else
111 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 111 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
112 }
113 112
114 if ((reg4a & a_speed) != u_speed) 113 if ((reg4a & a_speed) != u_speed)
115 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); 114 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
@@ -150,29 +149,18 @@ static u8 __devinit it8213_cable_detect(ide_hwif_t *hwif)
150 return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; 149 return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
151} 150}
152 151
153/** 152static const struct ide_port_ops it8213_port_ops = {
154 * init_hwif_it8213 - set up hwif structs 153 .set_pio_mode = it8213_set_pio_mode,
155 * @hwif: interface to set up 154 .set_dma_mode = it8213_set_dma_mode,
156 * 155 .cable_detect = it8213_cable_detect,
157 * We do the basic set up of the interface structure. 156};
158 */
159
160static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
161{
162 hwif->set_dma_mode = &it8213_set_dma_mode;
163 hwif->set_pio_mode = &it8213_set_pio_mode;
164
165 hwif->cable_detect = it8213_cable_detect;
166}
167
168 157
169#define DECLARE_ITE_DEV(name_str) \ 158#define DECLARE_ITE_DEV(name_str) \
170 { \ 159 { \
171 .name = name_str, \ 160 .name = name_str, \
172 .init_hwif = init_hwif_it8213, \ 161 .enablebits = { {0x41, 0x80, 0x80} }, \
173 .enablebits = {{0x41,0x80,0x80}}, \ 162 .port_ops = &it8213_port_ops, \
174 .host_flags = IDE_HFLAG_SINGLE | \ 163 .host_flags = IDE_HFLAG_SINGLE, \
175 IDE_HFLAG_BOOTABLE, \
176 .pio_mask = ATA_PIO4, \ 164 .pio_mask = ATA_PIO4, \
177 .swdma_mask = ATA_SWDMA2_ONLY, \ 165 .swdma_mask = ATA_SWDMA2_ONLY, \
178 .mwdma_mask = ATA_MWDMA12_ONLY, \ 166 .mwdma_mask = ATA_MWDMA12_ONLY, \
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index d8a167451fd6..6ab04115286b 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -418,7 +418,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
418} 418}
419 419
420/** 420/**
421 * ata66_it821x - check for 80 pin cable 421 * it821x_cable_detect - cable detection
422 * @hwif: interface to check 422 * @hwif: interface to check
423 * 423 *
424 * Check for the presence of an ATA66 capable cable on the 424 * Check for the presence of an ATA66 capable cable on the
@@ -426,7 +426,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
426 * the needed logic onboard. 426 * the needed logic onboard.
427 */ 427 */
428 428
429static u8 __devinit ata66_it821x(ide_hwif_t *hwif) 429static u8 __devinit it821x_cable_detect(ide_hwif_t *hwif)
430{ 430{
431 /* The reference driver also only does disk side */ 431 /* The reference driver also only does disk side */
432 return ATA_CBL_PATA80; 432 return ATA_CBL_PATA80;
@@ -511,6 +511,11 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
511 511
512} 512}
513 513
514static struct ide_dma_ops it821x_pass_through_dma_ops = {
515 .dma_start = it821x_dma_start,
516 .dma_end = it821x_dma_end,
517};
518
514/** 519/**
515 * init_hwif_it821x - set up hwif structs 520 * init_hwif_it821x - set up hwif structs
516 * @hwif: interface to set up 521 * @hwif: interface to set up
@@ -523,16 +528,10 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
523static void __devinit init_hwif_it821x(ide_hwif_t *hwif) 528static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
524{ 529{
525 struct pci_dev *dev = to_pci_dev(hwif->dev); 530 struct pci_dev *dev = to_pci_dev(hwif->dev);
526 struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL); 531 struct it821x_dev **itdevs = (struct it821x_dev **)pci_get_drvdata(dev);
532 struct it821x_dev *idev = itdevs[hwif->channel];
527 u8 conf; 533 u8 conf;
528 534
529 hwif->quirkproc = &it821x_quirkproc;
530
531 if (idev == NULL) {
532 printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n");
533 return;
534 }
535
536 ide_set_hwifdata(hwif, idev); 535 ide_set_hwifdata(hwif, idev);
537 536
538 pci_read_config_byte(dev, 0x50, &conf); 537 pci_read_config_byte(dev, 0x50, &conf);
@@ -567,17 +566,11 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
567 } 566 }
568 567
569 if (idev->smart == 0) { 568 if (idev->smart == 0) {
570 hwif->set_pio_mode = &it821x_set_pio_mode;
571 hwif->set_dma_mode = &it821x_set_dma_mode;
572
573 /* MWDMA/PIO clock switching for pass through mode */ 569 /* MWDMA/PIO clock switching for pass through mode */
574 hwif->dma_start = &it821x_dma_start; 570 hwif->dma_ops = &it821x_pass_through_dma_ops;
575 hwif->ide_dma_end = &it821x_dma_end;
576 } else 571 } else
577 hwif->host_flags |= IDE_HFLAG_NO_SET_MODE; 572 hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
578 573
579 hwif->cable_detect = ata66_it821x;
580
581 if (hwif->dma_base == 0) 574 if (hwif->dma_base == 0)
582 return; 575 return;
583 576
@@ -617,13 +610,20 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
617 return 0; 610 return 0;
618} 611}
619 612
613static const struct ide_port_ops it821x_port_ops = {
614 /* it821x_set_{pio,dma}_mode() are only used in pass-through mode */
615 .set_pio_mode = it821x_set_pio_mode,
616 .set_dma_mode = it821x_set_dma_mode,
617 .quirkproc = it821x_quirkproc,
618 .cable_detect = it821x_cable_detect,
619};
620 620
621#define DECLARE_ITE_DEV(name_str) \ 621#define DECLARE_ITE_DEV(name_str) \
622 { \ 622 { \
623 .name = name_str, \ 623 .name = name_str, \
624 .init_chipset = init_chipset_it821x, \ 624 .init_chipset = init_chipset_it821x, \
625 .init_hwif = init_hwif_it821x, \ 625 .init_hwif = init_hwif_it821x, \
626 .host_flags = IDE_HFLAG_BOOTABLE, \ 626 .port_ops = &it821x_port_ops, \
627 .pio_mask = ATA_PIO4, \ 627 .pio_mask = ATA_PIO4, \
628 } 628 }
629 629
@@ -642,6 +642,22 @@ static const struct ide_port_info it821x_chipsets[] __devinitdata = {
642 642
643static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id) 643static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
644{ 644{
645 struct it821x_dev *itdevs[2] = { NULL, NULL} , *itdev;
646 unsigned int i;
647
648 for (i = 0; i < 2; i++) {
649 itdev = kzalloc(sizeof(*itdev), GFP_KERNEL);
650 if (itdev == NULL) {
651 kfree(itdevs[0]);
652 printk(KERN_ERR "it821x: out of memory\n");
653 return -ENOMEM;
654 }
655
656 itdevs[i] = itdev;
657 }
658
659 pci_set_drvdata(dev, itdevs);
660
645 return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]); 661 return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
646} 662}
647 663
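
Note how the per-channel it821x_dev allocation moves out of init_hwif(), where a failed kzalloc() could only fall back to legacy behaviour, into the PCI probe routine, which allocates both channels' state up front, stashes the array with pci_set_drvdata() and can return -ENOMEM cleanly. A small sketch of that "allocate everything or free what was already obtained" shape, with plain calloc()/free() as stand-ins for the kernel allocators (demo_channel and the two-channel count are illustrative only):

#include <stdlib.h>

struct demo_channel {
	int index;
	/* ... per-channel timing state ... */
};

/* Allocate state for every channel, or undo and fail as a whole. */
static int demo_probe(struct demo_channel *chans[], unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		chans[i] = calloc(1, sizeof(*chans[i]));
		if (chans[i] == NULL) {
			while (i--)
				free(chans[i]);	/* release what was already allocated */
			return -1;		/* stands in for -ENOMEM */
		}
		chans[i]->index = (int)i;
	}
	return 0;
}

int main(void)
{
	struct demo_channel *chans[2] = { NULL, NULL };

	if (demo_probe(chans, 2) != 0)
		return 1;

	free(chans[0]);
	free(chans[1]);
	return 0;
}
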
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index a56bcb4f22f4..96ef7394f283 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -19,13 +19,13 @@ typedef enum {
19} port_type; 19} port_type;
20 20
21/** 21/**
22 * ata66_jmicron - Cable check 22 * jmicron_cable_detect - cable detection
23 * @hwif: IDE port 23 * @hwif: IDE port
24 * 24 *
25 * Returns the cable type. 25 * Returns the cable type.
26 */ 26 */
27 27
28static u8 __devinit ata66_jmicron(ide_hwif_t *hwif) 28static u8 __devinit jmicron_cable_detect(ide_hwif_t *hwif)
29{ 29{
30 struct pci_dev *pdev = to_pci_dev(hwif->dev); 30 struct pci_dev *pdev = to_pci_dev(hwif->dev);
31 31
@@ -63,8 +63,7 @@ static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
63 * actually do our cable checking etc. Thankfully we don't need 63 * actually do our cable checking etc. Thankfully we don't need
64 * to do the plumbing for other cases. 64 * to do the plumbing for other cases.
65 */ 65 */
66 switch (port_map[port]) 66 switch (port_map[port]) {
67 {
68 case PORT_PATA0: 67 case PORT_PATA0:
69 if (control & (1 << 3)) /* 40/80 pin primary */ 68 if (control & (1 << 3)) /* 40/80 pin primary */
70 return ATA_CBL_PATA40; 69 return ATA_CBL_PATA40;
@@ -96,26 +95,16 @@ static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode)
96{ 95{
97} 96}
98 97
99/** 98static const struct ide_port_ops jmicron_port_ops = {
100 * init_hwif_jmicron - set up hwif structs 99 .set_pio_mode = jmicron_set_pio_mode,
101 * @hwif: interface to set up 100 .set_dma_mode = jmicron_set_dma_mode,
102 * 101 .cable_detect = jmicron_cable_detect,
103 * Minimal set up is required for the Jmicron hardware. 102};
104 */
105
106static void __devinit init_hwif_jmicron(ide_hwif_t *hwif)
107{
108 hwif->set_pio_mode = &jmicron_set_pio_mode;
109 hwif->set_dma_mode = &jmicron_set_dma_mode;
110
111 hwif->cable_detect = ata66_jmicron;
112}
113 103
114static const struct ide_port_info jmicron_chipset __devinitdata = { 104static const struct ide_port_info jmicron_chipset __devinitdata = {
115 .name = "JMB", 105 .name = "JMB",
116 .init_hwif = init_hwif_jmicron,
117 .host_flags = IDE_HFLAG_BOOTABLE,
118 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } }, 106 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
107 .port_ops = &jmicron_port_ops,
119 .pio_mask = ATA_PIO5, 108 .pio_mask = ATA_PIO5,
120 .mwdma_mask = ATA_MWDMA2, 109 .mwdma_mask = ATA_MWDMA2,
121 .udma_mask = ATA_UDMA6, 110 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 75513320aad9..c13e299077ec 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -72,8 +72,8 @@ static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
72 base = pci_resource_start(pdev, port * 2) & ~3; 72 base = pci_resource_start(pdev, port * 2) & ~3;
73 dmabase = pci_resource_start(pdev, 4) & ~3; 73 dmabase = pci_resource_start(pdev, 4) & ~3;
74 74
75 superio_ide_status[port] = base + IDE_STATUS_OFFSET; 75 superio_ide_status[port] = base + 7;
76 superio_ide_select[port] = base + IDE_SELECT_OFFSET; 76 superio_ide_select[port] = base + 6;
77 superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa); 77 superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
78 78
79 /* Clear error/interrupt, enable dma */ 79 /* Clear error/interrupt, enable dma */
@@ -150,7 +150,7 @@ static void ns87415_selectproc (ide_drive_t *drive)
150 ns87415_prepare_drive (drive, drive->using_dma); 150 ns87415_prepare_drive (drive, drive->using_dma);
151} 151}
152 152
153static int ns87415_ide_dma_end (ide_drive_t *drive) 153static int ns87415_dma_end(ide_drive_t *drive)
154{ 154{
155 ide_hwif_t *hwif = HWIF(drive); 155 ide_hwif_t *hwif = HWIF(drive);
156 u8 dma_stat = 0, dma_cmd = 0; 156 u8 dma_stat = 0, dma_cmd = 0;
@@ -170,7 +170,7 @@ static int ns87415_ide_dma_end (ide_drive_t *drive)
170 return (dma_stat & 7) != 4; 170 return (dma_stat & 7) != 4;
171} 171}
172 172
173static int ns87415_ide_dma_setup(ide_drive_t *drive) 173static int ns87415_dma_setup(ide_drive_t *drive)
174{ 174{
175 /* select DMA xfer */ 175 /* select DMA xfer */
176 ns87415_prepare_drive(drive, 1); 176 ns87415_prepare_drive(drive, 1);
@@ -195,8 +195,6 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
195 u8 stat; 195 u8 stat;
196#endif 196#endif
197 197
198 hwif->selectproc = &ns87415_selectproc;
199
200 /* 198 /*
201 * We cannot probe for IRQ: both ports share common IRQ on INTA. 199 * We cannot probe for IRQ: both ports share common IRQ on INTA.
202 * Also, leave IRQ masked during drive probing, to prevent infinite 200 * Also, leave IRQ masked during drive probing, to prevent infinite
@@ -233,12 +231,12 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
233 * SELECT_DRIVE() properly during first ide_probe_port(). 231 * SELECT_DRIVE() properly during first ide_probe_port().
234 */ 232 */
235 timeout = 10000; 233 timeout = 10000;
236 outb(12, hwif->io_ports[IDE_CONTROL_OFFSET]); 234 outb(12, hwif->io_ports.ctl_addr);
237 udelay(10); 235 udelay(10);
238 outb(8, hwif->io_ports[IDE_CONTROL_OFFSET]); 236 outb(8, hwif->io_ports.ctl_addr);
239 do { 237 do {
240 udelay(50); 238 udelay(50);
241 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 239 stat = hwif->INB(hwif->io_ports.status_addr);
242 if (stat == 0xff) 240 if (stat == 0xff)
243 break; 241 break;
244 } while ((stat & BUSY_STAT) && --timeout); 242 } while ((stat & BUSY_STAT) && --timeout);
@@ -246,7 +244,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
246 } 244 }
247 245
248 if (!using_inta) 246 if (!using_inta)
249 hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]); 247 hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
250 else if (!hwif->irq && hwif->mate && hwif->mate->irq) 248 else if (!hwif->irq && hwif->mate && hwif->mate->irq)
251 hwif->irq = hwif->mate->irq; /* share IRQ with mate */ 249 hwif->irq = hwif->mate->irq; /* share IRQ with mate */
252 250
@@ -254,19 +252,33 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
254 return; 252 return;
255 253
256 outb(0x60, hwif->dma_status); 254 outb(0x60, hwif->dma_status);
257 hwif->dma_setup = &ns87415_ide_dma_setup;
258 hwif->ide_dma_end = &ns87415_ide_dma_end;
259} 255}
260 256
257static const struct ide_port_ops ns87415_port_ops = {
258 .selectproc = ns87415_selectproc,
259};
260
261static const struct ide_dma_ops ns87415_dma_ops = {
262 .dma_host_set = ide_dma_host_set,
263 .dma_setup = ns87415_dma_setup,
264 .dma_exec_cmd = ide_dma_exec_cmd,
265 .dma_start = ide_dma_start,
266 .dma_end = ns87415_dma_end,
267 .dma_test_irq = ide_dma_test_irq,
268 .dma_lost_irq = ide_dma_lost_irq,
269 .dma_timeout = ide_dma_timeout,
270};
271
261static const struct ide_port_info ns87415_chipset __devinitdata = { 272static const struct ide_port_info ns87415_chipset __devinitdata = {
262 .name = "NS87415", 273 .name = "NS87415",
263#ifdef CONFIG_SUPERIO 274#ifdef CONFIG_SUPERIO
264 .init_iops = init_iops_ns87415, 275 .init_iops = init_iops_ns87415,
265#endif 276#endif
266 .init_hwif = init_hwif_ns87415, 277 .init_hwif = init_hwif_ns87415,
278 .port_ops = &ns87415_port_ops,
279 .dma_ops = &ns87415_dma_ops,
267 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 280 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
268 IDE_HFLAG_NO_ATAPI_DMA | 281 IDE_HFLAG_NO_ATAPI_DMA,
269 IDE_HFLAG_BOOTABLE,
270}; 282};
271 283
272static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) 284static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index 46e8748f507e..6e99080497bf 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -53,13 +53,12 @@
53 * If you then set the second drive to another PIO, the old value 53 * If you then set the second drive to another PIO, the old value
54 * (automatically selected) will be overrided by yours. 54 * (automatically selected) will be overrided by yours.
55 * There is a 25/33MHz switch in configuration 55 * There is a 25/33MHz switch in configuration
56 * register, but driver is written for use at any frequency which get 56 * register, but driver is written for use at any frequency.
57 * (use idebus=xx to select PCI bus speed).
58 * 57 *
59 * Version 0.1, Nov 8, 1996 58 * Version 0.1, Nov 8, 1996
60 * by Jaromir Koutek, for 2.1.8. 59 * by Jaromir Koutek, for 2.1.8.
61 * Initial version of driver. 60 * Initial version of driver.
62 * 61 *
63 * Version 0.2 62 * Version 0.2
64 * Number 0.2 skipped. 63 * Number 0.2 skipped.
65 * 64 *
@@ -75,7 +74,7 @@
75 * by Jaromir Koutek 74 * by Jaromir Koutek
76 * Updates for use with (again) new IDE block driver. 75 * Updates for use with (again) new IDE block driver.
77 * Update of documentation. 76 * Update of documentation.
78 * 77 *
79 * Version 0.6, Jan 2, 1999 78 * Version 0.6, Jan 2, 1999
80 * by Jaromir Koutek 79 * by Jaromir Koutek
81 * Reversed to version 0.3 of the driver, because 80 * Reversed to version 0.3 of the driver, because
@@ -208,29 +207,34 @@ typedef struct pio_clocks_s {
208 207
209static void compute_clocks(int pio, pio_clocks_t *clks) 208static void compute_clocks(int pio, pio_clocks_t *clks)
210{ 209{
211 if (pio != PIO_NOT_EXIST) { 210 if (pio != PIO_NOT_EXIST) {
212 int adr_setup, data_pls; 211 int adr_setup, data_pls;
213 int bus_speed = system_bus_clock(); 212 int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
214 213
215 adr_setup = ide_pio_timings[pio].setup_time; 214 adr_setup = ide_pio_timings[pio].setup_time;
216 data_pls = ide_pio_timings[pio].active_time; 215 data_pls = ide_pio_timings[pio].active_time;
217 clks->address_time = cmpt_clk(adr_setup, bus_speed); 216 clks->address_time = cmpt_clk(adr_setup, bus_speed);
218 clks->data_time = cmpt_clk(data_pls, bus_speed); 217 clks->data_time = cmpt_clk(data_pls, bus_speed);
219 clks->recovery_time = cmpt_clk(ide_pio_timings[pio].cycle_time 218 clks->recovery_time = cmpt_clk(ide_pio_timings[pio].cycle_time
220 - adr_setup-data_pls, bus_speed); 219 - adr_setup-data_pls, bus_speed);
221 if (clks->address_time<1) clks->address_time = 1; 220 if (clks->address_time < 1)
222 if (clks->address_time>4) clks->address_time = 4; 221 clks->address_time = 1;
223 if (clks->data_time<1) clks->data_time = 1; 222 if (clks->address_time > 4)
224 if (clks->data_time>16) clks->data_time = 16; 223 clks->address_time = 4;
225 if (clks->recovery_time<2) clks->recovery_time = 2; 224 if (clks->data_time < 1)
226 if (clks->recovery_time>17) clks->recovery_time = 17; 225 clks->data_time = 1;
226 if (clks->data_time > 16)
227 clks->data_time = 16;
228 if (clks->recovery_time < 2)
229 clks->recovery_time = 2;
230 if (clks->recovery_time > 17)
231 clks->recovery_time = 17;
227 } else { 232 } else {
228 clks->address_time = 1; 233 clks->address_time = 1;
229 clks->data_time = 1; 234 clks->data_time = 1;
230 clks->recovery_time = 2; 235 clks->recovery_time = 2;
231 /* minimal values */ 236 /* minimal values */
232 } 237 }
233
234} 238}
235 239
236static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio) 240static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -247,8 +251,8 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
247 251
248 /* sets drive->drive_data for both drives */ 252 /* sets drive->drive_data for both drives */
249 compute_pios(drive, pio); 253 compute_pios(drive, pio);
250 pio1 = hwif->drives[0].drive_data; 254 pio1 = hwif->drives[0].drive_data;
251 pio2 = hwif->drives[1].drive_data; 255 pio2 = hwif->drives[1].drive_data;
252 256
253 compute_clocks(pio1, &first); 257 compute_clocks(pio1, &first);
254 compute_clocks(pio2, &second); 258 compute_clocks(pio2, &second);
@@ -275,7 +279,7 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
275 279
276 spin_lock_irqsave(&opti621_lock, flags); 280 spin_lock_irqsave(&opti621_lock, flags);
277 281
278 reg_base = hwif->io_ports[IDE_DATA_OFFSET]; 282 reg_base = hwif->io_ports.data_addr;
279 283
280 /* allow Register-B */ 284 /* allow Register-B */
281 outb(0xc0, reg_base + CNTRL_REG); 285 outb(0xc0, reg_base + CNTRL_REG);
@@ -321,31 +325,25 @@ static void __devinit opti621_port_init_devs(ide_hwif_t *hwif)
321 hwif->drives[1].drive_data = PIO_DONT_KNOW; 325 hwif->drives[1].drive_data = PIO_DONT_KNOW;
322} 326}
323 327
324/* 328static const struct ide_port_ops opti621_port_ops = {
325 * init_hwif_opti621() is called once for each hwif found at boot. 329 .port_init_devs = opti621_port_init_devs,
326 */ 330 .set_pio_mode = opti621_set_pio_mode,
327static void __devinit init_hwif_opti621 (ide_hwif_t *hwif) 331};
328{
329 hwif->port_init_devs = opti621_port_init_devs;
330 hwif->set_pio_mode = &opti621_set_pio_mode;
331}
332 332
333static const struct ide_port_info opti621_chipsets[] __devinitdata = { 333static const struct ide_port_info opti621_chipsets[] __devinitdata = {
334 { /* 0 */ 334 { /* 0 */
335 .name = "OPTI621", 335 .name = "OPTI621",
336 .init_hwif = init_hwif_opti621, 336 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
337 .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, 337 .port_ops = &opti621_port_ops,
338 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 338 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
339 IDE_HFLAG_BOOTABLE,
340 .pio_mask = ATA_PIO3, 339 .pio_mask = ATA_PIO3,
341 .swdma_mask = ATA_SWDMA2, 340 .swdma_mask = ATA_SWDMA2,
342 .mwdma_mask = ATA_MWDMA2, 341 .mwdma_mask = ATA_MWDMA2,
343 },{ /* 1 */ 342 }, { /* 1 */
344 .name = "OPTI621X", 343 .name = "OPTI621X",
345 .init_hwif = init_hwif_opti621, 344 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
346 .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, 345 .port_ops = &opti621_port_ops,
347 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | 346 .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
348 IDE_HFLAG_BOOTABLE,
349 .pio_mask = ATA_PIO3, 347 .pio_mask = ATA_PIO3,
350 .swdma_mask = ATA_SWDMA2, 348 .swdma_mask = ATA_SWDMA2,
351 .mwdma_mask = ATA_MWDMA2, 349 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 1c8cb7797a4a..ec9bd7b352fc 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -34,7 +34,7 @@
34#undef DEBUG 34#undef DEBUG
35 35
36#ifdef DEBUG 36#ifdef DEBUG
37#define DBG(fmt, args...) printk("%s: " fmt, __FUNCTION__, ## args) 37#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
38#else 38#else
39#define DBG(fmt, args...) 39#define DBG(fmt, args...)
40#endif 40#endif
@@ -442,17 +442,6 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
442 return dev->irq; 442 return dev->irq;
443} 443}
444 444
445static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
446{
447 hwif->set_pio_mode = &pdcnew_set_pio_mode;
448 hwif->set_dma_mode = &pdcnew_set_dma_mode;
449
450 hwif->quirkproc = &pdcnew_quirkproc;
451 hwif->resetproc = &pdcnew_reset;
452
453 hwif->cable_detect = pdcnew_cable_detect;
454}
455
456static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev) 445static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
457{ 446{
458 struct pci_dev *dev2; 447 struct pci_dev *dev2;
@@ -476,11 +465,19 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
476 return NULL; 465 return NULL;
477} 466}
478 467
468static const struct ide_port_ops pdcnew_port_ops = {
469 .set_pio_mode = pdcnew_set_pio_mode,
470 .set_dma_mode = pdcnew_set_dma_mode,
471 .quirkproc = pdcnew_quirkproc,
472 .resetproc = pdcnew_reset,
473 .cable_detect = pdcnew_cable_detect,
474};
475
479#define DECLARE_PDCNEW_DEV(name_str, udma) \ 476#define DECLARE_PDCNEW_DEV(name_str, udma) \
480 { \ 477 { \
481 .name = name_str, \ 478 .name = name_str, \
482 .init_chipset = init_chipset_pdcnew, \ 479 .init_chipset = init_chipset_pdcnew, \
483 .init_hwif = init_hwif_pdc202new, \ 480 .port_ops = &pdcnew_port_ops, \
484 .host_flags = IDE_HFLAG_POST_SET_MODE | \ 481 .host_flags = IDE_HFLAG_POST_SET_MODE | \
485 IDE_HFLAG_ERROR_STOPS_FIFO | \ 482 IDE_HFLAG_ERROR_STOPS_FIFO | \
486 IDE_HFLAG_OFF_BOARD, \ 483 IDE_HFLAG_OFF_BOARD, \
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 150422ec3cfa..fca89eda5c02 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -115,7 +115,7 @@ static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
115 pdc202xx_set_mode(drive, XFER_PIO_0 + pio); 115 pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
116} 116}
117 117
118static u8 __devinit pdc2026x_old_cable_detect(ide_hwif_t *hwif) 118static u8 __devinit pdc2026x_cable_detect(ide_hwif_t *hwif)
119{ 119{
120 struct pci_dev *dev = to_pci_dev(hwif->dev); 120 struct pci_dev *dev = to_pci_dev(hwif->dev);
121 u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10); 121 u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);
@@ -163,7 +163,7 @@ static void pdc202xx_quirkproc(ide_drive_t *drive)
163 drive->quirk_list = 0; 163 drive->quirk_list = 0;
164} 164}
165 165
166static void pdc202xx_old_ide_dma_start(ide_drive_t *drive) 166static void pdc202xx_dma_start(ide_drive_t *drive)
167{ 167{
168 if (drive->current_speed > XFER_UDMA_2) 168 if (drive->current_speed > XFER_UDMA_2)
169 pdc_old_enable_66MHz_clock(drive->hwif); 169 pdc_old_enable_66MHz_clock(drive->hwif);
@@ -185,7 +185,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
185 ide_dma_start(drive); 185 ide_dma_start(drive);
186} 186}
187 187
188static int pdc202xx_old_ide_dma_end(ide_drive_t *drive) 188static int pdc202xx_dma_end(ide_drive_t *drive)
189{ 189{
190 if (drive->media != ide_disk || drive->addressing == 1) { 190 if (drive->media != ide_disk || drive->addressing == 1) {
191 ide_hwif_t *hwif = HWIF(drive); 191 ide_hwif_t *hwif = HWIF(drive);
@@ -202,7 +202,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
202 return __ide_dma_end(drive); 202 return __ide_dma_end(drive);
203} 203}
204 204
205static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive) 205static int pdc202xx_dma_test_irq(ide_drive_t *drive)
206{ 206{
207 ide_hwif_t *hwif = HWIF(drive); 207 ide_hwif_t *hwif = HWIF(drive);
208 unsigned long high_16 = hwif->extra_base - 16; 208 unsigned long high_16 = hwif->extra_base - 16;
@@ -226,26 +226,6 @@ somebody_else:
226 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */ 226 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
227} 227}
228 228
229static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
230{
231 ide_hwif_t *hwif = HWIF(drive);
232
233 if (hwif->resetproc != NULL)
234 hwif->resetproc(drive);
235
236 ide_dma_lost_irq(drive);
237}
238
239static void pdc202xx_dma_timeout(ide_drive_t *drive)
240{
241 ide_hwif_t *hwif = HWIF(drive);
242
243 if (hwif->resetproc != NULL)
244 hwif->resetproc(drive);
245
246 ide_dma_timeout(drive);
247}
248
249static void pdc202xx_reset_host (ide_hwif_t *hwif) 229static void pdc202xx_reset_host (ide_hwif_t *hwif)
250{ 230{
251 unsigned long high_16 = hwif->extra_base - 16; 231 unsigned long high_16 = hwif->extra_base - 16;
@@ -271,68 +251,46 @@ static void pdc202xx_reset (ide_drive_t *drive)
271 ide_set_max_pio(drive); 251 ide_set_max_pio(drive);
272} 252}
273 253
274static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev, 254static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
275 const char *name)
276{ 255{
277 return dev->irq; 256 pdc202xx_reset(drive);
257 ide_dma_lost_irq(drive);
278} 258}
279 259
280static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif) 260static void pdc202xx_dma_timeout(ide_drive_t *drive)
281{ 261{
282 struct pci_dev *dev = to_pci_dev(hwif->dev); 262 pdc202xx_reset(drive);
283 263 ide_dma_timeout(drive);
284 hwif->set_pio_mode = &pdc202xx_set_pio_mode;
285 hwif->set_dma_mode = &pdc202xx_set_mode;
286
287 hwif->quirkproc = &pdc202xx_quirkproc;
288
289 if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
290 hwif->resetproc = &pdc202xx_reset;
291
292 hwif->cable_detect = pdc2026x_old_cable_detect;
293 }
294
295 if (hwif->dma_base == 0)
296 return;
297
298 hwif->dma_lost_irq = &pdc202xx_dma_lost_irq;
299 hwif->dma_timeout = &pdc202xx_dma_timeout;
300
301 if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
302 hwif->dma_start = &pdc202xx_old_ide_dma_start;
303 hwif->ide_dma_end = &pdc202xx_old_ide_dma_end;
304 }
305 hwif->ide_dma_test_irq = &pdc202xx_old_ide_dma_test_irq;
306} 264}
307 265
308static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase) 266static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
267 const char *name)
309{ 268{
269 unsigned long dmabase = pci_resource_start(dev, 4);
310 u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0; 270 u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
311 271
312 if (hwif->channel) { 272 if (dmabase == 0)
313 ide_setup_dma(hwif, dmabase); 273 goto out;
314 return;
315 }
316 274
317 udma_speed_flag = inb(dmabase | 0x1f); 275 udma_speed_flag = inb(dmabase | 0x1f);
318 primary_mode = inb(dmabase | 0x1a); 276 primary_mode = inb(dmabase | 0x1a);
319 secondary_mode = inb(dmabase | 0x1b); 277 secondary_mode = inb(dmabase | 0x1b);
320 printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \ 278 printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \
321 "Primary %s Mode " \ 279 "Primary %s Mode " \
322 "Secondary %s Mode.\n", hwif->cds->name, 280 "Secondary %s Mode.\n", pci_name(dev),
323 (udma_speed_flag & 1) ? "EN" : "DIS", 281 (udma_speed_flag & 1) ? "EN" : "DIS",
324 (primary_mode & 1) ? "MASTER" : "PCI", 282 (primary_mode & 1) ? "MASTER" : "PCI",
325 (secondary_mode & 1) ? "MASTER" : "PCI" ); 283 (secondary_mode & 1) ? "MASTER" : "PCI" );
326 284
327 if (!(udma_speed_flag & 1)) { 285 if (!(udma_speed_flag & 1)) {
328 printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ", 286 printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
329 hwif->cds->name, udma_speed_flag, 287 pci_name(dev), udma_speed_flag,
330 (udma_speed_flag|1)); 288 (udma_speed_flag|1));
331 outb(udma_speed_flag | 1, dmabase | 0x1f); 289 outb(udma_speed_flag | 1, dmabase | 0x1f);
332 printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN"); 290 printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
333 } 291 }
334 292out:
335 ide_setup_dma(hwif, dmabase); 293 return dev->irq;
336} 294}
337 295
338static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev, 296static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
@@ -357,13 +315,48 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
357 IDE_HFLAG_ABUSE_SET_DMA_MODE | \ 315 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
358 IDE_HFLAG_OFF_BOARD) 316 IDE_HFLAG_OFF_BOARD)
359 317
318static const struct ide_port_ops pdc20246_port_ops = {
319 .set_pio_mode = pdc202xx_set_pio_mode,
320 .set_dma_mode = pdc202xx_set_mode,
321 .quirkproc = pdc202xx_quirkproc,
322};
323
324static const struct ide_port_ops pdc2026x_port_ops = {
325 .set_pio_mode = pdc202xx_set_pio_mode,
326 .set_dma_mode = pdc202xx_set_mode,
327 .quirkproc = pdc202xx_quirkproc,
328 .resetproc = pdc202xx_reset,
329 .cable_detect = pdc2026x_cable_detect,
330};
331
332static const struct ide_dma_ops pdc20246_dma_ops = {
333 .dma_host_set = ide_dma_host_set,
334 .dma_setup = ide_dma_setup,
335 .dma_exec_cmd = ide_dma_exec_cmd,
336 .dma_start = ide_dma_start,
337 .dma_end = __ide_dma_end,
338 .dma_test_irq = pdc202xx_dma_test_irq,
339 .dma_lost_irq = pdc202xx_dma_lost_irq,
340 .dma_timeout = pdc202xx_dma_timeout,
341};
342
343static const struct ide_dma_ops pdc2026x_dma_ops = {
344 .dma_host_set = ide_dma_host_set,
345 .dma_setup = ide_dma_setup,
346 .dma_exec_cmd = ide_dma_exec_cmd,
347 .dma_start = pdc202xx_dma_start,
348 .dma_end = pdc202xx_dma_end,
349 .dma_test_irq = pdc202xx_dma_test_irq,
350 .dma_lost_irq = pdc202xx_dma_lost_irq,
351 .dma_timeout = pdc202xx_dma_timeout,
352};
353
360#define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \ 354#define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
361 { \ 355 { \
362 .name = name_str, \ 356 .name = name_str, \
363 .init_chipset = init_chipset_pdc202xx, \ 357 .init_chipset = init_chipset_pdc202xx, \
364 .init_hwif = init_hwif_pdc202xx, \ 358 .port_ops = &pdc2026x_port_ops, \
365 .init_dma = init_dma_pdc202xx, \ 359 .dma_ops = &pdc2026x_dma_ops, \
366 .extra = 48, \
367 .host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \ 360 .host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \
368 .pio_mask = ATA_PIO4, \ 361 .pio_mask = ATA_PIO4, \
369 .mwdma_mask = ATA_MWDMA2, \ 362 .mwdma_mask = ATA_MWDMA2, \
@@ -374,9 +367,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
374 { /* 0 */ 367 { /* 0 */
375 .name = "PDC20246", 368 .name = "PDC20246",
376 .init_chipset = init_chipset_pdc202xx, 369 .init_chipset = init_chipset_pdc202xx,
377 .init_hwif = init_hwif_pdc202xx, 370 .port_ops = &pdc20246_port_ops,
378 .init_dma = init_dma_pdc202xx, 371 .dma_ops = &pdc20246_dma_ops,
379 .extra = 16,
380 .host_flags = IDE_HFLAGS_PDC202XX, 372 .host_flags = IDE_HFLAGS_PDC202XX,
381 .pio_mask = ATA_PIO4, 373 .pio_mask = ATA_PIO4,
382 .mwdma_mask = ATA_MWDMA2, 374 .mwdma_mask = ATA_MWDMA2,
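
The reworked init_chipset_pdc202xx() keeps the existing sequence for the (U)DMA burst bit: read the control register, force the bit if it is clear, then read it back and report whether the write stuck. The same read-modify-write-and-verify idea is sketched below with the I/O port replaced by an ordinary variable so the example is self-contained (fake_reg, reg_read() and reg_write() are stand-ins, not real port accessors):

#include <stdio.h>

static unsigned char fake_reg = 0x5e;	/* pretend the burst bit (bit 0) starts cleared */

static unsigned char reg_read(void)    { return fake_reg; }
static void reg_write(unsigned char v) { fake_reg = v; }

int main(void)
{
	unsigned char flag = reg_read();

	if (!(flag & 1)) {
		printf("forcing burst bit 0x%02x->0x%02x ", flag, flag | 1);
		reg_write(flag | 1);
		/* re-read to confirm the "hardware" actually latched the bit */
		printf("%sACTIVE\n", (reg_read() & 1) ? "" : "IN");
	}
	return 0;
}
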
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index decef0f47674..21c5dd23f928 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -285,11 +285,6 @@ static u8 __devinit piix_cable_detect(ide_hwif_t *hwif)
285 285
286static void __devinit init_hwif_piix(ide_hwif_t *hwif) 286static void __devinit init_hwif_piix(ide_hwif_t *hwif)
287{ 287{
288 hwif->set_pio_mode = &piix_set_pio_mode;
289 hwif->set_dma_mode = &piix_set_dma_mode;
290
291 hwif->cable_detect = piix_cable_detect;
292
293 if (!hwif->dma_base) 288 if (!hwif->dma_base)
294 return; 289 return;
295 290
@@ -306,10 +301,16 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
306 hwif->ide_dma_clear_irq = &piix_dma_clear_irq; 301 hwif->ide_dma_clear_irq = &piix_dma_clear_irq;
307} 302}
308 303
304static const struct ide_port_ops piix_port_ops = {
305 .set_pio_mode = piix_set_pio_mode,
306 .set_dma_mode = piix_set_dma_mode,
307 .cable_detect = piix_cable_detect,
308};
309
309#ifndef CONFIG_IA64 310#ifndef CONFIG_IA64
310 #define IDE_HFLAGS_PIIX (IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE) 311 #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS
311#else 312#else
312 #define IDE_HFLAGS_PIIX IDE_HFLAG_BOOTABLE 313 #define IDE_HFLAGS_PIIX 0
313#endif 314#endif
314 315
315#define DECLARE_PIIX_DEV(name_str, udma) \ 316#define DECLARE_PIIX_DEV(name_str, udma) \
@@ -317,6 +318,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
317 .name = name_str, \ 318 .name = name_str, \
318 .init_hwif = init_hwif_piix, \ 319 .init_hwif = init_hwif_piix, \
319 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 320 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
321 .port_ops = &piix_port_ops, \
320 .host_flags = IDE_HFLAGS_PIIX, \ 322 .host_flags = IDE_HFLAGS_PIIX, \
321 .pio_mask = ATA_PIO4, \ 323 .pio_mask = ATA_PIO4, \
322 .swdma_mask = ATA_SWDMA2_ONLY, \ 324 .swdma_mask = ATA_SWDMA2_ONLY, \
@@ -330,6 +332,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
330 .init_chipset = init_chipset_ich, \ 332 .init_chipset = init_chipset_ich, \
331 .init_hwif = init_hwif_ich, \ 333 .init_hwif = init_hwif_ich, \
332 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 334 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
335 .port_ops = &piix_port_ops, \
333 .host_flags = IDE_HFLAGS_PIIX, \ 336 .host_flags = IDE_HFLAGS_PIIX, \
334 .pio_mask = ATA_PIO4, \ 337 .pio_mask = ATA_PIO4, \
335 .swdma_mask = ATA_SWDMA2_ONLY, \ 338 .swdma_mask = ATA_SWDMA2_ONLY, \
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/pci/rz1000.c
index 51676612f78f..532154adba29 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/pci/rz1000.c
@@ -43,7 +43,7 @@ static const struct ide_port_info rz1000_chipset __devinitdata = {
43 .name = "RZ100x", 43 .name = "RZ100x",
44 .init_hwif = init_hwif_rz1000, 44 .init_hwif = init_hwif_rz1000,
45 .chipset = ide_rz1000, 45 .chipset = ide_rz1000,
46 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_BOOTABLE, 46 .host_flags = IDE_HFLAG_NO_DMA,
47}; 47};
48 48
49static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id) 49static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 561aa47c7720..14c787b5d95f 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -165,7 +165,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
165 * 165 *
166 * returns 1 on error, 0 otherwise 166 * returns 1 on error, 0 otherwise
167 */ 167 */
168static int sc1200_ide_dma_end (ide_drive_t *drive) 168static int sc1200_dma_end(ide_drive_t *drive)
169{ 169{
170 ide_hwif_t *hwif = HWIF(drive); 170 ide_hwif_t *hwif = HWIF(drive);
171 unsigned long dma_base = hwif->dma_base; 171 unsigned long dma_base = hwif->dma_base;
@@ -214,7 +214,7 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
214 printk("SC1200: %s: changing (U)DMA mode\n", drive->name); 214 printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
215 ide_dma_off_quietly(drive); 215 ide_dma_off_quietly(drive);
216 if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma) 216 if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
217 hwif->dma_host_set(drive, 1); 217 hwif->dma_ops->dma_host_set(drive, 1);
218 return; 218 return;
219 } 219 }
220 220
@@ -286,29 +286,30 @@ static int sc1200_resume (struct pci_dev *dev)
286} 286}
287#endif 287#endif
288 288
289/* 289static const struct ide_port_ops sc1200_port_ops = {
290 * This gets invoked by the IDE driver once for each channel, 290 .set_pio_mode = sc1200_set_pio_mode,
291 * and performs channel-specific pre-initialization before drive probing. 291 .set_dma_mode = sc1200_set_dma_mode,
292 */ 292 .udma_filter = sc1200_udma_filter,
293static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif) 293};
294{
295 hwif->set_pio_mode = &sc1200_set_pio_mode;
296 hwif->set_dma_mode = &sc1200_set_dma_mode;
297
298 if (hwif->dma_base == 0)
299 return;
300 294
301 hwif->udma_filter = sc1200_udma_filter; 295static const struct ide_dma_ops sc1200_dma_ops = {
302 hwif->ide_dma_end = &sc1200_ide_dma_end; 296 .dma_host_set = ide_dma_host_set,
303} 297 .dma_setup = ide_dma_setup,
298 .dma_exec_cmd = ide_dma_exec_cmd,
299 .dma_start = ide_dma_start,
300 .dma_end = sc1200_dma_end,
301 .dma_test_irq = ide_dma_test_irq,
302 .dma_lost_irq = ide_dma_lost_irq,
303 .dma_timeout = ide_dma_timeout,
304};
304 305
305static const struct ide_port_info sc1200_chipset __devinitdata = { 306static const struct ide_port_info sc1200_chipset __devinitdata = {
306 .name = "SC1200", 307 .name = "SC1200",
307 .init_hwif = init_hwif_sc1200, 308 .port_ops = &sc1200_port_ops,
309 .dma_ops = &sc1200_dma_ops,
308 .host_flags = IDE_HFLAG_SERIALIZE | 310 .host_flags = IDE_HFLAG_SERIALIZE |
309 IDE_HFLAG_POST_SET_MODE | 311 IDE_HFLAG_POST_SET_MODE |
310 IDE_HFLAG_ABUSE_DMA_MODES | 312 IDE_HFLAG_ABUSE_DMA_MODES,
311 IDE_HFLAG_BOOTABLE,
312 .pio_mask = ATA_PIO4, 313 .pio_mask = ATA_PIO4,
313 .mwdma_mask = ATA_MWDMA2, 314 .mwdma_mask = ATA_MWDMA2,
314 .udma_mask = ATA_UDMA2, 315 .udma_mask = ATA_UDMA2,
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index ef07c7a8b97a..ad7cdf9060ca 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -65,7 +65,7 @@
65 65
66static struct scc_ports { 66static struct scc_ports {
67 unsigned long ctl, dma; 67 unsigned long ctl, dma;
68 unsigned char hwif_id; /* for removing hwif from system */ 68 ide_hwif_t *hwif; /* for removing port from system */
69} scc_ports[MAX_HWIFS]; 69} scc_ports[MAX_HWIFS];
70 70
71/* PIO transfer mode table */ 71/* PIO transfer mode table */
@@ -317,14 +317,14 @@ static int scc_dma_setup(ide_drive_t *drive)
317 317
318 318
319/** 319/**
320 * scc_ide_dma_end - Stop DMA 320 * scc_dma_end - Stop DMA
321 * @drive: IDE drive 321 * @drive: IDE drive
322 * 322 *
323 * Check and clear INT Status register. 323 * Check and clear INT Status register.
324 * Then call __ide_dma_end(). 324 * Then call __ide_dma_end().
325 */ 325 */
326 326
327static int scc_ide_dma_end(ide_drive_t * drive) 327static int scc_dma_end(ide_drive_t *drive)
328{ 328{
329 ide_hwif_t *hwif = HWIF(drive); 329 ide_hwif_t *hwif = HWIF(drive);
330 unsigned long intsts_port = hwif->dma_base + 0x014; 330 unsigned long intsts_port = hwif->dma_base + 0x014;
@@ -334,7 +334,7 @@ static int scc_ide_dma_end(ide_drive_t * drive)
334 334
335 /* errata A308 workaround: Step5 (check data loss) */ 335 /* errata A308 workaround: Step5 (check data loss) */
336 /* We don't check non ide_disk because it is limited to UDMA4 */ 336 /* We don't check non ide_disk because it is limited to UDMA4 */
337 if (!(in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET]) 337 if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
338 & ERR_STAT) && 338 & ERR_STAT) &&
339 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) { 339 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
340 reg = in_be32((void __iomem *)intsts_port); 340 reg = in_be32((void __iomem *)intsts_port);
@@ -438,7 +438,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
438 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014); 438 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
439 439
440 /* SCC errata A252,A308 workaround: Step4 */ 440 /* SCC errata A252,A308 workaround: Step4 */
441 if ((in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET]) 441 if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
442 & ERR_STAT) && 442 & ERR_STAT) &&
443 (int_stat & INTSTS_INTRQ)) 443 (int_stat & INTSTS_INTRQ))
444 return 1; 444 return 1;
@@ -449,7 +449,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
449 449
450 if (!drive->waiting_for_dma) 450 if (!drive->waiting_for_dma)
451 printk(KERN_WARNING "%s: (%s) called while not waiting\n", 451 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
452 drive->name, __FUNCTION__); 452 drive->name, __func__);
453 return 0; 453 return 0;
454} 454}
455 455
@@ -483,7 +483,7 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
483 unsigned long dma_size = pci_resource_len(dev, 1); 483 unsigned long dma_size = pci_resource_len(dev, 1);
484 void __iomem *ctl_addr; 484 void __iomem *ctl_addr;
485 void __iomem *dma_addr; 485 void __iomem *dma_addr;
486 int i; 486 int i, ret;
487 487
488 for (i = 0; i < MAX_HWIFS; i++) { 488 for (i = 0; i < MAX_HWIFS; i++) {
489 if (scc_ports[i].ctl == 0) 489 if (scc_ports[i].ctl == 0)
@@ -492,21 +492,17 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
492 if (i >= MAX_HWIFS) 492 if (i >= MAX_HWIFS)
493 return -ENOMEM; 493 return -ENOMEM;
494 494
495 if (!request_mem_region(ctl_base, ctl_size, name)) { 495 ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
496 printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME); 496 if (ret < 0) {
497 goto fail_0; 497 printk(KERN_ERR "%s: can't reserve resources\n", name);
498 } 498 return ret;
499
500 if (!request_mem_region(dma_base, dma_size, name)) {
501 printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME);
502 goto fail_1;
503 } 499 }
504 500
505 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL) 501 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
506 goto fail_2; 502 goto fail_0;
507 503
508 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL) 504 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
509 goto fail_3; 505 goto fail_1;
510 506
511 pci_set_master(dev); 507 pci_set_master(dev);
512 scc_ports[i].ctl = (unsigned long)ctl_addr; 508 scc_ports[i].ctl = (unsigned long)ctl_addr;
@@ -515,12 +511,8 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
515 511
516 return 1; 512 return 1;
517 513
518 fail_3:
519 iounmap(ctl_addr);
520 fail_2:
521 release_mem_region(dma_base, dma_size);
522 fail_1: 514 fail_1:
523 release_mem_region(ctl_base, ctl_size); 515 iounmap(ctl_addr);
524 fail_0: 516 fail_0:
525 return -ENOMEM; 517 return -ENOMEM;
526} 518}
@@ -534,26 +526,21 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
534 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 526 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
535 int i; 527 int i;
536 528
537 for (i = 0; i < MAX_HWIFS; i++) { 529 hwif = ide_find_port();
538 hwif = &ide_hwifs[i]; 530 if (hwif == NULL) {
539 if (hwif->chipset == ide_unknown)
540 break; /* pick an unused entry */
541 }
542 if (i == MAX_HWIFS) {
543 printk(KERN_ERR "%s: too many IDE interfaces, " 531 printk(KERN_ERR "%s: too many IDE interfaces, "
544 "no room in table\n", SCC_PATA_NAME); 532 "no room in table\n", SCC_PATA_NAME);
545 return -ENOMEM; 533 return -ENOMEM;
546 } 534 }
547 535
548 memset(&hw, 0, sizeof(hw)); 536 memset(&hw, 0, sizeof(hw));
549 for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; i++) 537 for (i = 0; i <= 8; i++)
550 hw.io_ports[i] = ports->dma + 0x20 + i * 4; 538 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
551 hw.irq = dev->irq; 539 hw.irq = dev->irq;
552 hw.dev = &dev->dev; 540 hw.dev = &dev->dev;
553 hw.chipset = ide_pci; 541 hw.chipset = ide_pci;
554 ide_init_port_hw(hwif, &hw); 542 ide_init_port_hw(hwif, &hw);
555 hwif->dev = &dev->dev; 543 hwif->dev = &dev->dev;
556 hwif->cds = d;
557 544
558 idx[0] = hwif->index; 545 idx[0] = hwif->index;
559 546
@@ -696,7 +683,7 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
696{ 683{
697 struct scc_ports *ports = ide_get_hwifdata(hwif); 684 struct scc_ports *ports = ide_get_hwifdata(hwif);
698 685
699 ports->hwif_id = hwif->index; 686 ports->hwif = hwif;
700 687
701 hwif->dma_command = hwif->dma_base; 688 hwif->dma_command = hwif->dma_base;
702 hwif->dma_status = hwif->dma_base + 0x04; 689 hwif->dma_status = hwif->dma_base + 0x04;
@@ -705,28 +692,38 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
705 /* PTERADD */ 692 /* PTERADD */
706 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); 693 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
707 694
708 hwif->dma_setup = scc_dma_setup;
709 hwif->ide_dma_end = scc_ide_dma_end;
710 hwif->set_pio_mode = scc_set_pio_mode;
711 hwif->set_dma_mode = scc_set_dma_mode;
712 hwif->ide_dma_test_irq = scc_dma_test_irq;
713 hwif->udma_filter = scc_udma_filter;
714
715 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN) 695 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
716 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */ 696 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
717 else 697 else
718 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ 698 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
719
720 hwif->cable_detect = scc_cable_detect;
721} 699}
722 700
701static const struct ide_port_ops scc_port_ops = {
702 .set_pio_mode = scc_set_pio_mode,
703 .set_dma_mode = scc_set_dma_mode,
704 .udma_filter = scc_udma_filter,
705 .cable_detect = scc_cable_detect,
706};
707
708static const struct ide_dma_ops scc_dma_ops = {
709 .dma_host_set = ide_dma_host_set,
710 .dma_setup = scc_dma_setup,
711 .dma_exec_cmd = ide_dma_exec_cmd,
712 .dma_start = ide_dma_start,
713 .dma_end = scc_dma_end,
714 .dma_test_irq = scc_dma_test_irq,
715 .dma_lost_irq = ide_dma_lost_irq,
716 .dma_timeout = ide_dma_timeout,
717};
718
723#define DECLARE_SCC_DEV(name_str) \ 719#define DECLARE_SCC_DEV(name_str) \
724 { \ 720 { \
725 .name = name_str, \ 721 .name = name_str, \
726 .init_iops = init_iops_scc, \ 722 .init_iops = init_iops_scc, \
727 .init_hwif = init_hwif_scc, \ 723 .init_hwif = init_hwif_scc, \
728 .host_flags = IDE_HFLAG_SINGLE | \ 724 .port_ops = &scc_port_ops, \
729 IDE_HFLAG_BOOTABLE, \ 725 .dma_ops = &scc_dma_ops, \
726 .host_flags = IDE_HFLAG_SINGLE, \
730 .pio_mask = ATA_PIO4, \ 727 .pio_mask = ATA_PIO4, \
731 } 728 }
732 729
@@ -758,11 +755,7 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
758static void __devexit scc_remove(struct pci_dev *dev) 755static void __devexit scc_remove(struct pci_dev *dev)
759{ 756{
760 struct scc_ports *ports = pci_get_drvdata(dev); 757 struct scc_ports *ports = pci_get_drvdata(dev);
761 ide_hwif_t *hwif = &ide_hwifs[ports->hwif_id]; 758 ide_hwif_t *hwif = ports->hwif;
762 unsigned long ctl_base = pci_resource_start(dev, 0);
763 unsigned long dma_base = pci_resource_start(dev, 1);
764 unsigned long ctl_size = pci_resource_len(dev, 0);
765 unsigned long dma_size = pci_resource_len(dev, 1);
766 759
767 if (hwif->dmatable_cpu) { 760 if (hwif->dmatable_cpu) {
768 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES, 761 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -770,13 +763,11 @@ static void __devexit scc_remove(struct pci_dev *dev)
770 hwif->dmatable_cpu = NULL; 763 hwif->dmatable_cpu = NULL;
771 } 764 }
772 765
773 ide_unregister(hwif->index); 766 ide_unregister(hwif);
774 767
775 hwif->chipset = ide_unknown;
776 iounmap((void*)ports->dma); 768 iounmap((void*)ports->dma);
777 iounmap((void*)ports->ctl); 769 iounmap((void*)ports->ctl);
778 release_mem_region(dma_base, dma_size); 770 pci_release_selected_regions(dev, (1 << 2) - 1);
779 release_mem_region(ctl_base, ctl_size);
780 memset(ports, 0, sizeof(*ports)); 771 memset(ports, 0, sizeof(*ports));
781} 772}
782 773
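The scc_pata hunks above carry the two conversions this merge applies across drivers/ide: the open-coded scan over ide_hwifs[] for a free slot becomes a single ide_find_port() call, and the hooks that init_hwif_scc() used to assign one by one move into the shared const scc_port_ops and scc_dma_ops tables referenced from the port info. Below is a minimal probe-side sketch of the new shape, assuming the 2.6.26-era declarations in <linux/ide.h>; my_probe and its arguments are hypothetical and not part of the patch.

#include <linux/pci.h>
#include <linux/ide.h>

/* hedged sketch: what a probe routine looks like after the conversion */
static int __devinit my_probe(struct pci_dev *dev, unsigned long base)
{
	hw_regs_t hw;
	ide_hwif_t *hwif;
	int i;

	hwif = ide_find_port();		/* replaces the open-coded ide_hwifs[] scan */
	if (hwif == NULL)
		return -ENOMEM;

	memset(&hw, 0, sizeof(hw));
	for (i = 0; i <= 8; i++)	/* 0..7 = data..status, 8 = device control */
		hw.io_ports_array[i] = base + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;
	hw.chipset = ide_pci;
	ide_init_port_hw(hwif, &hw);

	return 0;
}

The io_ports_array[] indexing used here and in the hunk replaces the old IDE_*_OFFSET symbolic indices into hw.io_ports[].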
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index c11880b0709f..a1fb20826a5b 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -312,7 +312,7 @@ static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif)
312 return ATA_CBL_PATA40; 312 return ATA_CBL_PATA40;
313} 313}
314 314
315static u8 __devinit ata66_svwks(ide_hwif_t *hwif) 315static u8 __devinit svwks_cable_detect(ide_hwif_t *hwif)
316{ 316{
317 struct pci_dev *dev = to_pci_dev(hwif->dev); 317 struct pci_dev *dev = to_pci_dev(hwif->dev);
318 318
@@ -336,28 +336,28 @@ static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
336 return ATA_CBL_PATA40; 336 return ATA_CBL_PATA40;
337} 337}
338 338
339static void __devinit init_hwif_svwks (ide_hwif_t *hwif) 339static const struct ide_port_ops osb4_port_ops = {
340{ 340 .set_pio_mode = svwks_set_pio_mode,
341 struct pci_dev *dev = to_pci_dev(hwif->dev); 341 .set_dma_mode = svwks_set_dma_mode,
342 342 .udma_filter = svwks_udma_filter,
343 hwif->set_pio_mode = &svwks_set_pio_mode; 343};
344 hwif->set_dma_mode = &svwks_set_dma_mode;
345 hwif->udma_filter = &svwks_udma_filter;
346 344
347 if (dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) 345static const struct ide_port_ops svwks_port_ops = {
348 hwif->cable_detect = ata66_svwks; 346 .set_pio_mode = svwks_set_pio_mode,
349} 347 .set_dma_mode = svwks_set_dma_mode,
348 .udma_filter = svwks_udma_filter,
349 .cable_detect = svwks_cable_detect,
350};
350 351
351#define IDE_HFLAGS_SVWKS \ 352#define IDE_HFLAGS_SVWKS \
352 (IDE_HFLAG_LEGACY_IRQS | \ 353 (IDE_HFLAG_LEGACY_IRQS | \
353 IDE_HFLAG_ABUSE_SET_DMA_MODE | \ 354 IDE_HFLAG_ABUSE_SET_DMA_MODE)
354 IDE_HFLAG_BOOTABLE)
355 355
356static const struct ide_port_info serverworks_chipsets[] __devinitdata = { 356static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
357 { /* 0 */ 357 { /* 0 */
358 .name = "SvrWks OSB4", 358 .name = "SvrWks OSB4",
359 .init_chipset = init_chipset_svwks, 359 .init_chipset = init_chipset_svwks,
360 .init_hwif = init_hwif_svwks, 360 .port_ops = &osb4_port_ops,
361 .host_flags = IDE_HFLAGS_SVWKS, 361 .host_flags = IDE_HFLAGS_SVWKS,
362 .pio_mask = ATA_PIO4, 362 .pio_mask = ATA_PIO4,
363 .mwdma_mask = ATA_MWDMA2, 363 .mwdma_mask = ATA_MWDMA2,
@@ -365,7 +365,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
365 },{ /* 1 */ 365 },{ /* 1 */
366 .name = "SvrWks CSB5", 366 .name = "SvrWks CSB5",
367 .init_chipset = init_chipset_svwks, 367 .init_chipset = init_chipset_svwks,
368 .init_hwif = init_hwif_svwks, 368 .port_ops = &svwks_port_ops,
369 .host_flags = IDE_HFLAGS_SVWKS, 369 .host_flags = IDE_HFLAGS_SVWKS,
370 .pio_mask = ATA_PIO4, 370 .pio_mask = ATA_PIO4,
371 .mwdma_mask = ATA_MWDMA2, 371 .mwdma_mask = ATA_MWDMA2,
@@ -373,7 +373,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
373 },{ /* 2 */ 373 },{ /* 2 */
374 .name = "SvrWks CSB6", 374 .name = "SvrWks CSB6",
375 .init_chipset = init_chipset_svwks, 375 .init_chipset = init_chipset_svwks,
376 .init_hwif = init_hwif_svwks, 376 .port_ops = &svwks_port_ops,
377 .host_flags = IDE_HFLAGS_SVWKS, 377 .host_flags = IDE_HFLAGS_SVWKS,
378 .pio_mask = ATA_PIO4, 378 .pio_mask = ATA_PIO4,
379 .mwdma_mask = ATA_MWDMA2, 379 .mwdma_mask = ATA_MWDMA2,
@@ -381,7 +381,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
381 },{ /* 3 */ 381 },{ /* 3 */
382 .name = "SvrWks CSB6", 382 .name = "SvrWks CSB6",
383 .init_chipset = init_chipset_svwks, 383 .init_chipset = init_chipset_svwks,
384 .init_hwif = init_hwif_svwks, 384 .port_ops = &svwks_port_ops,
385 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 385 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
386 .pio_mask = ATA_PIO4, 386 .pio_mask = ATA_PIO4,
387 .mwdma_mask = ATA_MWDMA2, 387 .mwdma_mask = ATA_MWDMA2,
@@ -389,7 +389,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
389 },{ /* 4 */ 389 },{ /* 4 */
390 .name = "SvrWks HT1000", 390 .name = "SvrWks HT1000",
391 .init_chipset = init_chipset_svwks, 391 .init_chipset = init_chipset_svwks,
392 .init_hwif = init_hwif_svwks, 392 .port_ops = &svwks_port_ops,
393 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 393 .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
394 .pio_mask = ATA_PIO4, 394 .pio_mask = ATA_PIO4,
395 .mwdma_mask = ATA_MWDMA2, 395 .mwdma_mask = ATA_MWDMA2,
@@ -418,7 +418,7 @@ static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device
418 else if (idx == 2 || idx == 3) { 418 else if (idx == 2 || idx == 3) {
419 if ((PCI_FUNC(dev->devfn) & 1) == 0) { 419 if ((PCI_FUNC(dev->devfn) & 1) == 0) {
420 if (pci_resource_start(dev, 0) != 0x01f1) 420 if (pci_resource_start(dev, 0) != 0x01f1)
421 d.host_flags &= ~IDE_HFLAG_BOOTABLE; 421 d.host_flags |= IDE_HFLAG_NON_BOOTABLE;
422 d.host_flags |= IDE_HFLAG_SINGLE; 422 d.host_flags |= IDE_HFLAG_SINGLE;
423 } else 423 } else
424 d.host_flags &= ~IDE_HFLAG_SINGLE; 424 d.host_flags &= ~IDE_HFLAG_SINGLE;
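Two details are worth noting in the serverworks conversion: the OSB4 keeps its own osb4_port_ops without a .cable_detect hook, matching the old code path that skipped ata66_svwks on that device, and the boot-ability convention is inverted, so instead of drivers clearing IDE_HFLAG_BOOTABLE the default is now bootable and only the exceptions set IDE_HFLAG_NON_BOOTABLE. A hedged before-and-after sketch of that flag flip, where d is the driver's writable struct ide_port_info copy as in the hunk above:

/* before: bootable was opt-in, so the exception cleared the bit */
if (pci_resource_start(dev, 0) != 0x01f1)
	d.host_flags &= ~IDE_HFLAG_BOOTABLE;

/* after: bootable is the default, so the same exception sets the inverse flag */
if (pci_resource_start(dev, 0) != 0x01f1)
	d.host_flags |= IDE_HFLAG_NON_BOOTABLE;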
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 9d1a3038af9b..63e28f4e6d3b 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -98,28 +98,28 @@ sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
98 int i; 98 int i;
99 99
100 /* Registers are word (32 bit) aligned */ 100 /* Registers are word (32 bit) aligned */
101 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) 101 for (i = 0; i <= 7; i++)
102 hw->io_ports[i] = reg + i * 4; 102 hw->io_ports_array[i] = reg + i * 4;
103 103
104 if (ctrl_port) 104 if (ctrl_port)
105 hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; 105 hw->io_ports.ctl_addr = ctrl_port;
106 106
107 if (irq_port) 107 if (irq_port)
108 hw->io_ports[IDE_IRQ_OFFSET] = irq_port; 108 hw->io_ports.irq_addr = irq_port;
109} 109}
110 110
111static void 111static void
112sgiioc4_maskproc(ide_drive_t * drive, int mask) 112sgiioc4_maskproc(ide_drive_t * drive, int mask)
113{ 113{
114 writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), 114 writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
115 (void __iomem *)drive->hwif->io_ports[IDE_CONTROL_OFFSET]); 115 (void __iomem *)drive->hwif->io_ports.ctl_addr);
116} 116}
117 117
118static int 118static int
119sgiioc4_checkirq(ide_hwif_t * hwif) 119sgiioc4_checkirq(ide_hwif_t * hwif)
120{ 120{
121 unsigned long intr_addr = 121 unsigned long intr_addr =
122 hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4; 122 hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;
123 123
124 if ((u8)readl((void __iomem *)intr_addr) & 0x03) 124 if ((u8)readl((void __iomem *)intr_addr) & 0x03)
125 return 1; 125 return 1;
@@ -134,8 +134,8 @@ sgiioc4_clearirq(ide_drive_t * drive)
134{ 134{
135 u32 intr_reg; 135 u32 intr_reg;
136 ide_hwif_t *hwif = HWIF(drive); 136 ide_hwif_t *hwif = HWIF(drive);
137 unsigned long other_ir = 137 struct ide_io_ports *io_ports = &hwif->io_ports;
138 hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2); 138 unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);
139 139
140 /* Code to check for PCI error conditions */ 140 /* Code to check for PCI error conditions */
141 intr_reg = readl((void __iomem *)other_ir); 141 intr_reg = readl((void __iomem *)other_ir);
@@ -147,12 +147,12 @@ sgiioc4_clearirq(ide_drive_t * drive)
147 * a "clear" status if it got cleared. If not, then spin 147 * a "clear" status if it got cleared. If not, then spin
148 * for a bit trying to clear it. 148 * for a bit trying to clear it.
149 */ 149 */
150 u8 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); 150 u8 stat = sgiioc4_INB(io_ports->status_addr);
151 int count = 0; 151 int count = 0;
152 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); 152 stat = sgiioc4_INB(io_ports->status_addr);
153 while ((stat & 0x80) && (count++ < 100)) { 153 while ((stat & 0x80) && (count++ < 100)) {
154 udelay(1); 154 udelay(1);
155 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); 155 stat = sgiioc4_INB(io_ports->status_addr);
156 } 156 }
157 157
158 if (intr_reg & 0x02) { 158 if (intr_reg & 0x02) {
@@ -162,18 +162,18 @@ sgiioc4_clearirq(ide_drive_t * drive)
162 pci_stat_cmd_reg; 162 pci_stat_cmd_reg;
163 163
164 pci_err_addr_low = 164 pci_err_addr_low =
165 readl((void __iomem *)hwif->io_ports[IDE_IRQ_OFFSET]); 165 readl((void __iomem *)io_ports->irq_addr);
166 pci_err_addr_high = 166 pci_err_addr_high =
167 readl((void __iomem *)(hwif->io_ports[IDE_IRQ_OFFSET] + 4)); 167 readl((void __iomem *)(io_ports->irq_addr + 4));
168 pci_read_config_dword(dev, PCI_COMMAND, 168 pci_read_config_dword(dev, PCI_COMMAND,
169 &pci_stat_cmd_reg); 169 &pci_stat_cmd_reg);
170 printk(KERN_ERR 170 printk(KERN_ERR
171 "%s(%s) : PCI Bus Error when doing DMA:" 171 "%s(%s) : PCI Bus Error when doing DMA:"
172 " status-cmd reg is 0x%x\n", 172 " status-cmd reg is 0x%x\n",
173 __FUNCTION__, drive->name, pci_stat_cmd_reg); 173 __func__, drive->name, pci_stat_cmd_reg);
174 printk(KERN_ERR 174 printk(KERN_ERR
175 "%s(%s) : PCI Error Address is 0x%x%x\n", 175 "%s(%s) : PCI Error Address is 0x%x%x\n",
176 __FUNCTION__, drive->name, 176 __func__, drive->name,
177 pci_err_addr_high, pci_err_addr_low); 177 pci_err_addr_high, pci_err_addr_low);
178 /* Clear the PCI Error indicator */ 178 /* Clear the PCI Error indicator */
179 pci_write_config_dword(dev, PCI_COMMAND, 0x00000146); 179 pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
@@ -188,7 +188,7 @@ sgiioc4_clearirq(ide_drive_t * drive)
188 return intr_reg & 3; 188 return intr_reg & 3;
189} 189}
190 190
191static void sgiioc4_ide_dma_start(ide_drive_t * drive) 191static void sgiioc4_dma_start(ide_drive_t *drive)
192{ 192{
193 ide_hwif_t *hwif = HWIF(drive); 193 ide_hwif_t *hwif = HWIF(drive);
194 unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4; 194 unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
@@ -215,8 +215,7 @@ sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
215} 215}
216 216
217/* Stops the IOC4 DMA Engine */ 217/* Stops the IOC4 DMA Engine */
218static int 218static int sgiioc4_dma_end(ide_drive_t *drive)
219sgiioc4_ide_dma_end(ide_drive_t * drive)
220{ 219{
221 u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0; 220 u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
222 ide_hwif_t *hwif = HWIF(drive); 221 ide_hwif_t *hwif = HWIF(drive);
@@ -232,7 +231,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
232 printk(KERN_ERR 231 printk(KERN_ERR
233 "%s(%s): IOC4 DMA STOP bit is still 1 :" 232 "%s(%s): IOC4 DMA STOP bit is still 1 :"
234 "ioc4_dma_reg 0x%x\n", 233 "ioc4_dma_reg 0x%x\n",
235 __FUNCTION__, drive->name, ioc4_dma); 234 __func__, drive->name, ioc4_dma);
236 dma_stat = 1; 235 dma_stat = 1;
237 } 236 }
238 237
@@ -251,7 +250,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
251 udelay(1); 250 udelay(1);
252 } 251 }
253 if (!valid) { 252 if (!valid) {
254 printk(KERN_ERR "%s(%s) : DMA incomplete\n", __FUNCTION__, 253 printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
255 drive->name); 254 drive->name);
256 dma_stat = 1; 255 dma_stat = 1;
257 } 256 }
@@ -264,7 +263,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
264 printk(KERN_ERR 263 printk(KERN_ERR
265 "%s(%s): WARNING!! byte_count_dev %d " 264 "%s(%s): WARNING!! byte_count_dev %d "
266 "!= byte_count_mem %d\n", 265 "!= byte_count_mem %d\n",
267 __FUNCTION__, drive->name, bc_dev, bc_mem); 266 __func__, drive->name, bc_dev, bc_mem);
268 } 267 }
269 } 268 }
270 269
@@ -279,8 +278,7 @@ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
279} 278}
280 279
281/* returns 1 if dma irq issued, 0 otherwise */ 280/* returns 1 if dma irq issued, 0 otherwise */
282static int 281static int sgiioc4_dma_test_irq(ide_drive_t *drive)
283sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
284{ 282{
285 return sgiioc4_checkirq(HWIF(drive)); 283 return sgiioc4_checkirq(HWIF(drive));
286} 284}
@@ -294,7 +292,7 @@ static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
294static void 292static void
295sgiioc4_resetproc(ide_drive_t * drive) 293sgiioc4_resetproc(ide_drive_t * drive)
296{ 294{
297 sgiioc4_ide_dma_end(drive); 295 sgiioc4_dma_end(drive);
298 sgiioc4_clearirq(drive); 296 sgiioc4_clearirq(drive);
299} 297}
300 298
@@ -329,13 +327,17 @@ sgiioc4_INB(unsigned long port)
329 327
330/* Creates a dma map for the scatter-gather list entries */ 328/* Creates a dma map for the scatter-gather list entries */
331static int __devinit 329static int __devinit
332ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base) 330ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
333{ 331{
334 struct pci_dev *dev = to_pci_dev(hwif->dev); 332 struct pci_dev *dev = to_pci_dev(hwif->dev);
333 unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
335 void __iomem *virt_dma_base; 334 void __iomem *virt_dma_base;
336 int num_ports = sizeof (ioc4_dma_regs_t); 335 int num_ports = sizeof (ioc4_dma_regs_t);
337 void *pad; 336 void *pad;
338 337
338 if (dma_base == 0)
339 return -1;
340
339 printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, 341 printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
340 dma_base, dma_base + num_ports - 1); 342 dma_base, dma_base + num_ports - 1);
341 343
@@ -343,7 +345,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
343 printk(KERN_ERR 345 printk(KERN_ERR
344 "%s(%s) -- ERROR, Addresses 0x%p to 0x%p " 346 "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
345 "ALREADY in use\n", 347 "ALREADY in use\n",
346 __FUNCTION__, hwif->name, (void *) dma_base, 348 __func__, hwif->name, (void *) dma_base,
347 (void *) dma_base + num_ports - 1); 349 (void *) dma_base + num_ports - 1);
348 return -1; 350 return -1;
349 } 351 }
@@ -352,7 +354,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
352 if (virt_dma_base == NULL) { 354 if (virt_dma_base == NULL) {
353 printk(KERN_ERR 355 printk(KERN_ERR
354 "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n", 356 "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
355 __FUNCTION__, hwif->name, dma_base, dma_base + num_ports - 1); 357 __func__, hwif->name, dma_base, dma_base + num_ports - 1);
356 goto dma_remap_failure; 358 goto dma_remap_failure;
357 } 359 }
358 hwif->dma_base = (unsigned long) virt_dma_base; 360 hwif->dma_base = (unsigned long) virt_dma_base;
@@ -378,7 +380,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
378 hwif->dmatable_cpu, hwif->dmatable_dma); 380 hwif->dmatable_cpu, hwif->dmatable_dma);
379 printk(KERN_INFO 381 printk(KERN_INFO
380 "%s() -- Error! Unable to allocate DMA Maps for drive %s\n", 382 "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
381 __FUNCTION__, hwif->name); 383 __func__, hwif->name);
382 printk(KERN_INFO 384 printk(KERN_INFO
383 "Changing from DMA to PIO mode for Drive %s\n", hwif->name); 385 "Changing from DMA to PIO mode for Drive %s\n", hwif->name);
384 386
@@ -406,14 +408,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
406 if (ioc4_dma & IOC4_S_DMA_ACTIVE) { 408 if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
407 printk(KERN_WARNING 409 printk(KERN_WARNING
408 "%s(%s):Warning!! DMA from previous transfer was still active\n", 410 "%s(%s):Warning!! DMA from previous transfer was still active\n",
409 __FUNCTION__, drive->name); 411 __func__, drive->name);
410 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr); 412 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
411 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); 413 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
412 414
413 if (ioc4_dma & IOC4_S_DMA_STOP) 415 if (ioc4_dma & IOC4_S_DMA_STOP)
414 printk(KERN_ERR 416 printk(KERN_ERR
415 "%s(%s) : IOC4 Dma STOP bit is still 1\n", 417 "%s(%s) : IOC4 Dma STOP bit is still 1\n",
416 __FUNCTION__, drive->name); 418 __func__, drive->name);
417 } 419 }
418 420
419 ioc4_dma = readl((void __iomem *)ioc4_dma_addr); 421 ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
@@ -421,14 +423,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
421 printk(KERN_WARNING 423 printk(KERN_WARNING
422 "%s(%s) : Warning!! - DMA Error during Previous" 424 "%s(%s) : Warning!! - DMA Error during Previous"
423 " transfer | status 0x%x\n", 425 " transfer | status 0x%x\n",
424 __FUNCTION__, drive->name, ioc4_dma); 426 __func__, drive->name, ioc4_dma);
425 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr); 427 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
426 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); 428 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
427 429
428 if (ioc4_dma & IOC4_S_DMA_STOP) 430 if (ioc4_dma & IOC4_S_DMA_STOP)
429 printk(KERN_ERR 431 printk(KERN_ERR
430 "%s(%s) : IOC4 DMA STOP bit is still 1\n", 432 "%s(%s) : IOC4 DMA STOP bit is still 1\n",
431 __FUNCTION__, drive->name); 433 __func__, drive->name);
432 } 434 }
433 435
434 /* Address of the Scatter Gather List */ 436 /* Address of the Scatter Gather List */
@@ -519,7 +521,7 @@ use_pio_instead:
519 return 0; /* revert to PIO for this request */ 521 return 0; /* revert to PIO for this request */
520} 522}
521 523
522static int sgiioc4_ide_dma_setup(ide_drive_t *drive) 524static int sgiioc4_dma_setup(ide_drive_t *drive)
523{ 525{
524 struct request *rq = HWGROUP(drive)->rq; 526 struct request *rq = HWGROUP(drive)->rq;
525 unsigned int count = 0; 527 unsigned int count = 0;
@@ -548,62 +550,45 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
548 return 0; 550 return 0;
549} 551}
550 552
551static void __devinit 553static const struct ide_port_ops sgiioc4_port_ops = {
552ide_init_sgiioc4(ide_hwif_t * hwif) 554 .set_dma_mode = sgiioc4_set_dma_mode,
553{ 555 /* reset DMA engine, clear IRQs */
554 hwif->mmio = 1; 556 .resetproc = sgiioc4_resetproc,
555 hwif->set_pio_mode = NULL; /* Sets timing for PIO mode */ 557 /* mask on/off NIEN register */
556 hwif->set_dma_mode = &sgiioc4_set_dma_mode; 558 .maskproc = sgiioc4_maskproc,
557 hwif->selectproc = NULL;/* Use the default routine to select drive */ 559};
558 hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */
559 hwif->pre_reset = NULL; /* No HBA specific pre_set needed */
560 hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
561 clear interrupts */
562 hwif->maskproc = &sgiioc4_maskproc; /* Mask on/off NIEN register */
563 hwif->quirkproc = NULL;
564
565 hwif->INB = &sgiioc4_INB;
566
567 if (hwif->dma_base == 0)
568 return;
569 560
570 hwif->dma_host_set = &sgiioc4_dma_host_set; 561static const struct ide_dma_ops sgiioc4_dma_ops = {
571 hwif->dma_setup = &sgiioc4_ide_dma_setup; 562 .dma_host_set = sgiioc4_dma_host_set,
572 hwif->dma_start = &sgiioc4_ide_dma_start; 563 .dma_setup = sgiioc4_dma_setup,
573 hwif->ide_dma_end = &sgiioc4_ide_dma_end; 564 .dma_start = sgiioc4_dma_start,
574 hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq; 565 .dma_end = sgiioc4_dma_end,
575 hwif->dma_lost_irq = &sgiioc4_dma_lost_irq; 566 .dma_test_irq = sgiioc4_dma_test_irq,
576 hwif->dma_timeout = &ide_dma_timeout; 567 .dma_lost_irq = sgiioc4_dma_lost_irq,
577} 568 .dma_timeout = ide_dma_timeout,
569};
578 570
579static const struct ide_port_info sgiioc4_port_info __devinitdata = { 571static const struct ide_port_info sgiioc4_port_info __devinitdata = {
580 .chipset = ide_pci, 572 .chipset = ide_pci,
581 .host_flags = IDE_HFLAG_NO_DMA | /* no SFF-style DMA */ 573 .init_dma = ide_dma_sgiioc4,
582 IDE_HFLAG_NO_AUTOTUNE, 574 .port_ops = &sgiioc4_port_ops,
575 .dma_ops = &sgiioc4_dma_ops,
583 .mwdma_mask = ATA_MWDMA2_ONLY, 576 .mwdma_mask = ATA_MWDMA2_ONLY,
584}; 577};
585 578
586static int __devinit 579static int __devinit
587sgiioc4_ide_setup_pci_device(struct pci_dev *dev) 580sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
588{ 581{
589 unsigned long cmd_base, dma_base, irqport; 582 unsigned long cmd_base, irqport;
590 unsigned long bar0, cmd_phys_base, ctl; 583 unsigned long bar0, cmd_phys_base, ctl;
591 void __iomem *virt_base; 584 void __iomem *virt_base;
592 ide_hwif_t *hwif; 585 ide_hwif_t *hwif;
593 int h;
594 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 586 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
595 hw_regs_t hw; 587 hw_regs_t hw;
596 struct ide_port_info d = sgiioc4_port_info; 588 struct ide_port_info d = sgiioc4_port_info;
597 589
598 /* 590 hwif = ide_find_port();
599 * Find an empty HWIF; if none available, return -ENOMEM. 591 if (hwif == NULL) {
600 */
601 for (h = 0; h < MAX_HWIFS; ++h) {
602 hwif = &ide_hwifs[h];
603 if (hwif->chipset == ide_unknown)
604 break;
605 }
606 if (h == MAX_HWIFS) {
607 printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", 592 printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n",
608 DRV_NAME); 593 DRV_NAME);
609 return -ENOMEM; 594 return -ENOMEM;
@@ -620,7 +605,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
620 cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET; 605 cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
621 ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET; 606 ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
622 irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET; 607 irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
623 dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
624 608
625 cmd_phys_base = bar0 + IOC4_CMD_OFFSET; 609 cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
626 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, 610 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
@@ -628,7 +612,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
628 printk(KERN_ERR 612 printk(KERN_ERR
629 "%s : %s -- ERROR, Addresses " 613 "%s : %s -- ERROR, Addresses "
630 "0x%p to 0x%p ALREADY in use\n", 614 "0x%p to 0x%p ALREADY in use\n",
631 __FUNCTION__, hwif->name, (void *) cmd_phys_base, 615 __func__, hwif->name, (void *) cmd_phys_base,
632 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); 616 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
633 return -ENOMEM; 617 return -ENOMEM;
634 } 618 }
@@ -649,13 +633,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
649 /* Initializing chipset IRQ Registers */ 633 /* Initializing chipset IRQ Registers */
650 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 634 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
651 635
652 if (dma_base == 0 || ide_dma_sgiioc4(hwif, dma_base)) { 636 hwif->INB = &sgiioc4_INB;
653 printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n",
654 hwif->name, DRV_NAME);
655 d.mwdma_mask = 0;
656 }
657
658 ide_init_sgiioc4(hwif);
659 637
660 idx[0] = hwif->index; 638 idx[0] = hwif->index;
661 639
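In sgiioc4 the DMA engine mapping stops being called by hand from the setup path: ide_dma_sgiioc4() now takes the const struct ide_port_info, computes its own dma_base from BAR 0, and is wired up through the new .init_dma hook, which the IDE core presumably invokes while bringing the port up. A hedged sketch of that hook shape follows; the my_ names and the 0x100 register offset are hypothetical, and a negative return is taken, as in the hunk above, to mean the port stays PIO-only.

static int __devinit my_init_dma(ide_hwif_t *hwif,
				 const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long dma_base = pci_resource_start(dev, 0) + 0x100;

	if (dma_base == 0)
		return -1;	/* no usable BAR: leave the port without DMA */

	hwif->dma_base = dma_base;
	return 0;
}

static const struct ide_port_info my_port_info __devinitdata = {
	.chipset  = ide_pci,
	.init_dma = my_init_dma,	/* replaces open-coded DMA setup in the probe path */
};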
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index b6be1b45f329..c2040a017f47 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -301,7 +301,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
301} 301}
302 302
303/* returns 1 if dma irq issued, 0 otherwise */ 303/* returns 1 if dma irq issued, 0 otherwise */
304static int siimage_io_ide_dma_test_irq (ide_drive_t *drive) 304static int siimage_io_dma_test_irq(ide_drive_t *drive)
305{ 305{
306 ide_hwif_t *hwif = HWIF(drive); 306 ide_hwif_t *hwif = HWIF(drive);
307 struct pci_dev *dev = to_pci_dev(hwif->dev); 307 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -320,14 +320,14 @@ static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
320} 320}
321 321
322/** 322/**
323 * siimage_mmio_ide_dma_test_irq - check we caused an IRQ 323 * siimage_mmio_dma_test_irq - check we caused an IRQ
324 * @drive: drive we are testing 324 * @drive: drive we are testing
325 * 325 *
326 * Check if we caused an IDE DMA interrupt. We may also have caused 326 * Check if we caused an IDE DMA interrupt. We may also have caused
327 * SATA status interrupts, if so we clean them up and continue. 327 * SATA status interrupts, if so we clean them up and continue.
328 */ 328 */
329 329
330static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive) 330static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
331{ 331{
332 ide_hwif_t *hwif = HWIF(drive); 332 ide_hwif_t *hwif = HWIF(drive);
333 unsigned long addr = siimage_selreg(hwif, 0x1); 333 unsigned long addr = siimage_selreg(hwif, 0x1);
@@ -347,7 +347,7 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
347 printk(KERN_WARNING "%s: sata_error = 0x%08x, " 347 printk(KERN_WARNING "%s: sata_error = 0x%08x, "
348 "watchdog = %d, %s\n", 348 "watchdog = %d, %s\n",
349 drive->name, sata_error, watchdog, 349 drive->name, sata_error, watchdog,
350 __FUNCTION__); 350 __func__);
351 351
352 } else { 352 } else {
353 watchdog = (ext_stat & 0x8000) ? 1 : 0; 353 watchdog = (ext_stat & 0x8000) ? 1 : 0;
@@ -369,6 +369,14 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
369 return 0; 369 return 0;
370} 370}
371 371
372static int siimage_dma_test_irq(ide_drive_t *drive)
373{
374 if (drive->hwif->mmio)
375 return siimage_mmio_dma_test_irq(drive);
376 else
377 return siimage_io_dma_test_irq(drive);
378}
379
372/** 380/**
373 * sil_sata_reset_poll - wait for SATA reset 381 * sil_sata_reset_poll - wait for SATA reset
374 * @drive: drive we are resetting 382 * @drive: drive we are resetting
@@ -614,9 +622,10 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
614 struct pci_dev *dev = to_pci_dev(hwif->dev); 622 struct pci_dev *dev = to_pci_dev(hwif->dev);
615 void *addr = pci_get_drvdata(dev); 623 void *addr = pci_get_drvdata(dev);
616 u8 ch = hwif->channel; 624 u8 ch = hwif->channel;
617 hw_regs_t hw;
618 unsigned long base; 625 unsigned long base;
619 626
627 struct ide_io_ports *io_ports = &hwif->io_ports;
628
620 /* 629 /*
621 * Fill in the basic HWIF bits 630 * Fill in the basic HWIF bits
622 */ 631 */
@@ -630,7 +639,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
630 * based I/O 639 * based I/O
631 */ 640 */
632 641
633 memset(&hw, 0, sizeof(hw_regs_t)); 642 memset(io_ports, 0, sizeof(*io_ports));
634 643
635 base = (unsigned long)addr; 644 base = (unsigned long)addr;
636 if (ch) 645 if (ch)
@@ -643,17 +652,15 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
643 * so we can't currently use it sanely since we want to 652 * so we can't currently use it sanely since we want to
644 * use LBA48 mode. 653 * use LBA48 mode.
645 */ 654 */
646 hw.io_ports[IDE_DATA_OFFSET] = base; 655 io_ports->data_addr = base;
647 hw.io_ports[IDE_ERROR_OFFSET] = base + 1; 656 io_ports->error_addr = base + 1;
648 hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2; 657 io_ports->nsect_addr = base + 2;
649 hw.io_ports[IDE_SECTOR_OFFSET] = base + 3; 658 io_ports->lbal_addr = base + 3;
650 hw.io_ports[IDE_LCYL_OFFSET] = base + 4; 659 io_ports->lbam_addr = base + 4;
651 hw.io_ports[IDE_HCYL_OFFSET] = base + 5; 660 io_ports->lbah_addr = base + 5;
652 hw.io_ports[IDE_SELECT_OFFSET] = base + 6; 661 io_ports->device_addr = base + 6;
653 hw.io_ports[IDE_STATUS_OFFSET] = base + 7; 662 io_ports->status_addr = base + 7;
654 hw.io_ports[IDE_CONTROL_OFFSET] = base + 10; 663 io_ports->ctl_addr = base + 10;
655
656 hw.io_ports[IDE_IRQ_OFFSET] = 0;
657 664
658 if (pdev_is_sata(dev)) { 665 if (pdev_is_sata(dev)) {
659 base = (unsigned long)addr; 666 base = (unsigned long)addr;
@@ -664,8 +671,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
664 hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100; 671 hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
665 } 672 }
666 673
667 memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
668
669 hwif->irq = dev->irq; 674 hwif->irq = dev->irq;
670 675
671 hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00); 676 hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
@@ -735,14 +740,14 @@ static void __devinit init_iops_siimage(ide_hwif_t *hwif)
735} 740}
736 741
737/** 742/**
738 * ata66_siimage - check for 80 pin cable 743 * sil_cable_detect - cable detection
739 * @hwif: interface to check 744 * @hwif: interface to check
740 * 745 *
741 * Check for the presence of an ATA66 capable cable on the 746 * Check for the presence of an ATA66 capable cable on the
742 * interface. 747 * interface.
743 */ 748 */
744 749
745static u8 __devinit ata66_siimage(ide_hwif_t *hwif) 750static u8 __devinit sil_cable_detect(ide_hwif_t *hwif)
746{ 751{
747 struct pci_dev *dev = to_pci_dev(hwif->dev); 752 struct pci_dev *dev = to_pci_dev(hwif->dev);
748 unsigned long addr = siimage_selreg(hwif, 0); 753 unsigned long addr = siimage_selreg(hwif, 0);
@@ -756,68 +761,44 @@ static u8 __devinit ata66_siimage(ide_hwif_t *hwif)
756 return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 761 return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
757} 762}
758 763
759/** 764static const struct ide_port_ops sil_pata_port_ops = {
760 * init_hwif_siimage - set up hwif structs 765 .set_pio_mode = sil_set_pio_mode,
761 * @hwif: interface to set up 766 .set_dma_mode = sil_set_dma_mode,
762 * 767 .quirkproc = sil_quirkproc,
763 * We do the basic set up of the interface structure. The SIIMAGE 768 .udma_filter = sil_pata_udma_filter,
764 * requires several custom handlers so we override the default 769 .cable_detect = sil_cable_detect,
765 * ide DMA handlers appropriately 770};
766 */
767
768static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
769{
770 u8 sata = is_sata(hwif);
771
772 hwif->set_pio_mode = &sil_set_pio_mode;
773 hwif->set_dma_mode = &sil_set_dma_mode;
774 hwif->quirkproc = &sil_quirkproc;
775
776 if (sata) {
777 static int first = 1;
778
779 hwif->reset_poll = &sil_sata_reset_poll;
780 hwif->pre_reset = &sil_sata_pre_reset;
781 hwif->udma_filter = &sil_sata_udma_filter;
782
783 if (first) {
784 printk(KERN_INFO "siimage: For full SATA support you should use the libata sata_sil module.\n");
785 first = 0;
786 }
787 } else
788 hwif->udma_filter = &sil_pata_udma_filter;
789
790 hwif->cable_detect = ata66_siimage;
791
792 if (hwif->dma_base == 0)
793 return;
794 771
795 if (sata) 772static const struct ide_port_ops sil_sata_port_ops = {
796 hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA; 773 .set_pio_mode = sil_set_pio_mode,
774 .set_dma_mode = sil_set_dma_mode,
775 .reset_poll = sil_sata_reset_poll,
776 .pre_reset = sil_sata_pre_reset,
777 .quirkproc = sil_quirkproc,
778 .udma_filter = sil_sata_udma_filter,
779 .cable_detect = sil_cable_detect,
780};
797 781
798 if (hwif->mmio) { 782static struct ide_dma_ops sil_dma_ops = {
799 hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq; 783 .dma_test_irq = siimage_dma_test_irq,
800 } else { 784};
801 hwif->ide_dma_test_irq = & siimage_io_ide_dma_test_irq;
802 }
803}
804 785
805#define DECLARE_SII_DEV(name_str) \ 786#define DECLARE_SII_DEV(name_str, p_ops) \
806 { \ 787 { \
807 .name = name_str, \ 788 .name = name_str, \
808 .init_chipset = init_chipset_siimage, \ 789 .init_chipset = init_chipset_siimage, \
809 .init_iops = init_iops_siimage, \ 790 .init_iops = init_iops_siimage, \
810 .init_hwif = init_hwif_siimage, \ 791 .port_ops = p_ops, \
811 .host_flags = IDE_HFLAG_BOOTABLE, \ 792 .dma_ops = &sil_dma_ops, \
812 .pio_mask = ATA_PIO4, \ 793 .pio_mask = ATA_PIO4, \
813 .mwdma_mask = ATA_MWDMA2, \ 794 .mwdma_mask = ATA_MWDMA2, \
814 .udma_mask = ATA_UDMA6, \ 795 .udma_mask = ATA_UDMA6, \
815 } 796 }
816 797
817static const struct ide_port_info siimage_chipsets[] __devinitdata = { 798static const struct ide_port_info siimage_chipsets[] __devinitdata = {
818 /* 0 */ DECLARE_SII_DEV("SiI680"), 799 /* 0 */ DECLARE_SII_DEV("SiI680", &sil_pata_port_ops),
819 /* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA"), 800 /* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA", &sil_sata_port_ops),
820 /* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA") 801 /* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA", &sil_sata_port_ops)
821}; 802};
822 803
823/** 804/**
@@ -831,7 +812,24 @@ static const struct ide_port_info siimage_chipsets[] __devinitdata = {
831 812
832static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id) 813static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
833{ 814{
834 return ide_setup_pci_device(dev, &siimage_chipsets[id->driver_data]); 815 struct ide_port_info d;
816 u8 idx = id->driver_data;
817
818 d = siimage_chipsets[idx];
819
820 if (idx) {
821 static int first = 1;
822
823 if (first) {
824 printk(KERN_INFO "siimage: For full SATA support you "
825 "should use the libata sata_sil module.\n");
826 first = 0;
827 }
828
829 d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
830 }
831
832 return ide_setup_pci_device(dev, &d);
835} 833}
836 834
837static const struct pci_device_id siimage_pci_tbl[] = { 835static const struct pci_device_id siimage_pci_tbl[] = {
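The siimage conversion shows how per-device special cases survive the move to shared const tables: the MMIO versus port I/O split in dma_test_irq becomes the small runtime dispatcher siimage_dma_test_irq(), and SATA-only adjustments such as IDE_HFLAG_NO_ATAPI_DMA are applied in init_one to a writable copy of the chipset template before registration. A condensed sketch of that copy-and-tweak pattern, with a hypothetical my_chipsets[] table standing in for the real one:

static const struct ide_port_info my_chipsets[] __devinitdata = {
	{ .name = "PATA variant", .pio_mask = ATA_PIO4 },
	{ .name = "SATA variant", .pio_mask = ATA_PIO4 },
};

static int __devinit my_init_one(struct pci_dev *dev,
				 const struct pci_device_id *id)
{
	/* start from the const template... */
	struct ide_port_info d = my_chipsets[id->driver_data];

	/* ...then adjust the writable copy for this particular device */
	if (id->driver_data != 0)		/* the SATA entries in this table */
		d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;

	return ide_setup_pci_device(dev, &d);
}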
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 512bb4c1fd5c..4b0b85d8faf5 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -59,10 +59,10 @@
59#define ATA_16 0x01 59#define ATA_16 0x01
60#define ATA_33 0x02 60#define ATA_33 0x02
61#define ATA_66 0x03 61#define ATA_66 0x03
62#define ATA_100a 0x04 // SiS730/SiS550 is ATA100 with ATA66 layout 62#define ATA_100a 0x04 /* SiS730/SiS550 is ATA100 with ATA66 layout */
63#define ATA_100 0x05 63#define ATA_100 0x05
64#define ATA_133a 0x06 // SiS961b with 133 support 64#define ATA_133a 0x06 /* SiS961b with 133 support */
65#define ATA_133 0x07 // SiS962/963 65#define ATA_133 0x07 /* SiS962/963 */
66 66
67static u8 chipset_family; 67static u8 chipset_family;
68 68
@@ -111,69 +111,70 @@ static const struct {
111 Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */ 111 Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */
112 112
113/* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */ 113/* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */
114static u8 cycle_time_offset[] = {0,0,5,4,4,0,0}; 114static u8 cycle_time_offset[] = { 0, 0, 5, 4, 4, 0, 0 };
115static u8 cycle_time_range[] = {0,0,2,3,3,4,4}; 115static u8 cycle_time_range[] = { 0, 0, 2, 3, 3, 4, 4 };
116static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { 116static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
117 {0,0,0,0,0,0,0}, /* no udma */ 117 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
118 {0,0,0,0,0,0,0}, /* no udma */ 118 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
119 {3,2,1,0,0,0,0}, /* ATA_33 */ 119 { 3, 2, 1, 0, 0, 0, 0 }, /* ATA_33 */
120 {7,5,3,2,1,0,0}, /* ATA_66 */ 120 { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_66 */
121 {7,5,3,2,1,0,0}, /* ATA_100a (730 specific), differences are on cycle_time range and offset */ 121 { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_100a (730 specific),
122 {11,7,5,4,2,1,0}, /* ATA_100 */ 122 different cycle_time range and offset */
123 {15,10,7,5,3,2,1}, /* ATA_133a (earliest 691 southbridges) */ 123 { 11, 7, 5, 4, 2, 1, 0 }, /* ATA_100 */
124 {15,10,7,5,3,2,1}, /* ATA_133 */ 124 { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133a (earliest 691 southbridges) */
125 { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133 */
125}; 126};
126/* CRC Valid Setup Time vary across IDE clock setting 33/66/100/133 127/* CRC Valid Setup Time vary across IDE clock setting 33/66/100/133
127 See SiS962 data sheet for more detail */ 128 See SiS962 data sheet for more detail */
128static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { 129static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
129 {0,0,0,0,0,0,0}, /* no udma */ 130 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
130 {0,0,0,0,0,0,0}, /* no udma */ 131 { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
131 {2,1,1,0,0,0,0}, 132 { 2, 1, 1, 0, 0, 0, 0 },
132 {4,3,2,1,0,0,0}, 133 { 4, 3, 2, 1, 0, 0, 0 },
133 {4,3,2,1,0,0,0}, 134 { 4, 3, 2, 1, 0, 0, 0 },
134 {6,4,3,1,1,1,0}, 135 { 6, 4, 3, 1, 1, 1, 0 },
135 {9,6,4,2,2,2,2}, 136 { 9, 6, 4, 2, 2, 2, 2 },
136 {9,6,4,2,2,2,2}, 137 { 9, 6, 4, 2, 2, 2, 2 },
137}; 138};
138/* Initialize time, Active time, Recovery time vary across 139/* Initialize time, Active time, Recovery time vary across
139 IDE clock settings. These 3 arrays hold the register value 140 IDE clock settings. These 3 arrays hold the register value
140 for PIO0/1/2/3/4 and DMA0/1/2 mode in order */ 141 for PIO0/1/2/3/4 and DMA0/1/2 mode in order */
141static u8 ini_time_value[][8] = { 142static u8 ini_time_value[][8] = {
142 {0,0,0,0,0,0,0,0}, 143 { 0, 0, 0, 0, 0, 0, 0, 0 },
143 {0,0,0,0,0,0,0,0}, 144 { 0, 0, 0, 0, 0, 0, 0, 0 },
144 {2,1,0,0,0,1,0,0}, 145 { 2, 1, 0, 0, 0, 1, 0, 0 },
145 {4,3,1,1,1,3,1,1}, 146 { 4, 3, 1, 1, 1, 3, 1, 1 },
146 {4,3,1,1,1,3,1,1}, 147 { 4, 3, 1, 1, 1, 3, 1, 1 },
147 {6,4,2,2,2,4,2,2}, 148 { 6, 4, 2, 2, 2, 4, 2, 2 },
148 {9,6,3,3,3,6,3,3}, 149 { 9, 6, 3, 3, 3, 6, 3, 3 },
149 {9,6,3,3,3,6,3,3}, 150 { 9, 6, 3, 3, 3, 6, 3, 3 },
150}; 151};
151static u8 act_time_value[][8] = { 152static u8 act_time_value[][8] = {
152 {0,0,0,0,0,0,0,0}, 153 { 0, 0, 0, 0, 0, 0, 0, 0 },
153 {0,0,0,0,0,0,0,0}, 154 { 0, 0, 0, 0, 0, 0, 0, 0 },
154 {9,9,9,2,2,7,2,2}, 155 { 9, 9, 9, 2, 2, 7, 2, 2 },
155 {19,19,19,5,4,14,5,4}, 156 { 19, 19, 19, 5, 4, 14, 5, 4 },
156 {19,19,19,5,4,14,5,4}, 157 { 19, 19, 19, 5, 4, 14, 5, 4 },
157 {28,28,28,7,6,21,7,6}, 158 { 28, 28, 28, 7, 6, 21, 7, 6 },
158 {38,38,38,10,9,28,10,9}, 159 { 38, 38, 38, 10, 9, 28, 10, 9 },
159 {38,38,38,10,9,28,10,9}, 160 { 38, 38, 38, 10, 9, 28, 10, 9 },
160}; 161};
161static u8 rco_time_value[][8] = { 162static u8 rco_time_value[][8] = {
162 {0,0,0,0,0,0,0,0}, 163 { 0, 0, 0, 0, 0, 0, 0, 0 },
163 {0,0,0,0,0,0,0,0}, 164 { 0, 0, 0, 0, 0, 0, 0, 0 },
164 {9,2,0,2,0,7,1,1}, 165 { 9, 2, 0, 2, 0, 7, 1, 1 },
165 {19,5,1,5,2,16,3,2}, 166 { 19, 5, 1, 5, 2, 16, 3, 2 },
166 {19,5,1,5,2,16,3,2}, 167 { 19, 5, 1, 5, 2, 16, 3, 2 },
167 {30,9,3,9,4,25,6,4}, 168 { 30, 9, 3, 9, 4, 25, 6, 4 },
168 {40,12,4,12,5,34,12,5}, 169 { 40, 12, 4, 12, 5, 34, 12, 5 },
169 {40,12,4,12,5,34,12,5}, 170 { 40, 12, 4, 12, 5, 34, 12, 5 },
170}; 171};
171 172
172/* 173/*
173 * Printing configuration 174 * Printing configuration
174 */ 175 */
175/* Used for chipset type printing at boot time */ 176/* Used for chipset type printing at boot time */
176static char* chipset_capability[] = { 177static char *chipset_capability[] = {
177 "ATA", "ATA 16", 178 "ATA", "ATA 16",
178 "ATA 33", "ATA 66", 179 "ATA 33", "ATA 66",
179 "ATA 100 (1st gen)", "ATA 100 (2nd gen)", 180 "ATA 100 (1st gen)", "ATA 100 (2nd gen)",
@@ -272,7 +273,7 @@ static void sis_program_timings(ide_drive_t *drive, const u8 mode)
272 sis_ata133_program_timings(drive, mode); 273 sis_ata133_program_timings(drive, mode);
273} 274}
274 275
275static void config_drive_art_rwp (ide_drive_t *drive) 276static void config_drive_art_rwp(ide_drive_t *drive)
276{ 277{
277 ide_hwif_t *hwif = HWIF(drive); 278 ide_hwif_t *hwif = HWIF(drive);
278 struct pci_dev *dev = to_pci_dev(hwif->dev); 279 struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -346,7 +347,7 @@ static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
346 sis_program_timings(drive, speed); 347 sis_program_timings(drive, speed);
347} 348}
348 349
349static u8 sis5513_ata133_udma_filter(ide_drive_t *drive) 350static u8 sis_ata133_udma_filter(ide_drive_t *drive)
350{ 351{
351 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 352 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
352 u32 regdw = 0; 353 u32 regdw = 0;
@@ -358,8 +359,7 @@ static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
358 return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5; 359 return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
359} 360}
360 361
361/* Chip detection and general config */ 362static int __devinit sis_find_family(struct pci_dev *dev)
362static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const char *name)
363{ 363{
364 struct pci_dev *host; 364 struct pci_dev *host;
365 int i = 0; 365 int i = 0;
@@ -381,7 +381,7 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
381 chipset_family = ATA_100a; 381 chipset_family = ATA_100a;
382 } 382 }
383 pci_dev_put(host); 383 pci_dev_put(host);
384 384
385 printk(KERN_INFO "SIS5513: %s %s controller\n", 385 printk(KERN_INFO "SIS5513: %s %s controller\n",
386 SiSHostChipInfo[i].name, chipset_capability[chipset_family]); 386 SiSHostChipInfo[i].name, chipset_capability[chipset_family]);
387 } 387 }
@@ -440,63 +440,60 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
440 } 440 }
441 } 441 }
442 442
443 if (!chipset_family) 443 return chipset_family;
444 return -1; 444}
445 445
446static unsigned int __devinit init_chipset_sis5513(struct pci_dev *dev,
447 const char *name)
448{
446 /* Make general config ops here 449 /* Make general config ops here
447 1/ tell IDE channels to operate in Compatibility mode only 450 1/ tell IDE channels to operate in Compatibility mode only
448 2/ tell old chips to allow per drive IDE timings */ 451 2/ tell old chips to allow per drive IDE timings */
449 452
450 { 453 u8 reg;
451 u8 reg; 454 u16 regw;
452 u16 regw; 455
453 456 switch (chipset_family) {
454 switch(chipset_family) { 457 case ATA_133:
455 case ATA_133: 458 /* SiS962 operation mode */
456 /* SiS962 operation mode */ 459 pci_read_config_word(dev, 0x50, &regw);
457 pci_read_config_word(dev, 0x50, &regw); 460 if (regw & 0x08)
458 if (regw & 0x08) 461 pci_write_config_word(dev, 0x50, regw&0xfff7);
459 pci_write_config_word(dev, 0x50, regw&0xfff7); 462 pci_read_config_word(dev, 0x52, &regw);
460 pci_read_config_word(dev, 0x52, &regw); 463 if (regw & 0x08)
461 if (regw & 0x08) 464 pci_write_config_word(dev, 0x52, regw&0xfff7);
462 pci_write_config_word(dev, 0x52, regw&0xfff7); 465 break;
463 break; 466 case ATA_133a:
464 case ATA_133a: 467 case ATA_100:
465 case ATA_100: 468 /* Fixup latency */
466 /* Fixup latency */ 469 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
467 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); 470 /* Set compatibility bit */
468 /* Set compatibility bit */ 471 pci_read_config_byte(dev, 0x49, &reg);
469 pci_read_config_byte(dev, 0x49, &reg); 472 if (!(reg & 0x01))
470 if (!(reg & 0x01)) { 473 pci_write_config_byte(dev, 0x49, reg|0x01);
471 pci_write_config_byte(dev, 0x49, reg|0x01); 474 break;
472 } 475 case ATA_100a:
473 break; 476 case ATA_66:
474 case ATA_100a: 477 /* Fixup latency */
475 case ATA_66: 478 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10);
476 /* Fixup latency */ 479
477 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10); 480 /* On ATA_66 chips the bit was elsewhere */
478 481 pci_read_config_byte(dev, 0x52, &reg);
479 /* On ATA_66 chips the bit was elsewhere */ 482 if (!(reg & 0x04))
480 pci_read_config_byte(dev, 0x52, &reg); 483 pci_write_config_byte(dev, 0x52, reg|0x04);
481 if (!(reg & 0x04)) { 484 break;
482 pci_write_config_byte(dev, 0x52, reg|0x04); 485 case ATA_33:
483 } 486 /* On ATA_33 we didn't have a single bit to set */
484 break; 487 pci_read_config_byte(dev, 0x09, &reg);
485 case ATA_33: 488 if ((reg & 0x0f) != 0x00)
486 /* On ATA_33 we didn't have a single bit to set */ 489 pci_write_config_byte(dev, 0x09, reg&0xf0);
487 pci_read_config_byte(dev, 0x09, &reg); 490 case ATA_16:
488 if ((reg & 0x0f) != 0x00) { 491 /* force per drive recovery and active timings
489 pci_write_config_byte(dev, 0x09, reg&0xf0); 492 needed on ATA_33 and below chips */
490 } 493 pci_read_config_byte(dev, 0x52, &reg);
491 case ATA_16: 494 if (!(reg & 0x08))
492 /* force per drive recovery and active timings 495 pci_write_config_byte(dev, 0x52, reg|0x08);
493 needed on ATA_33 and below chips */ 496 break;
494 pci_read_config_byte(dev, 0x52, &reg);
495 if (!(reg & 0x08)) {
496 pci_write_config_byte(dev, 0x52, reg|0x08);
497 }
498 break;
499 }
500 } 497 }
501 498
502 return 0; 499 return 0;
@@ -517,7 +514,7 @@ static const struct sis_laptop sis_laptop[] = {
517 { 0, } 514 { 0, }
518}; 515};
519 516
520static u8 __devinit ata66_sis5513(ide_hwif_t *hwif) 517static u8 __devinit sis_cable_detect(ide_hwif_t *hwif)
521{ 518{
522 struct pci_dev *pdev = to_pci_dev(hwif->dev); 519 struct pci_dev *pdev = to_pci_dev(hwif->dev);
523 const struct sis_laptop *lap = &sis_laptop[0]; 520 const struct sis_laptop *lap = &sis_laptop[0];
@@ -546,38 +543,44 @@ static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
546 return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 543 return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
547} 544}
548 545
549static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif) 546static const struct ide_port_ops sis_port_ops = {
550{ 547 .set_pio_mode = sis_set_pio_mode,
551 u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f }; 548 .set_dma_mode = sis_set_dma_mode,
552 549 .cable_detect = sis_cable_detect,
553 hwif->set_pio_mode = &sis_set_pio_mode; 550};
554 hwif->set_dma_mode = &sis_set_dma_mode;
555
556 if (chipset_family >= ATA_133)
557 hwif->udma_filter = sis5513_ata133_udma_filter;
558
559 hwif->cable_detect = ata66_sis5513;
560
561 if (hwif->dma_base == 0)
562 return;
563 551
564 hwif->ultra_mask = udma_rates[chipset_family]; 552static const struct ide_port_ops sis_ata133_port_ops = {
565} 553 .set_pio_mode = sis_set_pio_mode,
554 .set_dma_mode = sis_set_dma_mode,
555 .udma_filter = sis_ata133_udma_filter,
556 .cable_detect = sis_cable_detect,
557};
566 558
567static const struct ide_port_info sis5513_chipset __devinitdata = { 559static const struct ide_port_info sis5513_chipset __devinitdata = {
568 .name = "SIS5513", 560 .name = "SIS5513",
569 .init_chipset = init_chipset_sis5513, 561 .init_chipset = init_chipset_sis5513,
570 .init_hwif = init_hwif_sis5513, 562 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
571 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 563 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA,
572 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA |
573 IDE_HFLAG_BOOTABLE,
574 .pio_mask = ATA_PIO4, 564 .pio_mask = ATA_PIO4,
575 .mwdma_mask = ATA_MWDMA2, 565 .mwdma_mask = ATA_MWDMA2,
576}; 566};
577 567
578static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id) 568static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
579{ 569{
580 return ide_setup_pci_device(dev, &sis5513_chipset); 570 struct ide_port_info d = sis5513_chipset;
571 u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
572
573 if (sis_find_family(dev) == 0)
574 return -ENOTSUPP;
575
576 if (chipset_family >= ATA_133)
577 d.port_ops = &sis_ata133_port_ops;
578 else
579 d.port_ops = &sis_port_ops;
580
581 d.udma_mask = udma_rates[chipset_family];
582
583 return ide_setup_pci_device(dev, &d);
581} 584}
582 585
583static const struct pci_device_id sis5513_pci_tbl[] = { 586static const struct pci_device_id sis5513_pci_tbl[] = {
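The sis5513 hunks repeat the pattern with one more twist: sis_find_family() is split out of init_chipset_sis5513() so the probe routine can pick sis_port_ops or sis_ata133_port_ops and the UDMA mask before the device is registered. Stripped of the kernel context, the underlying refactor, replacing per-object function-pointer fields with a pointer to one shared immutable ops table, can be shown as a standalone C sketch in which every name is hypothetical:

#include <stdio.h>

/* one immutable "vtable" shared by every port of a given chipset type */
struct port_ops {
	void (*set_pio_mode)(int port, int pio);
	int  (*cable_detect)(int port);
};

static void fast_set_pio_mode(int port, int pio)
{
	printf("port %d: programming PIO mode %d\n", port, pio);
}

static int fast_cable_detect(int port)
{
	return 80;			/* pretend an 80-wire cable was seen */
}

static const struct port_ops fast_ops = {
	.set_pio_mode = fast_set_pio_mode,
	.cable_detect = fast_cable_detect,
};

/* each port object shrinks to an id plus a pointer to the shared table */
struct port {
	int id;
	const struct port_ops *ops;
};

int main(void)
{
	struct port p = { .id = 0, .ops = &fast_ops };

	p.ops->set_pio_mode(p.id, 4);
	printf("port %d: %d-wire cable\n", p.id, p.ops->cable_detect(p.id));
	return 0;
}

The benefits are the ones visible throughout these diffs: the tables live in read-only memory, a driver cannot leave half of its hooks unset by accident, and selecting a hardware variant reduces to swapping a single pointer.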
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 1f00251a4a87..ce84fa045d39 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -179,7 +179,7 @@ static void sl82c105_dma_start(ide_drive_t *drive)
179 struct pci_dev *dev = to_pci_dev(hwif->dev); 179 struct pci_dev *dev = to_pci_dev(hwif->dev);
180 int reg = 0x44 + drive->dn * 4; 180 int reg = 0x44 + drive->dn * 4;
181 181
182 DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name)); 182 DBG(("%s(drive:%s)\n", __func__, drive->name));
183 183
184 pci_write_config_word(dev, reg, drive->drive_data >> 16); 184 pci_write_config_word(dev, reg, drive->drive_data >> 16);
185 185
@@ -203,7 +203,7 @@ static int sl82c105_dma_end(ide_drive_t *drive)
203 int reg = 0x44 + drive->dn * 4; 203 int reg = 0x44 + drive->dn * 4;
204 int ret; 204 int ret;
205 205
206 DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name)); 206 DBG(("%s(drive:%s)\n", __func__, drive->name));
207 207
208 ret = __ide_dma_end(drive); 208 ret = __ide_dma_end(drive);
209 209
@@ -232,7 +232,7 @@ static void sl82c105_resetproc(ide_drive_t *drive)
232 * Return the revision of the Winbond bridge 232 * Return the revision of the Winbond bridge
233 * which this function is part of. 233 * which this function is part of.
234 */ 234 */
235static unsigned int sl82c105_bridge_revision(struct pci_dev *dev) 235static u8 sl82c105_bridge_revision(struct pci_dev *dev)
236{ 236{
237 struct pci_dev *bridge; 237 struct pci_dev *bridge;
238 238
@@ -282,64 +282,59 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
282 return dev->irq; 282 return dev->irq;
283} 283}
284 284
285/* 285static const struct ide_port_ops sl82c105_port_ops = {
286 * Initialise IDE channel 286 .set_pio_mode = sl82c105_set_pio_mode,
287 */ 287 .set_dma_mode = sl82c105_set_dma_mode,
288static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif) 288 .resetproc = sl82c105_resetproc,
289{ 289};
290 struct pci_dev *dev = to_pci_dev(hwif->dev);
291 unsigned int rev;
292
293 DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index));
294
295 hwif->set_pio_mode = &sl82c105_set_pio_mode;
296 hwif->set_dma_mode = &sl82c105_set_dma_mode;
297 hwif->resetproc = &sl82c105_resetproc;
298
299 if (!hwif->dma_base)
300 return;
301
302 rev = sl82c105_bridge_revision(dev);
303 if (rev <= 5) {
304 /*
305 * Never ever EVER under any circumstances enable
306 * DMA when the bridge is this old.
307 */
308 printk(" %s: Winbond W83C553 bridge revision %d, "
309 "BM-DMA disabled\n", hwif->name, rev);
310 return;
311 }
312
313 hwif->mwdma_mask = ATA_MWDMA2;
314
315 hwif->dma_lost_irq = &sl82c105_dma_lost_irq;
316 hwif->dma_start = &sl82c105_dma_start;
317 hwif->ide_dma_end = &sl82c105_dma_end;
318 hwif->dma_timeout = &sl82c105_dma_timeout;
319 290
320 if (hwif->mate) 291static const struct ide_dma_ops sl82c105_dma_ops = {
321 hwif->serialized = hwif->mate->serialized = 1; 292 .dma_host_set = ide_dma_host_set,
322} 293 .dma_setup = ide_dma_setup,
294 .dma_exec_cmd = ide_dma_exec_cmd,
295 .dma_start = sl82c105_dma_start,
296 .dma_end = sl82c105_dma_end,
297 .dma_test_irq = ide_dma_test_irq,
298 .dma_lost_irq = sl82c105_dma_lost_irq,
299 .dma_timeout = sl82c105_dma_timeout,
300};
323 301
324static const struct ide_port_info sl82c105_chipset __devinitdata = { 302static const struct ide_port_info sl82c105_chipset __devinitdata = {
325 .name = "W82C105", 303 .name = "W82C105",
326 .init_chipset = init_chipset_sl82c105, 304 .init_chipset = init_chipset_sl82c105,
327 .init_hwif = init_hwif_sl82c105,
328 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}}, 305 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
306 .port_ops = &sl82c105_port_ops,
307 .dma_ops = &sl82c105_dma_ops,
329 .host_flags = IDE_HFLAG_IO_32BIT | 308 .host_flags = IDE_HFLAG_IO_32BIT |
330 IDE_HFLAG_UNMASK_IRQS | 309 IDE_HFLAG_UNMASK_IRQS |
331/* FIXME: check for Compatibility mode in generic IDE PCI code */ 310/* FIXME: check for Compatibility mode in generic IDE PCI code */
332#if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT) 311#if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT)
333 IDE_HFLAG_FORCE_LEGACY_IRQS | 312 IDE_HFLAG_FORCE_LEGACY_IRQS |
334#endif 313#endif
335 IDE_HFLAG_NO_AUTODMA | 314 IDE_HFLAG_SERIALIZE_DMA |
336 IDE_HFLAG_BOOTABLE, 315 IDE_HFLAG_NO_AUTODMA,
337 .pio_mask = ATA_PIO5, 316 .pio_mask = ATA_PIO5,
317 .mwdma_mask = ATA_MWDMA2,
338}; 318};
339 319
340static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id) 320static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
341{ 321{
342 return ide_setup_pci_device(dev, &sl82c105_chipset); 322 struct ide_port_info d = sl82c105_chipset;
323 u8 rev = sl82c105_bridge_revision(dev);
324
325 if (rev <= 5) {
326 /*
327 * Never ever EVER under any circumstances enable
328 * DMA when the bridge is this old.
329 */
330 printk(KERN_INFO "W82C105_IDE: Winbond W83C553 bridge "
331 "revision %d, BM-DMA disabled\n", rev);
332 d.dma_ops = NULL;
333 d.mwdma_mask = 0;
334 d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
335 }
336
337 return ide_setup_pci_device(dev, &d);
343} 338}
344 339
345static const struct pci_device_id sl82c105_pci_tbl[] = { 340static const struct pci_device_id sl82c105_pci_tbl[] = {
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 65f4c2ffaa59..dae6e2c94d86 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -27,9 +27,9 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
27 unsigned long flags; 27 unsigned long flags;
28 u16 master_data; 28 u16 master_data;
29 u8 slave_data; 29 u8 slave_data;
30 int control = 0; 30 int control = 0;
31 /* ISP RTC */ 31 /* ISP RTC */
32 static const u8 timings[][2]= { 32 static const u8 timings[][2] = {
33 { 0, 0 }, 33 { 0, 0 },
34 { 0, 0 }, 34 { 0, 0 },
35 { 1, 0 }, 35 { 1, 0 },
@@ -125,19 +125,17 @@ static u8 __devinit slc90e66_cable_detect(ide_hwif_t *hwif)
125 return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; 125 return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
126} 126}
127 127
128static void __devinit init_hwif_slc90e66(ide_hwif_t *hwif) 128static const struct ide_port_ops slc90e66_port_ops = {
129{ 129 .set_pio_mode = slc90e66_set_pio_mode,
130 hwif->set_pio_mode = &slc90e66_set_pio_mode; 130 .set_dma_mode = slc90e66_set_dma_mode,
131 hwif->set_dma_mode = &slc90e66_set_dma_mode; 131 .cable_detect = slc90e66_cable_detect,
132 132};
133 hwif->cable_detect = slc90e66_cable_detect;
134}
135 133
136static const struct ide_port_info slc90e66_chipset __devinitdata = { 134static const struct ide_port_info slc90e66_chipset __devinitdata = {
137 .name = "SLC90E66", 135 .name = "SLC90E66",
138 .init_hwif = init_hwif_slc90e66, 136 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
139 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, 137 .port_ops = &slc90e66_port_ops,
140 .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE, 138 .host_flags = IDE_HFLAG_LEGACY_IRQS,
141 .pio_mask = ATA_PIO4, 139 .pio_mask = ATA_PIO4,
142 .swdma_mask = ATA_SWDMA2_ONLY, 140 .swdma_mask = ATA_SWDMA2_ONLY,
143 .mwdma_mask = ATA_MWDMA12_ONLY, 141 .mwdma_mask = ATA_MWDMA12_ONLY,
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 1e4a6262bcef..9b4b27a4c711 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -18,20 +18,20 @@ static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
18 u16 mode, scr = inw(scr_port); 18 u16 mode, scr = inw(scr_port);
19 19
20 switch (speed) { 20 switch (speed) {
21 case XFER_UDMA_4: mode = 0x00c0; break; 21 case XFER_UDMA_4: mode = 0x00c0; break;
22 case XFER_UDMA_3: mode = 0x00b0; break; 22 case XFER_UDMA_3: mode = 0x00b0; break;
23 case XFER_UDMA_2: mode = 0x00a0; break; 23 case XFER_UDMA_2: mode = 0x00a0; break;
24 case XFER_UDMA_1: mode = 0x0090; break; 24 case XFER_UDMA_1: mode = 0x0090; break;
25 case XFER_UDMA_0: mode = 0x0080; break; 25 case XFER_UDMA_0: mode = 0x0080; break;
26 case XFER_MW_DMA_2: mode = 0x0070; break; 26 case XFER_MW_DMA_2: mode = 0x0070; break;
27 case XFER_MW_DMA_1: mode = 0x0060; break; 27 case XFER_MW_DMA_1: mode = 0x0060; break;
28 case XFER_MW_DMA_0: mode = 0x0050; break; 28 case XFER_MW_DMA_0: mode = 0x0050; break;
29 case XFER_PIO_4: mode = 0x0400; break; 29 case XFER_PIO_4: mode = 0x0400; break;
30 case XFER_PIO_3: mode = 0x0300; break; 30 case XFER_PIO_3: mode = 0x0300; break;
31 case XFER_PIO_2: mode = 0x0200; break; 31 case XFER_PIO_2: mode = 0x0200; break;
32 case XFER_PIO_1: mode = 0x0100; break; 32 case XFER_PIO_1: mode = 0x0100; break;
33 case XFER_PIO_0: 33 case XFER_PIO_0:
34 default: mode = 0x0000; break; 34 default: mode = 0x0000; break;
35 } 35 }
36 36
37 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f; 37 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
@@ -157,11 +157,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
157 /* Store the system control register base for convenience... */ 157 /* Store the system control register base for convenience... */
158 hwif->config_data = sc_base; 158 hwif->config_data = sc_base;
159 159
160 hwif->set_pio_mode = &tc86c001_set_pio_mode;
161 hwif->set_dma_mode = &tc86c001_set_mode;
162
163 hwif->cable_detect = tc86c001_cable_detect;
164
165 if (!hwif->dma_base) 160 if (!hwif->dma_base)
166 return; 161 return;
167 162
@@ -173,8 +168,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
173 168
174 /* Sector Count Register limit */ 169 /* Sector Count Register limit */
175 hwif->rqsize = 0xffff; 170 hwif->rqsize = 0xffff;
176
177 hwif->dma_start = &tc86c001_dma_start;
178} 171}
179 172
180static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev, 173static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
@@ -187,10 +180,29 @@ static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
187 return err; 180 return err;
188} 181}
189 182
183static const struct ide_port_ops tc86c001_port_ops = {
184 .set_pio_mode = tc86c001_set_pio_mode,
185 .set_dma_mode = tc86c001_set_mode,
186 .cable_detect = tc86c001_cable_detect,
187};
188
189static const struct ide_dma_ops tc86c001_dma_ops = {
190 .dma_host_set = ide_dma_host_set,
191 .dma_setup = ide_dma_setup,
192 .dma_exec_cmd = ide_dma_exec_cmd,
193 .dma_start = tc86c001_dma_start,
194 .dma_end = __ide_dma_end,
195 .dma_test_irq = ide_dma_test_irq,
196 .dma_lost_irq = ide_dma_lost_irq,
197 .dma_timeout = ide_dma_timeout,
198};
199
190static const struct ide_port_info tc86c001_chipset __devinitdata = { 200static const struct ide_port_info tc86c001_chipset __devinitdata = {
191 .name = "TC86C001", 201 .name = "TC86C001",
192 .init_chipset = init_chipset_tc86c001, 202 .init_chipset = init_chipset_tc86c001,
193 .init_hwif = init_hwif_tc86c001, 203 .init_hwif = init_hwif_tc86c001,
204 .port_ops = &tc86c001_port_ops,
205 .dma_ops = &tc86c001_dma_ops,
194 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD | 206 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
195 IDE_HFLAG_ABUSE_SET_DMA_MODE, 207 IDE_HFLAG_ABUSE_SET_DMA_MODE,
196 .pio_mask = ATA_PIO4, 208 .pio_mask = ATA_PIO4,
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index a67d02a3f96e..db65a558d4ec 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -87,17 +87,15 @@ static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio)
87 triflex_set_mode(drive, XFER_PIO_0 + pio); 87 triflex_set_mode(drive, XFER_PIO_0 + pio);
88} 88}
89 89
90static void __devinit init_hwif_triflex(ide_hwif_t *hwif) 90static const struct ide_port_ops triflex_port_ops = {
91{ 91 .set_pio_mode = triflex_set_pio_mode,
92 hwif->set_pio_mode = &triflex_set_pio_mode; 92 .set_dma_mode = triflex_set_mode,
93 hwif->set_dma_mode = &triflex_set_mode; 93};
94}
95 94
96static const struct ide_port_info triflex_device __devinitdata = { 95static const struct ide_port_info triflex_device __devinitdata = {
97 .name = "TRIFLEX", 96 .name = "TRIFLEX",
98 .init_hwif = init_hwif_triflex,
99 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}}, 97 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
100 .host_flags = IDE_HFLAG_BOOTABLE, 98 .port_ops = &triflex_port_ops,
101 .pio_mask = ATA_PIO4, 99 .pio_mask = ATA_PIO4,
102 .swdma_mask = ATA_SWDMA2, 100 .swdma_mask = ATA_SWDMA2,
103 .mwdma_mask = ATA_MWDMA2, 101 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index de750f7a43e9..a8a3138682ef 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -214,7 +214,7 @@ static void trm290_dma_start(ide_drive_t *drive)
214{ 214{
215} 215}
216 216
217static int trm290_ide_dma_end (ide_drive_t *drive) 217static int trm290_dma_end(ide_drive_t *drive)
218{ 218{
219 u16 status; 219 u16 status;
220 220
@@ -225,7 +225,7 @@ static int trm290_ide_dma_end (ide_drive_t *drive)
225 return status != 0x00ff; 225 return status != 0x00ff;
226} 226}
227 227
228static int trm290_ide_dma_test_irq (ide_drive_t *drive) 228static int trm290_dma_test_irq(ide_drive_t *drive)
229{ 229{
230 u16 status; 230 u16 status;
231 231
@@ -254,22 +254,11 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
254 hwif->config_data = cfg_base; 254 hwif->config_data = cfg_base;
255 hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0); 255 hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0);
256 256
257 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx", 257 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
258 hwif->name, hwif->dma_base, hwif->dma_base + 3); 258 hwif->name, hwif->dma_base, hwif->dma_base + 3);
259 259
260 if (!request_region(hwif->dma_base, 4, hwif->name)) { 260 if (ide_allocate_dma_engine(hwif))
261 printk(KERN_CONT " -- Error, ports in use.\n");
262 return; 261 return;
263 }
264
265 hwif->dmatable_cpu = pci_alloc_consistent(dev, PRD_ENTRIES * PRD_BYTES,
266 &hwif->dmatable_dma);
267 if (!hwif->dmatable_cpu) {
268 printk(KERN_CONT " -- Error, unable to allocate DMA table.\n");
269 release_region(hwif->dma_base, 4);
270 return;
271 }
272 printk(KERN_CONT "\n");
273 262
274 local_irq_save(flags); 263 local_irq_save(flags);
275 /* put config reg into first byte of hwif->select_data */ 264 /* put config reg into first byte of hwif->select_data */
@@ -291,14 +280,6 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
291 /* sharing IRQ with mate */ 280 /* sharing IRQ with mate */
292 hwif->irq = hwif->mate->irq; 281 hwif->irq = hwif->mate->irq;
293 282
294 hwif->dma_host_set = &trm290_dma_host_set;
295 hwif->dma_setup = &trm290_dma_setup;
296 hwif->dma_exec_cmd = &trm290_dma_exec_cmd;
297 hwif->dma_start = &trm290_dma_start;
298 hwif->ide_dma_end = &trm290_ide_dma_end;
299 hwif->ide_dma_test_irq = &trm290_ide_dma_test_irq;
300
301 hwif->selectproc = &trm290_selectproc;
302#if 1 283#if 1
303 { 284 {
304 /* 285 /*
@@ -317,7 +298,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
317 if (old != compat && old_mask == 0xff) { 298 if (old != compat && old_mask == 0xff) {
318 /* leave lower 10 bits untouched */ 299 /* leave lower 10 bits untouched */
319 compat += (next_offset += 0x400); 300 compat += (next_offset += 0x400);
320 hwif->io_ports[IDE_CONTROL_OFFSET] = compat + 2; 301 hwif->io_ports.ctl_addr = compat + 2;
321 outw(compat | 1, hwif->config_data); 302 outw(compat | 1, hwif->config_data);
322 new = inw(hwif->config_data); 303 new = inw(hwif->config_data);
323 printk(KERN_INFO "%s: control basereg workaround: " 304 printk(KERN_INFO "%s: control basereg workaround: "
@@ -328,16 +309,32 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
328#endif 309#endif
329} 310}
330 311
312static const struct ide_port_ops trm290_port_ops = {
313 .selectproc = trm290_selectproc,
314};
315
316static struct ide_dma_ops trm290_dma_ops = {
317 .dma_host_set = trm290_dma_host_set,
318 .dma_setup = trm290_dma_setup,
319 .dma_exec_cmd = trm290_dma_exec_cmd,
320 .dma_start = trm290_dma_start,
321 .dma_end = trm290_dma_end,
322 .dma_test_irq = trm290_dma_test_irq,
323 .dma_lost_irq = ide_dma_lost_irq,
324 .dma_timeout = ide_dma_timeout,
325};
326
331static const struct ide_port_info trm290_chipset __devinitdata = { 327static const struct ide_port_info trm290_chipset __devinitdata = {
332 .name = "TRM290", 328 .name = "TRM290",
333 .init_hwif = init_hwif_trm290, 329 .init_hwif = init_hwif_trm290,
334 .chipset = ide_trm290, 330 .chipset = ide_trm290,
331 .port_ops = &trm290_port_ops,
332 .dma_ops = &trm290_dma_ops,
335 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 333 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
336#if 0 /* play it safe for now */ 334#if 0 /* play it safe for now */
337 IDE_HFLAG_TRUST_BIOS_FOR_DMA | 335 IDE_HFLAG_TRUST_BIOS_FOR_DMA |
338#endif 336#endif
339 IDE_HFLAG_NO_AUTODMA | 337 IDE_HFLAG_NO_AUTODMA |
340 IDE_HFLAG_BOOTABLE |
341 IDE_HFLAG_NO_LBA48, 338 IDE_HFLAG_NO_LBA48,
342}; 339};
343 340
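The trm290 and tc86c001 hunks above build their ide_dma_ops tables from a mix of driver-specific routines and generic ide_dma_* library helpers. The standalone sketch below shows that override-what-you-must, reuse-the-rest idea with made-up names rather than the real IDE callbacks.

#include <stdio.h>

struct dma_ops {
	void (*dma_start)(const char *dev);
	int  (*dma_end)(const char *dev);
	void (*dma_lost_irq)(const char *dev);
};

/* Generic library fallback, shared by many drivers. */
static void generic_dma_lost_irq(const char *dev)
{
	printf("%s: generic lost-IRQ handling\n", dev);
}

/* Driver-specific overrides for this controller's quirks. */
static void demo_dma_start(const char *dev)
{
	printf("%s: starting DMA the controller's own way\n", dev);
}

static int demo_dma_end(const char *dev)
{
	printf("%s: checking the controller's private status word\n", dev);
	return 0;
}

/* The table mixes both: override what the chip needs, reuse the rest. */
static const struct dma_ops demo_dma_ops = {
	.dma_start	= demo_dma_start,
	.dma_end	= demo_dma_end,
	.dma_lost_irq	= generic_dma_lost_irq,
};

int main(void)
{
	demo_dma_ops.dma_start("hda");
	demo_dma_ops.dma_end("hda");
	demo_dma_ops.dma_lost_irq("hda");
	return 0;
}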
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 9004e7521889..566e0ecb8db1 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -340,7 +340,7 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
340 * Determine system bus clock. 340 * Determine system bus clock.
341 */ 341 */
342 342
343 via_clock = system_bus_clock() * 1000; 343 via_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
344 344
345 switch (via_clock) { 345 switch (via_clock) {
346 case 33000: via_clock = 33333; break; 346 case 33000: via_clock = 33333; break;
@@ -415,25 +415,21 @@ static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif)
415 return ATA_CBL_PATA40; 415 return ATA_CBL_PATA40;
416} 416}
417 417
418static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif) 418static const struct ide_port_ops via_port_ops = {
419{ 419 .set_pio_mode = via_set_pio_mode,
420 hwif->set_pio_mode = &via_set_pio_mode; 420 .set_dma_mode = via_set_drive,
421 hwif->set_dma_mode = &via_set_drive; 421 .cable_detect = via82cxxx_cable_detect,
422 422};
423 hwif->cable_detect = via82cxxx_cable_detect;
424}
425 423
426static const struct ide_port_info via82cxxx_chipset __devinitdata = { 424static const struct ide_port_info via82cxxx_chipset __devinitdata = {
427 .name = "VP_IDE", 425 .name = "VP_IDE",
428 .init_chipset = init_chipset_via82cxxx, 426 .init_chipset = init_chipset_via82cxxx,
429 .init_hwif = init_hwif_via82cxxx,
430 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, 427 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
428 .port_ops = &via_port_ops,
431 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | 429 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
432 IDE_HFLAG_PIO_NO_DOWNGRADE |
433 IDE_HFLAG_ABUSE_SET_DMA_MODE | 430 IDE_HFLAG_ABUSE_SET_DMA_MODE |
434 IDE_HFLAG_POST_SET_MODE | 431 IDE_HFLAG_POST_SET_MODE |
435 IDE_HFLAG_IO_32BIT | 432 IDE_HFLAG_IO_32BIT,
436 IDE_HFLAG_BOOTABLE,
437 .pio_mask = ATA_PIO5, 433 .pio_mask = ATA_PIO5,
438 .swdma_mask = ATA_SWDMA2, 434 .swdma_mask = ATA_SWDMA2,
439 .mwdma_mask = ATA_MWDMA2, 435 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
index a784a97ca7ec..f0e638dcc3ab 100644
--- a/drivers/ide/ppc/mpc8xx.c
+++ b/drivers/ide/ppc/mpc8xx.c
@@ -36,6 +36,8 @@
36#include <asm/machdep.h> 36#include <asm/machdep.h>
37#include <asm/irq.h> 37#include <asm/irq.h>
38 38
39#define DRV_NAME "ide-mpc8xx"
40
39static int identify (volatile u8 *p); 41static int identify (volatile u8 *p);
40static void print_fixed (volatile u8 *p); 42static void print_fixed (volatile u8 *p);
41static void print_funcid (int func); 43static void print_funcid (int func);
@@ -127,9 +129,9 @@ static int pcmcia_schlvl = PCMCIA_SCHLVL;
127 * MPC8xx's internal PCMCIA interface 129 * MPC8xx's internal PCMCIA interface
128 */ 130 */
129#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) 131#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
130static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) 132static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
131{ 133{
132 unsigned long *p = hw->io_ports; 134 unsigned long *p = hw->io_ports_array;
133 int i; 135 int i;
134 136
135 typedef struct { 137 typedef struct {
@@ -182,6 +184,13 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
182 pcmcia_phy_base, pcmcia_phy_end, 184 pcmcia_phy_base, pcmcia_phy_end,
183 pcmcia_phy_end - pcmcia_phy_base); 185 pcmcia_phy_end - pcmcia_phy_base);
184 186
187 if (!request_mem_region(pcmcia_phy_base,
188 pcmcia_phy_end - pcmcia_phy_base,
189 DRV_NAME)) {
190 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
191 return -EBUSY;
192 }
193
185 pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base, 194 pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base,
186 pcmcia_phy_end-pcmcia_phy_base); 195 pcmcia_phy_end-pcmcia_phy_base);
187 196
@@ -236,7 +245,7 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
236 if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) { 245 if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) {
237 printk ("No card in slot %c: PIPR=%08x\n", 246 printk ("No card in slot %c: PIPR=%08x\n",
238 'A' + _slot_, (u32) pcmp->pcmc_pipr); 247 'A' + _slot_, (u32) pcmp->pcmc_pipr);
239 return; /* No card in slot */ 248 return -ENODEV; /* No card in slot */
240 } 249 }
241 250
242 check_ide_device (pcmcia_base); 251 check_ide_device (pcmcia_base);
@@ -279,9 +288,6 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
279 } 288 }
280#endif /* CONFIG_IDE_8xx_PCCARD */ 289#endif /* CONFIG_IDE_8xx_PCCARD */
281 290
282 ide_hwifs[data_port].pio_mask = ATA_PIO4;
283 ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
284
285 /* Enable Harddisk Interrupt, 291 /* Enable Harddisk Interrupt,
286 * and make it edge sensitive 292 * and make it edge sensitive
287 */ 293 */
@@ -296,6 +302,8 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
296 /* Enable falling edge irq */ 302 /* Enable falling edge irq */
297 pcmp->pcmc_per = 0x100000 >> (16 * _slot_); 303 pcmp->pcmc_per = 0x100000 >> (16 * _slot_);
298#endif /* CONFIG_IDE_8xx_PCCARD */ 304#endif /* CONFIG_IDE_8xx_PCCARD */
305
306 return 0;
299} 307}
300#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */ 308#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */
301 309
@@ -304,9 +312,9 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
304 * MPC8xx's internal PCMCIA interface 312 * MPC8xx's internal PCMCIA interface
305 */ 313 */
306#if defined(CONFIG_IDE_EXT_DIRECT) 314#if defined(CONFIG_IDE_EXT_DIRECT)
307static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) 315static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
308{ 316{
309 unsigned long *p = hw->io_ports; 317 unsigned long *p = hw->io_ports_array;
310 int i; 318 int i;
311 319
312 u32 ide_phy_base; 320 u32 ide_phy_base;
@@ -327,7 +335,12 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
327 printk ("IDE phys mem : %08x...%08x (size %08x)\n", 335 printk ("IDE phys mem : %08x...%08x (size %08x)\n",
328 ide_phy_base, ide_phy_end, 336 ide_phy_base, ide_phy_end,
329 ide_phy_end - ide_phy_base); 337 ide_phy_end - ide_phy_base);
330 338
339 if (!request_mem_region(ide_phy_base, 0x200, DRV_NAME)) {
340 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
341 return -EBUSY;
342 }
343
331 ide_base=(unsigned long)ioremap(ide_phy_base, 344 ide_base=(unsigned long)ioremap(ide_phy_base,
332 ide_phy_end-ide_phy_base); 345 ide_phy_end-ide_phy_base);
333 346
@@ -357,15 +370,14 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
357 hw->irq = ioport_dsc[data_port].irq; 370 hw->irq = ioport_dsc[data_port].irq;
358 hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack; 371 hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
359 372
360 ide_hwifs[data_port].pio_mask = ATA_PIO4;
361 ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
362
363 /* Enable Harddisk Interrupt, 373 /* Enable Harddisk Interrupt,
364 * and make it edge sensitive 374 * and make it edge sensitive
365 */ 375 */
366 /* (11-18) Set edge detect for irq, no wakeup from low power mode */ 376 /* (11-18) Set edge detect for irq, no wakeup from low power mode */
367 ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |= 377 ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |=
368 (0x80000000 >> ioport_dsc[data_port].irq); 378 (0x80000000 >> ioport_dsc[data_port].irq);
379
380 return 0;
369} 381}
370#endif /* CONFIG_IDE_8xx_DIRECT */ 382#endif /* CONFIG_IDE_8xx_DIRECT */
371 383
@@ -426,10 +438,14 @@ static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
426#elif defined(CONFIG_IDE_EXT_DIRECT) 438#elif defined(CONFIG_IDE_EXT_DIRECT)
427 439
428 printk("%s[%d] %s: not implemented yet!\n", 440 printk("%s[%d] %s: not implemented yet!\n",
429 __FILE__,__LINE__,__FUNCTION__); 441 __FILE__, __LINE__, __func__);
430#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */ 442#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */
431} 443}
432 444
445static const struct ide_port_ops m8xx_port_ops = {
446 .set_pio_mode = m8xx_ide_set_pio_mode,
447};
448
433static void 449static void
434ide_interrupt_ack (void *dev) 450ide_interrupt_ack (void *dev)
435{ 451{
@@ -794,14 +810,30 @@ static int __init mpc8xx_ide_probe(void)
794 810
795#ifdef IDE0_BASE_OFFSET 811#ifdef IDE0_BASE_OFFSET
796 memset(&hw, 0, sizeof(hw)); 812 memset(&hw, 0, sizeof(hw));
797 m8xx_ide_init_ports(&hw, 0); 813 if (!m8xx_ide_init_ports(&hw, 0)) {
798 ide_init_port_hw(&ide_hwifs[0], &hw); 814 ide_hwif_t *hwif = ide_find_port();
799 idx[0] = 0; 815
816 if (hwif) {
817 ide_init_port_hw(hwif, &hw);
818 hwif->pio_mask = ATA_PIO4;
819 hwif->port_ops = &m8xx_port_ops;
820
821 idx[0] = hwif->index;
822 }
823 }
800#ifdef IDE1_BASE_OFFSET 824#ifdef IDE1_BASE_OFFSET
801 memset(&hw, 0, sizeof(hw)); 825 memset(&hw, 0, sizeof(hw));
802 m8xx_ide_init_ports(&hw, 1); 826 if (!m8xx_ide_init_ports(&hw, 1)) {
803 ide_init_port_hw(&ide_hwifs[1], &hw); 827 ide_hwif_t *mate = ide_find_port();
804 idx[1] = 1; 828
829 if (mate) {
830 ide_init_port_hw(mate, &hw);
831 mate->pio_mask = ATA_PIO4;
832 mate->port_ops = &m8xx_port_ops;
833
834 idx[1] = mate->index;
835 }
836 }
805#endif 837#endif
806#endif 838#endif
807 839
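The mpc8xx change above makes m8xx_ide_init_ports() return an error code instead of void, so the probe path can skip a port whose memory region is busy or whose slot is empty. A minimal standalone sketch of that error-propagation shape follows; the resource check is a stub, not the real request_mem_region().

#include <stdio.h>
#include <errno.h>

/* Stub standing in for request_mem_region(): pretend port 1 is busy. */
static int claim_region(int port)
{
	return port != 1;
}

static int init_ports(int port)
{
	if (!claim_region(port)) {
		fprintf(stderr, "ide-demo: resources busy on port %d\n", port);
		return -EBUSY;
	}
	/* ...map registers, check that a card is present, and so on... */
	return 0;
}

static void probe(void)
{
	int port;

	for (port = 0; port < 2; port++) {
		/* Register only the ports whose init actually succeeded. */
		if (init_ports(port) == 0)
			printf("ide-demo: registered port %d\n", port);
		else
			printf("ide-demo: skipping port %d\n", port);
	}
}

int main(void)
{
	probe();
	return 0;
}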
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 88619b50d9ef..3cac6b2790dd 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -79,8 +79,6 @@ typedef struct pmac_ide_hwif {
79 79
80} pmac_ide_hwif_t; 80} pmac_ide_hwif_t;
81 81
82static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
83
84enum { 82enum {
85 controller_ohare, /* OHare based */ 83 controller_ohare, /* OHare based */
86 controller_heathrow, /* Heathrow/Paddington */ 84 controller_heathrow, /* Heathrow/Paddington */
@@ -411,7 +409,7 @@ kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
411 */ 409 */
412#define IDE_WAKEUP_DELAY (1*HZ) 410#define IDE_WAKEUP_DELAY (1*HZ)
413 411
414static int pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif); 412static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
415static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq); 413static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
416static void pmac_ide_selectproc(ide_drive_t *drive); 414static void pmac_ide_selectproc(ide_drive_t *drive);
417static void pmac_ide_kauai_selectproc(ide_drive_t *drive); 415static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
@@ -419,7 +417,7 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
419#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 417#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
420 418
421#define PMAC_IDE_REG(x) \ 419#define PMAC_IDE_REG(x) \
422 ((void __iomem *)((drive)->hwif->io_ports[IDE_DATA_OFFSET] + (x))) 420 ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
423 421
424/* 422/*
425 * Apply the timings of the proper unit (master/slave) to the shared 423 * Apply the timings of the proper unit (master/slave) to the shared
@@ -920,12 +918,29 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
920 return 0; 918 return 0;
921} 919}
922 920
921static const struct ide_port_ops pmac_ide_ata6_port_ops = {
922 .set_pio_mode = pmac_ide_set_pio_mode,
923 .set_dma_mode = pmac_ide_set_dma_mode,
924 .selectproc = pmac_ide_kauai_selectproc,
925};
926
927static const struct ide_port_ops pmac_ide_port_ops = {
928 .set_pio_mode = pmac_ide_set_pio_mode,
929 .set_dma_mode = pmac_ide_set_dma_mode,
930 .selectproc = pmac_ide_selectproc,
931};
932
933static const struct ide_dma_ops pmac_dma_ops;
934
923static const struct ide_port_info pmac_port_info = { 935static const struct ide_port_info pmac_port_info = {
936 .init_dma = pmac_ide_init_dma,
924 .chipset = ide_pmac, 937 .chipset = ide_pmac,
938#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
939 .dma_ops = &pmac_dma_ops,
940#endif
941 .port_ops = &pmac_ide_port_ops,
925 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 942 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
926 IDE_HFLAG_PIO_NO_DOWNGRADE |
927 IDE_HFLAG_POST_SET_MODE | 943 IDE_HFLAG_POST_SET_MODE |
928 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
929 IDE_HFLAG_UNMASK_IRQS, 944 IDE_HFLAG_UNMASK_IRQS,
930 .pio_mask = ATA_PIO4, 945 .pio_mask = ATA_PIO4,
931 .mwdma_mask = ATA_MWDMA2, 946 .mwdma_mask = ATA_MWDMA2,
@@ -950,12 +965,15 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
950 pmif->broken_dma = pmif->broken_dma_warn = 0; 965 pmif->broken_dma = pmif->broken_dma_warn = 0;
951 if (of_device_is_compatible(np, "shasta-ata")) { 966 if (of_device_is_compatible(np, "shasta-ata")) {
952 pmif->kind = controller_sh_ata6; 967 pmif->kind = controller_sh_ata6;
968 d.port_ops = &pmac_ide_ata6_port_ops;
953 d.udma_mask = ATA_UDMA6; 969 d.udma_mask = ATA_UDMA6;
954 } else if (of_device_is_compatible(np, "kauai-ata")) { 970 } else if (of_device_is_compatible(np, "kauai-ata")) {
955 pmif->kind = controller_un_ata6; 971 pmif->kind = controller_un_ata6;
972 d.port_ops = &pmac_ide_ata6_port_ops;
956 d.udma_mask = ATA_UDMA5; 973 d.udma_mask = ATA_UDMA5;
957 } else if (of_device_is_compatible(np, "K2-UATA")) { 974 } else if (of_device_is_compatible(np, "K2-UATA")) {
958 pmif->kind = controller_k2_ata6; 975 pmif->kind = controller_k2_ata6;
976 d.port_ops = &pmac_ide_ata6_port_ops;
959 d.udma_mask = ATA_UDMA5; 977 d.udma_mask = ATA_UDMA5;
960 } else if (of_device_is_compatible(np, "keylargo-ata")) { 978 } else if (of_device_is_compatible(np, "keylargo-ata")) {
961 if (strcmp(np->name, "ata-4") == 0) { 979 if (strcmp(np->name, "ata-4") == 0) {
@@ -1032,37 +1050,29 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
1032 default_hwif_mmiops(hwif); 1050 default_hwif_mmiops(hwif);
1033 hwif->OUTBSYNC = pmac_outbsync; 1051 hwif->OUTBSYNC = pmac_outbsync;
1034 1052
1035 /* Tell common code _not_ to mess with resources */
1036 hwif->mmio = 1;
1037 hwif->hwif_data = pmif; 1053 hwif->hwif_data = pmif;
1038 ide_init_port_hw(hwif, hw); 1054 ide_init_port_hw(hwif, hw);
1039 hwif->noprobe = pmif->mediabay;
1040 hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 1055 hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
1041 hwif->set_pio_mode = pmac_ide_set_pio_mode;
1042 if (pmif->kind == controller_un_ata6
1043 || pmif->kind == controller_k2_ata6
1044 || pmif->kind == controller_sh_ata6)
1045 hwif->selectproc = pmac_ide_kauai_selectproc;
1046 else
1047 hwif->selectproc = pmac_ide_selectproc;
1048 hwif->set_dma_mode = pmac_ide_set_dma_mode;
1049 1056
1050 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", 1057 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
1051 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id, 1058 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
1052 pmif->mediabay ? " (mediabay)" : "", hwif->irq); 1059 pmif->mediabay ? " (mediabay)" : "", hwif->irq);
1053 1060
1061 if (pmif->mediabay) {
1054#ifdef CONFIG_PMAC_MEDIABAY 1062#ifdef CONFIG_PMAC_MEDIABAY
1055 if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0) 1063 if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
1056 hwif->noprobe = 0; 1064#else
1057#endif /* CONFIG_PMAC_MEDIABAY */ 1065 if (1) {
1066#endif
1067 hwif->drives[0].noprobe = 1;
1068 hwif->drives[1].noprobe = 1;
1069 }
1070 }
1058 1071
1059#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1072#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1060 if (pmif->cable_80 == 0) 1073 if (pmif->cable_80 == 0)
1061 d.udma_mask &= ATA_UDMA2; 1074 d.udma_mask &= ATA_UDMA2;
1062 /* has a DBDMA controller channel */
1063 if (pmif->dma_regs == 0 || pmac_ide_setup_dma(pmif, hwif) < 0)
1064#endif 1075#endif
1065 d.udma_mask = d.mwdma_mask = 0;
1066 1076
1067 idx[0] = hwif->index; 1077 idx[0] = hwif->index;
1068 1078
@@ -1076,8 +1086,9 @@ static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base)
1076 int i; 1086 int i;
1077 1087
1078 for (i = 0; i < 8; ++i) 1088 for (i = 0; i < 8; ++i)
1079 hw->io_ports[i] = base + i * 0x10; 1089 hw->io_ports_array[i] = base + i * 0x10;
1080 hw->io_ports[8] = base + 0x160; 1090
1091 hw->io_ports.ctl_addr = base + 0x160;
1081} 1092}
1082 1093
1083/* 1094/*
@@ -1088,35 +1099,36 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1088{ 1099{
1089 void __iomem *base; 1100 void __iomem *base;
1090 unsigned long regbase; 1101 unsigned long regbase;
1091 int irq;
1092 ide_hwif_t *hwif; 1102 ide_hwif_t *hwif;
1093 pmac_ide_hwif_t *pmif; 1103 pmac_ide_hwif_t *pmif;
1094 int i, rc; 1104 int irq, rc;
1095 hw_regs_t hw; 1105 hw_regs_t hw;
1096 1106
1097 i = 0; 1107 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1098 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0 1108 if (pmif == NULL)
1099 || pmac_ide[i].node != NULL)) 1109 return -ENOMEM;
1100 ++i; 1110
1101 if (i >= MAX_HWIFS) { 1111 hwif = ide_find_port();
1112 if (hwif == NULL) {
1102 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n"); 1113 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
1103 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name); 1114 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
1104 return -ENODEV; 1115 rc = -ENODEV;
1116 goto out_free_pmif;
1105 } 1117 }
1106 1118
1107 pmif = &pmac_ide[i];
1108 hwif = &ide_hwifs[i];
1109
1110 if (macio_resource_count(mdev) == 0) { 1119 if (macio_resource_count(mdev) == 0) {
1111 printk(KERN_WARNING "ide%d: no address for %s\n", 1120 printk(KERN_WARNING "ide-pmac: no address for %s\n",
1112 i, mdev->ofdev.node->full_name); 1121 mdev->ofdev.node->full_name);
1113 return -ENXIO; 1122 rc = -ENXIO;
1123 goto out_free_pmif;
1114 } 1124 }
1115 1125
1116 /* Request memory resource for IO ports */ 1126 /* Request memory resource for IO ports */
1117 if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) { 1127 if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
1118 printk(KERN_ERR "ide%d: can't request mmio resource !\n", i); 1128 printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
1119 return -EBUSY; 1129 "%s!\n", mdev->ofdev.node->full_name);
1130 rc = -EBUSY;
1131 goto out_free_pmif;
1120 } 1132 }
1121 1133
1122 /* XXX This is bogus. Should be fixed in the registry by checking 1134 /* XXX This is bogus. Should be fixed in the registry by checking
@@ -1125,8 +1137,8 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1125 * where that happens though... 1137 * where that happens though...
1126 */ 1138 */
1127 if (macio_irq_count(mdev) == 0) { 1139 if (macio_irq_count(mdev) == 0) {
1128 printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n", 1140 printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
1129 i, mdev->ofdev.node->full_name); 1141 "13\n", mdev->ofdev.node->full_name);
1130 irq = irq_create_mapping(NULL, 13); 1142 irq = irq_create_mapping(NULL, 13);
1131 } else 1143 } else
1132 irq = macio_irq(mdev, 0); 1144 irq = macio_irq(mdev, 0);
@@ -1144,7 +1156,9 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1144#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1156#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1145 if (macio_resource_count(mdev) >= 2) { 1157 if (macio_resource_count(mdev) >= 2) {
1146 if (macio_request_resource(mdev, 1, "ide-pmac (dma)")) 1158 if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
1147 printk(KERN_WARNING "ide%d: can't request DMA resource !\n", i); 1159 printk(KERN_WARNING "ide-pmac: can't request DMA "
1160 "resource for %s!\n",
1161 mdev->ofdev.node->full_name);
1148 else 1162 else
1149 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000); 1163 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
1150 } else 1164 } else
@@ -1166,11 +1180,15 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1166 iounmap(pmif->dma_regs); 1180 iounmap(pmif->dma_regs);
1167 macio_release_resource(mdev, 1); 1181 macio_release_resource(mdev, 1);
1168 } 1182 }
1169 memset(pmif, 0, sizeof(*pmif));
1170 macio_release_resource(mdev, 0); 1183 macio_release_resource(mdev, 0);
1184 kfree(pmif);
1171 } 1185 }
1172 1186
1173 return rc; 1187 return rc;
1188
1189out_free_pmif:
1190 kfree(pmif);
1191 return rc;
1174} 1192}
1175 1193
1176static int 1194static int
@@ -1215,7 +1233,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1215 pmac_ide_hwif_t *pmif; 1233 pmac_ide_hwif_t *pmif;
1216 void __iomem *base; 1234 void __iomem *base;
1217 unsigned long rbase, rlen; 1235 unsigned long rbase, rlen;
1218 int i, rc; 1236 int rc;
1219 hw_regs_t hw; 1237 hw_regs_t hw;
1220 1238
1221 np = pci_device_to_OF_node(pdev); 1239 np = pci_device_to_OF_node(pdev);
@@ -1223,30 +1241,32 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1223 printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n"); 1241 printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
1224 return -ENODEV; 1242 return -ENODEV;
1225 } 1243 }
1226 i = 0; 1244
1227 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0 1245 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1228 || pmac_ide[i].node != NULL)) 1246 if (pmif == NULL)
1229 ++i; 1247 return -ENOMEM;
1230 if (i >= MAX_HWIFS) { 1248
1249 hwif = ide_find_port();
1250 if (hwif == NULL) {
1231 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n"); 1251 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
1232 printk(KERN_ERR " %s\n", np->full_name); 1252 printk(KERN_ERR " %s\n", np->full_name);
1233 return -ENODEV; 1253 rc = -ENODEV;
1254 goto out_free_pmif;
1234 } 1255 }
1235 1256
1236 pmif = &pmac_ide[i];
1237 hwif = &ide_hwifs[i];
1238
1239 if (pci_enable_device(pdev)) { 1257 if (pci_enable_device(pdev)) {
1240 printk(KERN_WARNING "ide%i: Can't enable PCI device for %s\n", 1258 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
1241 i, np->full_name); 1259 "%s\n", np->full_name);
1242 return -ENXIO; 1260 rc = -ENXIO;
1261 goto out_free_pmif;
1243 } 1262 }
1244 pci_set_master(pdev); 1263 pci_set_master(pdev);
1245 1264
1246 if (pci_request_regions(pdev, "Kauai ATA")) { 1265 if (pci_request_regions(pdev, "Kauai ATA")) {
1247 printk(KERN_ERR "ide%d: Cannot obtain PCI resources for %s\n", 1266 printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
1248 i, np->full_name); 1267 "%s\n", np->full_name);
1249 return -ENXIO; 1268 rc = -ENXIO;
1269 goto out_free_pmif;
1250 } 1270 }
1251 1271
1252 hwif->dev = &pdev->dev; 1272 hwif->dev = &pdev->dev;
@@ -1276,11 +1296,15 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1276 /* The interface is released to the common IDE layer */ 1296 /* The interface is released to the common IDE layer */
1277 pci_set_drvdata(pdev, NULL); 1297 pci_set_drvdata(pdev, NULL);
1278 iounmap(base); 1298 iounmap(base);
1279 memset(pmif, 0, sizeof(*pmif));
1280 pci_release_regions(pdev); 1299 pci_release_regions(pdev);
1300 kfree(pmif);
1281 } 1301 }
1282 1302
1283 return rc; 1303 return rc;
1304
1305out_free_pmif:
1306 kfree(pmif);
1307 return rc;
1284} 1308}
1285 1309
1286static int 1310static int
@@ -1652,18 +1676,31 @@ pmac_ide_dma_lost_irq (ide_drive_t *drive)
1652 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status); 1676 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
1653} 1677}
1654 1678
1679static const struct ide_dma_ops pmac_dma_ops = {
1680 .dma_host_set = pmac_ide_dma_host_set,
1681 .dma_setup = pmac_ide_dma_setup,
1682 .dma_exec_cmd = pmac_ide_dma_exec_cmd,
1683 .dma_start = pmac_ide_dma_start,
1684 .dma_end = pmac_ide_dma_end,
1685 .dma_test_irq = pmac_ide_dma_test_irq,
1686 .dma_timeout = ide_dma_timeout,
1687 .dma_lost_irq = pmac_ide_dma_lost_irq,
1688};
1689
1655/* 1690/*
1656 * Allocate the data structures needed for using DMA with an interface 1691 * Allocate the data structures needed for using DMA with an interface
1657 * and fill the proper list of functions pointers 1692 * and fill the proper list of functions pointers
1658 */ 1693 */
1659static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) 1694static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1695 const struct ide_port_info *d)
1660{ 1696{
1697 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1661 struct pci_dev *dev = to_pci_dev(hwif->dev); 1698 struct pci_dev *dev = to_pci_dev(hwif->dev);
1662 1699
1663 /* We won't need pci_dev if we switch to generic consistent 1700 /* We won't need pci_dev if we switch to generic consistent
1664 * DMA routines ... 1701 * DMA routines ...
1665 */ 1702 */
1666 if (dev == NULL) 1703 if (dev == NULL || pmif->dma_regs == 0)
1667 return -ENODEV; 1704 return -ENODEV;
1668 /* 1705 /*
1669 * Allocate space for the DBDMA commands. 1706 * Allocate space for the DBDMA commands.
@@ -1682,18 +1719,14 @@ static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1682 1719
1683 hwif->sg_max_nents = MAX_DCMDS; 1720 hwif->sg_max_nents = MAX_DCMDS;
1684 1721
1685 hwif->dma_host_set = &pmac_ide_dma_host_set;
1686 hwif->dma_setup = &pmac_ide_dma_setup;
1687 hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
1688 hwif->dma_start = &pmac_ide_dma_start;
1689 hwif->ide_dma_end = &pmac_ide_dma_end;
1690 hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
1691 hwif->dma_timeout = &ide_dma_timeout;
1692 hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
1693
1694 return 0; 1722 return 0;
1695} 1723}
1696 1724#else
1725static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1726 const struct ide_port_info *d)
1727{
1728 return -EOPNOTSUPP;
1729}
1697#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1730#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1698 1731
1699module_init(pmac_ide_probe); 1732module_init(pmac_ide_probe);
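The pmac changes above drop the static pmac_ide[MAX_HWIFS] array in favour of allocating one pmac_ide_hwif_t per probed interface and freeing it on every failure path. The small standalone sketch below mirrors that allocate-then-unwind shape, with calloc/free standing in for kzalloc/kfree and a stubbed slot lookup.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct demo_hwif_priv {
	unsigned long regbase;
	int irq;
};

/* Stub: pretend the slot lookup fails when no slot is available. */
static int find_free_slot(int have_slot)
{
	return have_slot ? 0 : -1;
}

static int attach(int have_slot)
{
	struct demo_hwif_priv *pmif;
	int rc;

	/* One private structure per interface, zero-initialised. */
	pmif = calloc(1, sizeof(*pmif));
	if (pmif == NULL)
		return -ENOMEM;

	if (find_free_slot(have_slot) < 0) {
		fprintf(stderr, "demo: interface attach with no free slot\n");
		rc = -ENODEV;
		goto out_free;		/* every failure path frees pmif */
	}

	pmif->regbase = 0x1000;
	pmif->irq = 13;
	printf("demo: attached, regbase=0x%lx irq=%d\n",
	       pmif->regbase, pmif->irq);
	/* On success the structure lives on with the interface; a real
	 * driver would free it again in its detach path. */
	return 0;

out_free:
	free(pmif);
	return rc;
}

int main(void)
{
	attach(1);
	attach(0);
	return 0;
}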
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index f7ede0e42881..5171601fb255 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -20,73 +20,6 @@
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/irq.h> 21#include <asm/irq.h>
22 22
23
24/**
25 * ide_match_hwif - match a PCI IDE against an ide_hwif
26 * @io_base: I/O base of device
27 * @bootable: set if its bootable
28 * @name: name of device
29 *
30 * Match a PCI IDE port against an entry in ide_hwifs[],
31 * based on io_base port if possible. Return the matching hwif,
32 * or a new hwif. If we find an error (clashing, out of devices, etc)
33 * return NULL
34 *
35 * FIXME: we need to handle mmio matches here too
36 */
37
38static ide_hwif_t *ide_match_hwif(unsigned long io_base, u8 bootable, const char *name)
39{
40 int h;
41 ide_hwif_t *hwif;
42
43 /*
44 * Look for a hwif with matching io_base default value.
45 * If chipset is "ide_unknown", then claim that hwif slot.
46 * Otherwise, some other chipset has already claimed it.. :(
47 */
48 for (h = 0; h < MAX_HWIFS; ++h) {
49 hwif = &ide_hwifs[h];
50 if (hwif->io_ports[IDE_DATA_OFFSET] == io_base) {
51 if (hwif->chipset == ide_unknown)
52 return hwif; /* match */
53 printk(KERN_ERR "%s: port 0x%04lx already claimed by %s\n",
54 name, io_base, hwif->name);
55 return NULL; /* already claimed */
56 }
57 }
58 /*
59 * Okay, there is no hwif matching our io_base,
60 * so we'll just claim an unassigned slot.
61 * Give preference to claiming other slots before claiming ide0/ide1,
62 * just in case there's another interface yet-to-be-scanned
63 * which uses ports 1f0/170 (the ide0/ide1 defaults).
64 *
65 * Unless there is a bootable card that does not use the standard
66 * ports 1f0/170 (the ide0/ide1 defaults). The (bootable) flag.
67 */
68 if (bootable) {
69 for (h = 0; h < MAX_HWIFS; ++h) {
70 hwif = &ide_hwifs[h];
71 if (hwif->chipset == ide_unknown)
72 return hwif; /* pick an unused entry */
73 }
74 } else {
75 for (h = 2; h < MAX_HWIFS; ++h) {
76 hwif = ide_hwifs + h;
77 if (hwif->chipset == ide_unknown)
78 return hwif; /* pick an unused entry */
79 }
80 }
81 for (h = 0; h < 2 && h < MAX_HWIFS; ++h) {
82 hwif = ide_hwifs + h;
83 if (hwif->chipset == ide_unknown)
84 return hwif; /* pick an unused entry */
85 }
86 printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", name);
87 return NULL;
88}
89
90/** 23/**
91 * ide_setup_pci_baseregs - place a PCI IDE controller native 24 * ide_setup_pci_baseregs - place a PCI IDE controller native
92 * @dev: PCI device of interface to switch native 25 * @dev: PCI device of interface to switch native
@@ -94,13 +27,13 @@ static ide_hwif_t *ide_match_hwif(unsigned long io_base, u8 bootable, const char
94 * 27 *
95 * We attempt to place the PCI interface into PCI native mode. If 28 * We attempt to place the PCI interface into PCI native mode. If
96 * we succeed the BARs are ok and the controller is in PCI mode. 29 * we succeed the BARs are ok and the controller is in PCI mode.
97 * Returns 0 on success or an errno code. 30 * Returns 0 on success or an errno code.
98 * 31 *
99 * FIXME: if we program the interface and then fail to set the BARS 32 * FIXME: if we program the interface and then fail to set the BARS
100 * we don't switch it back to legacy mode. Do we actually care ?? 33 * we don't switch it back to legacy mode. Do we actually care ??
101 */ 34 */
102 35
103static int ide_setup_pci_baseregs (struct pci_dev *dev, const char *name) 36static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
104{ 37{
105 u8 progif = 0; 38 u8 progif = 0;
106 39
@@ -139,16 +72,16 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
139} 72}
140 73
141/** 74/**
142 * ide_get_or_set_dma_base - setup BMIBA 75 * ide_pci_dma_base - setup BMIBA
143 * @d: IDE port info
144 * @hwif: IDE interface 76 * @hwif: IDE interface
77 * @d: IDE port info
145 * 78 *
146 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space. 79 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
147 * Where a device has a partner that is already in DMA mode we check 80 * Where a device has a partner that is already in DMA mode we check
148 * and enforce IDE simplex rules. 81 * and enforce IDE simplex rules.
149 */ 82 */
150 83
151static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_hwif_t *hwif) 84unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
152{ 85{
153 struct pci_dev *dev = to_pci_dev(hwif->dev); 86 struct pci_dev *dev = to_pci_dev(hwif->dev);
154 unsigned long dma_base = 0; 87 unsigned long dma_base = 0;
@@ -199,6 +132,31 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_
199out: 132out:
200 return dma_base; 133 return dma_base;
201} 134}
135EXPORT_SYMBOL_GPL(ide_pci_dma_base);
136
137/*
138 * Set up BM-DMA capability (PnP BIOS should have done this)
139 */
140int ide_pci_set_master(struct pci_dev *dev, const char *name)
141{
142 u16 pcicmd;
143
144 pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
145
146 if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
147 pci_set_master(dev);
148
149 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
150 (pcicmd & PCI_COMMAND_MASTER) == 0) {
151 printk(KERN_ERR "%s: error updating PCICMD on %s\n",
152 name, pci_name(dev));
153 return -EIO;
154 }
155 }
156
157 return 0;
158}
159EXPORT_SYMBOL_GPL(ide_pci_set_master);
202#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ 160#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
203 161
204void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d) 162void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
@@ -207,7 +165,6 @@ void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
207 " PCI slot %s\n", d->name, dev->vendor, dev->device, 165 " PCI slot %s\n", d->name, dev->vendor, dev->device,
208 dev->revision, pci_name(dev)); 166 dev->revision, pci_name(dev));
209} 167}
210
211EXPORT_SYMBOL_GPL(ide_setup_pci_noise); 168EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
212 169
213 170
@@ -220,13 +177,13 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
220 * but if that fails then we only need IO space. The PCI code should 177 * but if that fails then we only need IO space. The PCI code should
221 * have setup the proper resources for us already for controllers in 178 * have setup the proper resources for us already for controllers in
222 * legacy mode. 179 * legacy mode.
223 * 180 *
224 * Returns zero on success or an error code 181 * Returns zero on success or an error code
225 */ 182 */
226 183
227static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) 184static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
228{ 185{
229 int ret; 186 int ret, bars;
230 187
231 if (pci_enable_device(dev)) { 188 if (pci_enable_device(dev)) {
232 ret = pci_enable_device_io(dev); 189 ret = pci_enable_device_io(dev);
@@ -249,13 +206,21 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
249 goto out; 206 goto out;
250 } 207 }
251 208
252 /* FIXME: Temporary - until we put in the hotplug interface logic 209 if (d->host_flags & IDE_HFLAG_SINGLE)
253 Check that the bits we want are not in use by someone else. */ 210 bars = (1 << 2) - 1;
254 ret = pci_request_region(dev, 4, "ide_tmp"); 211 else
255 if (ret < 0) 212 bars = (1 << 4) - 1;
256 goto out; 213
214 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
215 if (d->host_flags & IDE_HFLAG_CS5520)
216 bars |= (1 << 2);
217 else
218 bars |= (1 << 4);
219 }
257 220
258 pci_release_region(dev, 4); 221 ret = pci_request_selected_regions(dev, bars, d->name);
222 if (ret < 0)
223 printk(KERN_ERR "%s: can't reserve resources\n", d->name);
259out: 224out:
260 return ret; 225 return ret;
261} 226}
@@ -279,8 +244,8 @@ static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
279 * Maybe the user deliberately *disabled* the device, 244 * Maybe the user deliberately *disabled* the device,
280 * but we'll eventually ignore it again if no drives respond. 245 * but we'll eventually ignore it again if no drives respond.
281 */ 246 */
282 if (ide_setup_pci_baseregs(dev, d->name) || pci_write_config_word(dev, PCI_COMMAND, pcicmd|PCI_COMMAND_IO)) 247 if (ide_setup_pci_baseregs(dev, d->name) ||
283 { 248 pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
284 printk(KERN_INFO "%s: device disabled (BIOS)\n", d->name); 249 printk(KERN_INFO "%s: device disabled (BIOS)\n", d->name);
285 return -ENODEV; 250 return -ENODEV;
286 } 251 }
@@ -301,26 +266,24 @@ static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
301 * @d: IDE port info 266 * @d: IDE port info
302 * @bar: BAR number 267 * @bar: BAR number
303 * 268 *
304 * Checks if a BAR is configured and points to MMIO space. If so 269 * Checks if a BAR is configured and points to MMIO space. If so,
305 * print an error and return an error code. Otherwise return 0 270 * return an error code. Otherwise return 0
306 */ 271 */
307 272
308static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d, int bar) 273static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
274 int bar)
309{ 275{
310 ulong flags = pci_resource_flags(dev, bar); 276 ulong flags = pci_resource_flags(dev, bar);
311 277
312 /* Unconfigured ? */ 278 /* Unconfigured ? */
313 if (!flags || pci_resource_len(dev, bar) == 0) 279 if (!flags || pci_resource_len(dev, bar) == 0)
314 return 0; 280 return 0;
315 281
316 /* I/O space */ 282 /* I/O space */
317 if(flags & PCI_BASE_ADDRESS_IO_MASK) 283 if (flags & IORESOURCE_IO)
318 return 0; 284 return 0;
319 285
320 /* Bad */ 286 /* Bad */
321 printk(KERN_ERR "%s: IO baseregs (BIOS) are reported "
322 "as MEM, report to "
323 "<andre@linux-ide.org>.\n", d->name);
324 return -EINVAL; 287 return -EINVAL;
325} 288}
326 289
@@ -344,14 +307,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
344{ 307{
345 unsigned long ctl = 0, base = 0; 308 unsigned long ctl = 0, base = 0;
346 ide_hwif_t *hwif; 309 ide_hwif_t *hwif;
347 u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0;
348 struct hw_regs_s hw; 310 struct hw_regs_s hw;
349 311
350 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { 312 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
351 /* Possibly we should fail if these checks report true */ 313 if (ide_pci_check_iomem(dev, d, 2 * port) ||
352 ide_pci_check_iomem(dev, d, 2*port); 314 ide_pci_check_iomem(dev, d, 2 * port + 1)) {
353 ide_pci_check_iomem(dev, d, 2*port+1); 315 printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported "
354 316 "as MEM for port %d!\n", d->name, port);
317 return NULL;
318 }
319
355 ctl = pci_resource_start(dev, 2*port+1); 320 ctl = pci_resource_start(dev, 2*port+1);
356 base = pci_resource_start(dev, 2*port); 321 base = pci_resource_start(dev, 2*port);
357 if ((ctl && !base) || (base && !ctl)) { 322 if ((ctl && !base) || (base && !ctl)) {
@@ -360,14 +325,18 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
360 return NULL; 325 return NULL;
361 } 326 }
362 } 327 }
363 if (!ctl) 328 if (!ctl) {
364 {
365 /* Use default values */ 329 /* Use default values */
366 ctl = port ? 0x374 : 0x3f4; 330 ctl = port ? 0x374 : 0x3f4;
367 base = port ? 0x170 : 0x1f0; 331 base = port ? 0x170 : 0x1f0;
368 } 332 }
369 if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL) 333
370 return NULL; /* no room in ide_hwifs[] */ 334 hwif = ide_find_port_slot(d);
335 if (hwif == NULL) {
336 printk(KERN_ERR "%s: too many IDE interfaces, no room in "
337 "table\n", d->name);
338 return NULL;
339 }
371 340
372 memset(&hw, 0, sizeof(hw)); 341 memset(&hw, 0, sizeof(hw));
373 hw.irq = irq; 342 hw.irq = irq;
@@ -378,7 +347,6 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
378 ide_init_port_hw(hwif, &hw); 347 ide_init_port_hw(hwif, &hw);
379 348
380 hwif->dev = &dev->dev; 349 hwif->dev = &dev->dev;
381 hwif->cds = d;
382 350
383 return hwif; 351 return hwif;
384} 352}
@@ -394,40 +362,33 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
394 * state 362 * state
395 */ 363 */
396 364
397void ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) 365int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
398{ 366{
399 struct pci_dev *dev = to_pci_dev(hwif->dev); 367 struct pci_dev *dev = to_pci_dev(hwif->dev);
400 u16 pcicmd;
401
402 pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
403 368
404 if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 || 369 if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
405 ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && 370 ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
406 (dev->class & 0x80))) { 371 (dev->class & 0x80))) {
407 unsigned long dma_base = ide_get_or_set_dma_base(d, hwif); 372 unsigned long base = ide_pci_dma_base(hwif, d);
408 if (dma_base && !(pcicmd & PCI_COMMAND_MASTER)) { 373
409 /* 374 if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
410 * Set up BM-DMA capability 375 return -1;
411 * (PnP BIOS should have done this) 376
412 */ 377 if (hwif->mmio)
413 pci_set_master(dev); 378 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
414 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) || !(pcicmd & PCI_COMMAND_MASTER)) { 379 else
415 printk(KERN_ERR "%s: %s error updating PCICMD\n", 380 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
416 hwif->name, d->name); 381 hwif->name, base, base + 7);
417 dma_base = 0; 382
418 } 383 hwif->extra_base = base + (hwif->channel ? 8 : 16);
419 } 384
420 if (dma_base) { 385 if (ide_allocate_dma_engine(hwif))
421 if (d->init_dma) { 386 return -1;
422 d->init_dma(hwif, dma_base); 387
423 } else { 388 ide_setup_dma(hwif, base);
424 ide_setup_dma(hwif, dma_base);
425 }
426 } else {
427 printk(KERN_INFO "%s: %s Bus-Master DMA disabled "
428 "(BIOS)\n", hwif->name, d->name);
429 }
430 } 389 }
390
391 return 0;
431} 392}
432#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ 393#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
433 394
@@ -514,7 +475,6 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
514 *(idx + port) = hwif->index; 475 *(idx + port) = hwif->index;
515 } 476 }
516} 477}
517
518EXPORT_SYMBOL_GPL(ide_pci_setup_ports); 478EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
519 479
520/* 480/*
@@ -597,7 +557,6 @@ int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d)
597 557
598 return ret; 558 return ret;
599} 559}
600
601EXPORT_SYMBOL_GPL(ide_setup_pci_device); 560EXPORT_SYMBOL_GPL(ide_setup_pci_device);
602 561
603int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2, 562int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
@@ -621,5 +580,4 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
621out: 580out:
622 return ret; 581 return ret;
623} 582}
624
625EXPORT_SYMBOL_GPL(ide_setup_pci_devices); 583EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
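In the setup-pci.c hunk above, ide_pci_enable() now reserves exactly the BARs the controller will use by building a bitmask for pci_request_selected_regions(): BARs 0-1 for a single-port chip, 0-3 otherwise, plus the bus-master DMA BAR when DMA is possible. The standalone sketch below reproduces just that mask computation; the flag names are simplified stand-ins for the IDE_HFLAG_* bits.

#include <stdio.h>

/* Simplified stand-ins for the relevant host flags. */
#define HFLAG_SINGLE	(1u << 0)	/* one port: only BARs 0 and 1 */
#define HFLAG_NO_DMA	(1u << 1)	/* no bus-master DMA BAR needed */
#define HFLAG_CS5520	(1u << 2)	/* DMA registers live in BAR 2 */

static unsigned int ide_bar_mask(unsigned int host_flags)
{
	unsigned int bars;

	if (host_flags & HFLAG_SINGLE)
		bars = (1u << 2) - 1;		/* 0b0011: BARs 0-1 */
	else
		bars = (1u << 4) - 1;		/* 0b1111: BARs 0-3 */

	if (!(host_flags & HFLAG_NO_DMA)) {
		if (host_flags & HFLAG_CS5520)
			bars |= 1u << 2;	/* DMA registers in BAR 2 */
		else
			bars |= 1u << 4;	/* usual BMIBA in BAR 4 */
	}
	return bars;
}

int main(void)
{
	printf("dual port + DMA : 0x%02x\n", ide_bar_mask(0));
	printf("single port     : 0x%02x\n", ide_bar_mask(HFLAG_SINGLE));
	printf("no DMA          : 0x%02x\n", ide_bar_mask(HFLAG_NO_DMA));
	return 0;
}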
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0d13fe0a260b..3d6d9461c31d 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -160,6 +160,7 @@ struct ehca_qp {
160 }; 160 };
161 u32 qp_type; 161 u32 qp_type;
162 enum ehca_ext_qp_type ext_type; 162 enum ehca_ext_qp_type ext_type;
163 enum ib_qp_state state;
163 struct ipz_queue ipz_squeue; 164 struct ipz_queue ipz_squeue;
164 struct ipz_queue ipz_rqueue; 165 struct ipz_queue ipz_rqueue;
165 struct h_galpas galpas; 166 struct h_galpas galpas;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index b5ca94c6b8d9..ca5eb0cb628c 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
633 unsigned long flags; 633 unsigned long flags;
634 634
635 WARN_ON_ONCE(!in_interrupt()); 635 WARN_ON_ONCE(!in_interrupt());
636 if (ehca_debug_level) 636 if (ehca_debug_level >= 3)
637 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 637 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
638 638
639 spin_lock_irqsave(&pool->last_cpu_lock, flags); 639 spin_lock_irqsave(&pool->last_cpu_lock, flags);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 65b3362cdb9b..65048976198c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -50,7 +50,7 @@
50#include "ehca_tools.h" 50#include "ehca_tools.h"
51#include "hcp_if.h" 51#include "hcp_if.h"
52 52
53#define HCAD_VERSION "0025" 53#define HCAD_VERSION "0026"
54 54
55MODULE_LICENSE("Dual BSD/GPL"); 55MODULE_LICENSE("Dual BSD/GPL");
56MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 56MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION);
60static int ehca_open_aqp1 = 0; 60static int ehca_open_aqp1 = 0;
61static int ehca_hw_level = 0; 61static int ehca_hw_level = 0;
62static int ehca_poll_all_eqs = 1; 62static int ehca_poll_all_eqs = 1;
63static int ehca_mr_largepage = 1;
64 63
65int ehca_debug_level = 0; 64int ehca_debug_level = 0;
66int ehca_nr_ports = 2; 65int ehca_nr_ports = 2;
@@ -70,45 +69,40 @@ int ehca_static_rate = -1;
70int ehca_scaling_code = 0; 69int ehca_scaling_code = 0;
71int ehca_lock_hcalls = -1; 70int ehca_lock_hcalls = -1;
72 71
73module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO); 72module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
74module_param_named(debug_level, ehca_debug_level, int, S_IRUGO); 73module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
75module_param_named(hw_level, ehca_hw_level, int, S_IRUGO); 74module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
76module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO); 75module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
77module_param_named(use_hp_mr, ehca_use_hp_mr, int, S_IRUGO); 76module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
78module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO); 77module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
79module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, S_IRUGO); 78module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
80module_param_named(static_rate, ehca_static_rate, int, S_IRUGO); 79module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
81module_param_named(scaling_code, ehca_scaling_code, int, S_IRUGO); 80module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
82module_param_named(mr_largepage, ehca_mr_largepage, int, S_IRUGO);
83module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO); 81module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);
84 82
85MODULE_PARM_DESC(open_aqp1, 83MODULE_PARM_DESC(open_aqp1,
86 "AQP1 on startup (0: no (default), 1: yes)"); 84 "Open AQP1 on startup (default: no)");
87MODULE_PARM_DESC(debug_level, 85MODULE_PARM_DESC(debug_level,
88 "debug level" 86 "Amount of debug output (0: none (default), 1: traces, "
89 " (0: no debug traces (default), 1: with debug traces)"); 87 "2: some dumps, 3: lots)");
90MODULE_PARM_DESC(hw_level, 88MODULE_PARM_DESC(hw_level,
91 "hardware level" 89 "Hardware level (0: autosensing (default), "
92 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)"); 90 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
93MODULE_PARM_DESC(nr_ports, 91MODULE_PARM_DESC(nr_ports,
94 "number of connected ports (-1: autodetect, 1: port one only, " 92 "number of connected ports (-1: autodetect, 1: port one only, "
95 "2: two ports (default)"); 93 "2: two ports (default)");
96MODULE_PARM_DESC(use_hp_mr, 94MODULE_PARM_DESC(use_hp_mr,
97 "high performance MRs (0: no (default), 1: yes)"); 95 "Use high performance MRs (default: no)");
98MODULE_PARM_DESC(port_act_time, 96MODULE_PARM_DESC(port_act_time,
99 "time to wait for port activation (default: 30 sec)"); 97 "Time to wait for port activation (default: 30 sec)");
100MODULE_PARM_DESC(poll_all_eqs, 98MODULE_PARM_DESC(poll_all_eqs,
101 "polls all event queues periodically" 99 "Poll all event queues periodically (default: yes)");
102 " (0: no, 1: yes (default))");
103MODULE_PARM_DESC(static_rate, 100MODULE_PARM_DESC(static_rate,
104 "set permanent static rate (default: disabled)"); 101 "Set permanent static rate (default: no static rate)");
105MODULE_PARM_DESC(scaling_code, 102MODULE_PARM_DESC(scaling_code,
106 "set scaling code (0: disabled/default, 1: enabled)"); 103 "Enable scaling code (default: no)");
107MODULE_PARM_DESC(mr_largepage,
108 "use large page for MR (0: use PAGE_SIZE (default), "
109 "1: use large page depending on MR size");
110MODULE_PARM_DESC(lock_hcalls, 104MODULE_PARM_DESC(lock_hcalls,
111 "serialize all hCalls made by the driver " 105 "Serialize all hCalls made by the driver "
112 "(default: autodetect)"); 106 "(default: autodetect)");
113 107
114DEFINE_RWLOCK(ehca_qp_idr_lock); 108DEFINE_RWLOCK(ehca_qp_idr_lock);
@@ -275,6 +269,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
275 u64 h_ret; 269 u64 h_ret;
276 struct hipz_query_hca *rblock; 270 struct hipz_query_hca *rblock;
277 struct hipz_query_port *port; 271 struct hipz_query_port *port;
272 const char *loc_code;
278 273
279 static const u32 pgsize_map[] = { 274 static const u32 pgsize_map[] = {
280 HCA_CAP_MR_PGSIZE_4K, 0x1000, 275 HCA_CAP_MR_PGSIZE_4K, 0x1000,
@@ -283,6 +278,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
283 HCA_CAP_MR_PGSIZE_16M, 0x1000000, 278 HCA_CAP_MR_PGSIZE_16M, 0x1000000,
284 }; 279 };
285 280
281 ehca_gen_dbg("Probing adapter %s...",
282 shca->ofdev->node->full_name);
283 loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
284 if (loc_code)
285 ehca_gen_dbg(" ... location code=%s", loc_code);
286
286 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 287 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
287 if (!rblock) { 288 if (!rblock) {
288 ehca_gen_err("Cannot allocate rblock memory."); 289 ehca_gen_err("Cannot allocate rblock memory.");
@@ -350,11 +351,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
350 351
351 /* translate supported MR page sizes; always support 4K */ 352 /* translate supported MR page sizes; always support 4K */
352 shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; 353 shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
353 if (ehca_mr_largepage) { /* support extra sizes only if enabled */ 354 for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
354 for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2) 355 if (rblock->memory_page_size_supported & pgsize_map[i])
355 if (rblock->memory_page_size_supported & pgsize_map[i]) 356 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
356 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
357 }
358 357
359 /* query max MTU from first port -- it's the same for all ports */ 358 /* query max MTU from first port -- it's the same for all ports */
360 port = (struct hipz_query_port *)rblock; 359 port = (struct hipz_query_port *)rblock;
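
With the mr_largepage switch gone, every page size the HCA advertises is translated unconditionally by walking pgsize_map in (capability bit, byte size) pairs. A small self-contained sketch of that walk; the capability bit values here are invented stand-ins, only the pairing scheme matches the hunk above:

/*
 * Userspace sketch of the capability-bit -> page-size translation.
 * The CAP_PG_* values are made up for the example.
 */
#include <stdio.h>

#define CAP_PG_4K   0x1
#define CAP_PG_64K  0x2
#define CAP_PG_1M   0x4
#define CAP_PG_16M  0x8

static const unsigned long pgsize_map[] = {
        CAP_PG_4K,  0x1000,
        CAP_PG_64K, 0x10000,
        CAP_PG_1M,  0x100000,
        CAP_PG_16M, 0x1000000,
};

int main(void)
{
        unsigned long supported = CAP_PG_4K | CAP_PG_1M;   /* pretend HCA answer */
        unsigned long pgsizes = 0x1000;                     /* 4K is always supported */
        unsigned int i;

        /* walk (capability bit, byte size) pairs and OR in every advertised size */
        for (i = 0; i < sizeof(pgsize_map) / sizeof(pgsize_map[0]); i += 2)
                if (supported & pgsize_map[i])
                        pgsizes |= pgsize_map[i + 1];

        printf("supported MR page sizes: 0x%lx\n", pgsizes);
        return 0;
}
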
@@ -567,8 +566,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
567 566
568static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf) 567static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
569{ 568{
570 return snprintf(buf, PAGE_SIZE, "%d\n", 569 return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
571 ehca_debug_level);
572} 570}
573 571
574static ssize_t ehca_store_debug_level(struct device_driver *ddp, 572static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -657,14 +655,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
657} 655}
658static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); 656static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
659 657
660static ssize_t ehca_show_mr_largepage(struct device *dev,
661 struct device_attribute *attr,
662 char *buf)
663{
664 return sprintf(buf, "%d\n", ehca_mr_largepage);
665}
666static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
667
668static struct attribute *ehca_dev_attrs[] = { 658static struct attribute *ehca_dev_attrs[] = {
669 &dev_attr_adapter_handle.attr, 659 &dev_attr_adapter_handle.attr,
670 &dev_attr_num_ports.attr, 660 &dev_attr_num_ports.attr,
@@ -681,7 +671,6 @@ static struct attribute *ehca_dev_attrs[] = {
681 &dev_attr_cur_mw.attr, 671 &dev_attr_cur_mw.attr,
682 &dev_attr_max_pd.attr, 672 &dev_attr_max_pd.attr,
683 &dev_attr_max_ah.attr, 673 &dev_attr_max_ah.attr,
684 &dev_attr_mr_largepage.attr,
685 NULL 674 NULL
686}; 675};
687 676
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f26997fc00f8..46ae4eb2c4e1 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
1794 int t; 1794 int t;
1795 for (t = start_idx; t <= end_idx; t++) { 1795 for (t = start_idx; t <= end_idx; t++) {
1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
1797 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1797 if (ehca_debug_level >= 3)
1798 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1798 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
1799 *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
1799 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1800 ehca_gen_err("uncontiguous page found pgaddr=%lx " 1801 ehca_gen_err("uncontiguous page found pgaddr=%lx "
1801 "prev_pgaddr=%lx page_list_i=%x", 1802 "prev_pgaddr=%lx page_list_i=%x",
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1862 pgaddr & 1863 pgaddr &
1863 ~(pginfo->hwpage_size - 1)); 1864 ~(pginfo->hwpage_size - 1));
1864 } 1865 }
1865 ehca_gen_dbg("kpage=%lx chunk_page=%lx " 1866 if (ehca_debug_level >= 3) {
1866 "value=%016lx", *kpage, pgaddr, 1867 u64 val = *(u64 *)abs_to_virt(
1867 *(u64 *)abs_to_virt( 1868 phys_to_abs(pgaddr));
1868 phys_to_abs(pgaddr))); 1869 ehca_gen_dbg("kpage=%lx chunk_page=%lx "
1870 "value=%016lx",
1871 *kpage, pgaddr, val);
1872 }
1869 prev_pgaddr = pgaddr; 1873 prev_pgaddr = pgaddr;
1870 i++; 1874 i++;
1871 pginfo->kpage_cnt++; 1875 pginfo->kpage_cnt++;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 3eb14a52cbf2..57bef1152cc2 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -550,6 +550,7 @@ static struct ehca_qp *internal_create_qp(
550 spin_lock_init(&my_qp->spinlock_r); 550 spin_lock_init(&my_qp->spinlock_r);
551 my_qp->qp_type = qp_type; 551 my_qp->qp_type = qp_type;
552 my_qp->ext_type = parms.ext_type; 552 my_qp->ext_type = parms.ext_type;
553 my_qp->state = IB_QPS_RESET;
553 554
554 if (init_attr->recv_cq) 555 if (init_attr->recv_cq)
555 my_qp->recv_cq = 556 my_qp->recv_cq =
@@ -965,7 +966,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
965 qp_num, bad_send_wqe_p); 966 qp_num, bad_send_wqe_p);
966 /* convert wqe pointer to vadr */ 967 /* convert wqe pointer to vadr */
967 bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); 968 bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
968 if (ehca_debug_level) 969 if (ehca_debug_level >= 2)
969 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); 970 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
970 squeue = &my_qp->ipz_squeue; 971 squeue = &my_qp->ipz_squeue;
971 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) { 972 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
@@ -978,7 +979,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
978 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); 979 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
979 *bad_wqe_cnt = 0; 980 *bad_wqe_cnt = 0;
980 while (wqe->optype != 0xff && wqe->wqef != 0xff) { 981 while (wqe->optype != 0xff && wqe->wqef != 0xff) {
981 if (ehca_debug_level) 982 if (ehca_debug_level >= 2)
982 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); 983 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
983 wqe->nr_of_data_seg = 0; /* suppress data access */ 984 wqe->nr_of_data_seg = 0; /* suppress data access */
984 wqe->wqef = WQEF_PURGE; /* WQE to be purged */ 985 wqe->wqef = WQEF_PURGE; /* WQE to be purged */
@@ -1450,7 +1451,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1450 /* no support for max_send/recv_sge yet */ 1451 /* no support for max_send/recv_sge yet */
1451 } 1452 }
1452 1453
1453 if (ehca_debug_level) 1454 if (ehca_debug_level >= 2)
1454 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num); 1455 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
1455 1456
1456 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, 1457 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
@@ -1508,6 +1509,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1508 if (attr_mask & IB_QP_QKEY) 1509 if (attr_mask & IB_QP_QKEY)
1509 my_qp->qkey = attr->qkey; 1510 my_qp->qkey = attr->qkey;
1510 1511
1512 my_qp->state = qp_new_state;
1513
1511modify_qp_exit2: 1514modify_qp_exit2:
1512 if (squeue_locked) { /* this means: sqe -> rts */ 1515 if (squeue_locked) { /* this means: sqe -> rts */
1513 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 1516 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1763,7 +1766,7 @@ int ehca_query_qp(struct ib_qp *qp,
1763 if (qp_init_attr) 1766 if (qp_init_attr)
1764 *qp_init_attr = my_qp->init_attr; 1767 *qp_init_attr = my_qp->init_attr;
1765 1768
1766 if (ehca_debug_level) 1769 if (ehca_debug_level >= 2)
1767 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); 1770 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
1768 1771
1769query_qp_exit1: 1772query_qp_exit1:
@@ -1811,7 +1814,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1811 goto modify_srq_exit0; 1814 goto modify_srq_exit0;
1812 } 1815 }
1813 1816
1814 if (ehca_debug_level) 1817 if (ehca_debug_level >= 2)
1815 ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); 1818 ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
1816 1819
1817 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle, 1820 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
@@ -1864,7 +1867,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
1864 srq_attr->srq_limit = EHCA_BMASK_GET( 1867 srq_attr->srq_limit = EHCA_BMASK_GET(
1865 MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); 1868 MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
1866 1869
1867 if (ehca_debug_level) 1870 if (ehca_debug_level >= 2)
1868 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); 1871 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
1869 1872
1870query_srq_exit1: 1873query_srq_exit1:
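
The ehca_qp.c hunks above add a software shadow of the QP state: it starts at IB_QPS_RESET when the QP is created and is updated to the new state after a successful modify, which lets the send path (in ehca_reqs.c below) reject posts on a QP that is not in RTS. A compact sketch of that idea under invented names (demo_qp, demo_modify_qp, demo_post_send):

/*
 * Minimal model of the software QP state shadow: initialize to RESET,
 * remember the new state after a successful modify, check it on send.
 * Types and names are invented for illustration.
 */
#include <errno.h>
#include <stdio.h>

enum demo_qps { DEMO_QPS_RESET, DEMO_QPS_INIT, DEMO_QPS_RTR, DEMO_QPS_RTS };

struct demo_qp { enum demo_qps state; };

static int demo_modify_qp(struct demo_qp *qp, enum demo_qps new_state)
{
        /* ...the hardware modify would happen here... */
        qp->state = new_state;          /* remember the new state on success */
        return 0;
}

static int demo_post_send(struct demo_qp *qp)
{
        if (qp->state != DEMO_QPS_RTS) {        /* reject posts before RTS */
                fprintf(stderr, "QP not in RTS state\n");
                return -EINVAL;
        }
        return 0;       /* ...build and ring the WQE... */
}

int main(void)
{
        struct demo_qp qp = { .state = DEMO_QPS_RESET };

        printf("post before RTS: %d\n", demo_post_send(&qp));  /* -EINVAL */
        demo_modify_qp(&qp, DEMO_QPS_RTS);
        printf("post after RTS:  %d\n", demo_post_send(&qp));  /* 0 */
        return 0;
}
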
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index a20bbf466188..bbe0436f4f75 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
81 recv_wr->sg_list[cnt_ds].length; 81 recv_wr->sg_list[cnt_ds].length;
82 } 82 }
83 83
84 if (ehca_debug_level) { 84 if (ehca_debug_level >= 3) {
85 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", 85 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
86 ipz_rqueue); 86 ipz_rqueue);
87 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); 87 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
281 return -EINVAL; 281 return -EINVAL;
282 } 282 }
283 283
284 if (ehca_debug_level) { 284 if (ehca_debug_level >= 3) {
285 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); 285 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
286 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); 286 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
287 } 287 }
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp,
421 int ret = 0; 421 int ret = 0;
422 unsigned long flags; 422 unsigned long flags;
423 423
424 if (unlikely(my_qp->state != IB_QPS_RTS)) {
425 ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num);
426 return -EINVAL;
427 }
428
424 /* LOCK the QUEUE */ 429 /* LOCK the QUEUE */
425 spin_lock_irqsave(&my_qp->spinlock_s, flags); 430 spin_lock_irqsave(&my_qp->spinlock_s, flags);
426 431
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp,
454 goto post_send_exit0; 459 goto post_send_exit0;
455 } 460 }
456 wqe_cnt++; 461 wqe_cnt++;
457 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
458 my_qp, qp->qp_num, wqe_cnt);
459 } /* eof for cur_send_wr */ 462 } /* eof for cur_send_wr */
460 463
461post_send_exit0: 464post_send_exit0:
462 iosync(); /* serialize GAL register access */ 465 iosync(); /* serialize GAL register access */
463 hipz_update_sqa(my_qp, wqe_cnt); 466 hipz_update_sqa(my_qp, wqe_cnt);
467 if (unlikely(ret || ehca_debug_level >= 2))
468 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
469 my_qp, qp->qp_num, wqe_cnt, ret);
464 my_qp->message_count += wqe_cnt; 470 my_qp->message_count += wqe_cnt;
465 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 471 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
466 return ret; 472 return ret;
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp,
520 goto post_recv_exit0; 526 goto post_recv_exit0;
521 } 527 }
522 wqe_cnt++; 528 wqe_cnt++;
523 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
524 my_qp, my_qp->real_qp_num, wqe_cnt);
525 } /* eof for cur_recv_wr */ 529 } /* eof for cur_recv_wr */
526 530
527post_recv_exit0: 531post_recv_exit0:
528 iosync(); /* serialize GAL register access */ 532 iosync(); /* serialize GAL register access */
529 hipz_update_rqa(my_qp, wqe_cnt); 533 hipz_update_rqa(my_qp, wqe_cnt);
534 if (unlikely(ret || ehca_debug_level >= 2))
535 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
536 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
530 spin_unlock_irqrestore(&my_qp->spinlock_r, flags); 537 spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
531 return ret; 538 return ret;
532} 539}
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
570 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 577 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
571 struct ehca_cqe *cqe; 578 struct ehca_cqe *cqe;
572 struct ehca_qp *my_qp; 579 struct ehca_qp *my_qp;
573 int cqe_count = 0; 580 int cqe_count = 0, is_error;
574 581
575poll_cq_one_read_cqe: 582poll_cq_one_read_cqe:
576 cqe = (struct ehca_cqe *) 583 cqe = (struct ehca_cqe *)
577 ipz_qeit_get_inc_valid(&my_cq->ipz_queue); 584 ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
578 if (!cqe) { 585 if (!cqe) {
579 ret = -EAGAIN; 586 ret = -EAGAIN;
580 ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p " 587 if (ehca_debug_level >= 3)
581 "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret); 588 ehca_dbg(cq->device, "Completion queue is empty "
582 goto poll_cq_one_exit0; 589 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
590 goto poll_cq_one_exit0;
583 } 591 }
584 592
585 /* prevents loads being reordered across this point */ 593 /* prevents loads being reordered across this point */
@@ -609,7 +617,7 @@ poll_cq_one_read_cqe:
609 ehca_dbg(cq->device, 617 ehca_dbg(cq->device,
610 "Got CQE with purged bit qp_num=%x src_qp=%x", 618 "Got CQE with purged bit qp_num=%x src_qp=%x",
611 cqe->local_qp_number, cqe->remote_qp_number); 619 cqe->local_qp_number, cqe->remote_qp_number);
612 if (ehca_debug_level) 620 if (ehca_debug_level >= 2)
613 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", 621 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
614 cqe->local_qp_number, 622 cqe->local_qp_number,
615 cqe->remote_qp_number); 623 cqe->remote_qp_number);
@@ -622,11 +630,13 @@ poll_cq_one_read_cqe:
622 } 630 }
623 } 631 }
624 632
625 /* tracing cqe */ 633 is_error = cqe->status & WC_STATUS_ERROR_BIT;
626 if (unlikely(ehca_debug_level)) { 634
635 /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
636 if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
627 ehca_dbg(cq->device, 637 ehca_dbg(cq->device,
628 "Received COMPLETION ehca_cq=%p cq_num=%x -----", 638 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
629 my_cq, my_cq->cq_number); 639 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
630 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", 640 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
631 my_cq, my_cq->cq_number); 641 my_cq, my_cq->cq_number);
632 ehca_dbg(cq->device, 642 ehca_dbg(cq->device,
@@ -649,8 +659,9 @@ poll_cq_one_read_cqe:
649 /* update also queue adder to throw away this entry!!! */ 659 /* update also queue adder to throw away this entry!!! */
650 goto poll_cq_one_exit0; 660 goto poll_cq_one_exit0;
651 } 661 }
662
652 /* eval ib_wc_status */ 663 /* eval ib_wc_status */
653 if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) { 664 if (unlikely(is_error)) {
654 /* complete with errors */ 665 /* complete with errors */
655 map_ib_wc_status(cqe->status, &wc->status); 666 map_ib_wc_status(cqe->status, &wc->status);
656 wc->vendor_err = wc->status; 667 wc->vendor_err = wc->status;
@@ -671,14 +682,6 @@ poll_cq_one_read_cqe:
671 wc->imm_data = cpu_to_be32(cqe->immediate_data); 682 wc->imm_data = cpu_to_be32(cqe->immediate_data);
672 wc->sl = cqe->service_level; 683 wc->sl = cqe->service_level;
673 684
674 if (unlikely(wc->status != IB_WC_SUCCESS))
675 ehca_dbg(cq->device,
676 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
677 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
678 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
679 cqe->status, cqe->local_qp_number,
680 cqe->remote_qp_number, cqe->work_request_id, cqe);
681
682poll_cq_one_exit0: 685poll_cq_one_exit0:
683 if (cqe_count > 0) 686 if (cqe_count > 0)
684 hipz_update_feca(my_cq, cqe_count); 687 hipz_update_feca(my_cq, cqe_count);
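
The completion-path tracing above is gated so that error CQEs are dumped whenever debugging is enabled at all, while successful CQEs are dumped only at the highest verbosity. A sketch of that compound condition, with invented names:

/*
 * Error completions trace at level >= 1, all completions only at >= 3.
 */
#include <stdio.h>

static int demo_debug_level = 1;

static void demo_trace_cqe(int is_error)
{
        if (demo_debug_level >= 3 || (demo_debug_level && is_error))
                printf("dump %sCQE\n", is_error ? "ERROR " : "");
}

int main(void)
{
        demo_trace_cqe(0);      /* silent at level 1 */
        demo_trace_cqe(1);      /* dumped at level 1 */
        return 0;
}
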
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 1b07f2beafaf..e43ed8f8a0c8 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
211 break; 211 break;
212 212
213 case 1: /* qp rqueue_addr */ 213 case 1: /* qp rqueue_addr */
214 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", 214 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
215 qp->ib_qp.qp_num);
216 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, 215 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
217 &qp->mm_count_rqueue); 216 &qp->mm_count_rqueue);
218 if (unlikely(ret)) { 217 if (unlikely(ret)) {
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
224 break; 223 break;
225 224
226 case 2: /* qp squeue_addr */ 225 case 2: /* qp squeue_addr */
227 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", 226 ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
228 qp->ib_qp.qp_num);
229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, 227 ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
230 &qp->mm_count_squeue); 228 &qp->mm_count_squeue);
231 if (unlikely(ret)) { 229 if (unlikely(ret)) {
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 7029aa653751..5245e13c3a30 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
123 int i, sleep_msecs; 123 int i, sleep_msecs;
124 unsigned long flags = 0; 124 unsigned long flags = 0;
125 125
126 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT, 126 if (unlikely(ehca_debug_level >= 2))
127 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7); 127 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
128 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
128 129
129 for (i = 0; i < 5; i++) { 130 for (i = 0; i < 5; i++) {
130 /* serialize hCalls to work around firmware issue */ 131 /* serialize hCalls to work around firmware issue */
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
148 opcode, ret, arg1, arg2, arg3, 149 opcode, ret, arg1, arg2, arg3,
149 arg4, arg5, arg6, arg7); 150 arg4, arg5, arg6, arg7);
150 else 151 else
151 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret); 152 if (unlikely(ehca_debug_level >= 2))
153 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
152 154
153 return ret; 155 return ret;
154 } 156 }
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode,
172 int i, sleep_msecs; 174 int i, sleep_msecs;
173 unsigned long flags = 0; 175 unsigned long flags = 0;
174 176
175 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode, 177 if (unlikely(ehca_debug_level >= 2))
176 arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); 178 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
179 arg1, arg2, arg3, arg4, arg5,
180 arg6, arg7, arg8, arg9);
177 181
178 for (i = 0; i < 5; i++) { 182 for (i = 0; i < 5; i++) {
179 /* serialize hCalls to work around firmware issue */ 183 /* serialize hCalls to work around firmware issue */
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
201 ret, outs[0], outs[1], outs[2], outs[3], 205 ret, outs[0], outs[1], outs[2], outs[3],
202 outs[4], outs[5], outs[6], outs[7], 206 outs[4], outs[5], outs[6], outs[7],
203 outs[8]); 207 outs[8]);
204 } else 208 } else if (unlikely(ehca_debug_level >= 2))
205 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT, 209 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
206 ret, outs[0], outs[1], outs[2], outs[3], 210 ret, outs[0], outs[1], outs[2], outs[3],
207 outs[4], outs[5], outs[6], outs[7], 211 outs[4], outs[5], outs[6], outs[7],
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
381 r_cb, /* r6 */ 385 r_cb, /* r6 */
382 0, 0, 0, 0); 386 0, 0, 0, 0);
383 387
384 if (ehca_debug_level) 388 if (ehca_debug_level >= 2)
385 ehca_dmp(query_port_response_block, 64, "response_block"); 389 ehca_dmp(query_port_response_block, 64, "response_block");
386 390
387 return ret; 391 return ret;
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
731 u64 ret; 735 u64 ret;
732 u64 outs[PLPAR_HCALL9_BUFSIZE]; 736 u64 outs[PLPAR_HCALL9_BUFSIZE];
733 737
734 ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
735 "vaddr=%lx length=%lx",
736 (u32)PAGE_SIZE, access_ctrl, vaddr, length);
737 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
738 adapter_handle.handle, /* r4 */ 739 adapter_handle.handle, /* r4 */
739 5, /* r5 */ 740 5, /* r5 */
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
758{ 759{
759 u64 ret; 760 u64 ret;
760 761
761 if (unlikely(ehca_debug_level >= 2)) { 762 if (unlikely(ehca_debug_level >= 3)) {
762 if (count > 1) { 763 if (count > 1) {
763 u64 *kpage; 764 u64 *kpage;
764 int i; 765 int i;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 3557e7edc9b6..5e570bb0bb6f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
204 204
205 uar = &to_mucontext(context)->uar; 205 uar = &to_mucontext(context)->uar;
206 } else { 206 } else {
207 err = mlx4_ib_db_alloc(dev, &cq->db, 1); 207 err = mlx4_db_alloc(dev->dev, &cq->db, 1);
208 if (err) 208 if (err)
209 goto err_cq; 209 goto err_cq;
210 210
@@ -250,7 +250,7 @@ err_mtt:
250 250
251err_db: 251err_db:
252 if (!context) 252 if (!context)
253 mlx4_ib_db_free(dev, &cq->db); 253 mlx4_db_free(dev->dev, &cq->db);
254 254
255err_cq: 255err_cq:
256 kfree(cq); 256 kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
435 ib_umem_release(mcq->umem); 435 ib_umem_release(mcq->umem);
436 } else { 436 } else {
437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1); 437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
438 mlx4_ib_db_free(dev, &mcq->db); 438 mlx4_db_free(dev->dev, &mcq->db);
439 } 439 }
440 440
441 kfree(mcq); 441 kfree(mcq);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 1c36087aef14..8e342cc9baec 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -34,124 +34,6 @@
34 34
35#include "mlx4_ib.h" 35#include "mlx4_ib.h"
36 36
37struct mlx4_ib_db_pgdir {
38 struct list_head list;
39 DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
40 DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
41 unsigned long *bits[2];
42 __be32 *db_page;
43 dma_addr_t db_dma;
44};
45
46static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
47{
48 struct mlx4_ib_db_pgdir *pgdir;
49
50 pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
51 if (!pgdir)
52 return NULL;
53
54 bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
55 pgdir->bits[0] = pgdir->order0;
56 pgdir->bits[1] = pgdir->order1;
57 pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
58 PAGE_SIZE, &pgdir->db_dma,
59 GFP_KERNEL);
60 if (!pgdir->db_page) {
61 kfree(pgdir);
62 return NULL;
63 }
64
65 return pgdir;
66}
67
68static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
69 struct mlx4_ib_db *db, int order)
70{
71 int o;
72 int i;
73
74 for (o = order; o <= 1; ++o) {
75 i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
76 if (i < MLX4_IB_DB_PER_PAGE >> o)
77 goto found;
78 }
79
80 return -ENOMEM;
81
82found:
83 clear_bit(i, pgdir->bits[o]);
84
85 i <<= o;
86
87 if (o > order)
88 set_bit(i ^ 1, pgdir->bits[order]);
89
90 db->u.pgdir = pgdir;
91 db->index = i;
92 db->db = pgdir->db_page + db->index;
93 db->dma = pgdir->db_dma + db->index * 4;
94 db->order = order;
95
96 return 0;
97}
98
99int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
100{
101 struct mlx4_ib_db_pgdir *pgdir;
102 int ret = 0;
103
104 mutex_lock(&dev->pgdir_mutex);
105
106 list_for_each_entry(pgdir, &dev->pgdir_list, list)
107 if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
108 goto out;
109
110 pgdir = mlx4_ib_alloc_db_pgdir(dev);
111 if (!pgdir) {
112 ret = -ENOMEM;
113 goto out;
114 }
115
116 list_add(&pgdir->list, &dev->pgdir_list);
117
118 /* This should never fail -- we just allocated an empty page: */
119 WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
120
121out:
122 mutex_unlock(&dev->pgdir_mutex);
123
124 return ret;
125}
126
127void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
128{
129 int o;
130 int i;
131
132 mutex_lock(&dev->pgdir_mutex);
133
134 o = db->order;
135 i = db->index;
136
137 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
138 clear_bit(i ^ 1, db->u.pgdir->order0);
139 ++o;
140 }
141
142 i >>= o;
143 set_bit(i, db->u.pgdir->bits[o]);
144
145 if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
146 dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
147 db->u.pgdir->db_page, db->u.pgdir->db_dma);
148 list_del(&db->u.pgdir->list);
149 kfree(db->u.pgdir);
150 }
151
152 mutex_unlock(&dev->pgdir_mutex);
153}
154
155struct mlx4_ib_user_db_page { 37struct mlx4_ib_user_db_page {
156 struct list_head list; 38 struct list_head list;
157 struct ib_umem *umem; 39 struct ib_umem *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
160}; 42};
161 43
162int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, 44int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
163 struct mlx4_ib_db *db) 45 struct mlx4_db *db)
164{ 46{
165 struct mlx4_ib_user_db_page *page; 47 struct mlx4_ib_user_db_page *page;
166 struct ib_umem_chunk *chunk; 48 struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ out:
202 return err; 84 return err;
203} 85}
204 86
205void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db) 87void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
206{ 88{
207 mutex_lock(&context->db_page_mutex); 89 mutex_lock(&context->db_page_mutex);
208 90
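
The allocator removed here (and re-homed in mlx4_core behind mlx4_db_alloc()/mlx4_db_free()) hands out doorbell records from a shared page using two free bitmaps per page: order 0 for single records and order 1 for aligned pairs; allocating a single record may split a pair, and freeing re-merges buddies. A self-contained userspace model of that bitmap logic, with simplified sizes and invented names:

/*
 * Userspace model of the two-order doorbell allocator.  SLOTS stands in
 * for MLX4_IB_DB_PER_PAGE; real code uses kernel bitmaps, this uses
 * plain byte arrays (1 = free).
 */
#include <stdio.h>
#include <string.h>

#define SLOTS 8

static unsigned char order0[SLOTS];      /* free single slots */
static unsigned char order1[SLOTS / 2];  /* free aligned pairs */

static int alloc_db(int order)
{
        int o, i, n;

        for (o = order; o <= 1; ++o) {
                n = SLOTS >> o;
                for (i = 0; i < n; ++i)
                        if ((o ? order1 : order0)[i])
                                goto found;
        }
        return -1;                       /* page exhausted */
found:
        (o ? order1 : order0)[i] = 0;    /* claim the block we found */
        i <<= o;                         /* index in order-0 units */
        if (o > order)
                order0[i ^ 1] = 1;       /* splitting a pair frees the buddy slot */
        return i;
}

static void free_db(int i, int order)
{
        if (order == 0 && order0[i ^ 1]) {     /* buddy also free: merge */
                order0[i ^ 1] = 0;
                order++;
        }
        (order ? order1 : order0)[i >> order] = 1;
}

int main(void)
{
        int a, b;

        memset(order1, 1, sizeof(order1));     /* whole page starts as free pairs */

        a = alloc_db(0);                       /* splits pair 0: slot 0 used, slot 1 free */
        b = alloc_db(0);                       /* reuses the freed buddy, slot 1 */
        printf("a=%d b=%d\n", a, b);
        free_db(b, 0);
        free_db(a, 0);                         /* merges back into a free pair */
        printf("pair 0 free again: %d\n", order1[0]);
        return 0;
}

Allocating two singles first splits pair 0 and then reuses the freed buddy; freeing both merges the pair back, which is what keeps doorbell pages densely packed.
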
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4d9b5ac42202..4d61e32866c6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
557 goto err_uar; 557 goto err_uar;
558 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); 558 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
559 559
560 INIT_LIST_HEAD(&ibdev->pgdir_list);
561 mutex_init(&ibdev->pgdir_mutex);
562
563 ibdev->dev = dev; 560 ibdev->dev = dev;
564 561
565 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); 562 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9e637323c155..5cf994794d25 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -43,24 +43,6 @@
43#include <linux/mlx4/device.h> 43#include <linux/mlx4/device.h>
44#include <linux/mlx4/doorbell.h> 44#include <linux/mlx4/doorbell.h>
45 45
46enum {
47 MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
48};
49
50struct mlx4_ib_db_pgdir;
51struct mlx4_ib_user_db_page;
52
53struct mlx4_ib_db {
54 __be32 *db;
55 union {
56 struct mlx4_ib_db_pgdir *pgdir;
57 struct mlx4_ib_user_db_page *user_page;
58 } u;
59 dma_addr_t dma;
60 int index;
61 int order;
62};
63
64struct mlx4_ib_ucontext { 46struct mlx4_ib_ucontext {
65 struct ib_ucontext ibucontext; 47 struct ib_ucontext ibucontext;
66 struct mlx4_uar uar; 48 struct mlx4_uar uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
88 struct mlx4_cq mcq; 70 struct mlx4_cq mcq;
89 struct mlx4_ib_cq_buf buf; 71 struct mlx4_ib_cq_buf buf;
90 struct mlx4_ib_cq_resize *resize_buf; 72 struct mlx4_ib_cq_resize *resize_buf;
91 struct mlx4_ib_db db; 73 struct mlx4_db db;
92 spinlock_t lock; 74 spinlock_t lock;
93 struct mutex resize_mutex; 75 struct mutex resize_mutex;
94 struct ib_umem *umem; 76 struct ib_umem *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
127 struct mlx4_qp mqp; 109 struct mlx4_qp mqp;
128 struct mlx4_buf buf; 110 struct mlx4_buf buf;
129 111
130 struct mlx4_ib_db db; 112 struct mlx4_db db;
131 struct mlx4_ib_wq rq; 113 struct mlx4_ib_wq rq;
132 114
133 u32 doorbell_qpn; 115 u32 doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
154 struct ib_srq ibsrq; 136 struct ib_srq ibsrq;
155 struct mlx4_srq msrq; 137 struct mlx4_srq msrq;
156 struct mlx4_buf buf; 138 struct mlx4_buf buf;
157 struct mlx4_ib_db db; 139 struct mlx4_db db;
158 u64 *wrid; 140 u64 *wrid;
159 spinlock_t lock; 141 spinlock_t lock;
160 int head; 142 int head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
175 struct mlx4_dev *dev; 157 struct mlx4_dev *dev;
176 void __iomem *uar_map; 158 void __iomem *uar_map;
177 159
178 struct list_head pgdir_list;
179 struct mutex pgdir_mutex;
180
181 struct mlx4_uar priv_uar; 160 struct mlx4_uar priv_uar;
182 u32 priv_pdn; 161 u32 priv_pdn;
183 MLX4_DECLARE_DOORBELL_LOCK(uar_lock); 162 MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
248 return container_of(ibah, struct mlx4_ib_ah, ibah); 227 return container_of(ibah, struct mlx4_ib_ah, ibah);
249} 228}
250 229
251int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
252void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
253int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, 230int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
254 struct mlx4_ib_db *db); 231 struct mlx4_db *db);
255void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db); 232void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
256 233
257struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc); 234struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
258int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, 235int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b75efae7e449..80ea8b9e7761 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
514 goto err; 514 goto err;
515 515
516 if (!init_attr->srq) { 516 if (!init_attr->srq) {
517 err = mlx4_ib_db_alloc(dev, &qp->db, 0); 517 err = mlx4_db_alloc(dev->dev, &qp->db, 0);
518 if (err) 518 if (err)
519 goto err; 519 goto err;
520 520
@@ -580,7 +580,7 @@ err_buf:
580 580
581err_db: 581err_db:
582 if (!pd->uobject && !init_attr->srq) 582 if (!pd->uobject && !init_attr->srq)
583 mlx4_ib_db_free(dev, &qp->db); 583 mlx4_db_free(dev->dev, &qp->db);
584 584
585err: 585err:
586 return err; 586 return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
666 kfree(qp->rq.wrid); 666 kfree(qp->rq.wrid);
667 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); 667 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
668 if (!qp->ibqp.srq) 668 if (!qp->ibqp.srq)
669 mlx4_ib_db_free(dev, &qp->db); 669 mlx4_db_free(dev->dev, &qp->db);
670 } 670 }
671} 671}
672 672
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index beaa3b06cf58..204619702f9d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
129 if (err) 129 if (err)
130 goto err_mtt; 130 goto err_mtt;
131 } else { 131 } else {
132 err = mlx4_ib_db_alloc(dev, &srq->db, 0); 132 err = mlx4_db_alloc(dev->dev, &srq->db, 0);
133 if (err) 133 if (err)
134 goto err_srq; 134 goto err_srq;
135 135
@@ -200,7 +200,7 @@ err_buf:
200 200
201err_db: 201err_db:
202 if (!pd->uobject) 202 if (!pd->uobject)
203 mlx4_ib_db_free(dev, &srq->db); 203 mlx4_db_free(dev->dev, &srq->db);
204 204
205err_srq: 205err_srq:
206 kfree(srq); 206 kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
267 kfree(msrq->wrid); 267 kfree(msrq->wrid);
268 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, 268 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
269 &msrq->buf); 269 &msrq->buf);
270 mlx4_ib_db_free(dev, &msrq->db); 270 mlx4_db_free(dev->dev, &msrq->db);
271 } 271 }
272 272
273 kfree(msrq); 273 kfree(msrq);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b046262ed638..a4e9269a29bd 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -139,8 +139,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
139 139
140 addr = ntohl(ifa->ifa_address); 140 addr = ntohl(ifa->ifa_address);
141 mask = ntohl(ifa->ifa_mask); 141 mask = ntohl(ifa->ifa_mask);
142 nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n", 142 nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT
143 addr, mask); 143 ", netmask " NIPQUAD_FMT ".\n",
144 HIPQUAD(addr), HIPQUAD(mask));
144 list_for_each_entry(nesdev, &nes_dev_list, list) { 145 list_for_each_entry(nesdev, &nes_dev_list, list) {
145 nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n", 146 nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
146 nesdev, nesdev->netdev[0]->name); 147 nesdev, nesdev->netdev[0]->name);
@@ -353,13 +354,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
353 */ 354 */
354static void nes_print_macaddr(struct net_device *netdev) 355static void nes_print_macaddr(struct net_device *netdev)
355{ 356{
356 nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n", 357 DECLARE_MAC_BUF(mac);
357 netdev->name,
358 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
359 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
360 netdev->irq);
361}
362 358
359 nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
360 netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
361}
363 362
364/** 363/**
365 * nes_interrupt - handle interrupts 364 * nes_interrupt - handle interrupts
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index d0738623bcf3..d940fc27129a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
852 /* get a handle on the hte */ 852 /* get a handle on the hte */
853 hte = &cm_core->connected_nodes; 853 hte = &cm_core->connected_nodes;
854 854
855 nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n", 855 nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n",
856 loc_addr, loc_port, cm_core, hte); 856 HIPQUAD(loc_addr), loc_port, cm_core, hte);
857 857
858 /* walk list and find cm_node associated with this session ID */ 858 /* walk list and find cm_node associated with this session ID */
859 spin_lock_irqsave(&cm_core->ht_lock, flags); 859 spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
902 } 902 }
903 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 903 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
904 904
905 nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n", 905 nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n",
906 dst_addr, dst_port); 906 HIPQUAD(dst_addr), dst_port);
907 907
908 /* no listener */ 908 /* no listener */
909 return NULL; 909 return NULL;
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1054 int arpindex = 0; 1054 int arpindex = 0;
1055 struct nes_device *nesdev; 1055 struct nes_device *nesdev;
1056 struct nes_adapter *nesadapter; 1056 struct nes_adapter *nesadapter;
1057 DECLARE_MAC_BUF(mac);
1057 1058
1058 /* create an hte and cm_node for this instance */ 1059 /* create an hte and cm_node for this instance */
1059 cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); 1060 cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1066 cm_node->loc_port = cm_info->loc_port; 1067 cm_node->loc_port = cm_info->loc_port;
1067 cm_node->rem_port = cm_info->rem_port; 1068 cm_node->rem_port = cm_info->rem_port;
1068 cm_node->send_write0 = send_first; 1069 cm_node->send_write0 = send_first;
1069 nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n", 1070 nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
1070 cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port); 1071 HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
1072 HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
1071 cm_node->listener = listener; 1073 cm_node->listener = listener;
1072 cm_node->netdev = nesvnic->netdev; 1074 cm_node->netdev = nesvnic->netdev;
1073 cm_node->cm_id = cm_info->cm_id; 1075 cm_node->cm_id = cm_info->cm_id;
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1116 1118
1117 /* copy the mac addr to node context */ 1119 /* copy the mac addr to node context */
1118 memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN); 1120 memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
1119 nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x," 1121 nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
1120 " %02x, %02x, %02x, %02x, %02x\n", 1122 print_mac(mac, cm_node->rem_mac));
1121 cm_node->rem_mac[0], cm_node->rem_mac[1],
1122 cm_node->rem_mac[2], cm_node->rem_mac[3],
1123 cm_node->rem_mac[4], cm_node->rem_mac[5]);
1124 1123
1125 add_hte_node(cm_core, cm_node); 1124 add_hte_node(cm_core, cm_node);
1126 atomic_inc(&cm_nodes_created); 1125 atomic_inc(&cm_nodes_created);
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
1850 nfo.rem_addr = ntohl(iph->saddr); 1849 nfo.rem_addr = ntohl(iph->saddr);
1851 nfo.rem_port = ntohs(tcph->source); 1850 nfo.rem_port = ntohs(tcph->source);
1852 1851
1853 nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n", 1852 nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT
1854 iph->daddr, tcph->dest, iph->saddr, tcph->source); 1853 ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n",
1854 NIPQUAD(iph->daddr), tcph->dest,
1855 NIPQUAD(iph->saddr), tcph->source);
1855 1856
1856 /* note: this call is going to increment cm_node ref count */ 1857 /* note: this call is going to increment cm_node ref count */
1857 cm_node = find_node(cm_core, 1858 cm_node = find_node(cm_core,
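
The nes_cm.c messages above switch from printing IPv4 addresses as raw hex to dotted-quad form via NIPQUAD_FMT/HIPQUAD (the addresses involved are kept in host byte order, hence HIPQUAD). A sketch of what that formatting does; the macros below are local stand-ins modeled on the kernel ones of that era, not the kernel definitions:

/*
 * Print a host-order IPv4 address as a dotted quad instead of raw hex.
 */
#include <stdio.h>

#define DEMO_QUAD_FMT "%u.%u.%u.%u"
#define DEMO_HIPQUAD(addr) \
        (((addr) >> 24) & 0xffU), (((addr) >> 16) & 0xffU), \
        (((addr) >> 8) & 0xffU), ((addr) & 0xffU)

int main(void)
{
        unsigned int loc_addr = 0xc0a80001;     /* 192.168.0.1 in host order */
        unsigned short loc_port = 0x12b7;

        printf("old style: %08X:%x\n", loc_addr, loc_port);
        printf("new style: " DEMO_QUAD_FMT ":%x\n", DEMO_HIPQUAD(loc_addr), loc_port);
        return 0;
}
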
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index aa53aab91bf8..08964cc7e98a 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -636,6 +636,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
636 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); 636 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
637 return 0; 637 return 0;
638 } 638 }
639
640 i = 0;
641 while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
642 mdelay(1);
643 if (i >= 10000) {
644 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
645 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
646 return 0;
647 }
639 } 648 }
640 649
641 /* port reset */ 650 /* port reset */
@@ -684,17 +693,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
684 } 693 }
685 } 694 }
686 695
687
688
689 i = 0;
690 while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
691 mdelay(1);
692 if (i >= 10000) {
693 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
694 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
695 return 0;
696 }
697
698 return port_count; 696 return port_count;
699} 697}
700 698
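
The nes_hw.c change above only moves the "internal CPU ready" wait so it runs before the port reset; the wait itself is the usual bounded poll with a timeout. A userspace model of that pattern, with a fake status read standing in for nes_read_indexed():

/*
 * Bounded busy-wait: poll a status register until it reports ready or a
 * retry budget is exhausted.  The status source here is simulated.
 */
#include <stdio.h>

static int fake_cpu_status_reads;

static unsigned int read_cpu_status(void)
{
        /* pretend the firmware reports ready after a few polls */
        return ++fake_cpu_status_reads >= 5 ? 0x80 : 0x00;
}

int main(void)
{
        int i = 0;

        while (read_cpu_status() != 0x80 && i++ < 10000)
                ;                       /* the driver sleeps 1 ms per iteration here */
        if (i >= 10000) {
                fprintf(stderr, "Internal CPU not ready, status = %02X\n",
                        read_cpu_status());
                return 1;
        }
        printf("CPU ready after %d failed polls\n", i);
        return 0;
}
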
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index b7e2844f096b..8f36e231bdf5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -905,7 +905,7 @@ struct nes_hw_qp {
905}; 905};
906 906
907struct nes_hw_cq { 907struct nes_hw_cq {
908 struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */ 908 struct nes_hw_cqe *cq_vbase; /* PCI memory for host rings */
909 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq); 909 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
910 dma_addr_t cq_pbase; /* PCI memory for host rings */ 910 dma_addr_t cq_pbase; /* PCI memory for host rings */
911 u16 cq_head; 911 u16 cq_head;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 01cd0effc492..e5366b013c1a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -787,16 +787,14 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
787 int i; 787 int i;
788 u32 macaddr_low; 788 u32 macaddr_low;
789 u16 macaddr_high; 789 u16 macaddr_high;
790 DECLARE_MAC_BUF(mac);
790 791
791 if (!is_valid_ether_addr(mac_addr->sa_data)) 792 if (!is_valid_ether_addr(mac_addr->sa_data))
792 return -EADDRNOTAVAIL; 793 return -EADDRNOTAVAIL;
793 794
794 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); 795 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
795 printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n", 796 printk(PFX "%s: Address length = %d, Address = %s\n",
796 __func__, netdev->addr_len, 797 __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
797 mac_addr->sa_data[0], mac_addr->sa_data[1],
798 mac_addr->sa_data[2], mac_addr->sa_data[3],
799 mac_addr->sa_data[4], mac_addr->sa_data[5]);
800 macaddr_high = ((u16)netdev->dev_addr[0]) << 8; 798 macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
801 macaddr_high += (u16)netdev->dev_addr[1]; 799 macaddr_high += (u16)netdev->dev_addr[1];
802 macaddr_low = ((u32)netdev->dev_addr[2]) << 24; 800 macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
@@ -878,11 +876,11 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
878 if (mc_nic_index < 0) 876 if (mc_nic_index < 0)
879 mc_nic_index = nesvnic->nic_index; 877 mc_nic_index = nesvnic->nic_index;
880 if (multicast_addr) { 878 if (multicast_addr) {
881 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n", 879 DECLARE_MAC_BUF(mac);
882 multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1], 880 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
883 multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3], 881 print_mac(mac, multicast_addr->dmi_addr),
884 multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5], 882 perfect_filter_register_address+(mc_index * 8),
885 perfect_filter_register_address+(mc_index * 8), mc_nic_index); 883 mc_nic_index);
886 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; 884 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
887 macaddr_high += (u16)multicast_addr->dmi_addr[1]; 885 macaddr_high += (u16)multicast_addr->dmi_addr[1];
888 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; 886 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index f9db07c2717d..c6d5631a6995 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -660,7 +660,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
660 660
661 /* DELETE or RESOLVE */ 661 /* DELETE or RESOLVE */
662 if (arp_index == nesadapter->arp_table_size) { 662 if (arp_index == nesadapter->arp_table_size) {
663 nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n"); 663 nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n",
664 HIPQUAD(ip_addr),
665 action == NES_ARP_RESOLVE ? "resolve" : "delete");
664 return -1; 666 return -1;
665 } 667 }
666 668
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f9a5d4390892..ee74f7c7a6da 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
1976 1976
1977 if (nescq->cq_mem_size) 1977 if (nescq->cq_mem_size)
1978 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, 1978 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
1979 (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); 1979 nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
1980 kfree(nescq); 1980 kfree(nescq);
1981 1981
1982 return ret; 1982 return ret;
@@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3610 while (cqe_count < num_entries) { 3610 while (cqe_count < num_entries) {
3611 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & 3611 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
3612 NES_CQE_VALID) { 3612 NES_CQE_VALID) {
3613 /*
3614 * Make sure we read CQ entry contents *after*
3615 * we've checked the valid bit.
3616 */
3617 rmb();
3618
3613 cqe = nescq->hw_cq.cq_vbase[head]; 3619 cqe = nescq->hw_cq.cq_vbase[head];
3614 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; 3620 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
3615 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); 3621 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
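
The rmb() added above enforces that the CQE body is read only after the valid bit has been seen set, since the adapter writes the body first and flips the valid bit last; without the barrier the CPU could reorder those loads. A sketch of the rule, using __sync_synchronize() as a stand-in for the kernel's rmb():

/*
 * Valid-bit-then-body ordering: never read the entry contents until the
 * valid flag has been observed set.
 */
#include <stdio.h>

struct demo_cqe {
        volatile unsigned int flags;    /* bit 0: valid, written last by "hardware" */
        unsigned int payload;           /* written before the valid bit */
};

static int poll_one(struct demo_cqe *cqe, unsigned int *out)
{
        if (!(cqe->flags & 1))
                return 0;               /* nothing new */

        /* read barrier: don't let the payload load be hoisted above the check */
        __sync_synchronize();

        *out = cqe->payload;
        return 1;
}

int main(void)
{
        struct demo_cqe cqe = { .flags = 0, .payload = 0 };
        unsigned int v;

        cqe.payload = 42;               /* "hardware" fills the entry ... */
        cqe.flags = 1;                  /* ... then sets the valid bit */

        if (poll_one(&cqe, &v))
                printf("completion payload=%u\n", v);
        return 0;
}
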
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 73b2b176ad0e..f1f142dc64b1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,11 +56,11 @@
56/* constants */ 56/* constants */
57 57
58enum { 58enum {
59 IPOIB_PACKET_SIZE = 2048,
60 IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
61
62 IPOIB_ENCAP_LEN = 4, 59 IPOIB_ENCAP_LEN = 4,
63 60
61 IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
62 IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
63
64 IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ 64 IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
65 IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, 65 IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
66 IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE, 66 IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -139,7 +139,7 @@ struct ipoib_mcast {
139 139
140struct ipoib_rx_buf { 140struct ipoib_rx_buf {
141 struct sk_buff *skb; 141 struct sk_buff *skb;
142 u64 mapping; 142 u64 mapping[IPOIB_UD_RX_SG];
143}; 143};
144 144
145struct ipoib_tx_buf { 145struct ipoib_tx_buf {
@@ -294,6 +294,7 @@ struct ipoib_dev_priv {
294 294
295 unsigned int admin_mtu; 295 unsigned int admin_mtu;
296 unsigned int mcast_mtu; 296 unsigned int mcast_mtu;
297 unsigned int max_ib_mtu;
297 298
298 struct ipoib_rx_buf *rx_ring; 299 struct ipoib_rx_buf *rx_ring;
299 300
@@ -305,6 +306,9 @@ struct ipoib_dev_priv {
305 struct ib_send_wr tx_wr; 306 struct ib_send_wr tx_wr;
306 unsigned tx_outstanding; 307 unsigned tx_outstanding;
307 308
309 struct ib_recv_wr rx_wr;
310 struct ib_sge rx_sge[IPOIB_UD_RX_SG];
311
308 struct ib_wc ibwc[IPOIB_NUM_WC]; 312 struct ib_wc ibwc[IPOIB_NUM_WC];
309 313
310 struct list_head dead_ahs; 314 struct list_head dead_ahs;
@@ -366,6 +370,14 @@ struct ipoib_neigh {
366 struct list_head list; 370 struct list_head list;
367}; 371};
368 372
373#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
374#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
375
376static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
377{
378 return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
379}
380
369/* 381/*
370 * We stash a pointer to our private neighbour information after our 382 * We stash a pointer to our private neighbour information after our
371 * hardware address in neigh->ha. The ALIGN() expression here makes 383 * hardware address in neigh->ha. The ALIGN() expression here makes
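
The new sizing helpers make the UD receive layout depend on the port's IB MTU: the buffer is GRH plus MTU bytes, and a second scatter/gather fragment is used only when that no longer fits in one page. A quick numeric check of the macros above, assuming a 4096-byte page:

/*
 * Mirror of IPOIB_UD_MTU / IPOIB_UD_BUF_SIZE / ipoib_ud_need_sg with
 * local constants; PAGE_SIZE is assumed to be 4096 for the example.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE   4096
#define DEMO_GRH_BYTES   40
#define DEMO_ENCAP_LEN   4

#define DEMO_UD_MTU(ib_mtu)      ((ib_mtu) - DEMO_ENCAP_LEN)
#define DEMO_UD_BUF_SIZE(ib_mtu) ((ib_mtu) + DEMO_GRH_BYTES)

static int demo_ud_need_sg(unsigned int ib_mtu)
{
        return DEMO_UD_BUF_SIZE(ib_mtu) > DEMO_PAGE_SIZE;
}

int main(void)
{
        unsigned int mtus[] = { 2048, 4096 };
        unsigned int i;

        for (i = 0; i < 2; i++)
                printf("ib_mtu=%u -> netdev mtu=%u, buf=%u bytes, sg=%s\n",
                       mtus[i], DEMO_UD_MTU(mtus[i]), DEMO_UD_BUF_SIZE(mtus[i]),
                       demo_ud_need_sg(mtus[i]) ? "yes" : "no");
        return 0;
}
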
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0205eb7c1bd3..7cf1fa7074ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref)
89 spin_unlock_irqrestore(&priv->lock, flags); 89 spin_unlock_irqrestore(&priv->lock, flags);
90} 90}
91 91
92static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
93 u64 mapping[IPOIB_UD_RX_SG])
94{
95 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
96 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
97 DMA_FROM_DEVICE);
98 ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
99 DMA_FROM_DEVICE);
100 } else
101 ib_dma_unmap_single(priv->ca, mapping[0],
102 IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
103 DMA_FROM_DEVICE);
104}
105
106static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
107 struct sk_buff *skb,
108 unsigned int length)
109{
110 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
111 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
112 unsigned int size;
113 /*
 114	 * Only two buffers are needed for max_payload = 4K;
 115	 * the first buffer's size is IPOIB_UD_HEAD_SIZE.
116 */
117 skb->tail += IPOIB_UD_HEAD_SIZE;
118 skb->len += length;
119
120 size = length - IPOIB_UD_HEAD_SIZE;
121
122 frag->size = size;
123 skb->data_len += size;
124 skb->truesize += size;
125 } else
126 skb_put(skb, length);
127
128}
129
92static int ipoib_ib_post_receive(struct net_device *dev, int id) 130static int ipoib_ib_post_receive(struct net_device *dev, int id)
93{ 131{
94 struct ipoib_dev_priv *priv = netdev_priv(dev); 132 struct ipoib_dev_priv *priv = netdev_priv(dev);
95 struct ib_sge list;
96 struct ib_recv_wr param;
97 struct ib_recv_wr *bad_wr; 133 struct ib_recv_wr *bad_wr;
98 int ret; 134 int ret;
99 135
100 list.addr = priv->rx_ring[id].mapping; 136 priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
101 list.length = IPOIB_BUF_SIZE; 137 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
102 list.lkey = priv->mr->lkey; 138 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
103 139
104 param.next = NULL;
105 param.wr_id = id | IPOIB_OP_RECV;
106 param.sg_list = &list;
107 param.num_sge = 1;
108 140
109 ret = ib_post_recv(priv->qp, &param, &bad_wr); 141 ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
110 if (unlikely(ret)) { 142 if (unlikely(ret)) {
111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); 143 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
112 ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, 144 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
113 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
114 dev_kfree_skb_any(priv->rx_ring[id].skb); 145 dev_kfree_skb_any(priv->rx_ring[id].skb);
115 priv->rx_ring[id].skb = NULL; 146 priv->rx_ring[id].skb = NULL;
116 } 147 }
@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
118 return ret; 149 return ret;
119} 150}
120 151
121static int ipoib_alloc_rx_skb(struct net_device *dev, int id) 152static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
122{ 153{
123 struct ipoib_dev_priv *priv = netdev_priv(dev); 154 struct ipoib_dev_priv *priv = netdev_priv(dev);
124 struct sk_buff *skb; 155 struct sk_buff *skb;
125 u64 addr; 156 int buf_size;
157 u64 *mapping;
126 158
127 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); 159 if (ipoib_ud_need_sg(priv->max_ib_mtu))
128 if (!skb) 160 buf_size = IPOIB_UD_HEAD_SIZE;
129 return -ENOMEM; 161 else
162 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
163
164 skb = dev_alloc_skb(buf_size + 4);
165 if (unlikely(!skb))
166 return NULL;
130 167
131 /* 168 /*
132 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte 169 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
135 */ 172 */
136 skb_reserve(skb, 4); 173 skb_reserve(skb, 4);
137 174
138 addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE, 175 mapping = priv->rx_ring[id].mapping;
139 DMA_FROM_DEVICE); 176 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
140 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { 177 DMA_FROM_DEVICE);
141 dev_kfree_skb_any(skb); 178 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
142 return -EIO; 179 goto error;
180
181 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
182 struct page *page = alloc_page(GFP_ATOMIC);
183 if (!page)
184 goto partial_error;
185 skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
186 mapping[1] =
187 ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
188 0, PAGE_SIZE, DMA_FROM_DEVICE);
189 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
190 goto partial_error;
143 } 191 }
144 192
145 priv->rx_ring[id].skb = skb; 193 priv->rx_ring[id].skb = skb;
146 priv->rx_ring[id].mapping = addr; 194 return skb;
147 195
148 return 0; 196partial_error:
197 ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
198error:
199 dev_kfree_skb_any(skb);
200 return NULL;
149} 201}
150 202
151static int ipoib_ib_post_receives(struct net_device *dev) 203static int ipoib_ib_post_receives(struct net_device *dev)
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
154 int i; 206 int i;
155 207
156 for (i = 0; i < ipoib_recvq_size; ++i) { 208 for (i = 0; i < ipoib_recvq_size; ++i) {
157 if (ipoib_alloc_rx_skb(dev, i)) { 209 if (!ipoib_alloc_rx_skb(dev, i)) {
158 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); 210 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
159 return -ENOMEM; 211 return -ENOMEM;
160 } 212 }
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
172 struct ipoib_dev_priv *priv = netdev_priv(dev); 224 struct ipoib_dev_priv *priv = netdev_priv(dev);
173 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; 225 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
174 struct sk_buff *skb; 226 struct sk_buff *skb;
175 u64 addr; 227 u64 mapping[IPOIB_UD_RX_SG];
176 228
177 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", 229 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
178 wr_id, wc->status); 230 wr_id, wc->status);
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
184 } 236 }
185 237
186 skb = priv->rx_ring[wr_id].skb; 238 skb = priv->rx_ring[wr_id].skb;
187 addr = priv->rx_ring[wr_id].mapping;
188 239
189 if (unlikely(wc->status != IB_WC_SUCCESS)) { 240 if (unlikely(wc->status != IB_WC_SUCCESS)) {
190 if (wc->status != IB_WC_WR_FLUSH_ERR) 241 if (wc->status != IB_WC_WR_FLUSH_ERR)
191 ipoib_warn(priv, "failed recv event " 242 ipoib_warn(priv, "failed recv event "
192 "(status=%d, wrid=%d vend_err %x)\n", 243 "(status=%d, wrid=%d vend_err %x)\n",
193 wc->status, wr_id, wc->vendor_err); 244 wc->status, wr_id, wc->vendor_err);
194 ib_dma_unmap_single(priv->ca, addr, 245 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
195 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
196 dev_kfree_skb_any(skb); 246 dev_kfree_skb_any(skb);
197 priv->rx_ring[wr_id].skb = NULL; 247 priv->rx_ring[wr_id].skb = NULL;
198 return; 248 return;
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
205 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) 255 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
206 goto repost; 256 goto repost;
207 257
258 memcpy(mapping, priv->rx_ring[wr_id].mapping,
259 IPOIB_UD_RX_SG * sizeof *mapping);
260
208 /* 261 /*
209 * If we can't allocate a new RX buffer, dump 262 * If we can't allocate a new RX buffer, dump
210 * this packet and reuse the old buffer. 263 * this packet and reuse the old buffer.
211 */ 264 */
212 if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { 265 if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
213 ++dev->stats.rx_dropped; 266 ++dev->stats.rx_dropped;
214 goto repost; 267 goto repost;
215 } 268 }
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
217 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 270 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
218 wc->byte_len, wc->slid); 271 wc->byte_len, wc->slid);
219 272
220 ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 273 ipoib_ud_dma_unmap_rx(priv, mapping);
274 ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
221 275
222 skb_put(skb, wc->byte_len);
223 skb_pull(skb, IB_GRH_BYTES); 276 skb_pull(skb, IB_GRH_BYTES);
224 277
225 skb->protocol = ((struct ipoib_header *) skb->data)->proto; 278 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
733 rx_req = &priv->rx_ring[i]; 786 rx_req = &priv->rx_ring[i];
734 if (!rx_req->skb) 787 if (!rx_req->skb)
735 continue; 788 continue;
736 ib_dma_unmap_single(priv->ca, 789 ipoib_ud_dma_unmap_rx(priv,
737 rx_req->mapping, 790 priv->rx_ring[i].mapping);
738 IPOIB_BUF_SIZE,
739 DMA_FROM_DEVICE);
740 dev_kfree_skb_any(rx_req->skb); 791 dev_kfree_skb_any(rx_req->skb);
741 rx_req->skb = NULL; 792 rx_req->skb = NULL;
742 } 793 }
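The completion handler above copies the per-slot DMA addresses into a local mapping[] before calling ipoib_alloc_rx_skb(), because a successful allocation overwrites priv->rx_ring[wr_id].mapping in place; the old buffer is then unmapped from the saved copy. The helper ipoib_ud_dma_unmap_rx() itself is defined elsewhere in this patch and is not shown in these hunks; the sketch below is only a plausible shape for it, assuming the two-element layout (skb head plus one page fragment) that ipoib_alloc_rx_skb() sets up and the IPOIB_UD_* helpers used in the other hunks.

static void ipoib_ud_dma_unmap_rx_sketch(struct ipoib_dev_priv *priv,
					 u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		/* skb head in mapping[0], one page fragment in mapping[1] */
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1],
				  PAGE_SIZE, DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}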
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bd07f02cf02b..7a4ed9d3d844 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
195 return 0; 195 return 0;
196 } 196 }
197 197
198 if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) 198 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
199 return -EINVAL; 199 return -EINVAL;
200 200
201 priv->admin_mtu = new_mtu; 201 priv->admin_mtu = new_mtu;
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev)
971 NETIF_F_LLTX | 971 NETIF_F_LLTX |
972 NETIF_F_HIGHDMA); 972 NETIF_F_HIGHDMA);
973 973
974 /* MTU will be reset when mcast join happens */
975 dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
976 priv->mcast_mtu = priv->admin_mtu = dev->mtu;
977
978 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 974 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
979 975
980 netif_carrier_off(dev); 976 netif_carrier_off(dev);
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format,
1107{ 1103{
1108 struct ipoib_dev_priv *priv; 1104 struct ipoib_dev_priv *priv;
1109 struct ib_device_attr *device_attr; 1105 struct ib_device_attr *device_attr;
1106 struct ib_port_attr attr;
1110 int result = -ENOMEM; 1107 int result = -ENOMEM;
1111 1108
1112 priv = ipoib_intf_alloc(format); 1109 priv = ipoib_intf_alloc(format);
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format,
1115 1112
1116 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1113 SET_NETDEV_DEV(priv->dev, hca->dma_device);
1117 1114
1115 if (!ib_query_port(hca, port, &attr))
1116 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
1117 else {
1118 printk(KERN_WARNING "%s: ib_query_port %d failed\n",
1119 hca->name, port);
1120 goto device_init_failed;
1121 }
1122
1123 /* MTU will be reset when mcast join happens */
1124 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
1125 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
1126
1118 result = ib_query_pkey(hca, port, 0, &priv->pkey); 1127 result = ib_query_pkey(hca, port, 0, &priv->pkey);
1119 if (result) { 1128 if (result) {
1120 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", 1129 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
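The new MTU checks above lean on IPOIB_UD_MTU() and IPOIB_UD_BUF_SIZE(), which are defined in ipoib.h elsewhere in this patch and not visible in these hunks. The definitions below are only a sketch consistent with how the macros are used here (the UD MTU is the IB MTU less the 4-byte IPoIB encapsulation header, and the receive buffer must also cover the 40-byte GRH that precedes the payload); the real header may differ.

#define IPOIB_ENCAP_LEN			4	/* existing constant, shown for context */

#define IPOIB_UD_MTU(ib_mtu)		((ib_mtu) - IPOIB_ENCAP_LEN)
#define IPOIB_UD_BUF_SIZE(ib_mtu)	((ib_mtu) + IB_GRH_BYTES)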
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 31a53c5bcb13..d00a2c174aee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
567 return; 567 return;
568 } 568 }
569 569
570 priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) - 570 priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
571 IPOIB_ENCAP_LEN;
572 571
573 if (!ipoib_cm_admin_enabled(dev)) 572 if (!ipoib_cm_admin_enabled(dev))
574 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 573 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 8a20e3742c43..07c03f178a49 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
150 .max_send_wr = ipoib_sendq_size, 150 .max_send_wr = ipoib_sendq_size,
151 .max_recv_wr = ipoib_recvq_size, 151 .max_recv_wr = ipoib_recvq_size,
152 .max_send_sge = 1, 152 .max_send_sge = 1,
153 .max_recv_sge = 1 153 .max_recv_sge = IPOIB_UD_RX_SG
154 }, 154 },
155 .sq_sig_type = IB_SIGNAL_ALL_WR, 155 .sq_sig_type = IB_SIGNAL_ALL_WR,
156 .qp_type = IB_QPT_UD 156 .qp_type = IB_QPT_UD
@@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
215 priv->tx_wr.sg_list = priv->tx_sge; 215 priv->tx_wr.sg_list = priv->tx_sge;
216 priv->tx_wr.send_flags = IB_SEND_SIGNALED; 216 priv->tx_wr.send_flags = IB_SEND_SIGNALED;
217 217
218 priv->rx_sge[0].lkey = priv->mr->lkey;
219 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
220 priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
221 priv->rx_sge[1].length = PAGE_SIZE;
222 priv->rx_sge[1].lkey = priv->mr->lkey;
223 priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
224 } else {
225 priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
226 priv->rx_wr.num_sge = 1;
227 }
228 priv->rx_wr.next = NULL;
229 priv->rx_wr.sg_list = priv->rx_sge;
230
218 return 0; 231 return 0;
219 232
220out_free_cq: 233out_free_cq:
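With the template work request set up above, only the per-slot addresses change at post time. A hedged sketch of how a receive might then be posted; the actual helper lives in ipoib_ib.c and may differ in detail:

static int ipoib_post_receive_sketch(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}
	return ret;
}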
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 293f5b892e3f..431fdeaa2dc4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
89 goto err; 89 goto err;
90 } 90 }
91 91
92 priv->max_ib_mtu = ppriv->max_ib_mtu;
92 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); 93 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
93 94
94 priv->pkey = pkey; 95 priv->pkey = pkey;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 4b07bdadb81e..b29e3affb805 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -444,6 +444,23 @@ exit:
444 __FUNCTION__, retval); 444 __FUNCTION__, retval);
445} 445}
446 446
447static void xpad_bulk_out(struct urb *urb)
448{
449 switch (urb->status) {
450 case 0:
451 /* success */
452 break;
453 case -ECONNRESET:
454 case -ENOENT:
455 case -ESHUTDOWN:
456 /* this urb is terminated, clean up */
457 dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
458 break;
459 default:
460 dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
461 }
462}
463
447#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS) 464#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
448static void xpad_irq_out(struct urb *urb) 465static void xpad_irq_out(struct urb *urb)
449{ 466{
@@ -475,23 +492,6 @@ exit:
475 __FUNCTION__, retval); 492 __FUNCTION__, retval);
476} 493}
477 494
478static void xpad_bulk_out(struct urb *urb)
479{
480 switch (urb->status) {
481 case 0:
482 /* success */
483 break;
484 case -ECONNRESET:
485 case -ENOENT:
486 case -ESHUTDOWN:
487 /* this urb is terminated, clean up */
488 dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
489 break;
490 default:
491 dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
492 }
493}
494
495static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) 495static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
496{ 496{
497 struct usb_endpoint_descriptor *ep_irq_out; 497 struct usb_endpoint_descriptor *ep_irq_out;
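The hunks above only move xpad_bulk_out() ahead of the LED/force-feedback #ifdef block so it is in scope for all configurations; its body is unchanged. For context, a completion handler like this is wired up when the bulk OUT URB is filled and submitted. The sketch below is illustrative only; the device, endpoint and buffer names are placeholders, not taken from this driver.

#include <linux/usb.h>

static int submit_bulk_out_example(struct usb_device *udev, int ep,
				   void *buf, int len, struct urb *urb)
{
	/* completion (xpad_bulk_out) runs in interrupt context */
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
			  buf, len, xpad_bulk_out, NULL);
	return usb_submit_urb(urb, GFP_ATOMIC);
}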
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index f972ff377b63..cc9f27514aef 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -114,8 +114,8 @@ static int emumousebtn_input_register(void)
114 if (!emumousebtn) 114 if (!emumousebtn)
115 return -ENOMEM; 115 return -ENOMEM;
116 116
117 lockdep_set_class(emumousebtn->event_lock, &emumousebtn_event_class); 117 lockdep_set_class(&emumousebtn->event_lock, &emumousebtn_event_class);
118 lockdep_set_class(emumousebtn->mutex, &emumousebtn_mutex_class); 118 lockdep_set_class(&emumousebtn->mutex, &emumousebtn_mutex_class);
119 119
120 emumousebtn->name = "Macintosh mouse button emulation"; 120 emumousebtn->name = "Macintosh mouse button emulation";
121 emumousebtn->id.bustype = BUS_ADB; 121 emumousebtn->id.bustype = BUS_ADB;
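The fix above adds the missing address-of operators: lockdep_set_class() expects a pointer to the lock whose class is being overridden, together with a static key object. A minimal sketch of the general pattern, not tied to the input subsystem:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key my_lock_key;	/* one key per distinct lock class */
static DEFINE_SPINLOCK(my_lock);

static void assign_lock_class(void)
{
	/* pass the lock's address, not the lock itself */
	lockdep_set_class(&my_lock, &my_lock_key);
}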
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 6477fc66cc23..346223856f59 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -299,7 +299,7 @@ static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap)
299} 299}
300 300
301/* STK7700-PH: Digital/Analog Hybrid Tuner, e.h. Cinergy HT USB HE */ 301/* STK7700-PH: Digital/Analog Hybrid Tuner, e.h. Cinergy HT USB HE */
302struct dibx000_agc_config xc3028_agc_config = { 302static struct dibx000_agc_config xc3028_agc_config = {
303 BAND_VHF | BAND_UHF, /* band_caps */ 303 BAND_VHF | BAND_UHF, /* band_caps */
304 304
305 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0, 305 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0,
@@ -342,7 +342,7 @@ struct dibx000_agc_config xc3028_agc_config = {
342}; 342};
343 343
344/* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */ 344/* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */
345struct dibx000_bandwidth_config xc3028_bw_config = { 345static struct dibx000_bandwidth_config xc3028_bw_config = {
346 60000, 30000, /* internal, sampling */ 346 60000, 30000, /* internal, sampling */
347 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */ 347 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
348 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, 348 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc,
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 68fab616f55d..f5fceb3cdb3c 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -307,6 +307,14 @@ config DVB_AU8522
307 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want 307 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
308 to support this frontend. 308 to support this frontend.
309 309
310config DVB_S5H1411
311 tristate "Samsung S5H1411 based"
312 depends on DVB_CORE && I2C
313 default m if DVB_FE_CUSTOMISE
314 help
 315 An ATSC 8VSB and QAM64/256 demodulator module. Say Y when you want
316 to support this frontend.
317
310comment "Tuners/PLL support" 318comment "Tuners/PLL support"
311 depends on DVB_CORE 319 depends on DVB_CORE
312 320
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 2f873fc0f649..9747c73dc826 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_DVB_TUNER_XC5000) += xc5000.o
55obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o 55obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
56obj-$(CONFIG_DVB_AU8522) += au8522.o 56obj-$(CONFIG_DVB_AU8522) += au8522.o
57obj-$(CONFIG_DVB_TDA10048) += tda10048.o 57obj-$(CONFIG_DVB_TDA10048) += tda10048.o
58obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
diff --git a/drivers/media/dvb/frontends/mt312.h b/drivers/media/dvb/frontends/mt312.h
index 96338f0c4dd4..de796eab3911 100644
--- a/drivers/media/dvb/frontends/mt312.h
+++ b/drivers/media/dvb/frontends/mt312.h
@@ -33,7 +33,7 @@ struct mt312_config {
33 u8 demod_address; 33 u8 demod_address;
34 34
35 /* inverted voltage setting */ 35 /* inverted voltage setting */
36 int voltage_inverted:1; 36 unsigned int voltage_inverted:1;
37}; 37};
38 38
39#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE)) 39#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE))
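The mt312.h change matters because a one-bit signed bitfield can only hold 0 and -1 (with GCC, which the kernel targets, a plain int bitfield is signed), so code that tests the flag against 1 silently never matches. A standalone demonstration, not part of the patch:

#include <stdio.h>

struct flags {
	int          s:1;	/* signed: stores 0 or -1 */
	unsigned int u:1;	/* unsigned: stores 0 or 1 */
};

int main(void)
{
	struct flags f = { .s = 1, .u = 1 };

	printf("signed   field == 1? %s (value %d)\n",
	       f.s == 1 ? "yes" : "no", f.s);
	printf("unsigned field == 1? %s (value %u)\n",
	       f.u == 1 ? "yes" : "no", f.u);
	return 0;
}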
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
new file mode 100644
index 000000000000..eb5bfc99d4e9
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -0,0 +1,888 @@
1/*
2 Samsung S5H1411 VSB/QAM demodulator driver
3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
20*/
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/string.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include "dvb_frontend.h"
29#include "dvb-pll.h"
30#include "s5h1411.h"
31
32struct s5h1411_state {
33
34 struct i2c_adapter *i2c;
35
36 /* configuration settings */
37 const struct s5h1411_config *config;
38
39 struct dvb_frontend frontend;
40
41 fe_modulation_t current_modulation;
42
43 u32 current_frequency;
44 int if_freq;
45
46 u8 inversion;
47};
48
49static int debug;
50
51#define dprintk(arg...) do { \
52 if (debug) \
53 printk(arg); \
54 } while (0)
55
56/* Register values to initialise the demod, defaults to VSB */
57static struct init_tab {
58 u8 addr;
59 u8 reg;
60 u16 data;
61} init_tab[] = {
62 { S5H1411_I2C_TOP_ADDR, 0x00, 0x0071, },
63 { S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
64 { S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
65 { S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
66 { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342a, },
67 { S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
68 { S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
69 { S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
70 { S5H1411_I2C_TOP_ADDR, 0x27, 0x0f04, },
71 { S5H1411_I2C_TOP_ADDR, 0x28, 0x070f, },
72 { S5H1411_I2C_TOP_ADDR, 0x29, 0x2820, },
73 { S5H1411_I2C_TOP_ADDR, 0x2a, 0x102e, },
74 { S5H1411_I2C_TOP_ADDR, 0x2b, 0x0220, },
75 { S5H1411_I2C_TOP_ADDR, 0x2e, 0x0d0e, },
76 { S5H1411_I2C_TOP_ADDR, 0x2f, 0x1013, },
77 { S5H1411_I2C_TOP_ADDR, 0x31, 0x171b, },
78 { S5H1411_I2C_TOP_ADDR, 0x32, 0x0e0f, },
79 { S5H1411_I2C_TOP_ADDR, 0x33, 0x0f10, },
80 { S5H1411_I2C_TOP_ADDR, 0x34, 0x170e, },
81 { S5H1411_I2C_TOP_ADDR, 0x35, 0x4b10, },
82 { S5H1411_I2C_TOP_ADDR, 0x36, 0x0f17, },
83 { S5H1411_I2C_TOP_ADDR, 0x3c, 0x1577, },
84 { S5H1411_I2C_TOP_ADDR, 0x3d, 0x081a, },
85 { S5H1411_I2C_TOP_ADDR, 0x3e, 0x77ee, },
86 { S5H1411_I2C_TOP_ADDR, 0x40, 0x1e09, },
87 { S5H1411_I2C_TOP_ADDR, 0x41, 0x0f0c, },
88 { S5H1411_I2C_TOP_ADDR, 0x42, 0x1f10, },
89 { S5H1411_I2C_TOP_ADDR, 0x4d, 0x0509, },
90 { S5H1411_I2C_TOP_ADDR, 0x4e, 0x0a00, },
91 { S5H1411_I2C_TOP_ADDR, 0x50, 0x0000, },
92 { S5H1411_I2C_TOP_ADDR, 0x5b, 0x0000, },
93 { S5H1411_I2C_TOP_ADDR, 0x5c, 0x0008, },
94 { S5H1411_I2C_TOP_ADDR, 0x57, 0x1101, },
95 { S5H1411_I2C_TOP_ADDR, 0x65, 0x007c, },
96 { S5H1411_I2C_TOP_ADDR, 0x68, 0x0512, },
97 { S5H1411_I2C_TOP_ADDR, 0x69, 0x0258, },
98 { S5H1411_I2C_TOP_ADDR, 0x70, 0x0004, },
99 { S5H1411_I2C_TOP_ADDR, 0x71, 0x0007, },
100 { S5H1411_I2C_TOP_ADDR, 0x76, 0x00a9, },
101 { S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
102 { S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
103 { S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
104 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xafbb, },
105 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
106 { S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
107 { S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
108 { S5H1411_I2C_TOP_ADDR, 0xb8, 0x003f, },
109 { S5H1411_I2C_TOP_ADDR, 0xb9, 0x2700, },
110 { S5H1411_I2C_TOP_ADDR, 0xba, 0xfac8, },
111 { S5H1411_I2C_TOP_ADDR, 0xbe, 0x1003, },
112 { S5H1411_I2C_TOP_ADDR, 0xbf, 0x103f, },
113 { S5H1411_I2C_TOP_ADDR, 0xce, 0x2000, },
114 { S5H1411_I2C_TOP_ADDR, 0xcf, 0x0800, },
115 { S5H1411_I2C_TOP_ADDR, 0xd0, 0x0800, },
116 { S5H1411_I2C_TOP_ADDR, 0xd1, 0x0400, },
117 { S5H1411_I2C_TOP_ADDR, 0xd2, 0x0800, },
118 { S5H1411_I2C_TOP_ADDR, 0xd3, 0x2000, },
119 { S5H1411_I2C_TOP_ADDR, 0xd4, 0x3000, },
120 { S5H1411_I2C_TOP_ADDR, 0xdb, 0x4a9b, },
121 { S5H1411_I2C_TOP_ADDR, 0xdc, 0x1000, },
122 { S5H1411_I2C_TOP_ADDR, 0xde, 0x0001, },
123 { S5H1411_I2C_TOP_ADDR, 0xdf, 0x0000, },
124 { S5H1411_I2C_TOP_ADDR, 0xe3, 0x0301, },
125 { S5H1411_I2C_QAM_ADDR, 0xf3, 0x0000, },
126 { S5H1411_I2C_QAM_ADDR, 0xf3, 0x0001, },
127 { S5H1411_I2C_QAM_ADDR, 0x08, 0x0600, },
128 { S5H1411_I2C_QAM_ADDR, 0x18, 0x4201, },
129 { S5H1411_I2C_QAM_ADDR, 0x1e, 0x6476, },
130 { S5H1411_I2C_QAM_ADDR, 0x21, 0x0830, },
131 { S5H1411_I2C_QAM_ADDR, 0x0c, 0x5679, },
132 { S5H1411_I2C_QAM_ADDR, 0x0d, 0x579b, },
133 { S5H1411_I2C_QAM_ADDR, 0x24, 0x0102, },
134 { S5H1411_I2C_QAM_ADDR, 0x31, 0x7488, },
135 { S5H1411_I2C_QAM_ADDR, 0x32, 0x0a08, },
136 { S5H1411_I2C_QAM_ADDR, 0x3d, 0x8689, },
137 { S5H1411_I2C_QAM_ADDR, 0x49, 0x0048, },
138 { S5H1411_I2C_QAM_ADDR, 0x57, 0x2012, },
139 { S5H1411_I2C_QAM_ADDR, 0x5d, 0x7676, },
140 { S5H1411_I2C_QAM_ADDR, 0x04, 0x0400, },
141 { S5H1411_I2C_QAM_ADDR, 0x58, 0x00c0, },
142 { S5H1411_I2C_QAM_ADDR, 0x5b, 0x0100, },
143};
144
145/* VSB SNR lookup table */
146static struct vsb_snr_tab {
147 u16 val;
148 u16 data;
149} vsb_snr_tab[] = {
150 { 0x39f, 300, },
151 { 0x39b, 295, },
152 { 0x397, 290, },
153 { 0x394, 285, },
154 { 0x38f, 280, },
155 { 0x38b, 275, },
156 { 0x387, 270, },
157 { 0x382, 265, },
158 { 0x37d, 260, },
159 { 0x377, 255, },
160 { 0x370, 250, },
161 { 0x36a, 245, },
162 { 0x364, 240, },
163 { 0x35b, 235, },
164 { 0x353, 230, },
165 { 0x349, 225, },
 166 { 0x340, 220, },
167 { 0x337, 215, },
168 { 0x327, 210, },
169 { 0x31b, 205, },
170 { 0x310, 200, },
171 { 0x302, 195, },
172 { 0x2f3, 190, },
173 { 0x2e4, 185, },
174 { 0x2d7, 180, },
175 { 0x2cd, 175, },
176 { 0x2bb, 170, },
177 { 0x2a9, 165, },
178 { 0x29e, 160, },
179 { 0x284, 155, },
180 { 0x27a, 150, },
181 { 0x260, 145, },
182 { 0x23a, 140, },
183 { 0x224, 135, },
184 { 0x213, 130, },
185 { 0x204, 125, },
186 { 0x1fe, 120, },
187 { 0, 0, },
188};
189
190/* QAM64 SNR lookup table */
191static struct qam64_snr_tab {
192 u16 val;
193 u16 data;
194} qam64_snr_tab[] = {
195 { 0x0001, 0, },
196 { 0x0af0, 300, },
197 { 0x0d80, 290, },
198 { 0x10a0, 280, },
199 { 0x14b5, 270, },
200 { 0x1590, 268, },
201 { 0x1680, 266, },
202 { 0x17b0, 264, },
203 { 0x18c0, 262, },
204 { 0x19b0, 260, },
205 { 0x1ad0, 258, },
206 { 0x1d00, 256, },
207 { 0x1da0, 254, },
208 { 0x1ef0, 252, },
209 { 0x2050, 250, },
210 { 0x20f0, 249, },
211 { 0x21d0, 248, },
212 { 0x22b0, 247, },
213 { 0x23a0, 246, },
214 { 0x2470, 245, },
215 { 0x24f0, 244, },
216 { 0x25a0, 243, },
217 { 0x26c0, 242, },
218 { 0x27b0, 241, },
219 { 0x28d0, 240, },
220 { 0x29b0, 239, },
221 { 0x2ad0, 238, },
222 { 0x2ba0, 237, },
223 { 0x2c80, 236, },
224 { 0x2d20, 235, },
225 { 0x2e00, 234, },
226 { 0x2f10, 233, },
227 { 0x3050, 232, },
228 { 0x3190, 231, },
229 { 0x3300, 230, },
230 { 0x3340, 229, },
231 { 0x3200, 228, },
232 { 0x3550, 227, },
233 { 0x3610, 226, },
234 { 0x3600, 225, },
235 { 0x3700, 224, },
236 { 0x3800, 223, },
237 { 0x3920, 222, },
238 { 0x3a20, 221, },
239 { 0x3b30, 220, },
240 { 0x3d00, 219, },
241 { 0x3e00, 218, },
242 { 0x4000, 217, },
243 { 0x4100, 216, },
244 { 0x4300, 215, },
245 { 0x4400, 214, },
246 { 0x4600, 213, },
247 { 0x4700, 212, },
248 { 0x4800, 211, },
249 { 0x4a00, 210, },
250 { 0x4b00, 209, },
251 { 0x4d00, 208, },
252 { 0x4f00, 207, },
253 { 0x5050, 206, },
254 { 0x5200, 205, },
255 { 0x53c0, 204, },
256 { 0x5450, 203, },
257 { 0x5650, 202, },
258 { 0x5820, 201, },
259 { 0x6000, 200, },
260 { 0xffff, 0, },
261};
262
263/* QAM256 SNR lookup table */
264static struct qam256_snr_tab {
265 u16 val;
266 u16 data;
267} qam256_snr_tab[] = {
268 { 0x0001, 0, },
269 { 0x0970, 400, },
270 { 0x0a90, 390, },
271 { 0x0b90, 380, },
272 { 0x0d90, 370, },
273 { 0x0ff0, 360, },
274 { 0x1240, 350, },
275 { 0x1345, 348, },
276 { 0x13c0, 346, },
277 { 0x14c0, 344, },
278 { 0x1500, 342, },
279 { 0x1610, 340, },
280 { 0x1700, 338, },
281 { 0x1800, 336, },
282 { 0x18b0, 334, },
283 { 0x1900, 332, },
284 { 0x1ab0, 330, },
285 { 0x1bc0, 328, },
286 { 0x1cb0, 326, },
287 { 0x1db0, 324, },
288 { 0x1eb0, 322, },
289 { 0x2030, 320, },
290 { 0x2200, 318, },
291 { 0x2280, 316, },
292 { 0x2410, 314, },
293 { 0x25b0, 312, },
294 { 0x27a0, 310, },
295 { 0x2840, 308, },
296 { 0x29d0, 306, },
297 { 0x2b10, 304, },
298 { 0x2d30, 302, },
299 { 0x2f20, 300, },
300 { 0x30c0, 298, },
301 { 0x3260, 297, },
302 { 0x32c0, 296, },
303 { 0x3300, 295, },
304 { 0x33b0, 294, },
305 { 0x34b0, 293, },
306 { 0x35a0, 292, },
307 { 0x3650, 291, },
308 { 0x3800, 290, },
309 { 0x3900, 289, },
310 { 0x3a50, 288, },
311 { 0x3b30, 287, },
312 { 0x3cb0, 286, },
313 { 0x3e20, 285, },
314 { 0x3fa0, 284, },
315 { 0x40a0, 283, },
316 { 0x41c0, 282, },
317 { 0x42f0, 281, },
318 { 0x44a0, 280, },
319 { 0x4600, 279, },
320 { 0x47b0, 278, },
321 { 0x4900, 277, },
322 { 0x4a00, 276, },
323 { 0x4ba0, 275, },
324 { 0x4d00, 274, },
325 { 0x4f00, 273, },
326 { 0x5000, 272, },
 327 { 0x51f0, 271, },
328 { 0x53a0, 270, },
329 { 0x5520, 269, },
330 { 0x5700, 268, },
331 { 0x5800, 267, },
332 { 0x5a00, 266, },
333 { 0x5c00, 265, },
334 { 0x5d00, 264, },
335 { 0x5f00, 263, },
336 { 0x6000, 262, },
337 { 0x6200, 261, },
338 { 0x6400, 260, },
339 { 0xffff, 0, },
340};
341
342/* 8 bit registers, 16 bit values */
343static int s5h1411_writereg(struct s5h1411_state *state,
344 u8 addr, u8 reg, u16 data)
345{
346 int ret;
347 u8 buf [] = { reg, data >> 8, data & 0xff };
348
349 struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };
350
351 ret = i2c_transfer(state->i2c, &msg, 1);
352
353 if (ret != 1)
354 printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
355 "ret == %i)\n", __func__, addr, reg, data, ret);
356
357 return (ret != 1) ? -1 : 0;
358}
359
360static u16 s5h1411_readreg(struct s5h1411_state *state, u8 addr, u8 reg)
361{
362 int ret;
363 u8 b0 [] = { reg };
364 u8 b1 [] = { 0, 0 };
365
366 struct i2c_msg msg [] = {
367 { .addr = addr, .flags = 0, .buf = b0, .len = 1 },
368 { .addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 2 } };
369
370 ret = i2c_transfer(state->i2c, msg, 2);
371
372 if (ret != 2)
373 printk(KERN_ERR "%s: readreg error (ret == %i)\n",
374 __func__, ret);
375 return (b1[0] << 8) | b1[1];
376}
377
378static int s5h1411_softreset(struct dvb_frontend *fe)
379{
380 struct s5h1411_state *state = fe->demodulator_priv;
381
382 dprintk("%s()\n", __func__);
383
384 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 0);
385 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 1);
386 return 0;
387}
388
389static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
390{
391 struct s5h1411_state *state = fe->demodulator_priv;
392
393 dprintk("%s(%d KHz)\n", __func__, KHz);
394
395 switch (KHz) {
396 case 3250:
397 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d9);
398 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
399 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
400 break;
401 case 3500:
402 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1225);
403 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x1e96);
404 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1225);
405 break;
406 case 4000:
407 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x14bc);
408 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0xb53e);
409 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x14bd);
410 break;
411 default:
412 dprintk("%s(%d KHz) Invalid, defaulting to 5380\n",
413 __func__, KHz);
414 /* no break, need to continue */
415 case 5380:
416 case 44000:
417 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4);
418 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x3655);
419 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1be4);
420 break;
421 }
422
423 state->if_freq = KHz;
424
425 return 0;
426}
427
428static int s5h1411_set_mpeg_timing(struct dvb_frontend *fe, int mode)
429{
430 struct s5h1411_state *state = fe->demodulator_priv;
431 u16 val;
432
433 dprintk("%s(%d)\n", __func__, mode);
434
435 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbe) & 0xcfff;
436 switch (mode) {
437 case S5H1411_MPEGTIMING_CONTINOUS_INVERTING_CLOCK:
438 val |= 0x0000;
439 break;
440 case S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK:
441 dprintk("%s(%d) Mode1 or Defaulting\n", __func__, mode);
442 val |= 0x1000;
443 break;
444 case S5H1411_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK:
445 val |= 0x2000;
446 break;
447 case S5H1411_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK:
448 val |= 0x3000;
449 break;
450 default:
451 return -EINVAL;
452 }
453
 454 /* Configure MPEG Signal Timing characteristics */
455 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbe, val);
456}
457
458static int s5h1411_set_spectralinversion(struct dvb_frontend *fe, int inversion)
459{
460 struct s5h1411_state *state = fe->demodulator_priv;
461 u16 val;
462
463 dprintk("%s(%d)\n", __func__, inversion);
464 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x24) & ~0x1000;
465
466 if (inversion == 1)
467 val |= 0x1000; /* Inverted */
468 else
469 val |= 0x0000;
470
471 state->inversion = inversion;
472 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
473}
474
475static int s5h1411_enable_modulation(struct dvb_frontend *fe,
476 fe_modulation_t m)
477{
478 struct s5h1411_state *state = fe->demodulator_priv;
479
480 dprintk("%s(0x%08x)\n", __func__, m);
481
482 switch (m) {
483 case VSB_8:
484 dprintk("%s() VSB_8\n", __func__);
485 s5h1411_set_if_freq(fe, state->config->vsb_if);
486 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x71);
487 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x00);
488 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0xf1);
489 break;
490 case QAM_64:
491 case QAM_256:
492 dprintk("%s() QAM_AUTO (64/256)\n", __func__);
493 s5h1411_set_if_freq(fe, state->config->qam_if);
494 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x0171);
495 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x0001);
496 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x16, 0x1101);
497 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0x00f0);
498 break;
499 default:
500 dprintk("%s() Invalid modulation\n", __func__);
501 return -EINVAL;
502 }
503
504 state->current_modulation = m;
505 s5h1411_softreset(fe);
506
507 return 0;
508}
509
510static int s5h1411_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
511{
512 struct s5h1411_state *state = fe->demodulator_priv;
513
514 dprintk("%s(%d)\n", __func__, enable);
515
516 if (enable)
517 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);
518 else
519 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 0);
520}
521
522static int s5h1411_set_gpio(struct dvb_frontend *fe, int enable)
523{
524 struct s5h1411_state *state = fe->demodulator_priv;
525 u16 val;
526
527 dprintk("%s(%d)\n", __func__, enable);
528
529 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xe0) & ~0x02;
530
531 if (enable)
532 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0,
533 val | 0x02);
534 else
535 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
536}
537
538static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
539{
540 struct s5h1411_state *state = fe->demodulator_priv;
541
542 dprintk("%s(%d)\n", __func__, enable);
543
544 if (enable)
545 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 1);
546 else {
547 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 0);
548 s5h1411_softreset(fe);
549 }
550
551 return 0;
552}
553
554static int s5h1411_register_reset(struct dvb_frontend *fe)
555{
556 struct s5h1411_state *state = fe->demodulator_priv;
557
558 dprintk("%s()\n", __func__);
559
560 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf3, 0);
561}
562
563/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
564static int s5h1411_set_frontend(struct dvb_frontend *fe,
565 struct dvb_frontend_parameters *p)
566{
567 struct s5h1411_state *state = fe->demodulator_priv;
568
569 dprintk("%s(frequency=%d)\n", __func__, p->frequency);
570
571 s5h1411_softreset(fe);
572
573 state->current_frequency = p->frequency;
574
575 s5h1411_enable_modulation(fe, p->u.vsb.modulation);
576
577 /* Allow the demod to settle */
578 msleep(100);
579
580 if (fe->ops.tuner_ops.set_params) {
581 if (fe->ops.i2c_gate_ctrl)
582 fe->ops.i2c_gate_ctrl(fe, 1);
583
584 fe->ops.tuner_ops.set_params(fe, p);
585
586 if (fe->ops.i2c_gate_ctrl)
587 fe->ops.i2c_gate_ctrl(fe, 0);
588 }
589
590 return 0;
591}
592
593/* Reset the demod hardware and reset all of the configuration registers
594 to a default state. */
595static int s5h1411_init(struct dvb_frontend *fe)
596{
597 struct s5h1411_state *state = fe->demodulator_priv;
598 int i;
599
600 dprintk("%s()\n", __func__);
601
602 s5h1411_sleep(fe, 0);
603 s5h1411_register_reset(fe);
604
605 for (i = 0; i < ARRAY_SIZE(init_tab); i++)
606 s5h1411_writereg(state, init_tab[i].addr,
607 init_tab[i].reg,
608 init_tab[i].data);
609
610 /* The datasheet says that after initialisation, VSB is default */
611 state->current_modulation = VSB_8;
612
613 if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
614 /* Serial */
615 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1101);
616 else
617 /* Parallel */
618 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1001);
619
620 s5h1411_set_spectralinversion(fe, state->config->inversion);
621 s5h1411_set_if_freq(fe, state->config->vsb_if);
622 s5h1411_set_gpio(fe, state->config->gpio);
623 s5h1411_set_mpeg_timing(fe, state->config->mpeg_timing);
624 s5h1411_softreset(fe);
625
626 /* Note: Leaving the I2C gate closed. */
627 s5h1411_i2c_gate_ctrl(fe, 0);
628
629 return 0;
630}
631
632static int s5h1411_read_status(struct dvb_frontend *fe, fe_status_t *status)
633{
634 struct s5h1411_state *state = fe->demodulator_priv;
635 u16 reg;
636 u32 tuner_status = 0;
637
638 *status = 0;
639
640 /* Get the demodulator status */
641 reg = (s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) >> 15)
642 & 0x0001;
643 if (reg)
644 *status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_SIGNAL;
645
646 switch (state->current_modulation) {
647 case QAM_64:
648 case QAM_256:
649 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
650 if (reg & 0x100)
651 *status |= FE_HAS_VITERBI;
652 if (reg & 0x10)
653 *status |= FE_HAS_SYNC;
654 break;
655 case VSB_8:
656 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x5e);
657 if (reg & 0x0001)
658 *status |= FE_HAS_SYNC;
659 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
660 if (reg & 0x1000)
661 *status |= FE_HAS_VITERBI;
662 break;
663 default:
664 return -EINVAL;
665 }
666
667 switch (state->config->status_mode) {
668 case S5H1411_DEMODLOCKING:
669 if (*status & FE_HAS_VITERBI)
670 *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
671 break;
672 case S5H1411_TUNERLOCKING:
673 /* Get the tuner status */
674 if (fe->ops.tuner_ops.get_status) {
675 if (fe->ops.i2c_gate_ctrl)
676 fe->ops.i2c_gate_ctrl(fe, 1);
677
678 fe->ops.tuner_ops.get_status(fe, &tuner_status);
679
680 if (fe->ops.i2c_gate_ctrl)
681 fe->ops.i2c_gate_ctrl(fe, 0);
682 }
683 if (tuner_status)
684 *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
685 break;
686 }
687
688 dprintk("%s() status 0x%08x\n", __func__, *status);
689
690 return 0;
691}
692
693static int s5h1411_qam256_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
694{
695 int i, ret = -EINVAL;
696 dprintk("%s()\n", __func__);
697
698 for (i = 0; i < ARRAY_SIZE(qam256_snr_tab); i++) {
699 if (v < qam256_snr_tab[i].val) {
700 *snr = qam256_snr_tab[i].data;
701 ret = 0;
702 break;
703 }
704 }
705 return ret;
706}
707
708static int s5h1411_qam64_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
709{
710 int i, ret = -EINVAL;
711 dprintk("%s()\n", __func__);
712
713 for (i = 0; i < ARRAY_SIZE(qam64_snr_tab); i++) {
714 if (v < qam64_snr_tab[i].val) {
715 *snr = qam64_snr_tab[i].data;
716 ret = 0;
717 break;
718 }
719 }
720 return ret;
721}
722
723static int s5h1411_vsb_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
724{
725 int i, ret = -EINVAL;
726 dprintk("%s()\n", __func__);
727
728 for (i = 0; i < ARRAY_SIZE(vsb_snr_tab); i++) {
729 if (v > vsb_snr_tab[i].val) {
730 *snr = vsb_snr_tab[i].data;
731 ret = 0;
732 break;
733 }
734 }
735 dprintk("%s() snr=%d\n", __func__, *snr);
736 return ret;
737}
738
739static int s5h1411_read_snr(struct dvb_frontend *fe, u16 *snr)
740{
741 struct s5h1411_state *state = fe->demodulator_priv;
742 u16 reg;
743 dprintk("%s()\n", __func__);
744
745 switch (state->current_modulation) {
746 case QAM_64:
747 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
748 return s5h1411_qam64_lookup_snr(fe, snr, reg);
749 case QAM_256:
750 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
751 return s5h1411_qam256_lookup_snr(fe, snr, reg);
752 case VSB_8:
753 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR,
754 0xf2) & 0x3ff;
755 return s5h1411_vsb_lookup_snr(fe, snr, reg);
756 default:
757 break;
758 }
759
760 return -EINVAL;
761}
762
763static int s5h1411_read_signal_strength(struct dvb_frontend *fe,
764 u16 *signal_strength)
765{
766 return s5h1411_read_snr(fe, signal_strength);
767}
768
769static int s5h1411_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
770{
771 struct s5h1411_state *state = fe->demodulator_priv;
772
773 *ucblocks = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xc9);
774
775 return 0;
776}
777
778static int s5h1411_read_ber(struct dvb_frontend *fe, u32 *ber)
779{
780 return s5h1411_read_ucblocks(fe, ber);
781}
782
783static int s5h1411_get_frontend(struct dvb_frontend *fe,
784 struct dvb_frontend_parameters *p)
785{
786 struct s5h1411_state *state = fe->demodulator_priv;
787
788 p->frequency = state->current_frequency;
789 p->u.vsb.modulation = state->current_modulation;
790
791 return 0;
792}
793
794static int s5h1411_get_tune_settings(struct dvb_frontend *fe,
795 struct dvb_frontend_tune_settings *tune)
796{
797 tune->min_delay_ms = 1000;
798 return 0;
799}
800
801static void s5h1411_release(struct dvb_frontend *fe)
802{
803 struct s5h1411_state *state = fe->demodulator_priv;
804 kfree(state);
805}
806
807static struct dvb_frontend_ops s5h1411_ops;
808
809struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
810 struct i2c_adapter *i2c)
811{
812 struct s5h1411_state *state = NULL;
813 u16 reg;
814
815 /* allocate memory for the internal state */
816 state = kmalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
817 if (state == NULL)
818 goto error;
819
820 /* setup the state */
821 state->config = config;
822 state->i2c = i2c;
823 state->current_modulation = VSB_8;
824 state->inversion = state->config->inversion;
825
826 /* check if the demod exists */
827 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x05);
828 if (reg != 0x0066)
829 goto error;
830
831 /* create dvb_frontend */
832 memcpy(&state->frontend.ops, &s5h1411_ops,
833 sizeof(struct dvb_frontend_ops));
834
835 state->frontend.demodulator_priv = state;
836
837 if (s5h1411_init(&state->frontend) != 0) {
838 printk(KERN_ERR "%s: Failed to initialize correctly\n",
839 __func__);
840 goto error;
841 }
842
843 /* Note: Leaving the I2C gate open here. */
844 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);
845
846 return &state->frontend;
847
848error:
849 kfree(state);
850 return NULL;
851}
852EXPORT_SYMBOL(s5h1411_attach);
853
854static struct dvb_frontend_ops s5h1411_ops = {
855
856 .info = {
857 .name = "Samsung S5H1411 QAM/8VSB Frontend",
858 .type = FE_ATSC,
859 .frequency_min = 54000000,
860 .frequency_max = 858000000,
861 .frequency_stepsize = 62500,
862 .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
863 },
864
865 .init = s5h1411_init,
866 .i2c_gate_ctrl = s5h1411_i2c_gate_ctrl,
867 .set_frontend = s5h1411_set_frontend,
868 .get_frontend = s5h1411_get_frontend,
869 .get_tune_settings = s5h1411_get_tune_settings,
870 .read_status = s5h1411_read_status,
871 .read_ber = s5h1411_read_ber,
872 .read_signal_strength = s5h1411_read_signal_strength,
873 .read_snr = s5h1411_read_snr,
874 .read_ucblocks = s5h1411_read_ucblocks,
875 .release = s5h1411_release,
876};
877
878module_param(debug, int, 0644);
879MODULE_PARM_DESC(debug, "Enable verbose debug messages");
880
881MODULE_DESCRIPTION("Samsung S5H1411 QAM-B/ATSC Demodulator driver");
882MODULE_AUTHOR("Steven Toth");
883MODULE_LICENSE("GPL");
884
885/*
886 * Local variables:
887 * c-basic-offset: 8
888 */
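Several of the functions above (s5h1411_set_mpeg_timing(), s5h1411_set_spectralinversion(), s5h1411_set_gpio()) follow the same read-modify-write idiom on a register. A hedged sketch of a helper that would collapse that pattern; it is not part of the patch, and the code above is kept as-is:

static int s5h1411_update_bits(struct s5h1411_state *state, u8 addr, u8 reg,
			       u16 mask, u16 bits)
{
	u16 val = s5h1411_readreg(state, addr, reg);

	/* clear the masked field, then set the requested bits within it */
	return s5h1411_writereg(state, addr, reg, (val & ~mask) | (bits & mask));
}

/* e.g. spectral inversion could then be written as:
 *	s5h1411_update_bits(state, S5H1411_I2C_TOP_ADDR, 0x24,
 *			    0x1000, inversion ? 0x1000 : 0);
 */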
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
new file mode 100644
index 000000000000..1855f64ed4d8
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -0,0 +1,90 @@
1/*
2 Samsung S5H1411 VSB/QAM demodulator driver
3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
20*/
21
22#ifndef __S5H1411_H__
23#define __S5H1411_H__
24
25#include <linux/dvb/frontend.h>
26
27#define S5H1411_I2C_TOP_ADDR (0x32 >> 1)
28#define S5H1411_I2C_QAM_ADDR (0x34 >> 1)
29
30struct s5h1411_config {
31
32 /* serial/parallel output */
33#define S5H1411_PARALLEL_OUTPUT 0
34#define S5H1411_SERIAL_OUTPUT 1
35 u8 output_mode;
36
37 /* GPIO Setting */
38#define S5H1411_GPIO_OFF 0
39#define S5H1411_GPIO_ON 1
40 u8 gpio;
41
42 /* MPEG signal timing */
43#define S5H1411_MPEGTIMING_CONTINOUS_INVERTING_CLOCK 0
44#define S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 1
45#define S5H1411_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK 2
46#define S5H1411_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK 3
47 u16 mpeg_timing;
48
49 /* IF Freq for QAM and VSB in KHz */
50#define S5H1411_IF_2500 2500
51#define S5H1411_IF_3500 3500
52#define S5H1411_IF_4000 4000
53#define S5H1411_IF_5380 5380
54#define S5H1411_IF_44000 44000
55#define S5H1411_VSB_IF_DEFAULT S5H1411_IF_44000
56#define S5H1411_QAM_IF_DEFAULT S5H1411_IF_44000
57 u16 qam_if;
58 u16 vsb_if;
59
60 /* Spectral Inversion */
61#define S5H1411_INVERSION_OFF 0
62#define S5H1411_INVERSION_ON 1
63 u8 inversion;
64
65 /* Return lock status based on tuner lock, or demod lock */
66#define S5H1411_TUNERLOCKING 0
67#define S5H1411_DEMODLOCKING 1
68 u8 status_mode;
69};
70
71#if defined(CONFIG_DVB_S5H1411) || \
72 (defined(CONFIG_DVB_S5H1411_MODULE) && defined(MODULE))
73extern struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
74 struct i2c_adapter *i2c);
75#else
76static inline struct dvb_frontend *s5h1411_attach(
77 const struct s5h1411_config *config,
78 struct i2c_adapter *i2c)
79{
80 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
81 return NULL;
82}
83#endif /* CONFIG_DVB_S5H1411 */
84
85#endif /* __S5H1411_H__ */
86
87/*
88 * Local variables:
89 * c-basic-offset: 8
90 */
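Typical use from a bridge driver, mirroring the cx88 and pvrusb2 hookups later in this patch: fill in an s5h1411_config, dvb_attach() the demod, then hang a tuner off the returned frontend. Names such as example_dev below are placeholders, not real symbols.

#include "s5h1411.h"

static struct s5h1411_config example_s5h1411_cfg = {
	.output_mode = S5H1411_PARALLEL_OUTPUT,
	.gpio        = S5H1411_GPIO_OFF,
	.mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
	.qam_if      = S5H1411_IF_44000,
	.vsb_if      = S5H1411_IF_44000,
	.inversion   = S5H1411_INVERSION_OFF,
	.status_mode = S5H1411_DEMODLOCKING,
};

static int example_frontend_attach(struct example_dev *dev)
{
	dev->fe = dvb_attach(s5h1411_attach, &example_s5h1411_cfg,
			     &dev->i2c_adap);
	return dev->fe ? 0 : -EIO;
}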
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index c97c4bd24841..41708267e7a4 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -1,7 +1,7 @@
1 1
2config VIDEO_AU0828 2config VIDEO_AU0828
3 tristate "Auvitek AU0828 support" 3 tristate "Auvitek AU0828 support"
4 depends on VIDEO_DEV && I2C && INPUT 4 depends on VIDEO_DEV && I2C && INPUT && DVB_CORE
5 select I2C_ALGOBIT 5 select I2C_ALGOBIT
6 select DVB_AU8522 if !DVB_FE_CUSTOMIZE 6 select DVB_AU8522 if !DVB_FE_CUSTOMIZE
7 select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE 7 select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 8ca91f814277..a2a6983444fa 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -36,7 +36,6 @@ struct au0828_board au0828_boards[] = {
36 .name = "DViCO FusionHDTV USB", 36 .name = "DViCO FusionHDTV USB",
37 }, 37 },
38}; 38};
39const unsigned int au0828_bcount = ARRAY_SIZE(au0828_boards);
40 39
41/* Tuner callback function for au0828 boards. Currently only needed 40/* Tuner callback function for au0828 boards. Currently only needed
42 * for HVR1500Q, which has an xc5000 tuner. 41 * for HVR1500Q, which has an xc5000 tuner.
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index e65d5642cb1d..54bfc0f05295 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -32,18 +32,10 @@
32 * 4 = I2C related 32 * 4 = I2C related
33 * 8 = Bridge related 33 * 8 = Bridge related
34 */ 34 */
35unsigned int debug; 35int au0828_debug;
36module_param(debug, int, 0644); 36module_param_named(debug, au0828_debug, int, 0644);
37MODULE_PARM_DESC(debug, "enable debug messages"); 37MODULE_PARM_DESC(debug, "enable debug messages");
38 38
39unsigned int usb_debug;
40module_param(usb_debug, int, 0644);
41MODULE_PARM_DESC(usb_debug, "enable usb debug messages");
42
43unsigned int bridge_debug;
44module_param(bridge_debug, int, 0644);
45MODULE_PARM_DESC(bridge_debug, "enable bridge debug messages");
46
47#define _AU0828_BULKPIPE 0x03 39#define _AU0828_BULKPIPE 0x03
48#define _BULKPIPESIZE 0xffff 40#define _BULKPIPESIZE 0xffff
49 41
@@ -229,24 +221,18 @@ static int __init au0828_init(void)
229{ 221{
230 int ret; 222 int ret;
231 223
232 if (debug) 224 if (au0828_debug & 1)
233 printk(KERN_INFO "%s() Debugging is enabled\n", __func__); 225 printk(KERN_INFO "%s() Debugging is enabled\n", __func__);
234 226
235 if (usb_debug) { 227 if (au0828_debug & 2)
236 printk(KERN_INFO "%s() USB Debugging is enabled\n", __func__); 228 printk(KERN_INFO "%s() USB Debugging is enabled\n", __func__);
237 debug |= 2;
238 }
239 229
240 if (i2c_debug) { 230 if (au0828_debug & 4)
241 printk(KERN_INFO "%s() I2C Debugging is enabled\n", __func__); 231 printk(KERN_INFO "%s() I2C Debugging is enabled\n", __func__);
242 debug |= 4;
243 }
244 232
245 if (bridge_debug) { 233 if (au0828_debug & 8)
246 printk(KERN_INFO "%s() Bridge Debugging is enabled\n", 234 printk(KERN_INFO "%s() Bridge Debugging is enabled\n",
247 __func__); 235 __func__);
248 debug |= 8;
249 }
250 236
251 printk(KERN_INFO "au0828 driver loaded\n"); 237 printk(KERN_INFO "au0828 driver loaded\n");
252 238
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 85d0ae9a322f..5040d7fc4af5 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -204,7 +204,7 @@ static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
204 return ret; 204 return ret;
205} 205}
206 206
207int dvb_register(struct au0828_dev *dev) 207static int dvb_register(struct au0828_dev *dev)
208{ 208{
209 struct au0828_dvb *dvb = &dev->dvb; 209 struct au0828_dvb *dvb = &dev->dvb;
210 int result; 210 int result;
diff --git a/drivers/media/video/au0828/au0828-i2c.c b/drivers/media/video/au0828/au0828-i2c.c
index 94c8b74a6651..741a4937b050 100644
--- a/drivers/media/video/au0828/au0828-i2c.c
+++ b/drivers/media/video/au0828/au0828-i2c.c
@@ -29,11 +29,7 @@
29 29
30#include <media/v4l2-common.h> 30#include <media/v4l2-common.h>
31 31
32unsigned int i2c_debug; 32static int i2c_scan;
33module_param(i2c_debug, int, 0444);
34MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
35
36unsigned int i2c_scan;
37module_param(i2c_scan, int, 0444); 33module_param(i2c_scan, int, 0444);
38MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time"); 34MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
39 35
diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
index 0200b9fc5dc4..7beb571798e5 100644
--- a/drivers/media/video/au0828/au0828.h
+++ b/drivers/media/video/au0828/au0828.h
@@ -96,15 +96,12 @@ struct au0828_buff {
96/* au0828-core.c */ 96/* au0828-core.c */
97extern u32 au0828_read(struct au0828_dev *dev, u16 reg); 97extern u32 au0828_read(struct au0828_dev *dev, u16 reg);
98extern u32 au0828_write(struct au0828_dev *dev, u16 reg, u32 val); 98extern u32 au0828_write(struct au0828_dev *dev, u16 reg, u32 val);
99extern unsigned int debug; 99extern int au0828_debug;
100extern unsigned int usb_debug;
101extern unsigned int bridge_debug;
102 100
103/* ----------------------------------------------------------- */ 101/* ----------------------------------------------------------- */
104/* au0828-cards.c */ 102/* au0828-cards.c */
105extern struct au0828_board au0828_boards[]; 103extern struct au0828_board au0828_boards[];
106extern struct usb_device_id au0828_usb_id_table[]; 104extern struct usb_device_id au0828_usb_id_table[];
107extern const unsigned int au0828_bcount;
108extern void au0828_gpio_setup(struct au0828_dev *dev); 105extern void au0828_gpio_setup(struct au0828_dev *dev);
109extern int au0828_tuner_callback(void *priv, int command, int arg); 106extern int au0828_tuner_callback(void *priv, int command, int arg);
110extern void au0828_card_setup(struct au0828_dev *dev); 107extern void au0828_card_setup(struct au0828_dev *dev);
@@ -115,7 +112,6 @@ extern int au0828_i2c_register(struct au0828_dev *dev);
115extern int au0828_i2c_unregister(struct au0828_dev *dev); 112extern int au0828_i2c_unregister(struct au0828_dev *dev);
116extern void au0828_call_i2c_clients(struct au0828_dev *dev, 113extern void au0828_call_i2c_clients(struct au0828_dev *dev,
117 unsigned int cmd, void *arg); 114 unsigned int cmd, void *arg);
118extern unsigned int i2c_debug;
119 115
120/* ----------------------------------------------------------- */ 116/* ----------------------------------------------------------- */
121/* au0828-dvb.c */ 117/* au0828-dvb.c */
@@ -123,6 +119,6 @@ extern int au0828_dvb_register(struct au0828_dev *dev);
123extern void au0828_dvb_unregister(struct au0828_dev *dev); 119extern void au0828_dvb_unregister(struct au0828_dev *dev);
124 120
125#define dprintk(level, fmt, arg...)\ 121#define dprintk(level, fmt, arg...)\
126 do { if (debug & level)\ 122 do { if (au0828_debug & level)\
127 printk(KERN_DEBUG DRIVER_NAME "/0: " fmt, ## arg);\ 123 printk(KERN_DEBUG DRIVER_NAME "/0: " fmt, ## arg);\
128 } while (0) 124 } while (0)
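With the three separate debug knobs folded into one au0828_debug bitmask, each dprintk() call names the subsystem bit it belongs to (1 generic, 2 USB, 4 I2C, 8 bridge, per the comment in au0828-core.c), and e.g. "modprobe au0828 debug=12" enables only the I2C and bridge messages. A hedged usage sketch:

static void example_debug_trace(u16 reg, u32 val)
{
	dprintk(4, "i2c transaction while touching reg 0x%04x\n", reg);
	dprintk(8, "bridge register 0x%04x <= 0x%08x\n", reg, val);
}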
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 870d6e197d65..f05649727b60 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -191,7 +191,7 @@ static struct tda18271_config hauppauge_hvr1200_tuner_config = {
191 .gate = TDA18271_GATE_ANALOG, 191 .gate = TDA18271_GATE_ANALOG,
192}; 192};
193 193
194struct dibx000_agc_config xc3028_agc_config = { 194static struct dibx000_agc_config xc3028_agc_config = {
195 BAND_VHF | BAND_UHF, /* band_caps */ 195 BAND_VHF | BAND_UHF, /* band_caps */
196 196
197 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0, 197 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0,
@@ -237,7 +237,7 @@ struct dibx000_agc_config xc3028_agc_config = {
237 237
238/* PLL Configuration for COFDM BW_MHz = 8.000000 238/* PLL Configuration for COFDM BW_MHz = 8.000000
239 * With external clock = 30.000000 */ 239 * With external clock = 30.000000 */
240struct dibx000_bandwidth_config xc3028_bw_config = { 240static struct dibx000_bandwidth_config xc3028_bw_config = {
241 60000, /* internal */ 241 60000, /* internal */
242 30000, /* sampling */ 242 30000, /* sampling */
243 1, /* pll_cfg: prediv */ 243 1, /* pll_cfg: prediv */
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index bcf6d9ba063d..27635cdcbaf2 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -58,6 +58,7 @@ config VIDEO_CX88_DVB
58 select DVB_CX24123 if !DVB_FE_CUSTOMISE 58 select DVB_CX24123 if !DVB_FE_CUSTOMISE
59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
60 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 60 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
61 select DVB_S5H1411 if !DVB_FE_CUSTOMISE
61 ---help--- 62 ---help---
62 This adds support for DVB/ATSC cards based on the 63 This adds support for DVB/ATSC cards based on the
63 Conexant 2388x chip. 64 Conexant 2388x chip.
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 61c4f72644b8..6c0c94c5ef91 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -546,10 +546,12 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
546 if (retval < 0) 546 if (retval < 0)
547 return retval; 547 return retval;
548 548
549 dev->mailbox = blackbird_find_mailbox(dev); 549 retval = blackbird_find_mailbox(dev);
550 if (dev->mailbox < 0) 550 if (retval < 0)
551 return -1; 551 return -1;
552 552
553 dev->mailbox = retval;
554
553 retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */ 555 retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
554 if (retval < 0) { 556 if (retval < 0) {
555 dprintk(0, "ERROR: Firmware ping failed!\n"); 557 dprintk(0, "ERROR: Firmware ping failed!\n");
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 620159d05506..2b6b283cda15 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1591,6 +1591,7 @@ static const struct cx88_board cx88_boards[] = {
1591 .vmux = 2, 1591 .vmux = 2,
1592 .gpio0 = 0x16d9, 1592 .gpio0 = 0x16d9,
1593 }}, 1593 }},
1594 .mpeg = CX88_MPEG_DVB,
1594 }, 1595 },
1595 [CX88_BOARD_PROLINK_PV_8000GT] = { 1596 [CX88_BOARD_PROLINK_PV_8000GT] = {
1596 .name = "Prolink Pixelview MPEG 8000GT", 1597 .name = "Prolink Pixelview MPEG 8000GT",
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index f1251b844e08..1c7fe6862a60 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -47,6 +47,7 @@
47#include "isl6421.h" 47#include "isl6421.h"
48#include "tuner-simple.h" 48#include "tuner-simple.h"
49#include "tda9887.h" 49#include "tda9887.h"
50#include "s5h1411.h"
50 51
51MODULE_DESCRIPTION("driver for cx2388x based DVB cards"); 52MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
52MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); 53MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -463,6 +464,22 @@ static struct zl10353_config cx88_geniatech_x8000_mt = {
463 .no_tuner = 1, 464 .no_tuner = 1,
464}; 465};
465 466
467static struct s5h1411_config dvico_fusionhdtv7_config = {
468 .output_mode = S5H1411_SERIAL_OUTPUT,
469 .gpio = S5H1411_GPIO_ON,
470 .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
471 .qam_if = S5H1411_IF_44000,
472 .vsb_if = S5H1411_IF_44000,
473 .inversion = S5H1411_INVERSION_OFF,
474 .status_mode = S5H1411_DEMODLOCKING
475};
476
477static struct xc5000_config dvico_fusionhdtv7_tuner_config = {
478 .i2c_address = 0xc2 >> 1,
479 .if_khz = 5380,
480 .tuner_callback = cx88_tuner_callback,
481};
482
466static int attach_xc3028(u8 addr, struct cx8802_dev *dev) 483static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
467{ 484{
468 struct dvb_frontend *fe; 485 struct dvb_frontend *fe;
@@ -844,6 +861,21 @@ static int dvb_register(struct cx8802_dev *dev)
844 if (attach_xc3028(0x61, dev) < 0) 861 if (attach_xc3028(0x61, dev) < 0)
845 return -EINVAL; 862 return -EINVAL;
846 break; 863 break;
864 case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
865 dev->dvb.frontend = dvb_attach(s5h1411_attach,
866 &dvico_fusionhdtv7_config,
867 &dev->core->i2c_adap);
868 if (dev->dvb.frontend != NULL) {
869 /* tuner_config.video_dev must point to
870 * i2c_adap.algo_data
871 */
872 dvico_fusionhdtv7_tuner_config.priv =
873 dev->core->i2c_adap.algo_data;
874 dvb_attach(xc5000_attach, dev->dvb.frontend,
875 &dev->core->i2c_adap,
876 &dvico_fusionhdtv7_tuner_config);
877 }
878 break;
847 default: 879 default:
848 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n", 880 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
849 dev->core->name); 881 dev->core->name);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index f8c41d8c74c4..5d837c16ee22 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -650,7 +650,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
650 650
651 dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs, 651 dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
652 GFP_KERNEL); 652 GFP_KERNEL);
653 if (!dev->isoc_ctl.urb) { 653 if (!dev->isoc_ctl.transfer_buffer) {
654 em28xx_errdev("cannot allocate memory for usbtransfer\n"); 654 em28xx_errdev("cannot allocate memory for usbtransfer\n");
655 kfree(dev->isoc_ctl.urb); 655 kfree(dev->isoc_ctl.urb);
656 return -ENOMEM; 656 return -ENOMEM;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 11c5fdedc23b..7b65f5e537f8 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -509,8 +509,11 @@ static int ir_probe(struct i2c_adapter *adap)
509 static const int probe_cx88[] = { 0x18, 0x6b, 0x71, -1 }; 509 static const int probe_cx88[] = { 0x18, 0x6b, 0x71, -1 };
510 static const int probe_cx23885[] = { 0x6b, -1 }; 510 static const int probe_cx23885[] = { 0x6b, -1 };
511 const int *probe; 511 const int *probe;
512 struct i2c_client *c; 512 struct i2c_msg msg = {
513 unsigned char buf; 513 .flags = I2C_M_RD,
514 .len = 0,
515 .buf = NULL,
516 };
514 int i, rc; 517 int i, rc;
515 518
516 switch (adap->id) { 519 switch (adap->id) {
@@ -536,23 +539,17 @@ static int ir_probe(struct i2c_adapter *adap)
536 return 0; 539 return 0;
537 } 540 }
538 541
539 c = kzalloc(sizeof(*c), GFP_KERNEL);
540 if (!c)
541 return -ENOMEM;
542
543 c->adapter = adap;
544 for (i = 0; -1 != probe[i]; i++) { 542 for (i = 0; -1 != probe[i]; i++) {
545 c->addr = probe[i]; 543 msg.addr = probe[i];
546 rc = i2c_master_recv(c, &buf, 0); 544 rc = i2c_transfer(adap, &msg, 1);
547 dprintk(1,"probe 0x%02x @ %s: %s\n", 545 dprintk(1,"probe 0x%02x @ %s: %s\n",
548 probe[i], adap->name, 546 probe[i], adap->name,
549 (0 == rc) ? "yes" : "no"); 547 (1 == rc) ? "yes" : "no");
550 if (0 == rc) { 548 if (1 == rc) {
551 ir_attach(adap, probe[i], 0, 0); 549 ir_attach(adap, probe[i], 0, 0);
552 break; 550 break;
553 } 551 }
554 } 552 }
555 kfree(c);
556 return 0; 553 return 0;
557} 554}
558 555
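The rewritten probe above no longer fakes up an i2c_client just to call i2c_master_recv(); it issues a zero-length read directly and treats a return of 1 (one message completed) as an ACK from that address. The same idiom, as a hedged standalone helper:

#include <linux/i2c.h>

static int i2c_addr_responds(struct i2c_adapter *adap, u16 addr)
{
	struct i2c_msg msg = {
		.addr  = addr,
		.flags = I2C_M_RD,
		.len   = 0,
		.buf   = NULL,
	};

	/* i2c_transfer() returns the number of messages completed */
	return i2c_transfer(adap, &msg, 1) == 1;
}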
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
index a8da90f69dd9..158b3d0c6532 100644
--- a/drivers/media/video/pvrusb2/Kconfig
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -64,6 +64,7 @@ config VIDEO_PVRUSB2_DVB
64 depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL 64 depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL
65 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 65 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
66 select DVB_S5H1409 if !DVB_FE_CUSTOMISE 66 select DVB_S5H1409 if !DVB_FE_CUSTOMISE
67 select DVB_S5H1411 if !DVB_FE_CUSTOMISE
67 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE 68 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE
68 select DVB_TDA18271 if !DVB_FE_CUSTOMIZE 69 select DVB_TDA18271 if !DVB_FE_CUSTOMIZE
69 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 70 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 2dd06a90adce..3a141d93e1a9 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -36,6 +36,7 @@ pvr2_device_desc structures.
36#include "pvrusb2-hdw-internal.h" 36#include "pvrusb2-hdw-internal.h"
37#include "lgdt330x.h" 37#include "lgdt330x.h"
38#include "s5h1409.h" 38#include "s5h1409.h"
39#include "s5h1411.h"
39#include "tda10048.h" 40#include "tda10048.h"
40#include "tda18271.h" 41#include "tda18271.h"
41#include "tda8290.h" 42#include "tda8290.h"
@@ -368,6 +369,15 @@ static struct s5h1409_config pvr2_s5h1409_config = {
368 .status_mode = S5H1409_DEMODLOCKING, 369 .status_mode = S5H1409_DEMODLOCKING,
369}; 370};
370 371
372static struct s5h1411_config pvr2_s5h1411_config = {
373 .output_mode = S5H1411_PARALLEL_OUTPUT,
374 .gpio = S5H1411_GPIO_OFF,
375 .vsb_if = S5H1411_IF_44000,
376 .qam_if = S5H1411_IF_4000,
377 .inversion = S5H1411_INVERSION_ON,
378 .status_mode = S5H1411_DEMODLOCKING,
379};
380
371static struct tda18271_std_map hauppauge_tda18271_std_map = { 381static struct tda18271_std_map hauppauge_tda18271_std_map = {
372 .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3, 382 .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3,
373 .if_lvl = 6, .rfagc_top = 0x37, }, 383 .if_lvl = 6, .rfagc_top = 0x37, },
@@ -390,6 +400,16 @@ static int pvr2_s5h1409_attach(struct pvr2_dvb_adapter *adap)
390 return -EIO; 400 return -EIO;
391} 401}
392 402
403static int pvr2_s5h1411_attach(struct pvr2_dvb_adapter *adap)
404{
405 adap->fe = dvb_attach(s5h1411_attach, &pvr2_s5h1411_config,
406 &adap->channel.hdw->i2c_adap);
407 if (adap->fe)
408 return 0;
409
410 return -EIO;
411}
412
393static int pvr2_tda18271_8295_attach(struct pvr2_dvb_adapter *adap) 413static int pvr2_tda18271_8295_attach(struct pvr2_dvb_adapter *adap)
394{ 414{
395 dvb_attach(tda829x_attach, adap->fe, 415 dvb_attach(tda829x_attach, adap->fe,
@@ -406,6 +426,11 @@ struct pvr2_dvb_props pvr2_750xx_dvb_props = {
406 .frontend_attach = pvr2_s5h1409_attach, 426 .frontend_attach = pvr2_s5h1409_attach,
407 .tuner_attach = pvr2_tda18271_8295_attach, 427 .tuner_attach = pvr2_tda18271_8295_attach,
408}; 428};
429
430struct pvr2_dvb_props pvr2_751xx_dvb_props = {
431 .frontend_attach = pvr2_s5h1411_attach,
432 .tuner_attach = pvr2_tda18271_8295_attach,
433};
409#endif 434#endif
410 435
411static const char *pvr2_client_75xxx[] = { 436static const char *pvr2_client_75xxx[] = {
@@ -454,6 +479,9 @@ static const struct pvr2_device_desc pvr2_device_751xx = {
454 .digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE, 479 .digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
455 .default_std_mask = V4L2_STD_NTSC_M, 480 .default_std_mask = V4L2_STD_NTSC_M,
456 .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE, 481 .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
482#ifdef CONFIG_VIDEO_PVRUSB2_DVB
483 .dvb_props = &pvr2_751xx_dvb_props,
484#endif
457}; 485};
458 486
459 487
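
The new pvr2_s5h1411_attach() follows the same shape as the existing s5h1409 hook: dvb_attach() returns the frontend pointer on success and NULL on failure, and the 751xx props table simply points at the right attach callbacks. A hedged sketch of that helper shape, where my_demod_attach and my_demod_config are placeholders for whichever demodulator driver is being wired in:

static int pvr2_my_demod_attach(struct pvr2_dvb_adapter *adap)
{
	/* dvb_attach() loads the demod module and calls its attach function */
	adap->fe = dvb_attach(my_demod_attach, &my_demod_config,
			      &adap->channel.hdw->i2c_adap);

	return adap->fe ? 0 : -EIO;
}
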
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.h b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
index c2e2b06fe2e0..d016f8b6c70b 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
@@ -104,28 +104,28 @@ struct pvr2_device_desc {
104 unsigned char digital_control_scheme; 104 unsigned char digital_control_scheme;
105 105
106 /* If set, we don't bother trying to load cx23416 firmware. */ 106 /* If set, we don't bother trying to load cx23416 firmware. */
107 int flag_skip_cx23416_firmware:1; 107 unsigned int flag_skip_cx23416_firmware:1;
108 108
109 /* If set, the encoder must be healthy in order for digital mode to 109 /* If set, the encoder must be healthy in order for digital mode to
110 work (otherwise we assume that digital streaming will work even 110 work (otherwise we assume that digital streaming will work even
111 if we fail to locate firmware for the encoder). If the device 111 if we fail to locate firmware for the encoder). If the device
112 doesn't support digital streaming then this flag has no 112 doesn't support digital streaming then this flag has no
113 effect. */ 113 effect. */
114 int flag_digital_requires_cx23416:1; 114 unsigned int flag_digital_requires_cx23416:1;
115 115
116 /* Device has a hauppauge eeprom which we can interrogate. */ 116 /* Device has a hauppauge eeprom which we can interrogate. */
117 int flag_has_hauppauge_rom:1; 117 unsigned int flag_has_hauppauge_rom:1;
118 118
119 /* Device does not require a powerup command to be issued. */ 119 /* Device does not require a powerup command to be issued. */
120 int flag_no_powerup:1; 120 unsigned int flag_no_powerup:1;
121 121
122 /* Device has a cx25840 - this enables special additional logic to 122 /* Device has a cx25840 - this enables special additional logic to
123 handle it. */ 123 handle it. */
124 int flag_has_cx25840:1; 124 unsigned int flag_has_cx25840:1;
125 125
126 /* Device has a wm8775 - this enables special additional logic to 126 /* Device has a wm8775 - this enables special additional logic to
127 ensure that it is found. */ 127 ensure that it is found. */
128 int flag_has_wm8775:1; 128 unsigned int flag_has_wm8775:1;
129 129
130 /* Device has IR hardware that can be faked into looking like a 130 /* Device has IR hardware that can be faked into looking like a
131 normal Hauppauge i2c IR receiver. This is currently very 131 normal Hauppauge i2c IR receiver. This is currently very
@@ -135,15 +135,15 @@ struct pvr2_device_desc {
135 to virtualize the presence of the non-existant IR receiver chip and 135 to virtualize the presence of the non-existant IR receiver chip and
136 implement the virtual receiver in terms of appropriate FX2 136 implement the virtual receiver in terms of appropriate FX2
137 commands. */ 137 commands. */
138 int flag_has_hauppauge_custom_ir:1; 138 unsigned int flag_has_hauppauge_custom_ir:1;
139 139
140 /* These bits define which kinds of sources the device can handle. 140 /* These bits define which kinds of sources the device can handle.
141 Note: Digital tuner presence is inferred by the 141 Note: Digital tuner presence is inferred by the
142 digital_control_scheme enumeration. */ 142 digital_control_scheme enumeration. */
143 int flag_has_fmradio:1; /* Has FM radio receiver */ 143 unsigned int flag_has_fmradio:1; /* Has FM radio receiver */
144 int flag_has_analogtuner:1; /* Has analog tuner */ 144 unsigned int flag_has_analogtuner:1; /* Has analog tuner */
145 int flag_has_composite:1; /* Has composite input */ 145 unsigned int flag_has_composite:1; /* Has composite input */
146 int flag_has_svideo:1; /* Has s-video input */ 146 unsigned int flag_has_svideo:1; /* Has s-video input */
147}; 147};
148 148
149extern struct usb_device_id pvr2_device_table[]; 149extern struct usb_device_id pvr2_device_table[];
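
The pvrusb2-devattr.h change converts the one-bit flags from int to unsigned int for a reason: a signed one-bit bitfield can only represent 0 and -1, so storing 1 and later comparing against 1 goes wrong (with gcc the field reads back as -1). A runnable illustration; the struct names are made up:

#include <stdio.h>

struct flags_signed   { int          f:1; };
struct flags_unsigned { unsigned int f:1; };

int main(void)
{
	struct flags_signed   s = { .f = 1 };	/* gcc stores this as -1 */
	struct flags_unsigned u = { .f = 1 };

	printf("signed:   f = %d, (f == 1) is %d\n", s.f, s.f == 1);
	printf("unsigned: f = %d, (f == 1) is %d\n", u.f, u.f == 1);
	return 0;
}
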
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 529e00952a8d..2b72e10e6b9f 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -369,19 +369,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
369 break; 369 break;
370 } 370 }
371 case TUNER_TEA5767: 371 case TUNER_TEA5767:
372 if (tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr) == NULL) { 372 if (!tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
373 t->type = TUNER_ABSENT; 373 goto attach_failed;
374 t->mode_mask = T_UNINITIALIZED;
375 return;
376 }
377 t->mode_mask = T_RADIO; 374 t->mode_mask = T_RADIO;
378 break; 375 break;
379 case TUNER_TEA5761: 376 case TUNER_TEA5761:
380 if (tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr) == NULL) { 377 if (!tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
381 t->type = TUNER_ABSENT; 378 goto attach_failed;
382 t->mode_mask = T_UNINITIALIZED;
383 return;
384 }
385 t->mode_mask = T_RADIO; 379 t->mode_mask = T_RADIO;
386 break; 380 break;
387 case TUNER_PHILIPS_FMD1216ME_MK3: 381 case TUNER_PHILIPS_FMD1216ME_MK3:
@@ -394,12 +388,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
394 buffer[2] = 0x86; 388 buffer[2] = 0x86;
395 buffer[3] = 0x54; 389 buffer[3] = 0x54;
396 i2c_master_send(c, buffer, 4); 390 i2c_master_send(c, buffer, 4);
397 if (simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr, 391 if (!simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr,
398 t->type) == NULL) { 392 t->type))
399 t->type = TUNER_ABSENT; 393 goto attach_failed;
400 t->mode_mask = T_UNINITIALIZED;
401 return;
402 }
403 break; 394 break;
404 case TUNER_PHILIPS_TD1316: 395 case TUNER_PHILIPS_TD1316:
405 buffer[0] = 0x0b; 396 buffer[0] = 0x0b;
@@ -407,12 +398,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
407 buffer[2] = 0x86; 398 buffer[2] = 0x86;
408 buffer[3] = 0xa4; 399 buffer[3] = 0xa4;
409 i2c_master_send(c,buffer,4); 400 i2c_master_send(c,buffer,4);
410 if (simple_tuner_attach(&t->fe, t->i2c->adapter, 401 if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
411 t->i2c->addr, t->type) == NULL) { 402 t->i2c->addr, t->type))
412 t->type = TUNER_ABSENT; 403 goto attach_failed;
413 t->mode_mask = T_UNINITIALIZED;
414 return;
415 }
416 break; 404 break;
417 case TUNER_XC2028: 405 case TUNER_XC2028:
418 { 406 {
@@ -421,40 +409,34 @@ static void set_type(struct i2c_client *c, unsigned int type,
421 .i2c_addr = t->i2c->addr, 409 .i2c_addr = t->i2c->addr,
422 .callback = t->tuner_callback, 410 .callback = t->tuner_callback,
423 }; 411 };
424 if (!xc2028_attach(&t->fe, &cfg)) { 412 if (!xc2028_attach(&t->fe, &cfg))
425 t->type = TUNER_ABSENT; 413 goto attach_failed;
426 t->mode_mask = T_UNINITIALIZED;
427 return;
428 }
429 break; 414 break;
430 } 415 }
431 case TUNER_TDA9887: 416 case TUNER_TDA9887:
432 tda9887_attach(&t->fe, t->i2c->adapter, t->i2c->addr); 417 tda9887_attach(&t->fe, t->i2c->adapter, t->i2c->addr);
433 break; 418 break;
434 case TUNER_XC5000: 419 case TUNER_XC5000:
420 {
421 struct dvb_tuner_ops *xc_tuner_ops;
422
435 xc5000_cfg.i2c_address = t->i2c->addr; 423 xc5000_cfg.i2c_address = t->i2c->addr;
436 xc5000_cfg.if_khz = 5380; 424 xc5000_cfg.if_khz = 5380;
437 xc5000_cfg.priv = c->adapter->algo_data; 425 xc5000_cfg.priv = c->adapter->algo_data;
438 xc5000_cfg.tuner_callback = t->tuner_callback; 426 xc5000_cfg.tuner_callback = t->tuner_callback;
439 if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg)) { 427 if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg))
440 t->type = TUNER_ABSENT; 428 goto attach_failed;
441 t->mode_mask = T_UNINITIALIZED; 429
442 return;
443 }
444 {
445 struct dvb_tuner_ops *xc_tuner_ops;
446 xc_tuner_ops = &t->fe.ops.tuner_ops; 430 xc_tuner_ops = &t->fe.ops.tuner_ops;
447 if(xc_tuner_ops->init != NULL) 431 if (xc_tuner_ops->init)
448 xc_tuner_ops->init(&t->fe); 432 xc_tuner_ops->init(&t->fe);
449 }
450 break; 433 break;
434 }
451 default: 435 default:
452 if (simple_tuner_attach(&t->fe, t->i2c->adapter, 436 if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
453 t->i2c->addr, t->type) == NULL) { 437 t->i2c->addr, t->type))
454 t->type = TUNER_ABSENT; 438 goto attach_failed;
455 t->mode_mask = T_UNINITIALIZED; 439
456 return;
457 }
458 break; 440 break;
459 } 441 }
460 442
@@ -476,11 +458,27 @@ static void set_type(struct i2c_client *c, unsigned int type,
476 if (t->mode_mask == T_UNINITIALIZED) 458 if (t->mode_mask == T_UNINITIALIZED)
477 t->mode_mask = new_mode_mask; 459 t->mode_mask = new_mode_mask;
478 460
479 set_freq(c, (V4L2_TUNER_RADIO == t->mode) ? t->radio_freq : t->tv_freq); 461 /* xc2028/3028 and xc5000 requires a firmware to be set-up later
462 trying to set a frequency here will just fail
463 FIXME: better to move set_freq to the tuner code. This is needed
464 on analog tuners for PLL to properly work
465 */
466 if (t->type != TUNER_XC2028 && t->type != TUNER_XC5000)
467 set_freq(c, (V4L2_TUNER_RADIO == t->mode) ?
468 t->radio_freq : t->tv_freq);
469
480 tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", 470 tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
481 c->adapter->name, c->driver->driver.name, c->addr << 1, type, 471 c->adapter->name, c->driver->driver.name, c->addr << 1, type,
482 t->mode_mask); 472 t->mode_mask);
483 tuner_i2c_address_check(t); 473 tuner_i2c_address_check(t);
474 return;
475
476attach_failed:
477 tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
478 t->type = TUNER_ABSENT;
479 t->mode_mask = T_UNINITIALIZED;
480
481 return;
484} 482}
485 483
486/* 484/*
@@ -495,14 +493,16 @@ static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup)
495{ 493{
496 struct tuner *t = i2c_get_clientdata(c); 494 struct tuner *t = i2c_get_clientdata(c);
497 495
498 tuner_dbg("set addr for type %i\n", t->type);
499
500 if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && 496 if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
501 (t->mode_mask & tun_setup->mode_mask))) || 497 (t->mode_mask & tun_setup->mode_mask))) ||
502 (tun_setup->addr == c->addr)) { 498 (tun_setup->addr == c->addr)) {
503 set_type(c, tun_setup->type, tun_setup->mode_mask, 499 set_type(c, tun_setup->type, tun_setup->mode_mask,
504 tun_setup->config, tun_setup->tuner_callback); 500 tun_setup->config, tun_setup->tuner_callback);
505 } 501 } else
502 tuner_dbg("set addr discarded for type %i, mask %x. "
503 "Asked to change tuner at addr 0x%02x, with mask %x\n",
504 t->type, t->mode_mask,
505 tun_setup->addr, tun_setup->mode_mask);
506} 506}
507 507
508static inline int check_mode(struct tuner *t, char *cmd) 508static inline int check_mode(struct tuner *t, char *cmd)
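
The tuner-core.c rework collapses the repeated "mark tuner absent, reset the mode mask, return" blocks into a single attach_failed label, the usual kernel idiom for consolidating error paths. A self-contained sketch of that shape, with hypothetical types and stand-in attach helpers:

#define MODE_UNINITIALIZED 0
#define MODE_READY 1

enum { TYPE_A, TYPE_B, TYPE_ABSENT };

struct tuner_state { int type; int mode_mask; };

static int attach_a(struct tuner_state *t) { (void)t; return 1; }  /* nonzero == attached */
static int attach_b(struct tuner_state *t) { (void)t; return 0; }  /* this one fails */

static void set_type_sketch(struct tuner_state *t, int type)
{
	t->type = type;

	switch (type) {
	case TYPE_A:
		if (!attach_a(t))
			goto attach_failed;
		break;
	case TYPE_B:
		if (!attach_b(t))
			goto attach_failed;
		break;
	default:
		goto attach_failed;
	}

	t->mode_mask = MODE_READY;	/* common post-attach setup lives here once */
	return;

attach_failed:
	t->type = TYPE_ABSENT;		/* every failure funnels through one place */
	t->mode_mask = MODE_UNINITIALIZED;
}

int main(void)
{
	struct tuner_state t = { 0, 0 };

	set_type_sketch(&t, TYPE_B);	/* attach_b() fails, so we end up absent */
	return t.type == TYPE_ABSENT ? 0 : 1;
}
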
diff --git a/drivers/media/video/tuner-xc2028.c b/drivers/media/video/tuner-xc2028.c
index cc3db7d79a0d..9e9003cffc7f 100644
--- a/drivers/media/video/tuner-xc2028.c
+++ b/drivers/media/video/tuner-xc2028.c
@@ -432,7 +432,7 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
432 432
433 type &= type_mask; 433 type &= type_mask;
434 434
435 if (!type & SCODE) 435 if (!(type & SCODE))
436 type_mask = ~0; 436 type_mask = ~0;
437 437
438 /* Seek for exact match */ 438 /* Seek for exact match */
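
The one-character fix in seek_firmware() matters because ! binds tighter than &: "!type & SCODE" parses as "(!type) & SCODE", which for most flag values is constantly zero and silently disables the branch. A runnable demonstration (SCODE's real value lives in the xc2028 headers; 0x10 here is only illustrative):

#include <stdio.h>

#define SCODE 0x10	/* illustrative flag bit */

int main(void)
{
	unsigned int type;

	for (type = 0; type <= SCODE; type += SCODE) {
		printf("type=0x%02x   !type & SCODE = %d   !(type & SCODE) = %d\n",
		       type, !type & SCODE, !(type & SCODE));
	}
	return 0;
}

With the unparenthesised form the "SCODE not set" case is never detected, which is exactly what the parenthesised version restores.
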
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index b1e9592acb90..845be1864f68 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -888,7 +888,7 @@ static int vivi_open(struct inode *inode, struct file *file)
888{ 888{
889 int minor = iminor(inode); 889 int minor = iminor(inode);
890 struct vivi_dev *dev; 890 struct vivi_dev *dev;
891 struct vivi_fh *fh; 891 struct vivi_fh *fh = NULL;
892 int i; 892 int i;
893 int retval = 0; 893 int retval = 0;
894 894
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index fafb57fed761..0736cff9d97a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -31,7 +31,6 @@
31static LIST_HEAD(container_list); 31static LIST_HEAD(container_list);
32static DEFINE_MUTEX(container_list_lock); 32static DEFINE_MUTEX(container_list_lock);
33static struct class enclosure_class; 33static struct class enclosure_class;
34static struct class enclosure_component_class;
35 34
36/** 35/**
37 * enclosure_find - find an enclosure given a device 36 * enclosure_find - find an enclosure given a device
@@ -166,6 +165,40 @@ void enclosure_unregister(struct enclosure_device *edev)
166} 165}
167EXPORT_SYMBOL_GPL(enclosure_unregister); 166EXPORT_SYMBOL_GPL(enclosure_unregister);
168 167
168#define ENCLOSURE_NAME_SIZE 64
169
170static void enclosure_link_name(struct enclosure_component *cdev, char *name)
171{
172 strcpy(name, "enclosure_device:");
173 strcat(name, cdev->cdev.bus_id);
174}
175
176static void enclosure_remove_links(struct enclosure_component *cdev)
177{
178 char name[ENCLOSURE_NAME_SIZE];
179
180 enclosure_link_name(cdev, name);
181 sysfs_remove_link(&cdev->dev->kobj, name);
182 sysfs_remove_link(&cdev->cdev.kobj, "device");
183}
184
185static int enclosure_add_links(struct enclosure_component *cdev)
186{
187 int error;
188 char name[ENCLOSURE_NAME_SIZE];
189
190 error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device");
191 if (error)
192 return error;
193
194 enclosure_link_name(cdev, name);
195 error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
196 if (error)
197 sysfs_remove_link(&cdev->cdev.kobj, "device");
198
199 return error;
200}
201
169static void enclosure_release(struct device *cdev) 202static void enclosure_release(struct device *cdev)
170{ 203{
171 struct enclosure_device *edev = to_enclosure_device(cdev); 204 struct enclosure_device *edev = to_enclosure_device(cdev);
@@ -178,10 +211,15 @@ static void enclosure_component_release(struct device *dev)
178{ 211{
179 struct enclosure_component *cdev = to_enclosure_component(dev); 212 struct enclosure_component *cdev = to_enclosure_component(dev);
180 213
181 put_device(cdev->dev); 214 if (cdev->dev) {
215 enclosure_remove_links(cdev);
216 put_device(cdev->dev);
217 }
182 put_device(dev->parent); 218 put_device(dev->parent);
183} 219}
184 220
221static struct attribute_group *enclosure_groups[];
222
185/** 223/**
186 * enclosure_component_register - add a particular component to an enclosure 224 * enclosure_component_register - add a particular component to an enclosure
187 * @edev: the enclosure to add the component 225 * @edev: the enclosure to add the component
@@ -217,12 +255,14 @@ enclosure_component_register(struct enclosure_device *edev,
217 ecomp->number = number; 255 ecomp->number = number;
218 cdev = &ecomp->cdev; 256 cdev = &ecomp->cdev;
219 cdev->parent = get_device(&edev->edev); 257 cdev->parent = get_device(&edev->edev);
220 cdev->class = &enclosure_component_class;
221 if (name) 258 if (name)
222 snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name); 259 snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
223 else 260 else
224 snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number); 261 snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
225 262
263 cdev->release = enclosure_component_release;
264 cdev->groups = enclosure_groups;
265
226 err = device_register(cdev); 266 err = device_register(cdev);
227 if (err) 267 if (err)
228 ERR_PTR(err); 268 ERR_PTR(err);
@@ -255,10 +295,12 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
255 295
256 cdev = &edev->component[component]; 296 cdev = &edev->component[component];
257 297
258 device_del(&cdev->cdev); 298 if (cdev->dev)
299 enclosure_remove_links(cdev);
300
259 put_device(cdev->dev); 301 put_device(cdev->dev);
260 cdev->dev = get_device(dev); 302 cdev->dev = get_device(dev);
261 return device_add(&cdev->cdev); 303 return enclosure_add_links(cdev);
262} 304}
263EXPORT_SYMBOL_GPL(enclosure_add_device); 305EXPORT_SYMBOL_GPL(enclosure_add_device);
264 306
@@ -442,24 +484,32 @@ static ssize_t get_component_type(struct device *cdev,
442} 484}
443 485
444 486
445static struct device_attribute enclosure_component_attrs[] = { 487static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
446 __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault, 488 set_component_fault);
447 set_component_fault), 489static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
448 __ATTR(status, S_IRUGO | S_IWUSR, get_component_status, 490 set_component_status);
449 set_component_status), 491static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
450 __ATTR(active, S_IRUGO | S_IWUSR, get_component_active, 492 set_component_active);
451 set_component_active), 493static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
452 __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate, 494 set_component_locate);
453 set_component_locate), 495static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
454 __ATTR(type, S_IRUGO, get_component_type, NULL), 496
455 __ATTR_NULL 497static struct attribute *enclosure_component_attrs[] = {
498 &dev_attr_fault.attr,
499 &dev_attr_status.attr,
500 &dev_attr_active.attr,
501 &dev_attr_locate.attr,
502 &dev_attr_type.attr,
503 NULL
456}; 504};
457 505
458static struct class enclosure_component_class = { 506static struct attribute_group enclosure_group = {
459 .name = "enclosure_component", 507 .attrs = enclosure_component_attrs,
460 .owner = THIS_MODULE, 508};
461 .dev_attrs = enclosure_component_attrs, 509
462 .dev_release = enclosure_component_release, 510static struct attribute_group *enclosure_groups[] = {
511 &enclosure_group,
512 NULL
463}; 513};
464 514
465static int __init enclosure_init(void) 515static int __init enclosure_init(void)
@@ -469,20 +519,12 @@ static int __init enclosure_init(void)
469 err = class_register(&enclosure_class); 519 err = class_register(&enclosure_class);
470 if (err) 520 if (err)
471 return err; 521 return err;
472 err = class_register(&enclosure_component_class);
473 if (err)
474 goto err_out;
475 522
476 return 0; 523 return 0;
477 err_out:
478 class_unregister(&enclosure_class);
479
480 return err;
481} 524}
482 525
483static void __exit enclosure_exit(void) 526static void __exit enclosure_exit(void)
484{ 527{
485 class_unregister(&enclosure_component_class);
486 class_unregister(&enclosure_class); 528 class_unregister(&enclosure_class);
487} 529}
488 530
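
The enclosure.c rework drops the second struct class entirely: the per-component attributes now ride on the device itself via dev->groups, and the device symlinks are created explicitly with sysfs_create_link(). A kernel-style sketch of the DEVICE_ATTR / attribute_group wiring used above (2.6.25-era types, will not build outside a kernel tree; fault_show is a placeholder):

#include <linux/device.h>

static ssize_t fault_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "0\n");		/* placeholder value */
}

static DEVICE_ATTR(fault, S_IRUGO, fault_show, NULL);

static struct attribute *component_attrs[] = {
	&dev_attr_fault.attr,
	NULL,					/* must be NULL-terminated */
};

static struct attribute_group component_group = {
	.attrs = component_attrs,
};

static struct attribute_group *component_groups[] = {
	&component_group,
	NULL,
};

/* before device_register():
 *	dev->groups  = component_groups;
 *	dev->release = component_release;
 * and the driver core then creates and removes the files automatically.
 */
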
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0697aa8ea774..8082c1d142df 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2011,7 +2011,7 @@ config E1000_DISABLE_PACKET_SPLIT
2011 2011
2012config E1000E 2012config E1000E
2013 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" 2013 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
2014 depends on PCI 2014 depends on PCI && (!SPARC32 || BROKEN)
2015 ---help--- 2015 ---help---
2016 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit 2016 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
2017 ethernet family of adapters. For PCI or PCI-X e1000 adapters, 2017 ethernet family of adapters. For PCI or PCI-X e1000 adapters,
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 75ef9d0d974d..f9d6b4dca180 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
196 } 196 }
197} 197}
198EXPORT_SYMBOL_GPL(mlx4_buf_free); 198EXPORT_SYMBOL_GPL(mlx4_buf_free);
199
200static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
201{
202 struct mlx4_db_pgdir *pgdir;
203
204 pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
205 if (!pgdir)
206 return NULL;
207
208 bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
209 pgdir->bits[0] = pgdir->order0;
210 pgdir->bits[1] = pgdir->order1;
211 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
212 &pgdir->db_dma, GFP_KERNEL);
213 if (!pgdir->db_page) {
214 kfree(pgdir);
215 return NULL;
216 }
217
218 return pgdir;
219}
220
221static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
222 struct mlx4_db *db, int order)
223{
224 int o;
225 int i;
226
227 for (o = order; o <= 1; ++o) {
228 i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
229 if (i < MLX4_DB_PER_PAGE >> o)
230 goto found;
231 }
232
233 return -ENOMEM;
234
235found:
236 clear_bit(i, pgdir->bits[o]);
237
238 i <<= o;
239
240 if (o > order)
241 set_bit(i ^ 1, pgdir->bits[order]);
242
243 db->u.pgdir = pgdir;
244 db->index = i;
245 db->db = pgdir->db_page + db->index;
246 db->dma = pgdir->db_dma + db->index * 4;
247 db->order = order;
248
249 return 0;
250}
251
252int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
253{
254 struct mlx4_priv *priv = mlx4_priv(dev);
255 struct mlx4_db_pgdir *pgdir;
256 int ret = 0;
257
258 mutex_lock(&priv->pgdir_mutex);
259
260 list_for_each_entry(pgdir, &priv->pgdir_list, list)
261 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
262 goto out;
263
264 pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
265 if (!pgdir) {
266 ret = -ENOMEM;
267 goto out;
268 }
269
270 list_add(&pgdir->list, &priv->pgdir_list);
271
272 /* This should never fail -- we just allocated an empty page: */
273 WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
274
275out:
276 mutex_unlock(&priv->pgdir_mutex);
277
278 return ret;
279}
280EXPORT_SYMBOL_GPL(mlx4_db_alloc);
281
282void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
283{
284 struct mlx4_priv *priv = mlx4_priv(dev);
285 int o;
286 int i;
287
288 mutex_lock(&priv->pgdir_mutex);
289
290 o = db->order;
291 i = db->index;
292
293 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
294 clear_bit(i ^ 1, db->u.pgdir->order0);
295 ++o;
296 }
297 i >>= o;
298 set_bit(i, db->u.pgdir->bits[o]);
299
300 if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
301 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
302 db->u.pgdir->db_page, db->u.pgdir->db_dma);
303 list_del(&db->u.pgdir->list);
304 kfree(db->u.pgdir);
305 }
306
307 mutex_unlock(&priv->pgdir_mutex);
308}
309EXPORT_SYMBOL_GPL(mlx4_db_free);
310
311int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
312 int size, int max_direct)
313{
314 int err;
315
316 err = mlx4_db_alloc(dev, &wqres->db, 1);
317 if (err)
318 return err;
319
320 *wqres->db.db = 0;
321
322 err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
323 if (err)
324 goto err_db;
325
326 err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
327 &wqres->mtt);
328 if (err)
329 goto err_buf;
330
331 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
332 if (err)
333 goto err_mtt;
334
335 return 0;
336
337err_mtt:
338 mlx4_mtt_cleanup(dev, &wqres->mtt);
339err_buf:
340 mlx4_buf_free(dev, size, &wqres->buf);
341err_db:
342 mlx4_db_free(dev, &wqres->db);
343
344 return err;
345}
346EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
347
348void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
349 int size)
350{
351 mlx4_mtt_cleanup(dev, &wqres->mtt);
352 mlx4_buf_free(dev, size, &wqres->buf);
353 mlx4_db_free(dev, &wqres->db);
354}
355EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
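
The new mlx4 doorbell code treats each DMA page as MLX4_DB_PER_PAGE slots and keeps two free bitmaps per page: order 0 for single doorbells and order 1 for aligned pairs. An order-0 request that has to split a pair returns one slot and pushes its buddy onto the order-0 bitmap, much like a tiny buddy allocator. A runnable userspace sketch of that bookkeeping (8 slots instead of a full page, plain masks instead of the kernel bitmap helpers):

#include <stdio.h>

#define SLOTS 8						/* doorbells per "page" (illustrative) */

static unsigned int order0;				/* free single slots  */
static unsigned int order1 = (1u << (SLOTS / 2)) - 1;	/* free aligned pairs */

static int alloc_db(int order)
{
	unsigned int *bits[2] = { &order0, &order1 };
	int o, i;

	for (o = order; o <= 1; o++)
		for (i = 0; i < (SLOTS >> o); i++)
			if (*bits[o] & (1u << i))
				goto found;
	return -1;					/* page is full for this order */

found:
	*bits[o] &= ~(1u << i);				/* claim the block */
	i <<= o;					/* convert to a slot index */
	if (o > order)					/* split a pair: free the buddy */
		order0 |= 1u << (i ^ 1);
	return i;
}

int main(void)
{
	int a = alloc_db(0);
	int b = alloc_db(0);
	int c = alloc_db(0);
	int d = alloc_db(1);

	printf("order-0 slots: %d %d %d   order-1 slot: %d\n", a, b, c, d);
	return 0;
}

mlx4_db_free() runs the same logic in reverse, merging a freed slot with its buddy back into the order-1 bitmap once both halves of a pair are free.
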
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index caa5bcf54e35..6fda0af9d0a6 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
180 cq_context->mtt_base_addr_h = mtt_addr >> 32; 180 cq_context->mtt_base_addr_h = mtt_addr >> 32;
181 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); 181 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
182 182
183 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); 183 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
184 184
185 mlx4_free_cmd_mailbox(dev, mailbox); 185 mlx4_free_cmd_mailbox(dev, mailbox);
186 return err; 186 return err;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 49a4acab5e82..a6aa49fc1d68 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
798 INIT_LIST_HEAD(&priv->ctx_list); 798 INIT_LIST_HEAD(&priv->ctx_list);
799 spin_lock_init(&priv->ctx_lock); 799 spin_lock_init(&priv->ctx_lock);
800 800
801 INIT_LIST_HEAD(&priv->pgdir_list);
802 mutex_init(&priv->pgdir_mutex);
803
801 /* 804 /*
802 * Now reset the HCA before we touch the PCI capabilities or 805 * Now reset the HCA before we touch the PCI capabilities or
803 * attempt a firmware command, since a boot ROM may have left 806 * attempt a firmware command, since a boot ROM may have left
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 73336810e652..a4023c2dd050 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -257,6 +257,9 @@ struct mlx4_priv {
257 struct list_head ctx_list; 257 struct list_head ctx_list;
258 spinlock_t ctx_lock; 258 spinlock_t ctx_lock;
259 259
260 struct list_head pgdir_list;
261 struct mutex pgdir_mutex;
262
260 struct mlx4_fw fw; 263 struct mlx4_fw fw;
261 struct mlx4_cmd cmd; 264 struct mlx4_cmd cmd;
262 265
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index fa24e6597591..ee5484c44a18 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
299} 299}
300EXPORT_SYMBOL_GPL(mlx4_qp_query); 300EXPORT_SYMBOL_GPL(mlx4_qp_query);
301 301
302int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
303 struct mlx4_qp_context *context,
304 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
305{
306 int err;
307 int i;
308 enum mlx4_qp_state states[] = {
309 MLX4_QP_STATE_RST,
310 MLX4_QP_STATE_INIT,
311 MLX4_QP_STATE_RTR,
312 MLX4_QP_STATE_RTS
313 };
314
315 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
316 context->flags &= cpu_to_be32(~(0xf << 28));
317 context->flags |= cpu_to_be32(states[i + 1] << 28);
318 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
319 context, 0, 0, qp);
320 if (err) {
321 mlx4_err(dev, "Failed to bring QP to state: "
322 "%d with error: %d\n",
323 states[i + 1], err);
324 return err;
325 }
326
327 *qp_state = states[i + 1];
328 }
329
330 return 0;
331}
332EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 5a888704a8d0..4f4e7cf105d4 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -5,7 +5,7 @@
5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
6 6
7obj-y += s390mach.o sysinfo.o s390_rdev.o 7obj-y += s390mach.o sysinfo.o s390_rdev.o
8obj-y += cio/ block/ char/ crypto/ net/ scsi/ 8obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
9 9
10drivers-y += drivers/s390/built-in.o 10drivers-y += drivers/s390/built-in.o
11 11
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
new file mode 100644
index 000000000000..4a5ec39f9ca6
--- /dev/null
+++ b/drivers/s390/kvm/Makefile
@@ -0,0 +1,9 @@
1# Makefile for kvm guest drivers on s390
2#
3# Copyright IBM Corp. 2008
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License (version 2 only)
7# as published by the Free Software Foundation.
8
9obj-$(CONFIG_VIRTIO) += kvm_virtio.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
new file mode 100644
index 000000000000..bbef3764fbf8
--- /dev/null
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -0,0 +1,338 @@
1/*
2 * kvm_virtio.c - virtio for kvm on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */
12
13#include <linux/init.h>
14#include <linux/bootmem.h>
15#include <linux/err.h>
16#include <linux/virtio.h>
17#include <linux/virtio_config.h>
18#include <linux/interrupt.h>
19#include <linux/virtio_ring.h>
20#include <asm/io.h>
21#include <asm/kvm_para.h>
22#include <asm/kvm_virtio.h>
23#include <asm/setup.h>
24#include <asm/s390_ext.h>
25
26#define VIRTIO_SUBCODE_64 0x0D00
27
28/*
29 * The pointer to our page of device descriptions.
29 * The pointer to our page of device descriptions.
30 */
31static void *kvm_devices;
32
33/*
34 * Unique numbering for kvm devices.
35 */
36static unsigned int dev_index;
37
38struct kvm_device {
39 struct virtio_device vdev;
40 struct kvm_device_desc *desc;
41};
42
43#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
44
45/*
46 * memory layout:
47 * - kvm_device_descriptor
48 * struct kvm_device_desc
49 * - configuration
50 * struct kvm_vqconfig
51 * - feature bits
52 * - config space
53 */
54static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
55{
56 return (struct kvm_vqconfig *)(desc + 1);
57}
58
59static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
60{
61 return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
62}
63
64static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
65{
66 return kvm_vq_features(desc) + desc->feature_len * 2;
67}
68
69/*
70 * The total size of the config page used by this device (incl. desc)
71 */
72static unsigned desc_size(const struct kvm_device_desc *desc)
73{
74 return sizeof(*desc)
75 + desc->num_vq * sizeof(struct kvm_vqconfig)
76 + desc->feature_len * 2
77 + desc->config_len;
78}
79
80/*
81 * This tests (and acknowledges) a feature bit.
82 */
83static bool kvm_feature(struct virtio_device *vdev, unsigned fbit)
84{
85 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
86 u8 *features;
87
88 if (fbit / 8 > desc->feature_len)
89 return false;
90
91 features = kvm_vq_features(desc);
92 if (!(features[fbit / 8] & (1 << (fbit % 8))))
93 return false;
94
95 /*
96 * We set the matching bit in the other half of the bitmap to tell the
97 * Host we want to use this feature.
98 */
99 features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8));
100 return true;
101}
102
103/*
104 * Reading and writing elements in config space
105 */
106static void kvm_get(struct virtio_device *vdev, unsigned int offset,
107 void *buf, unsigned len)
108{
109 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
110
111 BUG_ON(offset + len > desc->config_len);
112 memcpy(buf, kvm_vq_configspace(desc) + offset, len);
113}
114
115static void kvm_set(struct virtio_device *vdev, unsigned int offset,
116 const void *buf, unsigned len)
117{
118 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
119
120 BUG_ON(offset + len > desc->config_len);
121 memcpy(kvm_vq_configspace(desc) + offset, buf, len);
122}
123
124/*
125 * The operations to get and set the status word just access
126 * the status field of the device descriptor. set_status will also
127 * make a hypercall to the host, to tell about status changes
128 */
129static u8 kvm_get_status(struct virtio_device *vdev)
130{
131 return to_kvmdev(vdev)->desc->status;
132}
133
134static void kvm_set_status(struct virtio_device *vdev, u8 status)
135{
136 BUG_ON(!status);
137 to_kvmdev(vdev)->desc->status = status;
138 kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
139 (unsigned long) to_kvmdev(vdev)->desc);
140}
141
142/*
143 * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
144 * descriptor address. The Host will zero the status and all the
145 * features.
146 */
147static void kvm_reset(struct virtio_device *vdev)
148{
149 kvm_hypercall1(KVM_S390_VIRTIO_RESET,
150 (unsigned long) to_kvmdev(vdev)->desc);
151}
152
153/*
154 * When the virtio_ring code wants to notify the Host, it calls us here and we
155 * make a hypercall. We hand the address of the virtqueue so the Host
156 * knows which virtqueue we're talking about.
157 */
158static void kvm_notify(struct virtqueue *vq)
159{
160 struct kvm_vqconfig *config = vq->priv;
161
162 kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
163}
164
165/*
166 * This routine finds the first virtqueue described in the configuration of
167 * this device and sets it up.
168 */
169static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
170 unsigned index,
171 void (*callback)(struct virtqueue *vq))
172{
173 struct kvm_device *kdev = to_kvmdev(vdev);
174 struct kvm_vqconfig *config;
175 struct virtqueue *vq;
176 int err;
177
178 if (index >= kdev->desc->num_vq)
179 return ERR_PTR(-ENOENT);
180
181 config = kvm_vq_config(kdev->desc)+index;
182
183 if (add_shared_memory(config->address,
184 vring_size(config->num, PAGE_SIZE))) {
185 err = -ENOMEM;
186 goto out;
187 }
188
189 vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
190 kvm_notify, callback);
191 if (!vq) {
192 err = -ENOMEM;
193 goto unmap;
194 }
195
196 /*
197 * register a callback token
198 * The host will send this via the external interrupt parameter
199 */
200 config->token = (u64) vq;
201
202 vq->priv = config;
203 return vq;
204unmap:
205 remove_shared_memory(config->address, vring_size(config->num,
206 PAGE_SIZE));
207out:
208 return ERR_PTR(err);
209}
210
211static void kvm_del_vq(struct virtqueue *vq)
212{
213 struct kvm_vqconfig *config = vq->priv;
214
215 vring_del_virtqueue(vq);
216 remove_shared_memory(config->address,
217 vring_size(config->num, PAGE_SIZE));
218}
219
220/*
221 * The config ops structure as defined by virtio config
222 */
223static struct virtio_config_ops kvm_vq_configspace_ops = {
224 .feature = kvm_feature,
225 .get = kvm_get,
226 .set = kvm_set,
227 .get_status = kvm_get_status,
228 .set_status = kvm_set_status,
229 .reset = kvm_reset,
230 .find_vq = kvm_find_vq,
231 .del_vq = kvm_del_vq,
232};
233
234/*
235 * The root device for the kvm virtio devices.
236 * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
237 */
238static struct device kvm_root = {
239 .parent = NULL,
240 .bus_id = "kvm_s390",
241};
242
243/*
244 * adds a new device and register it with virtio
245 * appropriate drivers are loaded by the device model
246 */
247static void add_kvm_device(struct kvm_device_desc *d)
248{
249 struct kvm_device *kdev;
250
251 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
252 if (!kdev) {
253 printk(KERN_EMERG "Cannot allocate kvm dev %u\n",
254 dev_index++);
255 return;
256 }
257
258 kdev->vdev.dev.parent = &kvm_root;
259 kdev->vdev.index = dev_index++;
260 kdev->vdev.id.device = d->type;
261 kdev->vdev.config = &kvm_vq_configspace_ops;
262 kdev->desc = d;
263
264 if (register_virtio_device(&kdev->vdev) != 0) {
265 printk(KERN_ERR "Failed to register kvm device %u\n",
266 kdev->vdev.index);
267 kfree(kdev);
268 }
269}
270
271/*
272 * scan_devices() simply iterates through the device page.
273 * The type 0 is reserved to mean "end of devices".
274 */
275static void scan_devices(void)
276{
277 unsigned int i;
278 struct kvm_device_desc *d;
279
280 for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
281 d = kvm_devices + i;
282
283 if (d->type == 0)
284 break;
285
286 add_kvm_device(d);
287 }
288}
289
290/*
291 * we emulate the request_irq behaviour on top of s390 extints
292 */
293static void kvm_extint_handler(u16 code)
294{
295 void *data = (void *) *(long *) __LC_PFAULT_INTPARM;
296 u16 subcode = S390_lowcore.cpu_addr;
297
298 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
299 return;
300
301 vring_interrupt(0, data);
302}
303
304/*
305 * Init function for virtio
306 * devices are in a single page above top of "normal" mem
307 */
308static int __init kvm_devices_init(void)
309{
310 int rc;
311
312 if (!MACHINE_IS_KVM)
313 return -ENODEV;
314
315 rc = device_register(&kvm_root);
316 if (rc) {
317 printk(KERN_ERR "Could not register kvm_s390 root device");
318 return rc;
319 }
320
321 if (add_shared_memory((max_pfn) << PAGE_SHIFT, PAGE_SIZE)) {
322 device_unregister(&kvm_root);
323 return -ENOMEM;
324 }
325
326 kvm_devices = (void *) (max_pfn << PAGE_SHIFT);
327
328 ctl_set_bit(0, 9);
329 register_external_interrupt(0x2603, kvm_extint_handler);
330
331 scan_devices();
332 return 0;
333}
334
335/*
336 * We do this after core stuff, but before the drivers.
337 */
338postcore_initcall(kvm_devices_init);
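
The config page parsed by kvm_virtio.c is laid out as a descriptor, followed by num_vq vq configs, two copies of the feature bitmap (bits plus acknowledgement bits), then the config space; scan_devices() advances by desc_size() until it hits a type-0 descriptor. A small userspace sketch of that walk over a fake page (struct layout simplified, the vqconfig size and field values are illustrative):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct kvm_device_desc. */
struct desc {
	unsigned char type;
	unsigned char num_vq;
	unsigned char feature_len;
	unsigned char config_len;
};

#define VQCONFIG_SIZE 24		/* pretend sizeof(struct kvm_vqconfig) */
#define PAGE 4096

static size_t desc_size(const struct desc *d)
{
	return sizeof(*d)
	     + d->num_vq * VQCONFIG_SIZE
	     + d->feature_len * 2	/* feature bits + acknowledgement bits */
	     + d->config_len;
}

int main(void)
{
	unsigned char page[PAGE];
	struct desc d1 = { .type = 1, .num_vq = 1, .feature_len = 2, .config_len = 8 };
	struct desc d2 = { .type = 3, .num_vq = 2, .feature_len = 1, .config_len = 0 };
	size_t i;

	memset(page, 0, sizeof(page));	/* type 0 means "end of devices" */
	memcpy(page, &d1, sizeof(d1));
	memcpy(page + desc_size(&d1), &d2, sizeof(d2));

	for (i = 0; i < PAGE; i += desc_size((struct desc *)(page + i))) {
		struct desc *d = (struct desc *)(page + i);

		if (d->type == 0)
			break;
		printf("device type %u at offset %zu, size %zu\n",
		       d->type, i, desc_size(d));
	}
	return 0;
}
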
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 7c3f02816e95..9af2330f07a2 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1927,7 +1927,8 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1927 1927
1928 /* setup new FSF request */ 1928 /* setup new FSF request */
1929 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1929 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1930 0, NULL, &lock_flags, &fsf_req); 1930 ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags,
1931 &fsf_req);
1931 if (retval) { 1932 if (retval) {
1932 ZFCP_LOG_INFO("error: Could not create exchange configuration " 1933 ZFCP_LOG_INFO("error: Could not create exchange configuration "
1933 "data request for adapter %s.\n", 1934 "data request for adapter %s.\n",
@@ -2035,21 +2036,21 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2035 min(FC_SERIAL_NUMBER_SIZE, 17)); 2036 min(FC_SERIAL_NUMBER_SIZE, 17));
2036 } 2037 }
2037 2038
2038 ZFCP_LOG_NORMAL("The adapter %s reported the following " 2039 if (fsf_req->erp_action)
2039 "characteristics:\n" 2040 ZFCP_LOG_NORMAL("The adapter %s reported the following "
2040 "WWNN 0x%016Lx, " 2041 "characteristics:\n"
2041 "WWPN 0x%016Lx, " 2042 "WWNN 0x%016Lx, WWPN 0x%016Lx, "
2042 "S_ID 0x%06x,\n" 2043 "S_ID 0x%06x,\n"
2043 "adapter version 0x%x, " 2044 "adapter version 0x%x, "
2044 "LIC version 0x%x, " 2045 "LIC version 0x%x, "
2045 "FC link speed %d Gb/s\n", 2046 "FC link speed %d Gb/s\n",
2046 zfcp_get_busid_by_adapter(adapter), 2047 zfcp_get_busid_by_adapter(adapter),
2047 (wwn_t) fc_host_node_name(shost), 2048 (wwn_t) fc_host_node_name(shost),
2048 (wwn_t) fc_host_port_name(shost), 2049 (wwn_t) fc_host_port_name(shost),
2049 fc_host_port_id(shost), 2050 fc_host_port_id(shost),
2050 adapter->hydra_version, 2051 adapter->hydra_version,
2051 adapter->fsf_lic_version, 2052 adapter->fsf_lic_version,
2052 fc_host_speed(shost)); 2053 fc_host_speed(shost));
2053 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) { 2054 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
2054 ZFCP_LOG_NORMAL("error: the adapter %s " 2055 ZFCP_LOG_NORMAL("error: the adapter %s "
2055 "only supports newer control block " 2056 "only supports newer control block "
@@ -2114,8 +2115,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2114 zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req); 2115 zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
2115 return -EIO; 2116 return -EIO;
2116 case FC_PORTTYPE_NPORT: 2117 case FC_PORTTYPE_NPORT:
2117 ZFCP_LOG_NORMAL("Switched fabric fibrechannel " 2118 if (fsf_req->erp_action)
2118 "network detected at adapter %s.\n", 2119 ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
2120 "network detected at adapter "
2121 "%s.\n",
2119 zfcp_get_busid_by_adapter(adapter)); 2122 zfcp_get_busid_by_adapter(adapter));
2120 break; 2123 break;
2121 default: 2124 default:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 8cce5cc11d50..099970b27001 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -213,6 +213,7 @@
213#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 213#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
214#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 214#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
215#define FSF_FEATURE_UPDATE_ALERT 0x00000100 215#define FSF_FEATURE_UPDATE_ALERT 0x00000100
216#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
216 217
217/* host connection features */ 218/* host connection features */
218#define FSF_FEATURE_NPIV_MODE 0x00000001 219#define FSF_FEATURE_NPIV_MODE 0x00000001
@@ -340,6 +341,15 @@ struct fsf_qtcb_prefix {
340 u8 res1[20]; 341 u8 res1[20];
341} __attribute__ ((packed)); 342} __attribute__ ((packed));
342 343
344struct fsf_statistics_info {
345 u64 input_req;
346 u64 output_req;
347 u64 control_req;
348 u64 input_mb;
349 u64 output_mb;
350 u64 seconds_act;
351} __attribute__ ((packed));
352
343union fsf_status_qual { 353union fsf_status_qual {
344 u8 byte[FSF_STATUS_QUALIFIER_SIZE]; 354 u8 byte[FSF_STATUS_QUALIFIER_SIZE];
345 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)]; 355 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
@@ -436,7 +446,8 @@ struct fsf_qtcb_bottom_config {
436 u32 hardware_version; 446 u32 hardware_version;
437 u8 serial_number[32]; 447 u8 serial_number[32];
438 struct fsf_nport_serv_param plogi_payload; 448 struct fsf_nport_serv_param plogi_payload;
439 u8 res4[160]; 449 struct fsf_statistics_info stat_info;
450 u8 res4[112];
440} __attribute__ ((packed)); 451} __attribute__ ((packed));
441 452
442struct fsf_qtcb_bottom_port { 453struct fsf_qtcb_bottom_port {
@@ -469,7 +480,10 @@ struct fsf_qtcb_bottom_port {
469 u64 control_requests; 480 u64 control_requests;
470 u64 input_mb; /* where 1 MByte == 1.000.000 Bytes */ 481 u64 input_mb; /* where 1 MByte == 1.000.000 Bytes */
471 u64 output_mb; /* where 1 MByte == 1.000.000 Bytes */ 482 u64 output_mb; /* where 1 MByte == 1.000.000 Bytes */
472 u8 res2[256]; 483 u8 cp_util;
484 u8 cb_util;
485 u8 a_util;
486 u8 res2[253];
473} __attribute__ ((packed)); 487} __attribute__ ((packed));
474 488
475union fsf_qtcb_bottom { 489union fsf_qtcb_bottom {
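
The zfcp_fsf.h layout change only works if the new fields exactly cover the bytes taken from the reserved areas: struct fsf_statistics_info is six u64s (48 bytes), and 48 + 112 gives back the original res4[160]; likewise the three utilization bytes plus res2[253] replace the old res2[256]. The kernel would express that with BUILD_BUG_ON(); a userspace equivalent using the negative-array-size trick:

#include <stdint.h>
#include <stdio.h>

struct fsf_statistics_info {
	uint64_t input_req;
	uint64_t output_req;
	uint64_t control_req;
	uint64_t input_mb;
	uint64_t output_mb;
	uint64_t seconds_act;
} __attribute__((packed));

/* These fail to compile if the replacement fields don't cover the old padding. */
typedef char stat_info_covers_res4[(sizeof(struct fsf_statistics_info) + 112 == 160) ? 1 : -1];
typedef char util_bytes_cover_res2[(3 + 253 == 256) ? 1 : -1];

int main(void)
{
	printf("sizeof(struct fsf_statistics_info) = %zu\n",
	       sizeof(struct fsf_statistics_info));
	return 0;
}
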
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f81850624eed..01687559dc06 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -40,6 +40,7 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
40 unsigned int, unsigned int); 40 unsigned int, unsigned int);
41 41
42static struct device_attribute *zfcp_sysfs_sdev_attrs[]; 42static struct device_attribute *zfcp_sysfs_sdev_attrs[];
43static struct device_attribute *zfcp_a_stats_attrs[];
43 44
44struct zfcp_data zfcp_data = { 45struct zfcp_data zfcp_data = {
45 .scsi_host_template = { 46 .scsi_host_template = {
@@ -61,6 +62,7 @@ struct zfcp_data zfcp_data = {
61 .use_clustering = 1, 62 .use_clustering = 1,
62 .sdev_attrs = zfcp_sysfs_sdev_attrs, 63 .sdev_attrs = zfcp_sysfs_sdev_attrs,
63 .max_sectors = ZFCP_MAX_SECTORS, 64 .max_sectors = ZFCP_MAX_SECTORS,
65 .shost_attrs = zfcp_a_stats_attrs,
64 }, 66 },
65 .driver_version = ZFCP_VERSION, 67 .driver_version = ZFCP_VERSION,
66}; 68};
@@ -809,4 +811,116 @@ static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
809 NULL 811 NULL
810}; 812};
811 813
814static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
815 struct device_attribute *attr,
816 char *buf)
817{
818 struct Scsi_Host *scsi_host = dev_to_shost(dev);
819 struct fsf_qtcb_bottom_port *qtcb_port;
820 int retval;
821 struct zfcp_adapter *adapter;
822
823 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
824 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
825 return -EOPNOTSUPP;
826
827 qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
828 if (!qtcb_port)
829 return -ENOMEM;
830
831 retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
832 if (!retval)
833 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
834 qtcb_port->cb_util, qtcb_port->a_util);
835 kfree(qtcb_port);
836 return retval;
837}
838
839static int zfcp_sysfs_adapter_ex_config(struct device *dev,
840 struct fsf_statistics_info *stat_inf)
841{
842 int retval;
843 struct fsf_qtcb_bottom_config *qtcb_config;
844 struct Scsi_Host *scsi_host = dev_to_shost(dev);
845 struct zfcp_adapter *adapter;
846
847 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
848 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
849 return -EOPNOTSUPP;
850
851 qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
852 GFP_KERNEL);
853 if (!qtcb_config)
854 return -ENOMEM;
855
856 retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
857 if (!retval)
858 *stat_inf = qtcb_config->stat_info;
859
860 kfree(qtcb_config);
861 return retval;
862}
863
864static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
865 struct device_attribute *attr,
866 char *buf)
867{
868 struct fsf_statistics_info stat_info;
869 int retval;
870
871 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
872 if (retval)
873 return retval;
874
875 return sprintf(buf, "%llu %llu %llu\n",
876 (unsigned long long) stat_info.input_req,
877 (unsigned long long) stat_info.output_req,
878 (unsigned long long) stat_info.control_req);
879}
880
881static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
882 struct device_attribute *attr,
883 char *buf)
884{
885 struct fsf_statistics_info stat_info;
886 int retval;
887
888 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
889 if (retval)
890 return retval;
891
892 return sprintf(buf, "%llu %llu\n",
893 (unsigned long long) stat_info.input_mb,
894 (unsigned long long) stat_info.output_mb);
895}
896
897static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
898 struct device_attribute *attr,
899 char *buf)
900{
901 struct fsf_statistics_info stat_info;
902 int retval;
903
904 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
905 if (retval)
906 return retval;
907
908 return sprintf(buf, "%llu\n",
909 (unsigned long long) stat_info.seconds_act);
910}
911
912static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
913static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
914static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
915static DEVICE_ATTR(seconds_active, S_IRUGO,
916 zfcp_sysfs_adapter_sec_active_show, NULL);
917
918static struct device_attribute *zfcp_a_stats_attrs[] = {
919 &dev_attr_utilization,
920 &dev_attr_requests,
921 &dev_attr_megabytes,
922 &dev_attr_seconds_active,
923 NULL
924};
925
812#undef ZFCP_LOG_AREA 926#undef ZFCP_LOG_AREA
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b374e457e5e2..b898d382b7b0 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1499,7 +1499,7 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb)
1499 thisCard = ((struct sccb_card *)pCurrCard)->cardIndex; 1499 thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
1500 ioport = ((struct sccb_card *)pCurrCard)->ioPort; 1500 ioport = ((struct sccb_card *)pCurrCard)->ioPort;
1501 1501
1502 if ((p_Sccb->TargID > MAX_SCSI_TAR) || (p_Sccb->Lun > MAX_LUN)) { 1502 if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) {
1503 1503
1504 p_Sccb->HostStatus = SCCB_COMPLETE; 1504 p_Sccb->HostStatus = SCCB_COMPLETE;
1505 p_Sccb->SccbStatus = SCCB_ERROR; 1505 p_Sccb->SccbStatus = SCCB_ERROR;
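
The FlashPoint fix is a textbook off-by-one: if MAX_SCSI_TAR and MAX_LUN are table sizes, valid indices run from 0 to size - 1, so the reject test must use >=, not >. A runnable illustration (the table size is made up):

#include <stdio.h>

#define MAX_SCSI_TAR 16			/* illustrative table size */

static int targets[MAX_SCSI_TAR];

static int lookup(int id)
{
	if (id >= MAX_SCSI_TAR)		/* '>' would let id == MAX_SCSI_TAR through */
		return -1;
	return targets[id];		/* safe for 0 <= id < MAX_SCSI_TAR */
}

int main(void)
{
	printf("id 15 -> %d (last valid slot)\n", lookup(15));
	printf("id 16 -> %d (rejected; with '>' this would read past targets[])\n",
	       lookup(16));
	return 0;
}
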
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7f78e3ea517d..99c57b0c1d54 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1677,6 +1677,16 @@ config MAC_SCSI
1677 SCSI-HOWTO, available from 1677 SCSI-HOWTO, available from
1678 <http://www.tldp.org/docs.html#howto>. 1678 <http://www.tldp.org/docs.html#howto>.
1679 1679
1680config SCSI_MAC_ESP
1681 tristate "Macintosh NCR53c9[46] SCSI"
1682 depends on MAC && SCSI
1683 help
1684 This is the NCR 53c9x SCSI controller found on most of the 68040
1685 based Macintoshes.
1686
1687 To compile this driver as a module, choose M here: the module
1688 will be called mac_esp.
1689
1680config MVME147_SCSI 1690config MVME147_SCSI
1681 bool "WD33C93 SCSI driver for MVME147" 1691 bool "WD33C93 SCSI driver for MVME147"
1682 depends on MVME147 && SCSI=y 1692 depends on MVME147 && SCSI=y
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 23e6ecbd4778..6c775e350c98 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o 46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
47obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o 47obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
48obj-$(CONFIG_MAC_SCSI) += mac_scsi.o 48obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
49obj-$(CONFIG_SCSI_MAC_ESP) += esp_scsi.o mac_esp.o
49obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o 50obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
50obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o 51obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
51obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o 52obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 6ccdc96cc480..a09b2d3fdf5a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1432,15 +1432,10 @@ static void run(struct work_struct *work)
1432 */ 1432 */
1433static irqreturn_t intr(int irqno, void *dev_id) 1433static irqreturn_t intr(int irqno, void *dev_id)
1434{ 1434{
1435 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id; 1435 struct Scsi_Host *shpnt = dev_id;
1436 unsigned long flags; 1436 unsigned long flags;
1437 unsigned char rev, dmacntrl0; 1437 unsigned char rev, dmacntrl0;
1438 1438
1439 if (!shpnt) {
1440 printk(KERN_ERR "aha152x: catched interrupt %d for unknown controller.\n", irqno);
1441 return IRQ_NONE;
1442 }
1443
1444 /* 1439 /*
1445 * Read a couple of registers that are known to not be all 1's. If 1440 * Read a couple of registers that are known to not be all 1's. If
1446 * we read all 1's (-1), that means that either: 1441 * we read all 1's (-1), that means that either:
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 5a1471c370fa..80594947c6f6 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -153,8 +153,6 @@ struct aha1542_hostdata {
153 153
154#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata) 154#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
155 155
156static struct Scsi_Host *aha_host[7]; /* One for each IRQ level (9-15) */
157
158static DEFINE_SPINLOCK(aha1542_lock); 156static DEFINE_SPINLOCK(aha1542_lock);
159 157
160 158
@@ -163,8 +161,7 @@ static DEFINE_SPINLOCK(aha1542_lock);
163 161
164static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt); 162static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt);
165static int aha1542_restart(struct Scsi_Host *shost); 163static int aha1542_restart(struct Scsi_Host *shost);
166static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id); 164static void aha1542_intr_handle(struct Scsi_Host *shost);
167static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id);
168 165
169#define aha1542_intr_reset(base) outb(IRST, CONTROL(base)) 166#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
170 167
@@ -404,23 +401,19 @@ fail:
404} 401}
405 402
406/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */ 403/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */
407static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id) 404static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id)
408{ 405{
409 unsigned long flags; 406 unsigned long flags;
410 struct Scsi_Host *shost; 407 struct Scsi_Host *shost = dev_id;
411
412 shost = aha_host[irq - 9];
413 if (!shost)
414 panic("Splunge!");
415 408
416 spin_lock_irqsave(shost->host_lock, flags); 409 spin_lock_irqsave(shost->host_lock, flags);
417 aha1542_intr_handle(shost, dev_id); 410 aha1542_intr_handle(shost);
418 spin_unlock_irqrestore(shost->host_lock, flags); 411 spin_unlock_irqrestore(shost->host_lock, flags);
419 return IRQ_HANDLED; 412 return IRQ_HANDLED;
420} 413}
421 414
422/* A "high" level interrupt handler */ 415/* A "high" level interrupt handler */
423static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id) 416static void aha1542_intr_handle(struct Scsi_Host *shost)
424{ 417{
425 void (*my_done) (Scsi_Cmnd *) = NULL; 418 void (*my_done) (Scsi_Cmnd *) = NULL;
426 int errstatus, mbi, mbo, mbistatus; 419 int errstatus, mbi, mbo, mbistatus;
@@ -1197,7 +1190,8 @@ fail:
1197 1190
1198 DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level)); 1191 DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
1199 spin_lock_irqsave(&aha1542_lock, flags); 1192 spin_lock_irqsave(&aha1542_lock, flags);
1200 if (request_irq(irq_level, do_aha1542_intr_handle, 0, "aha1542", NULL)) { 1193 if (request_irq(irq_level, do_aha1542_intr_handle, 0,
1194 "aha1542", shpnt)) {
1201 printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n"); 1195 printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n");
1202 spin_unlock_irqrestore(&aha1542_lock, flags); 1196 spin_unlock_irqrestore(&aha1542_lock, flags);
1203 goto unregister; 1197 goto unregister;
@@ -1205,7 +1199,7 @@ fail:
1205 if (dma_chan != 0xFF) { 1199 if (dma_chan != 0xFF) {
1206 if (request_dma(dma_chan, "aha1542")) { 1200 if (request_dma(dma_chan, "aha1542")) {
1207 printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n"); 1201 printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n");
1208 free_irq(irq_level, NULL); 1202 free_irq(irq_level, shpnt);
1209 spin_unlock_irqrestore(&aha1542_lock, flags); 1203 spin_unlock_irqrestore(&aha1542_lock, flags);
1210 goto unregister; 1204 goto unregister;
1211 } 1205 }
@@ -1214,7 +1208,7 @@ fail:
1214 enable_dma(dma_chan); 1208 enable_dma(dma_chan);
1215 } 1209 }
1216 } 1210 }
1217 aha_host[irq_level - 9] = shpnt; 1211
1218 shpnt->this_id = scsi_id; 1212 shpnt->this_id = scsi_id;
1219 shpnt->unique_id = base_io; 1213 shpnt->unique_id = base_io;
1220 shpnt->io_port = base_io; 1214 shpnt->io_port = base_io;
@@ -1276,7 +1270,7 @@ unregister:
1276static int aha1542_release(struct Scsi_Host *shost) 1270static int aha1542_release(struct Scsi_Host *shost)
1277{ 1271{
1278 if (shost->irq) 1272 if (shost->irq)
1279 free_irq(shost->irq, NULL); 1273 free_irq(shost->irq, shost);
1280 if (shost->dma_channel != 0xff) 1274 if (shost->dma_channel != 0xff)
1281 free_dma(shost->dma_channel); 1275 free_dma(shost->dma_channel);
1282 if (shost->io_port && shost->n_io_port) 1276 if (shost->io_port && shost->n_io_port)
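
The aha1542 conversion shows the standard way to retire a global "IRQ to host" table: hand the per-host pointer to request_irq() as dev_id, receive it back as the handler's second argument, and pass the same pointer to free_irq(). A kernel-style sketch (post-2.6.19 handler signature; my_host is a placeholder type and this will not build outside a kernel tree):

#include <linux/interrupt.h>

struct my_host {
	int id;
	/* ... per-controller state ... */
};

static irqreturn_t my_intr(int irq, void *dev_id)
{
	struct my_host *host = dev_id;	/* no cast or global lookup needed */

	/* ... service the interrupt for this particular host ... */
	(void)host;
	return IRQ_HANDLED;
}

/* setup:    request_irq(irq, my_intr, 0, "my_host", host);
 * teardown: free_irq(irq, host);   (dev_id must match what was registered)
 */
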
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 2f00467b6b8c..be5558ab84ea 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -815,7 +815,7 @@ struct ahd_tmode_tstate {
815struct ahd_phase_table_entry { 815struct ahd_phase_table_entry {
816 uint8_t phase; 816 uint8_t phase;
817 uint8_t mesg_out; /* Message response to parity errors */ 817 uint8_t mesg_out; /* Message response to parity errors */
818 char *phasemsg; 818 const char *phasemsg;
819}; 819};
820 820
821/************************** Serial EEPROM Format ******************************/ 821/************************** Serial EEPROM Format ******************************/
@@ -1314,7 +1314,7 @@ typedef int (ahd_device_setup_t)(struct ahd_softc *);
1314struct ahd_pci_identity { 1314struct ahd_pci_identity {
1315 uint64_t full_id; 1315 uint64_t full_id;
1316 uint64_t id_mask; 1316 uint64_t id_mask;
1317 char *name; 1317 const char *name;
1318 ahd_device_setup_t *setup; 1318 ahd_device_setup_t *setup;
1319}; 1319};
1320 1320
@@ -1322,7 +1322,7 @@ struct ahd_pci_identity {
1322struct aic7770_identity { 1322struct aic7770_identity {
1323 uint32_t full_id; 1323 uint32_t full_id;
1324 uint32_t id_mask; 1324 uint32_t id_mask;
1325 char *name; 1325 const char *name;
1326 ahd_device_setup_t *setup; 1326 ahd_device_setup_t *setup;
1327}; 1327};
1328extern struct aic7770_identity aic7770_ident_table []; 1328extern struct aic7770_identity aic7770_ident_table [];
@@ -1333,12 +1333,11 @@ extern const int ahd_num_aic7770_devs;
1333 1333
1334/*************************** Function Declarations ****************************/ 1334/*************************** Function Declarations ****************************/
1335/******************************************************************************/ 1335/******************************************************************************/
1336void ahd_reset_cmds_pending(struct ahd_softc *ahd);
1337 1336
1338/***************************** PCI Front End *********************************/ 1337/***************************** PCI Front End *********************************/
1339struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t); 1338const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
1340int ahd_pci_config(struct ahd_softc *, 1339int ahd_pci_config(struct ahd_softc *,
1341 struct ahd_pci_identity *); 1340 const struct ahd_pci_identity *);
1342int ahd_pci_test_register_access(struct ahd_softc *); 1341int ahd_pci_test_register_access(struct ahd_softc *);
1343#ifdef CONFIG_PM 1342#ifdef CONFIG_PM
1344void ahd_pci_suspend(struct ahd_softc *); 1343void ahd_pci_suspend(struct ahd_softc *);
@@ -1376,16 +1375,6 @@ int ahd_write_flexport(struct ahd_softc *ahd,
1376int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, 1375int ahd_read_flexport(struct ahd_softc *ahd, u_int addr,
1377 uint8_t *value); 1376 uint8_t *value);
1378 1377
1379/*************************** Interrupt Services *******************************/
1380void ahd_run_qoutfifo(struct ahd_softc *ahd);
1381#ifdef AHD_TARGET_MODE
1382void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
1383#endif
1384void ahd_handle_hwerrint(struct ahd_softc *ahd);
1385void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
1386void ahd_handle_scsiint(struct ahd_softc *ahd,
1387 u_int intstat);
1388
1389/***************************** Error Recovery *********************************/ 1378/***************************** Error Recovery *********************************/
1390typedef enum { 1379typedef enum {
1391 SEARCH_COMPLETE, 1380 SEARCH_COMPLETE,
@@ -1479,7 +1468,7 @@ extern uint32_t ahd_debug;
1479void ahd_print_devinfo(struct ahd_softc *ahd, 1468void ahd_print_devinfo(struct ahd_softc *ahd,
1480 struct ahd_devinfo *devinfo); 1469 struct ahd_devinfo *devinfo);
1481void ahd_dump_card_state(struct ahd_softc *ahd); 1470void ahd_dump_card_state(struct ahd_softc *ahd);
1482int ahd_print_register(ahd_reg_parse_entry_t *table, 1471int ahd_print_register(const ahd_reg_parse_entry_t *table,
1483 u_int num_entries, 1472 u_int num_entries,
1484 const char *name, 1473 const char *name,
1485 u_int address, 1474 u_int address,
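
The aic79xx.h changes above const-qualify the string members of the static lookup tables (the phase table and the PCI/EISA identity tables) and drop declarations for helpers that become static inside aic79xx_core.c. A short, self-contained sketch of the const-table idiom being applied; the example_* names are invented stand-ins, not the driver's types:

#include <stddef.h>
#include <stdint.h>

/*
 * Const-table idiom: both the array and the strings it references can
 * live in read-only data, and lookups traffic in pointer-to-const.
 */
struct example_phase_entry {
	uint8_t		 phase;
	uint8_t		 mesg_out;
	const char	*phasemsg;	/* string literal, never modified */
};

static const struct example_phase_entry example_phase_table[] = {
	{ 0x00, 0x00, "in Data-out phase" },
	{ 0x01, 0x00, "in Data-in phase" },
	{ 0x02, 0x00, "while idle" },
};

static const struct example_phase_entry *
example_lookup_phase(int phase)
{
	size_t i;

	for (i = 0; i < sizeof(example_phase_table) / sizeof(example_phase_table[0]); i++) {
		if (example_phase_table[i].phase == phase)
			return &example_phase_table[i];
	}
	return NULL;	/* caller handles unknown phases */
}
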
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index be14e2ecb8f7..cca16fc5b4ad 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -198,6 +198,7 @@ register SEQINTCODE {
198register CLRINT { 198register CLRINT {
199 address 0x003 199 address 0x003
200 access_mode WO 200 access_mode WO
201 count 19
201 field CLRHWERRINT 0x80 /* Rev B or greater */ 202 field CLRHWERRINT 0x80 /* Rev B or greater */
202 field CLRBRKADRINT 0x40 203 field CLRBRKADRINT 0x40
203 field CLRSWTMINT 0x20 204 field CLRSWTMINT 0x20
@@ -245,6 +246,7 @@ register CLRERR {
245register HCNTRL { 246register HCNTRL {
246 address 0x005 247 address 0x005
247 access_mode RW 248 access_mode RW
249 count 12
248 field SEQ_RESET 0x80 /* Rev B or greater */ 250 field SEQ_RESET 0x80 /* Rev B or greater */
249 field POWRDN 0x40 251 field POWRDN 0x40
250 field SWINT 0x10 252 field SWINT 0x10
@@ -262,6 +264,7 @@ register HNSCB_QOFF {
262 address 0x006 264 address 0x006
263 access_mode RW 265 access_mode RW
264 size 2 266 size 2
267 count 2
265} 268}
266 269
267/* 270/*
@@ -270,6 +273,7 @@ register HNSCB_QOFF {
270register HESCB_QOFF { 273register HESCB_QOFF {
271 address 0x008 274 address 0x008
272 access_mode RW 275 access_mode RW
276 count 2
273} 277}
274 278
275/* 279/*
@@ -287,6 +291,7 @@ register HS_MAILBOX {
287 */ 291 */
288register SEQINTSTAT { 292register SEQINTSTAT {
289 address 0x00C 293 address 0x00C
294 count 1
290 access_mode RO 295 access_mode RO
291 field SEQ_SWTMRTO 0x10 296 field SEQ_SWTMRTO 0x10
292 field SEQ_SEQINT 0x08 297 field SEQ_SEQINT 0x08
@@ -332,6 +337,7 @@ register SNSCB_QOFF {
332 */ 337 */
333register SESCB_QOFF { 338register SESCB_QOFF {
334 address 0x012 339 address 0x012
340 count 2
335 access_mode RW 341 access_mode RW
336 modes M_CCHAN 342 modes M_CCHAN
337} 343}
@@ -397,6 +403,7 @@ register DFCNTRL {
397 address 0x019 403 address 0x019
398 access_mode RW 404 access_mode RW
399 modes M_DFF0, M_DFF1 405 modes M_DFF0, M_DFF1
406 count 11
400 field PRELOADEN 0x80 407 field PRELOADEN 0x80
401 field SCSIENWRDIS 0x40 /* Rev B only. */ 408 field SCSIENWRDIS 0x40 /* Rev B only. */
402 field SCSIEN 0x20 409 field SCSIEN 0x20
@@ -415,6 +422,7 @@ register DFCNTRL {
415 */ 422 */
416register DSCOMMAND0 { 423register DSCOMMAND0 {
417 address 0x019 424 address 0x019
425 count 1
418 access_mode RW 426 access_mode RW
419 modes M_CFG 427 modes M_CFG
420 field CACHETHEN 0x80 /* Cache Threshold enable */ 428 field CACHETHEN 0x80 /* Cache Threshold enable */
@@ -580,6 +588,7 @@ register DFF_THRSH {
580 address 0x088 588 address 0x088
581 access_mode RW 589 access_mode RW
582 modes M_CFG 590 modes M_CFG
591 count 1
583 field WR_DFTHRSH 0x70 { 592 field WR_DFTHRSH 0x70 {
584 WR_DFTHRSH_MIN, 593 WR_DFTHRSH_MIN,
585 WR_DFTHRSH_25, 594 WR_DFTHRSH_25,
@@ -800,6 +809,7 @@ register PCIXCTL {
800 address 0x093 809 address 0x093
801 access_mode RW 810 access_mode RW
802 modes M_CFG 811 modes M_CFG
812 count 1
803 field SERRPULSE 0x80 813 field SERRPULSE 0x80
804 field UNEXPSCIEN 0x20 814 field UNEXPSCIEN 0x20
805 field SPLTSMADIS 0x10 815 field SPLTSMADIS 0x10
@@ -844,6 +854,7 @@ register DCHSPLTSTAT0 {
844 address 0x096 854 address 0x096
845 access_mode RW 855 access_mode RW
846 modes M_DFF0, M_DFF1 856 modes M_DFF0, M_DFF1
857 count 2
847 field STAETERM 0x80 858 field STAETERM 0x80
848 field SCBCERR 0x40 859 field SCBCERR 0x40
849 field SCADERR 0x20 860 field SCADERR 0x20
@@ -895,6 +906,7 @@ register DCHSPLTSTAT1 {
895 address 0x097 906 address 0x097
896 access_mode RW 907 access_mode RW
897 modes M_DFF0, M_DFF1 908 modes M_DFF0, M_DFF1
909 count 2
898 field RXDATABUCKET 0x01 910 field RXDATABUCKET 0x01
899} 911}
900 912
@@ -1048,6 +1060,7 @@ register SGSPLTSTAT0 {
1048 address 0x09E 1060 address 0x09E
1049 access_mode RW 1061 access_mode RW
1050 modes M_DFF0, M_DFF1 1062 modes M_DFF0, M_DFF1
1063 count 2
1051 field STAETERM 0x80 1064 field STAETERM 0x80
1052 field SCBCERR 0x40 1065 field SCBCERR 0x40
1053 field SCADERR 0x20 1066 field SCADERR 0x20
@@ -1065,6 +1078,7 @@ register SGSPLTSTAT1 {
1065 address 0x09F 1078 address 0x09F
1066 access_mode RW 1079 access_mode RW
1067 modes M_DFF0, M_DFF1 1080 modes M_DFF0, M_DFF1
1081 count 2
1068 field RXDATABUCKET 0x01 1082 field RXDATABUCKET 0x01
1069} 1083}
1070 1084
@@ -1086,6 +1100,7 @@ register DF0PCISTAT {
1086 address 0x0A0 1100 address 0x0A0
1087 access_mode RW 1101 access_mode RW
1088 modes M_CFG 1102 modes M_CFG
1103 count 1
1089 field DPE 0x80 1104 field DPE 0x80
1090 field SSE 0x40 1105 field SSE 0x40
1091 field RMA 0x20 1106 field RMA 0x20
@@ -1184,6 +1199,7 @@ register TARGPCISTAT {
1184 address 0x0A7 1199 address 0x0A7
1185 access_mode RW 1200 access_mode RW
1186 modes M_CFG 1201 modes M_CFG
1202 count 5
1187 field DPE 0x80 1203 field DPE 0x80
1188 field SSE 0x40 1204 field SSE 0x40
1189 field STA 0x08 1205 field STA 0x08
@@ -1198,6 +1214,7 @@ register LQIN {
1198 address 0x020 1214 address 0x020
1199 access_mode RW 1215 access_mode RW
1200 size 20 1216 size 20
1217 count 2
1201 modes M_DFF0, M_DFF1, M_SCSI 1218 modes M_DFF0, M_DFF1, M_SCSI
1202} 1219}
1203 1220
@@ -1229,6 +1246,7 @@ register LUNPTR {
1229 address 0x022 1246 address 0x022
1230 access_mode RW 1247 access_mode RW
1231 modes M_CFG 1248 modes M_CFG
1249 count 2
1232} 1250}
1233 1251
1234/* 1252/*
@@ -1259,6 +1277,7 @@ register CMDLENPTR {
1259 address 0x025 1277 address 0x025
1260 access_mode RW 1278 access_mode RW
1261 modes M_CFG 1279 modes M_CFG
1280 count 1
1262} 1281}
1263 1282
1264/* 1283/*
@@ -1270,6 +1289,7 @@ register ATTRPTR {
1270 address 0x026 1289 address 0x026
1271 access_mode RW 1290 access_mode RW
1272 modes M_CFG 1291 modes M_CFG
1292 count 1
1273} 1293}
1274 1294
1275/* 1295/*
@@ -1281,6 +1301,7 @@ register FLAGPTR {
1281 address 0x027 1301 address 0x027
1282 access_mode RW 1302 access_mode RW
1283 modes M_CFG 1303 modes M_CFG
1304 count 1
1284} 1305}
1285 1306
1286/* 1307/*
@@ -1291,6 +1312,7 @@ register CMDPTR {
1291 address 0x028 1312 address 0x028
1292 access_mode RW 1313 access_mode RW
1293 modes M_CFG 1314 modes M_CFG
1315 count 1
1294} 1316}
1295 1317
1296/* 1318/*
@@ -1301,6 +1323,7 @@ register QNEXTPTR {
1301 address 0x029 1323 address 0x029
1302 access_mode RW 1324 access_mode RW
1303 modes M_CFG 1325 modes M_CFG
1326 count 1
1304} 1327}
1305 1328
1306/* 1329/*
@@ -1323,6 +1346,7 @@ register ABRTBYTEPTR {
1323 address 0x02B 1346 address 0x02B
1324 access_mode RW 1347 access_mode RW
1325 modes M_CFG 1348 modes M_CFG
1349 count 1
1326} 1350}
1327 1351
1328/* 1352/*
@@ -1333,6 +1357,7 @@ register ABRTBITPTR {
1333 address 0x02C 1357 address 0x02C
1334 access_mode RW 1358 access_mode RW
1335 modes M_CFG 1359 modes M_CFG
1360 count 1
1336} 1361}
1337 1362
1338/* 1363/*
@@ -1370,6 +1395,7 @@ register LUNLEN {
1370 address 0x030 1395 address 0x030
1371 access_mode RW 1396 access_mode RW
1372 modes M_CFG 1397 modes M_CFG
1398 count 2
1373 mask ILUNLEN 0x0F 1399 mask ILUNLEN 0x0F
1374 mask TLUNLEN 0xF0 1400 mask TLUNLEN 0xF0
1375} 1401}
@@ -1383,6 +1409,7 @@ register CDBLIMIT {
1383 address 0x031 1409 address 0x031
1384 access_mode RW 1410 access_mode RW
1385 modes M_CFG 1411 modes M_CFG
1412 count 1
1386} 1413}
1387 1414
1388/* 1415/*
@@ -1394,6 +1421,7 @@ register MAXCMD {
1394 address 0x032 1421 address 0x032
1395 access_mode RW 1422 access_mode RW
1396 modes M_CFG 1423 modes M_CFG
1424 count 9
1397} 1425}
1398 1426
1399/* 1427/*
@@ -1458,6 +1486,7 @@ register LQCTL1 {
1458 address 0x038 1486 address 0x038
1459 access_mode RW 1487 access_mode RW
1460 modes M_DFF0, M_DFF1, M_SCSI 1488 modes M_DFF0, M_DFF1, M_SCSI
1489 count 2
1461 field PCI2PCI 0x04 1490 field PCI2PCI 0x04
1462 field SINGLECMD 0x02 1491 field SINGLECMD 0x02
1463 field ABORTPENDING 0x01 1492 field ABORTPENDING 0x01
@@ -1470,6 +1499,7 @@ register LQCTL2 {
1470 address 0x039 1499 address 0x039
1471 access_mode RW 1500 access_mode RW
1472 modes M_DFF0, M_DFF1, M_SCSI 1501 modes M_DFF0, M_DFF1, M_SCSI
1502 count 5
1473 field LQIRETRY 0x80 1503 field LQIRETRY 0x80
1474 field LQICONTINUE 0x40 1504 field LQICONTINUE 0x40
1475 field LQITOIDLE 0x20 1505 field LQITOIDLE 0x20
@@ -1528,6 +1558,7 @@ register SCSISEQ1 {
1528 address 0x03B 1558 address 0x03B
1529 access_mode RW 1559 access_mode RW
1530 modes M_DFF0, M_DFF1, M_SCSI 1560 modes M_DFF0, M_DFF1, M_SCSI
1561 count 8
1531 field MANUALCTL 0x40 1562 field MANUALCTL 0x40
1532 field ENSELI 0x20 1563 field ENSELI 0x20
1533 field ENRSELI 0x10 1564 field ENRSELI 0x10
@@ -1667,6 +1698,9 @@ register SCSISIGO {
1667 } 1698 }
1668} 1699}
1669 1700
1701/*
1702 * SCSI Control Signal In
1703 */
1670register SCSISIGI { 1704register SCSISIGI {
1671 address 0x041 1705 address 0x041
1672 access_mode RO 1706 access_mode RO
@@ -1703,6 +1737,7 @@ register MULTARGID {
1703 access_mode RW 1737 access_mode RW
1704 modes M_CFG 1738 modes M_CFG
1705 size 2 1739 size 2
1740 count 2
1706} 1741}
1707 1742
1708/* 1743/*
@@ -1758,6 +1793,7 @@ register TARGIDIN {
1758 address 0x048 1793 address 0x048
1759 access_mode RO 1794 access_mode RO
1760 modes M_DFF0, M_DFF1, M_SCSI 1795 modes M_DFF0, M_DFF1, M_SCSI
1796 count 2
1761 field CLKOUT 0x80 1797 field CLKOUT 0x80
1762 field TARGID 0x0F 1798 field TARGID 0x0F
1763} 1799}
@@ -1798,6 +1834,7 @@ register OPTIONMODE {
1798 address 0x04A 1834 address 0x04A
1799 access_mode RW 1835 access_mode RW
1800 modes M_CFG 1836 modes M_CFG
1837 count 4
1801 field BIOSCANCTL 0x80 1838 field BIOSCANCTL 0x80
1802 field AUTOACKEN 0x40 1839 field AUTOACKEN 0x40
1803 field BIASCANCTL 0x20 1840 field BIASCANCTL 0x20
@@ -1850,6 +1887,7 @@ register SIMODE0 {
1850 address 0x04B 1887 address 0x04B
1851 access_mode RW 1888 access_mode RW
1852 modes M_CFG 1889 modes M_CFG
1890 count 8
1853 field ENSELDO 0x40 1891 field ENSELDO 0x40
1854 field ENSELDI 0x20 1892 field ENSELDI 0x20
1855 field ENSELINGO 0x10 1893 field ENSELINGO 0x10
@@ -1945,6 +1983,7 @@ register PERRDIAG {
1945 address 0x04E 1983 address 0x04E
1946 access_mode RO 1984 access_mode RO
1947 modes M_DFF0, M_DFF1, M_SCSI 1985 modes M_DFF0, M_DFF1, M_SCSI
1986 count 3
1948 field HIZERO 0x80 1987 field HIZERO 0x80
1949 field HIPERR 0x40 1988 field HIPERR 0x40
1950 field PREVPHASE 0x20 1989 field PREVPHASE 0x20
@@ -1962,6 +2001,7 @@ register LQISTATE {
1962 address 0x04E 2001 address 0x04E
1963 access_mode RO 2002 access_mode RO
1964 modes M_CFG 2003 modes M_CFG
2004 count 6
1965} 2005}
1966 2006
1967/* 2007/*
@@ -1971,6 +2011,7 @@ register SOFFCNT {
1971 address 0x04F 2011 address 0x04F
1972 access_mode RO 2012 access_mode RO
1973 modes M_DFF0, M_DFF1, M_SCSI 2013 modes M_DFF0, M_DFF1, M_SCSI
2014 count 1
1974} 2015}
1975 2016
1976/* 2017/*
@@ -1980,6 +2021,7 @@ register LQOSTATE {
1980 address 0x04F 2021 address 0x04F
1981 access_mode RO 2022 access_mode RO
1982 modes M_CFG 2023 modes M_CFG
2024 count 2
1983} 2025}
1984 2026
1985/* 2027/*
@@ -1989,6 +2031,7 @@ register LQISTAT0 {
1989 address 0x050 2031 address 0x050
1990 access_mode RO 2032 access_mode RO
1991 modes M_DFF0, M_DFF1, M_SCSI 2033 modes M_DFF0, M_DFF1, M_SCSI
2034 count 2
1992 field LQIATNQAS 0x20 2035 field LQIATNQAS 0x20
1993 field LQICRCT1 0x10 2036 field LQICRCT1 0x10
1994 field LQICRCT2 0x08 2037 field LQICRCT2 0x08
@@ -2004,6 +2047,7 @@ register CLRLQIINT0 {
2004 address 0x050 2047 address 0x050
2005 access_mode WO 2048 access_mode WO
2006 modes M_DFF0, M_DFF1, M_SCSI 2049 modes M_DFF0, M_DFF1, M_SCSI
2050 count 1
2007 field CLRLQIATNQAS 0x20 2051 field CLRLQIATNQAS 0x20
2008 field CLRLQICRCT1 0x10 2052 field CLRLQICRCT1 0x10
2009 field CLRLQICRCT2 0x08 2053 field CLRLQICRCT2 0x08
@@ -2019,6 +2063,7 @@ register LQIMODE0 {
2019 address 0x050 2063 address 0x050
2020 access_mode RW 2064 access_mode RW
2021 modes M_CFG 2065 modes M_CFG
2066 count 3
2022 field ENLQIATNQASK 0x20 2067 field ENLQIATNQASK 0x20
2023 field ENLQICRCT1 0x10 2068 field ENLQICRCT1 0x10
2024 field ENLQICRCT2 0x08 2069 field ENLQICRCT2 0x08
@@ -2034,6 +2079,7 @@ register LQISTAT1 {
2034 address 0x051 2079 address 0x051
2035 access_mode RO 2080 access_mode RO
2036 modes M_DFF0, M_DFF1, M_SCSI 2081 modes M_DFF0, M_DFF1, M_SCSI
2082 count 3
2037 field LQIPHASE_LQ 0x80 2083 field LQIPHASE_LQ 0x80
2038 field LQIPHASE_NLQ 0x40 2084 field LQIPHASE_NLQ 0x40
2039 field LQIABORT 0x20 2085 field LQIABORT 0x20
@@ -2051,6 +2097,7 @@ register CLRLQIINT1 {
2051 address 0x051 2097 address 0x051
2052 access_mode WO 2098 access_mode WO
2053 modes M_DFF0, M_DFF1, M_SCSI 2099 modes M_DFF0, M_DFF1, M_SCSI
2100 count 4
2054 field CLRLQIPHASE_LQ 0x80 2101 field CLRLQIPHASE_LQ 0x80
2055 field CLRLQIPHASE_NLQ 0x40 2102 field CLRLQIPHASE_NLQ 0x40
2056 field CLRLIQABORT 0x20 2103 field CLRLIQABORT 0x20
@@ -2068,6 +2115,7 @@ register LQIMODE1 {
2068 address 0x051 2115 address 0x051
2069 access_mode RW 2116 access_mode RW
2070 modes M_CFG 2117 modes M_CFG
2118 count 4
2071 field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */ 2119 field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */
2072 field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */ 2120 field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */
2073 field ENLIQABORT 0x20 2121 field ENLIQABORT 0x20
@@ -2102,6 +2150,7 @@ register SSTAT3 {
2102 address 0x053 2150 address 0x053
2103 access_mode RO 2151 access_mode RO
2104 modes M_DFF0, M_DFF1, M_SCSI 2152 modes M_DFF0, M_DFF1, M_SCSI
2153 count 3
2105 field NTRAMPERR 0x02 2154 field NTRAMPERR 0x02
2106 field OSRAMPERR 0x01 2155 field OSRAMPERR 0x01
2107} 2156}
@@ -2113,6 +2162,7 @@ register CLRSINT3 {
2113 address 0x053 2162 address 0x053
2114 access_mode WO 2163 access_mode WO
2115 modes M_DFF0, M_DFF1, M_SCSI 2164 modes M_DFF0, M_DFF1, M_SCSI
2165 count 3
2116 field CLRNTRAMPERR 0x02 2166 field CLRNTRAMPERR 0x02
2117 field CLROSRAMPERR 0x01 2167 field CLROSRAMPERR 0x01
2118} 2168}
@@ -2124,6 +2174,7 @@ register SIMODE3 {
2124 address 0x053 2174 address 0x053
2125 access_mode RW 2175 access_mode RW
2126 modes M_CFG 2176 modes M_CFG
2177 count 4
2127 field ENNTRAMPERR 0x02 2178 field ENNTRAMPERR 0x02
2128 field ENOSRAMPERR 0x01 2179 field ENOSRAMPERR 0x01
2129} 2180}
@@ -2135,6 +2186,7 @@ register LQOSTAT0 {
2135 address 0x054 2186 address 0x054
2136 access_mode RO 2187 access_mode RO
2137 modes M_DFF0, M_DFF1, M_SCSI 2188 modes M_DFF0, M_DFF1, M_SCSI
2189 count 2
2138 field LQOTARGSCBPERR 0x10 2190 field LQOTARGSCBPERR 0x10
2139 field LQOSTOPT2 0x08 2191 field LQOSTOPT2 0x08
2140 field LQOATNLQ 0x04 2192 field LQOATNLQ 0x04
@@ -2149,6 +2201,7 @@ register CLRLQOINT0 {
2149 address 0x054 2201 address 0x054
2150 access_mode WO 2202 access_mode WO
2151 modes M_DFF0, M_DFF1, M_SCSI 2203 modes M_DFF0, M_DFF1, M_SCSI
2204 count 3
2152 field CLRLQOTARGSCBPERR 0x10 2205 field CLRLQOTARGSCBPERR 0x10
2153 field CLRLQOSTOPT2 0x08 2206 field CLRLQOSTOPT2 0x08
2154 field CLRLQOATNLQ 0x04 2207 field CLRLQOATNLQ 0x04
@@ -2163,6 +2216,7 @@ register LQOMODE0 {
2163 address 0x054 2216 address 0x054
2164 access_mode RW 2217 access_mode RW
2165 modes M_CFG 2218 modes M_CFG
2219 count 4
2166 field ENLQOTARGSCBPERR 0x10 2220 field ENLQOTARGSCBPERR 0x10
2167 field ENLQOSTOPT2 0x08 2221 field ENLQOSTOPT2 0x08
2168 field ENLQOATNLQ 0x04 2222 field ENLQOATNLQ 0x04
@@ -2191,6 +2245,7 @@ register CLRLQOINT1 {
2191 address 0x055 2245 address 0x055
2192 access_mode WO 2246 access_mode WO
2193 modes M_DFF0, M_DFF1, M_SCSI 2247 modes M_DFF0, M_DFF1, M_SCSI
2248 count 7
2194 field CLRLQOINITSCBPERR 0x10 2249 field CLRLQOINITSCBPERR 0x10
2195 field CLRLQOSTOPI2 0x08 2250 field CLRLQOSTOPI2 0x08
2196 field CLRLQOBADQAS 0x04 2251 field CLRLQOBADQAS 0x04
@@ -2205,6 +2260,7 @@ register LQOMODE1 {
2205 address 0x055 2260 address 0x055
2206 access_mode RW 2261 access_mode RW
2207 modes M_CFG 2262 modes M_CFG
2263 count 4
2208 field ENLQOINITSCBPERR 0x10 2264 field ENLQOINITSCBPERR 0x10
2209 field ENLQOSTOPI2 0x08 2265 field ENLQOSTOPI2 0x08
2210 field ENLQOBADQAS 0x04 2266 field ENLQOBADQAS 0x04
@@ -2232,6 +2288,7 @@ register OS_SPACE_CNT {
2232 address 0x056 2288 address 0x056
2233 access_mode RO 2289 access_mode RO
2234 modes M_CFG 2290 modes M_CFG
2291 count 2
2235} 2292}
2236 2293
2237/* 2294/*
@@ -2286,13 +2343,19 @@ register NEXTSCB {
2286 modes M_SCSI 2343 modes M_SCSI
2287} 2344}
2288 2345
2289/* Rev B only. */ 2346/*
2347 * LQO SCSI Control
2348 * (Rev B only.)
2349 */
2290register LQOSCSCTL { 2350register LQOSCSCTL {
2291 address 0x05A 2351 address 0x05A
2292 access_mode RW 2352 access_mode RW
2293 size 1 2353 size 1
2294 modes M_CFG 2354 modes M_CFG
2355 count 1
2295 field LQOH2A_VERSION 0x80 2356 field LQOH2A_VERSION 0x80
2357 field LQOBUSETDLY 0x40
2358 field LQONOHOLDLACK 0x02
2296 field LQONOCHKOVER 0x01 2359 field LQONOCHKOVER 0x01
2297} 2360}
2298 2361
@@ -2459,6 +2522,7 @@ register NEGPERIOD {
2459 address 0x061 2522 address 0x061
2460 access_mode RW 2523 access_mode RW
2461 modes M_SCSI 2524 modes M_SCSI
2525 count 1
2462} 2526}
2463 2527
2464/* 2528/*
@@ -2478,6 +2542,7 @@ register NEGOFFSET {
2478 address 0x062 2542 address 0x062
2479 access_mode RW 2543 access_mode RW
2480 modes M_SCSI 2544 modes M_SCSI
2545 count 1
2481} 2546}
2482 2547
2483/* 2548/*
@@ -2487,6 +2552,7 @@ register NEGPPROPTS {
2487 address 0x063 2552 address 0x063
2488 access_mode RW 2553 access_mode RW
2489 modes M_SCSI 2554 modes M_SCSI
2555 count 1
2490 field PPROPT_PACE 0x08 2556 field PPROPT_PACE 0x08
2491 field PPROPT_QAS 0x04 2557 field PPROPT_QAS 0x04
2492 field PPROPT_DT 0x02 2558 field PPROPT_DT 0x02
@@ -2516,12 +2582,19 @@ register ANNEXCOL {
2516 address 0x065 2582 address 0x065
2517 access_mode RW 2583 access_mode RW
2518 modes M_SCSI 2584 modes M_SCSI
2585 count 7
2519} 2586}
2520 2587
2588/*
2589 * SCSI Check
2590 * (Rev. B only)
2591 */
2521register SCSCHKN { 2592register SCSCHKN {
2522 address 0x066 2593 address 0x066
2523 access_mode RW 2594 access_mode RW
2524 modes M_CFG 2595 modes M_CFG
2596 count 1
2597 field BIDICHKDIS 0x80
2525 field STSELSKIDDIS 0x40 2598 field STSELSKIDDIS 0x40
2526 field CURRFIFODEF 0x20 2599 field CURRFIFODEF 0x20
2527 field WIDERESEN 0x10 2600 field WIDERESEN 0x10
@@ -2561,6 +2634,7 @@ register ANNEXDAT {
2561 address 0x066 2634 address 0x066
2562 access_mode RW 2635 access_mode RW
2563 modes M_SCSI 2636 modes M_SCSI
2637 count 3
2564} 2638}
2565 2639
2566/* 2640/*
@@ -2596,6 +2670,7 @@ register TOWNID {
2596 address 0x069 2670 address 0x069
2597 access_mode RW 2671 access_mode RW
2598 modes M_SCSI 2672 modes M_SCSI
2673 count 2
2599} 2674}
2600 2675
2601/* 2676/*
@@ -2737,6 +2812,7 @@ register SCBAUTOPTR {
2737 address 0x0AB 2812 address 0x0AB
2738 access_mode RW 2813 access_mode RW
2739 modes M_CFG 2814 modes M_CFG
2815 count 1
2740 field AUSCBPTR_EN 0x80 2816 field AUSCBPTR_EN 0x80
2741 field SCBPTR_ADDR 0x38 2817 field SCBPTR_ADDR 0x38
2742 field SCBPTR_OFF 0x07 2818 field SCBPTR_OFF 0x07
@@ -2881,6 +2957,7 @@ register BRDDAT {
2881 address 0x0B8 2957 address 0x0B8
2882 access_mode RW 2958 access_mode RW
2883 modes M_SCSI 2959 modes M_SCSI
2960 count 2
2884} 2961}
2885 2962
2886/* 2963/*
@@ -2890,6 +2967,7 @@ register BRDCTL {
2890 address 0x0B9 2967 address 0x0B9
2891 access_mode RW 2968 access_mode RW
2892 modes M_SCSI 2969 modes M_SCSI
2970 count 7
2893 field FLXARBACK 0x80 2971 field FLXARBACK 0x80
2894 field FLXARBREQ 0x40 2972 field FLXARBREQ 0x40
2895 field BRDADDR 0x38 2973 field BRDADDR 0x38
@@ -2905,6 +2983,7 @@ register SEEADR {
2905 address 0x0BA 2983 address 0x0BA
2906 access_mode RW 2984 access_mode RW
2907 modes M_SCSI 2985 modes M_SCSI
2986 count 4
2908} 2987}
2909 2988
2910/* 2989/*
@@ -2915,6 +2994,7 @@ register SEEDAT {
2915 access_mode RW 2994 access_mode RW
2916 size 2 2995 size 2
2917 modes M_SCSI 2996 modes M_SCSI
2997 count 4
2918} 2998}
2919 2999
2920/* 3000/*
@@ -2924,6 +3004,7 @@ register SEESTAT {
2924 address 0x0BE 3004 address 0x0BE
2925 access_mode RO 3005 access_mode RO
2926 modes M_SCSI 3006 modes M_SCSI
3007 count 1
2927 field INIT_DONE 0x80 3008 field INIT_DONE 0x80
2928 field SEEOPCODE 0x70 3009 field SEEOPCODE 0x70
2929 field LDALTID_L 0x08 3010 field LDALTID_L 0x08
@@ -2939,6 +3020,7 @@ register SEECTL {
2939 address 0x0BE 3020 address 0x0BE
2940 access_mode RW 3021 access_mode RW
2941 modes M_SCSI 3022 modes M_SCSI
3023 count 4
2942 field SEEOPCODE 0x70 { 3024 field SEEOPCODE 0x70 {
2943 SEEOP_ERASE 0x70, 3025 SEEOP_ERASE 0x70,
2944 SEEOP_READ 0x60, 3026 SEEOP_READ 0x60,
@@ -3000,6 +3082,7 @@ register DSPDATACTL {
3000 address 0x0C1 3082 address 0x0C1
3001 access_mode RW 3083 access_mode RW
3002 modes M_CFG 3084 modes M_CFG
3085 count 3
3003 field BYPASSENAB 0x80 3086 field BYPASSENAB 0x80
3004 field DESQDIS 0x10 3087 field DESQDIS 0x10
3005 field RCVROFFSTDIS 0x04 3088 field RCVROFFSTDIS 0x04
@@ -3058,6 +3141,7 @@ register DSPSELECT {
3058 address 0x0C4 3141 address 0x0C4
3059 access_mode RW 3142 access_mode RW
3060 modes M_CFG 3143 modes M_CFG
3144 count 1
3061 field AUTOINCEN 0x80 3145 field AUTOINCEN 0x80
3062 field DSPSEL 0x1F 3146 field DSPSEL 0x1F
3063} 3147}
@@ -3071,6 +3155,7 @@ register WRTBIASCTL {
3071 address 0x0C5 3155 address 0x0C5
3072 access_mode WO 3156 access_mode WO
3073 modes M_CFG 3157 modes M_CFG
3158 count 3
3074 field AUTOXBCDIS 0x80 3159 field AUTOXBCDIS 0x80
3075 field XMITMANVAL 0x3F 3160 field XMITMANVAL 0x3F
3076} 3161}
@@ -3196,7 +3281,8 @@ register OVLYADDR {
3196 */ 3281 */
3197register SEQCTL0 { 3282register SEQCTL0 {
3198 address 0x0D6 3283 address 0x0D6
3199 access_mode RW 3284 access_mode RW
3285 count 11
3200 field PERRORDIS 0x80 3286 field PERRORDIS 0x80
3201 field PAUSEDIS 0x40 3287 field PAUSEDIS 0x40
3202 field FAILDIS 0x20 3288 field FAILDIS 0x20
@@ -3226,7 +3312,8 @@ register SEQCTL1 {
3226 */ 3312 */
3227register FLAGS { 3313register FLAGS {
3228 address 0x0D8 3314 address 0x0D8
3229 access_mode RO 3315 access_mode RO
3316 count 23
3230 field ZERO 0x02 3317 field ZERO 0x02
3231 field CARRY 0x01 3318 field CARRY 0x01
3232} 3319}
@@ -3255,7 +3342,8 @@ register SEQINTCTL {
3255 */ 3342 */
3256register SEQRAM { 3343register SEQRAM {
3257 address 0x0DA 3344 address 0x0DA
3258 access_mode RW 3345 access_mode RW
3346 count 2
3259} 3347}
3260 3348
3261/* 3349/*
@@ -3266,6 +3354,7 @@ register PRGMCNT {
3266 address 0x0DE 3354 address 0x0DE
3267 access_mode RW 3355 access_mode RW
3268 size 2 3356 size 2
3357 count 5
3269} 3358}
3270 3359
3271/* 3360/*
@@ -3273,7 +3362,7 @@ register PRGMCNT {
3273 */ 3362 */
3274register ACCUM { 3363register ACCUM {
3275 address 0x0E0 3364 address 0x0E0
3276 access_mode RW 3365 access_mode RW
3277 accumulator 3366 accumulator
3278} 3367}
3279 3368
@@ -3401,6 +3490,7 @@ register INTVEC1_ADDR {
3401 access_mode RW 3490 access_mode RW
3402 size 2 3491 size 2
3403 modes M_CFG 3492 modes M_CFG
3493 count 1
3404} 3494}
3405 3495
3406/* 3496/*
@@ -3412,6 +3502,7 @@ register CURADDR {
3412 access_mode RW 3502 access_mode RW
3413 size 2 3503 size 2
3414 modes M_SCSI 3504 modes M_SCSI
3505 count 2
3415} 3506}
3416 3507
3417/* 3508/*
@@ -3423,6 +3514,7 @@ register INTVEC2_ADDR {
3423 access_mode RW 3514 access_mode RW
3424 size 2 3515 size 2
3425 modes M_CFG 3516 modes M_CFG
3517 count 1
3426} 3518}
3427 3519
3428/* 3520/*
@@ -3579,6 +3671,7 @@ scratch_ram {
3579 /* Parameters for DMA Logic */ 3671 /* Parameters for DMA Logic */
3580 DMAPARAMS { 3672 DMAPARAMS {
3581 size 1 3673 size 1
3674 count 8
3582 field PRELOADEN 0x80 3675 field PRELOADEN 0x80
3583 field WIDEODD 0x40 3676 field WIDEODD 0x40
3584 field SCSIEN 0x20 3677 field SCSIEN 0x20
@@ -3648,9 +3741,11 @@ scratch_ram {
3648 */ 3741 */
3649 KERNEL_TQINPOS { 3742 KERNEL_TQINPOS {
3650 size 1 3743 size 1
3744 count 1
3651 } 3745 }
3652 TQINPOS { 3746 TQINPOS {
3653 size 1 3747 size 1
3748 count 8
3654 } 3749 }
3655 /* 3750 /*
3656 * Base address of our shared data with the kernel driver in host 3751 * Base address of our shared data with the kernel driver in host
@@ -3681,6 +3776,7 @@ scratch_ram {
3681 } 3776 }
3682 ARG_2 { 3777 ARG_2 {
3683 size 1 3778 size 1
3779 count 1
3684 alias RETURN_2 3780 alias RETURN_2
3685 } 3781 }
3686 3782
@@ -3698,6 +3794,7 @@ scratch_ram {
3698 */ 3794 */
3699 SCSISEQ_TEMPLATE { 3795 SCSISEQ_TEMPLATE {
3700 size 1 3796 size 1
3797 count 7
3701 field MANUALCTL 0x40 3798 field MANUALCTL 0x40
3702 field ENSELI 0x20 3799 field ENSELI 0x20
3703 field ENRSELI 0x10 3800 field ENRSELI 0x10
@@ -3711,6 +3808,7 @@ scratch_ram {
3711 */ 3808 */
3712 INITIATOR_TAG { 3809 INITIATOR_TAG {
3713 size 1 3810 size 1
3811 count 1
3714 } 3812 }
3715 3813
3716 SEQ_FLAGS2 { 3814 SEQ_FLAGS2 {
@@ -3777,6 +3875,7 @@ scratch_ram {
3777 */ 3875 */
3778 CMDSIZE_TABLE { 3876 CMDSIZE_TABLE {
3779 size 8 3877 size 8
3878 count 8
3780 } 3879 }
3781 /* 3880 /*
3782 * When an SCB with the MK_MESSAGE flag is 3881 * When an SCB with the MK_MESSAGE flag is
@@ -3803,8 +3902,8 @@ scratch_ram {
3803/************************* Hardware SCB Definition ****************************/ 3902/************************* Hardware SCB Definition ****************************/
3804scb { 3903scb {
3805 address 0x180 3904 address 0x180
3806 size 64 3905 size 64
3807 modes 0, 1, 2, 3 3906 modes 0, 1, 2, 3
3808 SCB_RESIDUAL_DATACNT { 3907 SCB_RESIDUAL_DATACNT {
3809 size 4 3908 size 4
3810 alias SCB_CDB_STORE 3909 alias SCB_CDB_STORE
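
Most of the aic79xx.reg hunks add a count attribute to existing register and scratch-RAM definitions (its exact semantics are not shown in this diff; it is presumably consumed by the aicasm tooling that generates the driver's register headers), plus a few new Rev B fields such as LQOBUSETDLY and LQONOHOLDLACK. For orientation only, here is a hedged, self-contained sketch of the consumer view of one such block: the values are copied from the LQOSCSCTL definition above, the #defines are the sort of symbols the register tooling is expected to emit, and reg_read()/reg_write() are stand-ins rather than the driver's ahd_inb()/ahd_outb():

#include <stdint.h>
#include <stdio.h>

#define LQOSCSCTL	0x05A		/* address 0x05A */
#define LQOH2A_VERSION	0x80
#define LQOBUSETDLY	0x40		/* field added by this patch */
#define LQONOHOLDLACK	0x02		/* field added by this patch */
#define LQONOCHKOVER	0x01

static uint8_t fake_regs[256];		/* pretend register file */

static uint8_t reg_read(unsigned int addr)
{
	return fake_regs[addr];
}

static void reg_write(unsigned int addr, uint8_t val)
{
	fake_regs[addr] = val;
}

int main(void)
{
	/* Typical read-modify-write against a symbolic register/field pair. */
	reg_write(LQOSCSCTL, reg_read(LQOSCSCTL) | LQOBUSETDLY);
	printf("LQOSCSCTL = 0x%02x\n", reg_read(LQOSCSCTL));
	return 0;
}
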
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index ade0fb8fbdb2..55508b0fcec4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -52,7 +52,7 @@
52 52
53 53
54/***************************** Lookup Tables **********************************/ 54/***************************** Lookup Tables **********************************/
55static char *ahd_chip_names[] = 55static const char *const ahd_chip_names[] =
56{ 56{
57 "NONE", 57 "NONE",
58 "aic7901", 58 "aic7901",
@@ -66,10 +66,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
66 */ 66 */
67struct ahd_hard_error_entry { 67struct ahd_hard_error_entry {
68 uint8_t errno; 68 uint8_t errno;
69 char *errmesg; 69 const char *errmesg;
70}; 70};
71 71
72static struct ahd_hard_error_entry ahd_hard_errors[] = { 72static const struct ahd_hard_error_entry ahd_hard_errors[] = {
73 { DSCTMOUT, "Discard Timer has timed out" }, 73 { DSCTMOUT, "Discard Timer has timed out" },
74 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 74 { ILLOPCODE, "Illegal Opcode in sequencer program" },
75 { SQPARERR, "Sequencer Parity Error" }, 75 { SQPARERR, "Sequencer Parity Error" },
@@ -79,7 +79,7 @@ static struct ahd_hard_error_entry ahd_hard_errors[] = {
79}; 79};
80static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); 80static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
81 81
82static struct ahd_phase_table_entry ahd_phase_table[] = 82static const struct ahd_phase_table_entry ahd_phase_table[] =
83{ 83{
84 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 84 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
85 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 85 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -213,7 +213,7 @@ static void ahd_dumpseq(struct ahd_softc *ahd);
213#endif 213#endif
214static void ahd_loadseq(struct ahd_softc *ahd); 214static void ahd_loadseq(struct ahd_softc *ahd);
215static int ahd_check_patch(struct ahd_softc *ahd, 215static int ahd_check_patch(struct ahd_softc *ahd,
216 struct patch **start_patch, 216 const struct patch **start_patch,
217 u_int start_instr, u_int *skip_addr); 217 u_int start_instr, u_int *skip_addr);
218static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, 218static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd,
219 u_int address); 219 u_int address);
@@ -254,7 +254,7 @@ static void ahd_freeze_devq(struct ahd_softc *ahd,
254 struct scb *scb); 254 struct scb *scb);
255static void ahd_handle_scb_status(struct ahd_softc *ahd, 255static void ahd_handle_scb_status(struct ahd_softc *ahd,
256 struct scb *scb); 256 struct scb *scb);
257static struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); 257static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
258static void ahd_shutdown(void *arg); 258static void ahd_shutdown(void *arg);
259static void ahd_update_coalescing_values(struct ahd_softc *ahd, 259static void ahd_update_coalescing_values(struct ahd_softc *ahd,
260 u_int timer, 260 u_int timer,
@@ -266,8 +266,774 @@ static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
266 int target, char channel, int lun, 266 int target, char channel, int lun,
267 u_int tag, role_t role); 267 u_int tag, role_t role);
268 268
269/******************************** Private Inlines *****************************/ 269static void ahd_reset_cmds_pending(struct ahd_softc *ahd);
270
271/*************************** Interrupt Services *******************************/
272static void ahd_run_qoutfifo(struct ahd_softc *ahd);
273#ifdef AHD_TARGET_MODE
274static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
275#endif
276static void ahd_handle_hwerrint(struct ahd_softc *ahd);
277static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
278static void ahd_handle_scsiint(struct ahd_softc *ahd,
279 u_int intstat);
280
281/************************ Sequencer Execution Control *************************/
282void
283ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
284{
285 if (ahd->src_mode == src && ahd->dst_mode == dst)
286 return;
287#ifdef AHD_DEBUG
288 if (ahd->src_mode == AHD_MODE_UNKNOWN
289 || ahd->dst_mode == AHD_MODE_UNKNOWN)
290 panic("Setting mode prior to saving it.\n");
291 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
292 printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
293 ahd_build_mode_state(ahd, src, dst));
294#endif
295 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
296 ahd->src_mode = src;
297 ahd->dst_mode = dst;
298}
299
300static void
301ahd_update_modes(struct ahd_softc *ahd)
302{
303 ahd_mode_state mode_ptr;
304 ahd_mode src;
305 ahd_mode dst;
306
307 mode_ptr = ahd_inb(ahd, MODE_PTR);
308#ifdef AHD_DEBUG
309 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
310 printf("Reading mode 0x%x\n", mode_ptr);
311#endif
312 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
313 ahd_known_modes(ahd, src, dst);
314}
315
316static void
317ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
318 ahd_mode dstmode, const char *file, int line)
319{
320#ifdef AHD_DEBUG
321 if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
322 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
323 panic("%s:%s:%d: Mode assertion failed.\n",
324 ahd_name(ahd), file, line);
325 }
326#endif
327}
328
329#define AHD_ASSERT_MODES(ahd, source, dest) \
330 ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
331
332ahd_mode_state
333ahd_save_modes(struct ahd_softc *ahd)
334{
335 if (ahd->src_mode == AHD_MODE_UNKNOWN
336 || ahd->dst_mode == AHD_MODE_UNKNOWN)
337 ahd_update_modes(ahd);
338
339 return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
340}
341
342void
343ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
344{
345 ahd_mode src;
346 ahd_mode dst;
347
348 ahd_extract_mode_state(ahd, state, &src, &dst);
349 ahd_set_modes(ahd, src, dst);
350}
351
352/*
353 * Determine whether the sequencer has halted code execution.
354 * Returns non-zero status if the sequencer is stopped.
355 */
356int
357ahd_is_paused(struct ahd_softc *ahd)
358{
359 return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
360}
361
362/*
363 * Request that the sequencer stop and wait, indefinitely, for it
364 * to stop. The sequencer will only acknowledge that it is paused
365 * once it has reached an instruction boundary and PAUSEDIS is
366 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
367 * for critical sections.
368 */
369void
370ahd_pause(struct ahd_softc *ahd)
371{
372 ahd_outb(ahd, HCNTRL, ahd->pause);
373
374 /*
375 * Since the sequencer can disable pausing in a critical section, we
376 * must loop until it actually stops.
377 */
378 while (ahd_is_paused(ahd) == 0)
379 ;
380}
381
382/*
383 * Allow the sequencer to continue program execution.
384 * We check here to ensure that no additional interrupt
385 * sources that would cause the sequencer to halt have been
386 * asserted. If, for example, a SCSI bus reset is detected
387 * while we are fielding a different, pausing, interrupt type,
388 * we don't want to release the sequencer before going back
389 * into our interrupt handler and dealing with this new
390 * condition.
391 */
392void
393ahd_unpause(struct ahd_softc *ahd)
394{
395 /*
396 * Automatically restore our modes to those saved
397 * prior to the first change of the mode.
398 */
399 if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
400 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
401 if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
402 ahd_reset_cmds_pending(ahd);
403 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
404 }
405
406 if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
407 ahd_outb(ahd, HCNTRL, ahd->unpause);
408
409 ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
410}
411
412/*********************** Scatter Gather List Handling *************************/
413void *
414ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
415 void *sgptr, dma_addr_t addr, bus_size_t len, int last)
416{
417 scb->sg_count++;
418 if (sizeof(dma_addr_t) > 4
419 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
420 struct ahd_dma64_seg *sg;
421
422 sg = (struct ahd_dma64_seg *)sgptr;
423 sg->addr = ahd_htole64(addr);
424 sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
425 return (sg + 1);
426 } else {
427 struct ahd_dma_seg *sg;
270 428
429 sg = (struct ahd_dma_seg *)sgptr;
430 sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
431 sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
432 | (last ? AHD_DMA_LAST_SEG : 0));
433 return (sg + 1);
434 }
435}
436
437static void
438ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
439{
440 /* XXX Handle target mode SCBs. */
441 scb->crc_retry_count = 0;
442 if ((scb->flags & SCB_PACKETIZED) != 0) {
443 /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
444 scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
445 } else {
446 if (ahd_get_transfer_length(scb) & 0x01)
447 scb->hscb->task_attribute = SCB_XFERLEN_ODD;
448 else
449 scb->hscb->task_attribute = 0;
450 }
451
452 if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
453 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
454 scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
455 ahd_htole32(scb->sense_busaddr);
456}
457
458static void
459ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
460{
461 /*
462 * Copy the first SG into the "current" data pointer area.
463 */
464 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
465 struct ahd_dma64_seg *sg;
466
467 sg = (struct ahd_dma64_seg *)scb->sg_list;
468 scb->hscb->dataptr = sg->addr;
469 scb->hscb->datacnt = sg->len;
470 } else {
471 struct ahd_dma_seg *sg;
472 uint32_t *dataptr_words;
473
474 sg = (struct ahd_dma_seg *)scb->sg_list;
475 dataptr_words = (uint32_t*)&scb->hscb->dataptr;
476 dataptr_words[0] = sg->addr;
477 dataptr_words[1] = 0;
478 if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
479 uint64_t high_addr;
480
481 high_addr = ahd_le32toh(sg->len) & 0x7F000000;
482 scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
483 }
484 scb->hscb->datacnt = sg->len;
485 }
486 /*
487 * Note where to find the SG entries in bus space.
488 * We also set the full residual flag which the
489 * sequencer will clear as soon as a data transfer
490 * occurs.
491 */
492 scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
493}
494
495static void
496ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
497{
498 scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
499 scb->hscb->dataptr = 0;
500 scb->hscb->datacnt = 0;
501}
502
503/************************** Memory mapping routines ***************************/
504static void *
505ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
506{
507 dma_addr_t sg_offset;
508
509 /* sg_list_phys points to entry 1, not 0 */
510 sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
511 return ((uint8_t *)scb->sg_list + sg_offset);
512}
513
514static uint32_t
515ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
516{
517 dma_addr_t sg_offset;
518
519 /* sg_list_phys points to entry 1, not 0 */
520 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
521 - ahd_sg_size(ahd);
522
523 return (scb->sg_list_busaddr + sg_offset);
524}
525
526static void
527ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
528{
529 ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
530 scb->hscb_map->dmamap,
531 /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
532 /*len*/sizeof(*scb->hscb), op);
533}
534
535void
536ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
537{
538 if (scb->sg_count == 0)
539 return;
540
541 ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
542 scb->sg_map->dmamap,
543 /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
544 /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
545}
546
547static void
548ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
549{
550 ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
551 scb->sense_map->dmamap,
552 /*offset*/scb->sense_busaddr,
553 /*len*/AHD_SENSE_BUFSIZE, op);
554}
555
556#ifdef AHD_TARGET_MODE
557static uint32_t
558ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
559{
560 return (((uint8_t *)&ahd->targetcmds[index])
561 - (uint8_t *)ahd->qoutfifo);
562}
563#endif
564
565/*********************** Miscellaneous Support Functions **********************/
566/*
567 * Return pointers to the transfer negotiation information
568 * for the specified our_id/remote_id pair.
569 */
570struct ahd_initiator_tinfo *
571ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
572 u_int remote_id, struct ahd_tmode_tstate **tstate)
573{
574 /*
575 * Transfer data structures are stored from the perspective
576 * of the target role. Since the parameters for a connection
577 * in the initiator role to a given target are the same as
578 * when the roles are reversed, we pretend we are the target.
579 */
580 if (channel == 'B')
581 our_id += 8;
582 *tstate = ahd->enabled_targets[our_id];
583 return (&(*tstate)->transinfo[remote_id]);
584}
585
586uint16_t
587ahd_inw(struct ahd_softc *ahd, u_int port)
588{
589 /*
590 * Read high byte first as some registers increment
591 * or have other side effects when the low byte is
592 * read.
593 */
594 uint16_t r = ahd_inb(ahd, port+1) << 8;
595 return r | ahd_inb(ahd, port);
596}
597
598void
599ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
600{
601 /*
602 * Write low byte first to accommodate registers
603 * such as PRGMCNT where the order matters.
604 */
605 ahd_outb(ahd, port, value & 0xFF);
606 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
607}
608
609uint32_t
610ahd_inl(struct ahd_softc *ahd, u_int port)
611{
612 return ((ahd_inb(ahd, port))
613 | (ahd_inb(ahd, port+1) << 8)
614 | (ahd_inb(ahd, port+2) << 16)
615 | (ahd_inb(ahd, port+3) << 24));
616}
617
618void
619ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
620{
621 ahd_outb(ahd, port, (value) & 0xFF);
622 ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
623 ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
624 ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
625}
626
627uint64_t
628ahd_inq(struct ahd_softc *ahd, u_int port)
629{
630 return ((ahd_inb(ahd, port))
631 | (ahd_inb(ahd, port+1) << 8)
632 | (ahd_inb(ahd, port+2) << 16)
633 | (ahd_inb(ahd, port+3) << 24)
634 | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
635 | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
636 | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
637 | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
638}
639
640void
641ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
642{
643 ahd_outb(ahd, port, value & 0xFF);
644 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
645 ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
646 ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
647 ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
648 ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
649 ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
650 ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
651}
652
653u_int
654ahd_get_scbptr(struct ahd_softc *ahd)
655{
656 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
657 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
658 return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
659}
660
661void
662ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
663{
664 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
665 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
666 ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
667 ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
668}
669
670#if 0 /* unused */
671static u_int
672ahd_get_hnscb_qoff(struct ahd_softc *ahd)
673{
674 return (ahd_inw_atomic(ahd, HNSCB_QOFF));
675}
676#endif
677
678static void
679ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
680{
681 ahd_outw_atomic(ahd, HNSCB_QOFF, value);
682}
683
684#if 0 /* unused */
685static u_int
686ahd_get_hescb_qoff(struct ahd_softc *ahd)
687{
688 return (ahd_inb(ahd, HESCB_QOFF));
689}
690#endif
691
692static void
693ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
694{
695 ahd_outb(ahd, HESCB_QOFF, value);
696}
697
698static u_int
699ahd_get_snscb_qoff(struct ahd_softc *ahd)
700{
701 u_int oldvalue;
702
703 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
704 oldvalue = ahd_inw(ahd, SNSCB_QOFF);
705 ahd_outw(ahd, SNSCB_QOFF, oldvalue);
706 return (oldvalue);
707}
708
709static void
710ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
711{
712 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
713 ahd_outw(ahd, SNSCB_QOFF, value);
714}
715
716#if 0 /* unused */
717static u_int
718ahd_get_sescb_qoff(struct ahd_softc *ahd)
719{
720 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
721 return (ahd_inb(ahd, SESCB_QOFF));
722}
723#endif
724
725static void
726ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
727{
728 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
729 ahd_outb(ahd, SESCB_QOFF, value);
730}
731
732#if 0 /* unused */
733static u_int
734ahd_get_sdscb_qoff(struct ahd_softc *ahd)
735{
736 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
737 return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
738}
739#endif
740
741static void
742ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
743{
744 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
745 ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
746 ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
747}
748
749u_int
750ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
751{
752 u_int value;
753
754 /*
755 * Workaround PCI-X Rev A. hardware bug.
756 * After a host read of SCB memory, the chip
757 * may become confused into thinking prefetch
758 * was required. This starts the discard timer
759 * running and can cause an unexpected discard
760 * timer interrupt. The workaround is to read
761 * a normal register prior to the exhaustion of
762 * the discard timer. The mode pointer register
763 * has no side effects and so serves well for
764 * this purpose.
765 *
766 * Razor #528
767 */
768 value = ahd_inb(ahd, offset);
769 if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
770 ahd_inb(ahd, MODE_PTR);
771 return (value);
772}
773
774u_int
775ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
776{
777 return (ahd_inb_scbram(ahd, offset)
778 | (ahd_inb_scbram(ahd, offset+1) << 8));
779}
780
781static uint32_t
782ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
783{
784 return (ahd_inw_scbram(ahd, offset)
785 | (ahd_inw_scbram(ahd, offset+2) << 16));
786}
787
788static uint64_t
789ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
790{
791 return (ahd_inl_scbram(ahd, offset)
792 | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
793}
794
795struct scb *
796ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
797{
798 struct scb* scb;
799
800 if (tag >= AHD_SCB_MAX)
801 return (NULL);
802 scb = ahd->scb_data.scbindex[tag];
803 if (scb != NULL)
804 ahd_sync_scb(ahd, scb,
805 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
806 return (scb);
807}
808
809static void
810ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
811{
812 struct hardware_scb *q_hscb;
813 struct map_node *q_hscb_map;
814 uint32_t saved_hscb_busaddr;
815
816 /*
817 * Our queuing method is a bit tricky. The card
818 * knows in advance which HSCB (by address) to download,
819 * and we can't disappoint it. To achieve this, the next
820 * HSCB to download is saved off in ahd->next_queued_hscb.
821 * When we are called to queue "an arbitrary scb",
822 * we copy the contents of the incoming HSCB to the one
823 * the sequencer knows about, swap HSCB pointers and
824 * finally assign the SCB to the tag indexed location
825 * in the scb_array. This makes sure that we can still
826 * locate the correct SCB by SCB_TAG.
827 */
828 q_hscb = ahd->next_queued_hscb;
829 q_hscb_map = ahd->next_queued_hscb_map;
830 saved_hscb_busaddr = q_hscb->hscb_busaddr;
831 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
832 q_hscb->hscb_busaddr = saved_hscb_busaddr;
833 q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
834
835 /* Now swap HSCB pointers. */
836 ahd->next_queued_hscb = scb->hscb;
837 ahd->next_queued_hscb_map = scb->hscb_map;
838 scb->hscb = q_hscb;
839 scb->hscb_map = q_hscb_map;
840
841 /* Now define the mapping from tag to SCB in the scbindex */
842 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
843}
844
845/*
846 * Tell the sequencer about a new transaction to execute.
847 */
848void
849ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
850{
851 ahd_swap_with_next_hscb(ahd, scb);
852
853 if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
854 panic("Attempt to queue invalid SCB tag %x\n",
855 SCB_GET_TAG(scb));
856
857 /*
858 * Keep a history of SCBs we've downloaded in the qinfifo.
859 */
860 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
861 ahd->qinfifonext++;
862
863 if (scb->sg_count != 0)
864 ahd_setup_data_scb(ahd, scb);
865 else
866 ahd_setup_noxfer_scb(ahd, scb);
867 ahd_setup_scb_common(ahd, scb);
868
869 /*
870 * Make sure our data is consistent from the
871 * perspective of the adapter.
872 */
873 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
874
875#ifdef AHD_DEBUG
876 if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
877 uint64_t host_dataptr;
878
879 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
880 printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
881 ahd_name(ahd),
882 SCB_GET_TAG(scb), scb->hscb->scsiid,
883 ahd_le32toh(scb->hscb->hscb_busaddr),
884 (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
885 (u_int)(host_dataptr & 0xFFFFFFFF),
886 ahd_le32toh(scb->hscb->datacnt));
887 }
888#endif
889 /* Tell the adapter about the newly queued SCB */
890 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
891}
892
893/************************** Interrupt Processing ******************************/
894static void
895ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
896{
897 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
898 /*offset*/0,
899 /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
900}
901
902static void
903ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
904{
905#ifdef AHD_TARGET_MODE
906 if ((ahd->flags & AHD_TARGETROLE) != 0) {
907 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
908 ahd->shared_data_map.dmamap,
909 ahd_targetcmd_offset(ahd, 0),
910 sizeof(struct target_cmd) * AHD_TMODE_CMDS,
911 op);
912 }
913#endif
914}
915
916/*
917 * See if the firmware has posted any completed commands
918 * into our in-core command complete fifos.
919 */
920#define AHD_RUN_QOUTFIFO 0x1
921#define AHD_RUN_TQINFIFO 0x2
922static u_int
923ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
924{
925 u_int retval;
926
927 retval = 0;
928 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
929 /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
930 /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
931 if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
932 == ahd->qoutfifonext_valid_tag)
933 retval |= AHD_RUN_QOUTFIFO;
934#ifdef AHD_TARGET_MODE
935 if ((ahd->flags & AHD_TARGETROLE) != 0
936 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
937 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
938 ahd->shared_data_map.dmamap,
939                                 ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
940 /*len*/sizeof(struct target_cmd),
941 BUS_DMASYNC_POSTREAD);
942 if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
943 retval |= AHD_RUN_TQINFIFO;
944 }
945#endif
946 return (retval);
947}
948
949/*
950 * Catch an interrupt from the adapter
951 */
952int
953ahd_intr(struct ahd_softc *ahd)
954{
955 u_int intstat;
956
957 if ((ahd->pause & INTEN) == 0) {
958 /*
959 * Our interrupt is not enabled on the chip
960 * and may be disabled for re-entrancy reasons,
961 * so just return. This is likely just a shared
962 * interrupt.
963 */
964 return (0);
965 }
966
967 /*
968 * Instead of directly reading the interrupt status register,
969 * infer the cause of the interrupt by checking our in-core
970 * completion queues. This avoids a costly PCI bus read in
971 * most cases.
972 */
973 if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
974 && (ahd_check_cmdcmpltqueues(ahd) != 0))
975 intstat = CMDCMPLT;
976 else
977 intstat = ahd_inb(ahd, INTSTAT);
978
979 if ((intstat & INT_PEND) == 0)
980 return (0);
981
982 if (intstat & CMDCMPLT) {
983 ahd_outb(ahd, CLRINT, CLRCMDINT);
984
985 /*
986 * Ensure that the chip sees that we've cleared
987 * this interrupt before we walk the output fifo.
988 * Otherwise, we may, due to posted bus writes,
989 * clear the interrupt after we finish the scan,
990 * and after the sequencer has added new entries
991 * and asserted the interrupt again.
992 */
993 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
994 if (ahd_is_paused(ahd)) {
995 /*
996 * Potentially lost SEQINT.
997 * If SEQINTCODE is non-zero,
998 * simulate the SEQINT.
999 */
1000 if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
1001 intstat |= SEQINT;
1002 }
1003 } else {
1004 ahd_flush_device_writes(ahd);
1005 }
1006 ahd_run_qoutfifo(ahd);
1007 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
1008 ahd->cmdcmplt_total++;
1009#ifdef AHD_TARGET_MODE
1010 if ((ahd->flags & AHD_TARGETROLE) != 0)
1011 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
1012#endif
1013 }
1014
1015 /*
1016 * Handle statuses that may invalidate our cached
1017 * copy of INTSTAT separately.
1018 */
1019 if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
1020 /* Hot eject. Do nothing */
1021 } else if (intstat & HWERRINT) {
1022 ahd_handle_hwerrint(ahd);
1023 } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
1024 ahd->bus_intr(ahd);
1025 } else {
1026
1027 if ((intstat & SEQINT) != 0)
1028 ahd_handle_seqint(ahd, intstat);
1029
1030 if ((intstat & SCSIINT) != 0)
1031 ahd_handle_scsiint(ahd, intstat);
1032 }
1033 return (1);
1034}
1035
1036/******************************** Private Inlines *****************************/
271static __inline void 1037static __inline void
272ahd_assert_atn(struct ahd_softc *ahd) 1038ahd_assert_atn(struct ahd_softc *ahd)
273{ 1039{
@@ -280,7 +1046,7 @@ ahd_assert_atn(struct ahd_softc *ahd)
280 * are currently in a packetized transfer. We could 1046 * are currently in a packetized transfer. We could
281 * just as easily be sending or receiving a message. 1047 * just as easily be sending or receiving a message.
282 */ 1048 */
283static __inline int 1049static int
284ahd_currently_packetized(struct ahd_softc *ahd) 1050ahd_currently_packetized(struct ahd_softc *ahd)
285{ 1051{
286 ahd_mode_state saved_modes; 1052 ahd_mode_state saved_modes;
@@ -896,7 +1662,7 @@ clrchn:
896 * a copy of the first byte (little endian) of the sgptr 1662 * a copy of the first byte (little endian) of the sgptr
897 * hscb field. 1663 * hscb field.
898 */ 1664 */
899void 1665static void
900ahd_run_qoutfifo(struct ahd_softc *ahd) 1666ahd_run_qoutfifo(struct ahd_softc *ahd)
901{ 1667{
902 struct ahd_completion *completion; 1668 struct ahd_completion *completion;
@@ -935,7 +1701,7 @@ ahd_run_qoutfifo(struct ahd_softc *ahd)
935} 1701}
936 1702
937/************************* Interrupt Handling *********************************/ 1703/************************* Interrupt Handling *********************************/
938void 1704static void
939ahd_handle_hwerrint(struct ahd_softc *ahd) 1705ahd_handle_hwerrint(struct ahd_softc *ahd)
940{ 1706{
941 /* 1707 /*
@@ -1009,7 +1775,7 @@ ahd_dump_sglist(struct scb *scb)
1009} 1775}
1010#endif /* AHD_DEBUG */ 1776#endif /* AHD_DEBUG */
1011 1777
1012void 1778static void
1013ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) 1779ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1014{ 1780{
1015 u_int seqintcode; 1781 u_int seqintcode;
@@ -1621,7 +2387,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
1621 ahd_unpause(ahd); 2387 ahd_unpause(ahd);
1622} 2388}
1623 2389
1624void 2390static void
1625ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) 2391ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
1626{ 2392{
1627 struct scb *scb; 2393 struct scb *scb;
@@ -3571,11 +4337,11 @@ ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3571 devinfo->target, devinfo->lun); 4337 devinfo->target, devinfo->lun);
3572} 4338}
3573 4339
3574static struct ahd_phase_table_entry* 4340static const struct ahd_phase_table_entry*
3575ahd_lookup_phase_entry(int phase) 4341ahd_lookup_phase_entry(int phase)
3576{ 4342{
3577 struct ahd_phase_table_entry *entry; 4343 const struct ahd_phase_table_entry *entry;
3578 struct ahd_phase_table_entry *last_entry; 4344 const struct ahd_phase_table_entry *last_entry;
3579 4345
3580 /* 4346 /*
3581 * num_phases doesn't include the default entry which 4347 * num_phases doesn't include the default entry which
@@ -3941,7 +4707,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
3941 */ 4707 */
3942static void 4708static void
3943ahd_handle_message_phase(struct ahd_softc *ahd) 4709ahd_handle_message_phase(struct ahd_softc *ahd)
3944{ 4710{
3945 struct ahd_devinfo devinfo; 4711 struct ahd_devinfo devinfo;
3946 u_int bus_phase; 4712 u_int bus_phase;
3947 int end_session; 4713 int end_session;
@@ -5983,8 +6749,7 @@ found:
5983 */ 6749 */
5984void 6750void
5985ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) 6751ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
5986{ 6752{
5987
5988 /* Clean up for the next user */ 6753 /* Clean up for the next user */
5989 scb->flags = SCB_FLAG_NONE; 6754 scb->flags = SCB_FLAG_NONE;
5990 scb->hscb->control = 0; 6755 scb->hscb->control = 0;
@@ -6272,6 +7037,24 @@ static const char *termstat_strings[] = {
6272 "Not Configured" 7037 "Not Configured"
6273}; 7038};
6274 7039
7040/***************************** Timer Facilities *******************************/
7041#define ahd_timer_init init_timer
7042#define ahd_timer_stop del_timer_sync
7043typedef void ahd_linux_callback_t (u_long);
7044
7045static void
7046ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
7047{
7048 struct ahd_softc *ahd;
7049
7050 ahd = (struct ahd_softc *)arg;
7051 del_timer(timer);
7052 timer->data = (u_long)arg;
7053 timer->expires = jiffies + (usec * HZ)/1000000;
7054 timer->function = (ahd_linux_callback_t*)func;
7055 add_timer(timer);
7056}
7057
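The expiry arithmetic in the relocated ahd_timer_reset() is plain integer math: usec * HZ / 1000000 ticks from "now". A small stand-alone sketch follows; the tick rate is an arbitrary stand-in for HZ, and it only shows that the truncating division rounds sub-tick timeouts down to zero.

#include <stdio.h>

#define FAKE_HZ 250	/* illustrative tick rate, not the kernel's HZ */

/* Same conversion as the timer reset above: truncating integer division. */
static long usec_to_ticks(long usec)
{
	return (usec * FAKE_HZ) / 1000000;
}

int main(void)
{
	long samples[] = { 1000, 4000, 250000, 1000000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%8ld us -> %ld ticks\n", samples[i],
		       usec_to_ticks(samples[i]));
	/* 1000 us -> 0 ticks at 250 Hz: sub-tick timeouts truncate to zero. */
	return 0;
}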
6275/* 7058/*
6276 * Start the board, ready for normal operation 7059 * Start the board, ready for normal operation
6277 */ 7060 */
@@ -7370,7 +8153,7 @@ ahd_qinfifo_count(struct ahd_softc *ahd)
7370 + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos); 8153 + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
7371} 8154}
7372 8155
7373void 8156static void
7374ahd_reset_cmds_pending(struct ahd_softc *ahd) 8157ahd_reset_cmds_pending(struct ahd_softc *ahd)
7375{ 8158{
7376 struct scb *scb; 8159 struct scb *scb;
@@ -8571,7 +9354,7 @@ ahd_loadseq(struct ahd_softc *ahd)
8571 struct cs cs_table[num_critical_sections]; 9354 struct cs cs_table[num_critical_sections];
8572 u_int begin_set[num_critical_sections]; 9355 u_int begin_set[num_critical_sections];
8573 u_int end_set[num_critical_sections]; 9356 u_int end_set[num_critical_sections];
8574 struct patch *cur_patch; 9357 const struct patch *cur_patch;
8575 u_int cs_count; 9358 u_int cs_count;
8576 u_int cur_cs; 9359 u_int cur_cs;
8577 u_int i; 9360 u_int i;
@@ -8726,11 +9509,11 @@ ahd_loadseq(struct ahd_softc *ahd)
8726} 9509}
8727 9510
8728static int 9511static int
8729ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, 9512ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
8730 u_int start_instr, u_int *skip_addr) 9513 u_int start_instr, u_int *skip_addr)
8731{ 9514{
8732 struct patch *cur_patch; 9515 const struct patch *cur_patch;
8733 struct patch *last_patch; 9516 const struct patch *last_patch;
8734 u_int num_patches; 9517 u_int num_patches;
8735 9518
8736 num_patches = ARRAY_SIZE(patches); 9519 num_patches = ARRAY_SIZE(patches);
@@ -8764,7 +9547,7 @@ ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
8764static u_int 9547static u_int
8765ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) 9548ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
8766{ 9549{
8767 struct patch *cur_patch; 9550 const struct patch *cur_patch;
8768 int address_offset; 9551 int address_offset;
8769 u_int skip_addr; 9552 u_int skip_addr;
8770 u_int i; 9553 u_int i;
@@ -8895,7 +9678,7 @@ sized:
8895} 9678}
8896 9679
8897int 9680int
8898ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, 9681ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
8899 const char *name, u_int address, u_int value, 9682 const char *name, u_int address, u_int value,
8900 u_int *cur_column, u_int wrap_point) 9683 u_int *cur_column, u_int wrap_point)
8901{ 9684{
@@ -9886,7 +10669,7 @@ ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
9886#endif 10669#endif
9887} 10670}
9888 10671
9889void 10672static void
9890ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) 10673ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
9891{ 10674{
9892 struct target_cmd *cmd; 10675 struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
index 45e55575a0fa..5f12cf9d99d0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_inline.h
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -63,18 +63,15 @@ static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
63static __inline void ahd_extract_mode_state(struct ahd_softc *ahd, 63static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
64 ahd_mode_state state, 64 ahd_mode_state state,
65 ahd_mode *src, ahd_mode *dst); 65 ahd_mode *src, ahd_mode *dst);
66static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, 66
67 ahd_mode dst); 67void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
68static __inline void ahd_update_modes(struct ahd_softc *ahd); 68 ahd_mode dst);
69static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, 69ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
70 ahd_mode dstmode, const char *file, 70void ahd_restore_modes(struct ahd_softc *ahd,
71 int line); 71 ahd_mode_state state);
72static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd); 72int ahd_is_paused(struct ahd_softc *ahd);
73static __inline void ahd_restore_modes(struct ahd_softc *ahd, 73void ahd_pause(struct ahd_softc *ahd);
74 ahd_mode_state state); 74void ahd_unpause(struct ahd_softc *ahd);
75static __inline int ahd_is_paused(struct ahd_softc *ahd);
76static __inline void ahd_pause(struct ahd_softc *ahd);
77static __inline void ahd_unpause(struct ahd_softc *ahd);
78 75
79static __inline void 76static __inline void
80ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) 77ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
@@ -99,256 +96,16 @@ ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
99 *dst = (state & DST_MODE) >> DST_MODE_SHIFT; 96 *dst = (state & DST_MODE) >> DST_MODE_SHIFT;
100} 97}
101 98
102static __inline void
103ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
104{
105 if (ahd->src_mode == src && ahd->dst_mode == dst)
106 return;
107#ifdef AHD_DEBUG
108 if (ahd->src_mode == AHD_MODE_UNKNOWN
109 || ahd->dst_mode == AHD_MODE_UNKNOWN)
110 panic("Setting mode prior to saving it.\n");
111 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
112 printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
113 ahd_build_mode_state(ahd, src, dst));
114#endif
115 ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
116 ahd->src_mode = src;
117 ahd->dst_mode = dst;
118}
119
120static __inline void
121ahd_update_modes(struct ahd_softc *ahd)
122{
123 ahd_mode_state mode_ptr;
124 ahd_mode src;
125 ahd_mode dst;
126
127 mode_ptr = ahd_inb(ahd, MODE_PTR);
128#ifdef AHD_DEBUG
129 if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
130 printf("Reading mode 0x%x\n", mode_ptr);
131#endif
132 ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
133 ahd_known_modes(ahd, src, dst);
134}
135
136static __inline void
137ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
138 ahd_mode dstmode, const char *file, int line)
139{
140#ifdef AHD_DEBUG
141 if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
142 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
143 panic("%s:%s:%d: Mode assertion failed.\n",
144 ahd_name(ahd), file, line);
145 }
146#endif
147}
148
149static __inline ahd_mode_state
150ahd_save_modes(struct ahd_softc *ahd)
151{
152 if (ahd->src_mode == AHD_MODE_UNKNOWN
153 || ahd->dst_mode == AHD_MODE_UNKNOWN)
154 ahd_update_modes(ahd);
155
156 return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
157}
158
159static __inline void
160ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
161{
162 ahd_mode src;
163 ahd_mode dst;
164
165 ahd_extract_mode_state(ahd, state, &src, &dst);
166 ahd_set_modes(ahd, src, dst);
167}
168
169#define AHD_ASSERT_MODES(ahd, source, dest) \
170 ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
171
172/*
173 * Determine whether the sequencer has halted code execution.
174 * Returns non-zero status if the sequencer is stopped.
175 */
176static __inline int
177ahd_is_paused(struct ahd_softc *ahd)
178{
179 return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
180}
181
182/*
183 * Request that the sequencer stop and wait, indefinitely, for it
184 * to stop. The sequencer will only acknowledge that it is paused
185 * once it has reached an instruction boundary and PAUSEDIS is
186 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
187 * for critical sections.
188 */
189static __inline void
190ahd_pause(struct ahd_softc *ahd)
191{
192 ahd_outb(ahd, HCNTRL, ahd->pause);
193
194 /*
195 * Since the sequencer can disable pausing in a critical section, we
196 * must loop until it actually stops.
197 */
198 while (ahd_is_paused(ahd) == 0)
199 ;
200}
201
202/*
203 * Allow the sequencer to continue program execution.
204 * We check here to ensure that no additional interrupt
205 * sources that would cause the sequencer to halt have been
206 * asserted. If, for example, a SCSI bus reset is detected
207 * while we are fielding a different, pausing, interrupt type,
208 * we don't want to release the sequencer before going back
209 * into our interrupt handler and dealing with this new
210 * condition.
211 */
212static __inline void
213ahd_unpause(struct ahd_softc *ahd)
214{
215 /*
216 * Automatically restore our modes to those saved
217 * prior to the first change of the mode.
218 */
219 if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
220 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
221 if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
222 ahd_reset_cmds_pending(ahd);
223 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
224 }
225
226 if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
227 ahd_outb(ahd, HCNTRL, ahd->unpause);
228
229 ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
230}
231
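The pause protocol the deleted inlines describe is request-then-poll: ask the sequencer to stop, then spin until it reports that it actually has, because a critical section may defer the acknowledgement. A user-space model of that shape follows; the register layout and bit names are placeholders, and the "device" is simulated by a countdown.

#include <stdio.h>

/* Illustrative bits -- not the real HCNTRL/PAUSE encoding. */
#define REQ_PAUSE  0x01
#define ACK_PAUSED 0x02

struct mock_chip {
	unsigned hcntrl;
	int insns_until_boundary;	/* models an in-progress critical section */
};

/* Reading status advances the simulation by one "instruction". */
static unsigned read_status(struct mock_chip *c)
{
	if ((c->hcntrl & REQ_PAUSE) && c->insns_until_boundary > 0)
		c->insns_until_boundary--;
	if ((c->hcntrl & REQ_PAUSE) && c->insns_until_boundary == 0)
		c->hcntrl |= ACK_PAUSED;
	return c->hcntrl;
}

/* Request a pause, then poll until the chip acknowledges it. */
static void pause_chip(struct mock_chip *c)
{
	c->hcntrl |= REQ_PAUSE;
	while ((read_status(c) & ACK_PAUSED) == 0)
		;	/* the driver busy-waits in exactly this way */
}

int main(void)
{
	struct mock_chip chip = { 0, 3 };

	pause_chip(&chip);
	printf("paused after critical section drained (hcntrl=0x%x)\n",
	       chip.hcntrl);
	return 0;
}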
232/*********************** Scatter Gather List Handling *************************/ 99/*********************** Scatter Gather List Handling *************************/
233static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, 100void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
234 void *sgptr, dma_addr_t addr, 101 void *sgptr, dma_addr_t addr,
235 bus_size_t len, int last); 102 bus_size_t len, int last);
236static __inline void ahd_setup_scb_common(struct ahd_softc *ahd,
237 struct scb *scb);
238static __inline void ahd_setup_data_scb(struct ahd_softc *ahd,
239 struct scb *scb);
240static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd,
241 struct scb *scb);
242
243static __inline void *
244ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
245 void *sgptr, dma_addr_t addr, bus_size_t len, int last)
246{
247 scb->sg_count++;
248 if (sizeof(dma_addr_t) > 4
249 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
250 struct ahd_dma64_seg *sg;
251
252 sg = (struct ahd_dma64_seg *)sgptr;
253 sg->addr = ahd_htole64(addr);
254 sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
255 return (sg + 1);
256 } else {
257 struct ahd_dma_seg *sg;
258
259 sg = (struct ahd_dma_seg *)sgptr;
260 sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
261 sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
262 | (last ? AHD_DMA_LAST_SEG : 0));
263 return (sg + 1);
264 }
265}
266
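The deleted ahd_sg_setup() packs a 39-bit DMA address into the legacy segment format: the low 32 bits go in the addr word and bits 32..38 are stashed in bits 24..30 of the len word ((addr >> 8) & 0x7F000000). A round-trip sketch of just that packing follows; the struct is a stand-in of mine, not the driver's ahd_dma_seg.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct seg32 {			/* stand-in for the 32-bit S/G element */
	uint32_t addr;		/* low 32 bits of the DMA address */
	uint32_t len;		/* length, plus address bits 32..38 in 24..30 */
};

static void pack(struct seg32 *sg, uint64_t addr, uint32_t len)
{
	sg->addr = (uint32_t)(addr & 0xFFFFFFFF);
	sg->len  = len | (uint32_t)((addr >> 8) & 0x7F000000);
}

static uint64_t unpack_addr(const struct seg32 *sg)
{
	/* Same recovery the data-SCB setup performs: shift the stashed
	 * high bits back up by 8 and OR in the low word. */
	return ((uint64_t)(sg->len & 0x7F000000) << 8) | sg->addr;
}

int main(void)
{
	uint64_t addr = 0x5ABCDEF012ULL;	/* fits in 39 bits */
	struct seg32 sg;

	pack(&sg, addr, 0x2000);
	assert(unpack_addr(&sg) == addr);
	assert((sg.len & 0x00FFFFFF) == 0x2000);
	printf("addr 0x%llx round-trips; len word is 0x%08x\n",
	       (unsigned long long)addr, sg.len);
	return 0;
}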
267static __inline void
268ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
269{
270 /* XXX Handle target mode SCBs. */
271 scb->crc_retry_count = 0;
272 if ((scb->flags & SCB_PACKETIZED) != 0) {
273 /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
274 scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
275 } else {
276 if (ahd_get_transfer_length(scb) & 0x01)
277 scb->hscb->task_attribute = SCB_XFERLEN_ODD;
278 else
279 scb->hscb->task_attribute = 0;
280 }
281
282 if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
283 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
284 scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
285 ahd_htole32(scb->sense_busaddr);
286}
287
288static __inline void
289ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
290{
291 /*
292	 * Copy the first SG into the "current" data pointer area.
293 */
294 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
295 struct ahd_dma64_seg *sg;
296
297 sg = (struct ahd_dma64_seg *)scb->sg_list;
298 scb->hscb->dataptr = sg->addr;
299 scb->hscb->datacnt = sg->len;
300 } else {
301 struct ahd_dma_seg *sg;
302 uint32_t *dataptr_words;
303
304 sg = (struct ahd_dma_seg *)scb->sg_list;
305 dataptr_words = (uint32_t*)&scb->hscb->dataptr;
306 dataptr_words[0] = sg->addr;
307 dataptr_words[1] = 0;
308 if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
309 uint64_t high_addr;
310
311 high_addr = ahd_le32toh(sg->len) & 0x7F000000;
312 scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
313 }
314 scb->hscb->datacnt = sg->len;
315 }
316 /*
317 * Note where to find the SG entries in bus space.
318 * We also set the full residual flag which the
319 * sequencer will clear as soon as a data transfer
320 * occurs.
321 */
322 scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
323}
324
325static __inline void
326ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
327{
328 scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
329 scb->hscb->dataptr = 0;
330 scb->hscb->datacnt = 0;
331}
332 103
333/************************** Memory mapping routines ***************************/ 104/************************** Memory mapping routines ***************************/
334static __inline size_t ahd_sg_size(struct ahd_softc *ahd); 105static __inline size_t ahd_sg_size(struct ahd_softc *ahd);
335static __inline void * 106
336 ahd_sg_bus_to_virt(struct ahd_softc *ahd, 107void ahd_sync_sglist(struct ahd_softc *ahd,
337 struct scb *scb, 108 struct scb *scb, int op);
338 uint32_t sg_busaddr);
339static __inline uint32_t
340 ahd_sg_virt_to_bus(struct ahd_softc *ahd,
341 struct scb *scb,
342 void *sg);
343static __inline void ahd_sync_scb(struct ahd_softc *ahd,
344 struct scb *scb, int op);
345static __inline void ahd_sync_sglist(struct ahd_softc *ahd,
346 struct scb *scb, int op);
347static __inline void ahd_sync_sense(struct ahd_softc *ahd,
348 struct scb *scb, int op);
349static __inline uint32_t
350 ahd_targetcmd_offset(struct ahd_softc *ahd,
351 u_int index);
352 109
353static __inline size_t 110static __inline size_t
354ahd_sg_size(struct ahd_softc *ahd) 111ahd_sg_size(struct ahd_softc *ahd)
@@ -358,104 +115,32 @@ ahd_sg_size(struct ahd_softc *ahd)
358 return (sizeof(struct ahd_dma_seg)); 115 return (sizeof(struct ahd_dma_seg));
359} 116}
360 117
361static __inline void *
362ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
363{
364 dma_addr_t sg_offset;
365
366 /* sg_list_phys points to entry 1, not 0 */
367 sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
368 return ((uint8_t *)scb->sg_list + sg_offset);
369}
370
371static __inline uint32_t
372ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
373{
374 dma_addr_t sg_offset;
375
376 /* sg_list_phys points to entry 1, not 0 */
377 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
378 - ahd_sg_size(ahd);
379
380 return (scb->sg_list_busaddr + sg_offset);
381}
382
383static __inline void
384ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
385{
386 ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
387 scb->hscb_map->dmamap,
388 /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
389 /*len*/sizeof(*scb->hscb), op);
390}
391
392static __inline void
393ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
394{
395 if (scb->sg_count == 0)
396 return;
397
398 ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
399 scb->sg_map->dmamap,
400 /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
401 /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
402}
403
404static __inline void
405ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
406{
407 ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
408 scb->sense_map->dmamap,
409 /*offset*/scb->sense_busaddr,
410 /*len*/AHD_SENSE_BUFSIZE, op);
411}
412
413static __inline uint32_t
414ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
415{
416 return (((uint8_t *)&ahd->targetcmds[index])
417 - (uint8_t *)ahd->qoutfifo);
418}
419
420/*********************** Miscellaneous Support Functions ***********************/ 118/*********************** Miscellaneous Support Functions ***********************/
421static __inline struct ahd_initiator_tinfo * 119struct ahd_initiator_tinfo *
422 ahd_fetch_transinfo(struct ahd_softc *ahd, 120 ahd_fetch_transinfo(struct ahd_softc *ahd,
423 char channel, u_int our_id, 121 char channel, u_int our_id,
424 u_int remote_id, 122 u_int remote_id,
425 struct ahd_tmode_tstate **tstate); 123 struct ahd_tmode_tstate **tstate);
426static __inline uint16_t 124uint16_t
427 ahd_inw(struct ahd_softc *ahd, u_int port); 125 ahd_inw(struct ahd_softc *ahd, u_int port);
428static __inline void ahd_outw(struct ahd_softc *ahd, u_int port, 126void ahd_outw(struct ahd_softc *ahd, u_int port,
429 u_int value); 127 u_int value);
430static __inline uint32_t 128uint32_t
431 ahd_inl(struct ahd_softc *ahd, u_int port); 129 ahd_inl(struct ahd_softc *ahd, u_int port);
432static __inline void ahd_outl(struct ahd_softc *ahd, u_int port, 130void ahd_outl(struct ahd_softc *ahd, u_int port,
433 uint32_t value); 131 uint32_t value);
434static __inline uint64_t 132uint64_t
435 ahd_inq(struct ahd_softc *ahd, u_int port); 133 ahd_inq(struct ahd_softc *ahd, u_int port);
436static __inline void ahd_outq(struct ahd_softc *ahd, u_int port, 134void ahd_outq(struct ahd_softc *ahd, u_int port,
437 uint64_t value); 135 uint64_t value);
438static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd); 136u_int ahd_get_scbptr(struct ahd_softc *ahd);
439static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr); 137void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
440static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd); 138u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
441static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value); 139u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
442static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd); 140struct scb *
443static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value); 141 ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
444static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd); 142void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
445static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value); 143
446static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd);
447static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
448static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd);
449static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
450static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
451static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
452static __inline uint32_t
453 ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
454static __inline uint64_t
455 ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
456static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd,
457 struct scb *scb);
458static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
459static __inline uint8_t * 144static __inline uint8_t *
460 ahd_get_sense_buf(struct ahd_softc *ahd, 145 ahd_get_sense_buf(struct ahd_softc *ahd,
461 struct scb *scb); 146 struct scb *scb);
@@ -463,25 +148,7 @@ static __inline uint32_t
463 ahd_get_sense_bufaddr(struct ahd_softc *ahd, 148 ahd_get_sense_bufaddr(struct ahd_softc *ahd,
464 struct scb *scb); 149 struct scb *scb);
465 150
466/* 151#if 0 /* unused */
467 * Return pointers to the transfer negotiation information
468 * for the specified our_id/remote_id pair.
469 */
470static __inline struct ahd_initiator_tinfo *
471ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
472 u_int remote_id, struct ahd_tmode_tstate **tstate)
473{
474 /*
475 * Transfer data structures are stored from the perspective
476 * of the target role. Since the parameters for a connection
477 * in the initiator role to a given target are the same as
478 * when the roles are reversed, we pretend we are the target.
479 */
480 if (channel == 'B')
481 our_id += 8;
482 *tstate = ahd->enabled_targets[our_id];
483 return (&(*tstate)->transinfo[remote_id]);
484}
485 152
486#define AHD_COPY_COL_IDX(dst, src) \ 153#define AHD_COPY_COL_IDX(dst, src) \
487do { \ 154do { \
@@ -489,304 +156,7 @@ do { \
489 dst->hscb->lun = src->hscb->lun; \ 156 dst->hscb->lun = src->hscb->lun; \
490} while (0) 157} while (0)
491 158
492static __inline uint16_t
493ahd_inw(struct ahd_softc *ahd, u_int port)
494{
495 /*
496 * Read high byte first as some registers increment
497 * or have other side effects when the low byte is
498 * read.
499 */
500 uint16_t r = ahd_inb(ahd, port+1) << 8;
501 return r | ahd_inb(ahd, port);
502}
503
504static __inline void
505ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
506{
507 /*
507	 * Write low byte first to accommodate registers
508	 * such as PRGMCNT where the order matters.
510 */
511 ahd_outb(ahd, port, value & 0xFF);
512 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
513}
514
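The deleted ahd_inw()/ahd_outw() pair encodes an ordering contract: reads fetch the high byte first because reading the low byte can have side effects, and writes store the low byte first for registers such as PRGMCNT. The sketch below records the access order against a mock register file to make that contract visible; the mock I/O layer is mine, not the driver's.

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[4] = { 0x34, 0x12, 0, 0 };	/* little-endian 0x1234 at port 0 */
static char order[16];
static int  n;

static uint8_t mock_inb(int port)
{
	order[n++] = (char)('0' + port);	/* record which byte is touched */
	return regs[port];
}

static void mock_outb(int port, uint8_t v)
{
	order[n++] = (char)('0' + port);
	regs[port] = v;
}

static uint16_t inw_highfirst(int port)
{
	uint16_t r = (uint16_t)(mock_inb(port + 1) << 8);	/* high byte first */
	return r | mock_inb(port);				/* low byte last */
}

static void outw_lowfirst(int port, uint16_t v)
{
	mock_outb(port, v & 0xFF);			/* low byte first */
	mock_outb(port + 1, (v >> 8) & 0xFF);
}

int main(void)
{
	uint16_t v = inw_highfirst(0);

	outw_lowfirst(2, 0xBEEF);
	order[n] = '\0';
	printf("read 0x%04x, access order (ports): %s\n", v, order);
	return 0;
}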
515static __inline uint32_t
516ahd_inl(struct ahd_softc *ahd, u_int port)
517{
518 return ((ahd_inb(ahd, port))
519 | (ahd_inb(ahd, port+1) << 8)
520 | (ahd_inb(ahd, port+2) << 16)
521 | (ahd_inb(ahd, port+3) << 24));
522}
523
524static __inline void
525ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
526{
527 ahd_outb(ahd, port, (value) & 0xFF);
528 ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
529 ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
530 ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
531}
532
533static __inline uint64_t
534ahd_inq(struct ahd_softc *ahd, u_int port)
535{
536 return ((ahd_inb(ahd, port))
537 | (ahd_inb(ahd, port+1) << 8)
538 | (ahd_inb(ahd, port+2) << 16)
539 | (ahd_inb(ahd, port+3) << 24)
540 | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
541 | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
542 | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
543 | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
544}
545
546static __inline void
547ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
548{
549 ahd_outb(ahd, port, value & 0xFF);
550 ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
551 ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
552 ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
553 ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
554 ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
555 ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
556 ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
557}
558
559static __inline u_int
560ahd_get_scbptr(struct ahd_softc *ahd)
561{
562 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
563 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
564 return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
565}
566
567static __inline void
568ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
569{
570 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
571 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
572 ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
573 ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
574}
575
576static __inline u_int
577ahd_get_hnscb_qoff(struct ahd_softc *ahd)
578{
579 return (ahd_inw_atomic(ahd, HNSCB_QOFF));
580}
581
582static __inline void
583ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
584{
585 ahd_outw_atomic(ahd, HNSCB_QOFF, value);
586}
587
588static __inline u_int
589ahd_get_hescb_qoff(struct ahd_softc *ahd)
590{
591 return (ahd_inb(ahd, HESCB_QOFF));
592}
593
594static __inline void
595ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
596{
597 ahd_outb(ahd, HESCB_QOFF, value);
598}
599
600static __inline u_int
601ahd_get_snscb_qoff(struct ahd_softc *ahd)
602{
603 u_int oldvalue;
604
605 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
606 oldvalue = ahd_inw(ahd, SNSCB_QOFF);
607 ahd_outw(ahd, SNSCB_QOFF, oldvalue);
608 return (oldvalue);
609}
610
611static __inline void
612ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
613{
614 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
615 ahd_outw(ahd, SNSCB_QOFF, value);
616}
617
618static __inline u_int
619ahd_get_sescb_qoff(struct ahd_softc *ahd)
620{
621 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
622 return (ahd_inb(ahd, SESCB_QOFF));
623}
624
625static __inline void
626ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
627{
628 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
629 ahd_outb(ahd, SESCB_QOFF, value);
630}
631
632static __inline u_int
633ahd_get_sdscb_qoff(struct ahd_softc *ahd)
634{
635 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
636 return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
637}
638
639static __inline void
640ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
641{
642 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
643 ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
644 ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
645}
646
647static __inline u_int
648ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
649{
650 u_int value;
651
652 /*
653 * Workaround PCI-X Rev A. hardware bug.
654 * After a host read of SCB memory, the chip
655 * may become confused into thinking prefetch
656 * was required. This starts the discard timer
657 * running and can cause an unexpected discard
658	 * timer interrupt. The workaround is to read
659 * a normal register prior to the exhaustion of
660 * the discard timer. The mode pointer register
661 * has no side effects and so serves well for
662 * this purpose.
663 *
664 * Razor #528
665 */
666 value = ahd_inb(ahd, offset);
667 if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
668 ahd_inb(ahd, MODE_PTR);
669 return (value);
670}
671
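The workaround above is a pattern worth calling out: after a read that can arm an unwanted hardware timer, issue one extra read of a register known to be side-effect free, but only on parts that actually have the erratum. A compact sketch of that shape follows; the bug flag and register numbering are placeholders.

#include <stdint.h>
#include <stdio.h>

#define BUG_PREFETCH_TIMER 0x1		/* illustrative erratum flag */

struct mock_dev {
	unsigned bugs;
	unsigned safe_reg_reads;	/* counts the defusing reads */
};

static uint8_t read8(struct mock_dev *d, int reg)
{
	if (reg == 0)			/* register 0 plays the "safe" register */
		d->safe_reg_reads++;
	return 0x42;			/* the value is irrelevant to the pattern */
}

/* Read "SCB RAM"; on buggy silicon, follow up with a harmless read so the
 * spurious prefetch/discard timer never gets a chance to expire. */
static uint8_t read_scbram(struct mock_dev *d, int offset)
{
	uint8_t v = read8(d, offset);

	if (d->bugs & BUG_PREFETCH_TIMER)
		(void)read8(d, 0);	/* side-effect-free register */
	return v;
}

int main(void)
{
	struct mock_dev buggy = { BUG_PREFETCH_TIMER, 0 };
	struct mock_dev good  = { 0, 0 };

	read_scbram(&buggy, 0x20);
	read_scbram(&good, 0x20);
	printf("defusing reads: buggy=%u good=%u\n",
	       buggy.safe_reg_reads, good.safe_reg_reads);
	return 0;
}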
672static __inline u_int
673ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
674{
675 return (ahd_inb_scbram(ahd, offset)
676 | (ahd_inb_scbram(ahd, offset+1) << 8));
677}
678
679static __inline uint32_t
680ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
681{
682 return (ahd_inw_scbram(ahd, offset)
683 | (ahd_inw_scbram(ahd, offset+2) << 16));
684}
685
686static __inline uint64_t
687ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
688{
689 return (ahd_inl_scbram(ahd, offset)
690 | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
691}
692
693static __inline struct scb *
694ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
695{
696 struct scb* scb;
697
698 if (tag >= AHD_SCB_MAX)
699 return (NULL);
700 scb = ahd->scb_data.scbindex[tag];
701 if (scb != NULL)
702 ahd_sync_scb(ahd, scb,
703 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
704 return (scb);
705}
706
707static __inline void
708ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
709{
710 struct hardware_scb *q_hscb;
711 struct map_node *q_hscb_map;
712 uint32_t saved_hscb_busaddr;
713
714 /*
715 * Our queuing method is a bit tricky. The card
716 * knows in advance which HSCB (by address) to download,
717 * and we can't disappoint it. To achieve this, the next
718 * HSCB to download is saved off in ahd->next_queued_hscb.
719 * When we are called to queue "an arbitrary scb",
720 * we copy the contents of the incoming HSCB to the one
721 * the sequencer knows about, swap HSCB pointers and
722 * finally assign the SCB to the tag indexed location
723 * in the scb_array. This makes sure that we can still
724 * locate the correct SCB by SCB_TAG.
725 */
726 q_hscb = ahd->next_queued_hscb;
727 q_hscb_map = ahd->next_queued_hscb_map;
728 saved_hscb_busaddr = q_hscb->hscb_busaddr;
729 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
730 q_hscb->hscb_busaddr = saved_hscb_busaddr;
731 q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
732
733 /* Now swap HSCB pointers. */
734 ahd->next_queued_hscb = scb->hscb;
735 ahd->next_queued_hscb_map = scb->hscb_map;
736 scb->hscb = q_hscb;
737 scb->hscb_map = q_hscb_map;
738
739 /* Now define the mapping from tag to SCB in the scbindex */
740 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
741}
742
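The comment in ahd_swap_with_next_hscb() describes the trick: the controller is told in advance which buffer it will fetch next, so an arbitrary SCB is queued by copying it into that pre-advertised buffer (keeping the buffer's own bus address), chaining the incoming SCB's buffer as the new "next", and swapping the host-side pointers. The sketch below models the same swap with integer ids standing in for bus addresses; all names and values here are illustrative.

#include <stdio.h>
#include <string.h>

struct hscb {
	int busaddr;		/* stands in for the DMA address */
	int next_busaddr;	/* where the controller looks next */
	char payload[16];	/* stands in for the real command body */
};

struct soft {
	struct hscb *next_queued;	/* buffer already advertised to the HW */
};

/* Queue "scb" by filling the advertised buffer and advertising scb's
 * old buffer as the next one -- the same shape as the driver's swap. */
static void queue_scb(struct soft *s, struct hscb **scb_hscb)
{
	struct hscb *q = s->next_queued;
	int keep = q->busaddr;			/* HW already knows this address */

	memcpy(q, *scb_hscb, sizeof(*q));
	q->busaddr = keep;
	q->next_busaddr = (*scb_hscb)->busaddr;	/* chain the new "next" */

	s->next_queued = *scb_hscb;		/* swap host-side pointers */
	*scb_hscb = q;
}

int main(void)
{
	struct hscb a = { 100, 0, "advertised" };
	struct hscb b = { 200, 0, "incoming" };
	struct hscb *scb_buf = &b;
	struct soft s = { &a };

	queue_scb(&s, &scb_buf);
	printf("HW fetches busaddr %d -> payload '%s', next is %d\n",
	       a.busaddr, a.payload, a.next_busaddr);
	printf("next advertised buffer is busaddr %d\n", s.next_queued->busaddr);
	return 0;
}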
743/*
744 * Tell the sequencer about a new transaction to execute.
745 */
746static __inline void
747ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
748{
749 ahd_swap_with_next_hscb(ahd, scb);
750
751 if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
752 panic("Attempt to queue invalid SCB tag %x\n",
753 SCB_GET_TAG(scb));
754
755 /*
756 * Keep a history of SCBs we've downloaded in the qinfifo.
757 */
758 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
759 ahd->qinfifonext++;
760
761 if (scb->sg_count != 0)
762 ahd_setup_data_scb(ahd, scb);
763 else
764 ahd_setup_noxfer_scb(ahd, scb);
765 ahd_setup_scb_common(ahd, scb);
766
767 /*
768 * Make sure our data is consistent from the
769 * perspective of the adapter.
770 */
771 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
772
773#ifdef AHD_DEBUG
774 if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
775 uint64_t host_dataptr;
776
777 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
778 printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
779 ahd_name(ahd),
780 SCB_GET_TAG(scb), scb->hscb->scsiid,
781 ahd_le32toh(scb->hscb->hscb_busaddr),
782 (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
783 (u_int)(host_dataptr & 0xFFFFFFFF),
784 ahd_le32toh(scb->hscb->datacnt));
785 }
786#endif 159#endif
787 /* Tell the adapter about the newly queued SCB */
788 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
789}
790 160
791static __inline uint8_t * 161static __inline uint8_t *
792ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb) 162ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
@@ -801,151 +171,6 @@ ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
801} 171}
802 172
803/************************** Interrupt Processing ******************************/ 173/************************** Interrupt Processing ******************************/
804static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op); 174int ahd_intr(struct ahd_softc *ahd);
805static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
806static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
807static __inline int ahd_intr(struct ahd_softc *ahd);
808
809static __inline void
810ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
811{
812 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
813 /*offset*/0,
814 /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
815}
816
817static __inline void
818ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
819{
820#ifdef AHD_TARGET_MODE
821 if ((ahd->flags & AHD_TARGETROLE) != 0) {
822 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
823 ahd->shared_data_map.dmamap,
824 ahd_targetcmd_offset(ahd, 0),
825 sizeof(struct target_cmd) * AHD_TMODE_CMDS,
826 op);
827 }
828#endif
829}
830
831/*
832 * See if the firmware has posted any completed commands
833 * into our in-core command complete fifos.
834 */
835#define AHD_RUN_QOUTFIFO 0x1
836#define AHD_RUN_TQINFIFO 0x2
837static __inline u_int
838ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
839{
840 u_int retval;
841
842 retval = 0;
843 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
844 /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
845 /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
846 if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
847 == ahd->qoutfifonext_valid_tag)
848 retval |= AHD_RUN_QOUTFIFO;
849#ifdef AHD_TARGET_MODE
850 if ((ahd->flags & AHD_TARGETROLE) != 0
851 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
852 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
853 ahd->shared_data_map.dmamap,
854 				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
855 /*len*/sizeof(struct target_cmd),
856 BUS_DMASYNC_POSTREAD);
857 if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
858 retval |= AHD_RUN_TQINFIFO;
859 }
860#endif
861 return (retval);
862}
863
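ahd_check_cmdcmpltqueues() above decides whether new completions have arrived by comparing a per-entry tag in shared memory against the tag the host currently expects, instead of reading a hardware register. Below is a generic epoch-tag ring sketch in that spirit; the two tag values and all field names are mine, not the driver's qoutfifo layout.

#include <stdio.h>

#define RING  4
#define TAG_A 0xA5		/* illustrative epoch tags */
#define TAG_B 0x5A

struct entry { unsigned char valid_tag; int data; };

static struct entry ring[RING];		/* "shared" with the device */
static unsigned next;			/* host consume index */
static unsigned char expect = TAG_A;	/* tag the host expects this pass */

/* Device side: post one completion carrying the current epoch tag. */
static void device_post(unsigned slot, int data, unsigned char tag)
{
	ring[slot].data = data;
	ring[slot].valid_tag = tag;	/* written last on real hardware */
}

/* Host side: cheap check -- anything new at our index? */
static int completions_pending(void)
{
	return ring[next].valid_tag == expect;
}

static void consume_all(void)
{
	while (completions_pending()) {
		printf("completed %d\n", ring[next].data);
		if (++next == RING) {
			/* Wrap: flip the expected tag so stale entries from
			 * the previous pass can never match again. */
			next = 0;
			expect = (expect == TAG_A) ? TAG_B : TAG_A;
		}
	}
}

int main(void)
{
	device_post(0, 11, TAG_A);
	device_post(1, 22, TAG_A);
	if (completions_pending())
		consume_all();		/* prints 11 and 22, stops at slot 2 */
	return 0;
}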
864/*
865 * Catch an interrupt from the adapter
866 */
867static __inline int
868ahd_intr(struct ahd_softc *ahd)
869{
870 u_int intstat;
871
872 if ((ahd->pause & INTEN) == 0) {
873 /*
874 * Our interrupt is not enabled on the chip
875 * and may be disabled for re-entrancy reasons,
876 * so just return. This is likely just a shared
877 * interrupt.
878 */
879 return (0);
880 }
881
882 /*
883 * Instead of directly reading the interrupt status register,
884 * infer the cause of the interrupt by checking our in-core
885 * completion queues. This avoids a costly PCI bus read in
886 * most cases.
887 */
888 if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
889 && (ahd_check_cmdcmpltqueues(ahd) != 0))
890 intstat = CMDCMPLT;
891 else
892 intstat = ahd_inb(ahd, INTSTAT);
893
894 if ((intstat & INT_PEND) == 0)
895 return (0);
896
897 if (intstat & CMDCMPLT) {
898 ahd_outb(ahd, CLRINT, CLRCMDINT);
899
900 /*
901 * Ensure that the chip sees that we've cleared
902 * this interrupt before we walk the output fifo.
903 * Otherwise, we may, due to posted bus writes,
904 * clear the interrupt after we finish the scan,
905 * and after the sequencer has added new entries
906 * and asserted the interrupt again.
907 */
908 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
909 if (ahd_is_paused(ahd)) {
910 /*
911 * Potentially lost SEQINT.
912 * If SEQINTCODE is non-zero,
913 * simulate the SEQINT.
914 */
915 if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
916 intstat |= SEQINT;
917 }
918 } else {
919 ahd_flush_device_writes(ahd);
920 }
921 ahd_run_qoutfifo(ahd);
922 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
923 ahd->cmdcmplt_total++;
924#ifdef AHD_TARGET_MODE
925 if ((ahd->flags & AHD_TARGETROLE) != 0)
926 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
927#endif
928 }
929
930 /*
931 * Handle statuses that may invalidate our cached
932 * copy of INTSTAT separately.
933 */
934 if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
935 /* Hot eject. Do nothing */
936 } else if (intstat & HWERRINT) {
937 ahd_handle_hwerrint(ahd);
938 } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
939 ahd->bus_intr(ahd);
940 } else {
941
942 if ((intstat & SEQINT) != 0)
943 ahd_handle_seqint(ahd, intstat);
944
945 if ((intstat & SCSIINT) != 0)
946 ahd_handle_scsiint(ahd, intstat);
947 }
948 return (1);
949}
950 175
951#endif /* _AIC79XX_INLINE_H_ */ 176#endif /* _AIC79XX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0081aa357c8b..0f829b3b8ab7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -193,7 +193,7 @@ struct ahd_linux_iocell_opts
193#define AIC79XX_PRECOMP_INDEX 0 193#define AIC79XX_PRECOMP_INDEX 0
194#define AIC79XX_SLEWRATE_INDEX 1 194#define AIC79XX_SLEWRATE_INDEX 1
195#define AIC79XX_AMPLITUDE_INDEX 2 195#define AIC79XX_AMPLITUDE_INDEX 2
196static struct ahd_linux_iocell_opts aic79xx_iocell_info[] = 196static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
197{ 197{
198 AIC79XX_DEFAULT_IOOPTS, 198 AIC79XX_DEFAULT_IOOPTS,
199 AIC79XX_DEFAULT_IOOPTS, 199 AIC79XX_DEFAULT_IOOPTS,
@@ -369,10 +369,167 @@ static void ahd_release_simq(struct ahd_softc *ahd);
369static int ahd_linux_unit; 369static int ahd_linux_unit;
370 370
371 371
372/************************** OS Utility Wrappers *******************************/
373void ahd_delay(long);
374void
375ahd_delay(long usec)
376{
377 /*
378 * udelay on Linux can have problems for
379 * multi-millisecond waits. Wait at most
380 * 1024us per call.
381 */
382 while (usec > 0) {
383 udelay(usec % 1024);
384 usec -= 1024;
385 }
386}
387
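The relocated ahd_delay() caps each udelay() call at roughly 1 ms, as its comment says, but the usec % 1024 form can hand udelay() noticeably less than requested once usec exceeds 1024 (a 2048 us request produces two zero-length chunks). The sketch below compares that chunking with a min-based one by summing what each loop would pass to udelay(); it is arithmetic only and uses no kernel APIs.

#include <stdio.h>

/* Total microseconds handed to udelay() by the modulo-based loop above. */
static long total_modulo(long usec)
{
	long waited = 0;

	while (usec > 0) {
		waited += usec % 1024;
		usec -= 1024;
	}
	return waited;
}

/* Same loop, but each chunk is min(usec, 1024). */
static long total_capped(long usec)
{
	long waited = 0;

	while (usec > 0) {
		long chunk = usec > 1024 ? 1024 : usec;

		waited += chunk;
		usec -= chunk;
	}
	return waited;
}

int main(void)
{
	long samples[] = { 500, 1024, 2048, 2500 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("request %4ld us: modulo waits %4ld, capped waits %4ld\n",
		       samples[i], total_modulo(samples[i]),
		       total_capped(samples[i]));
	return 0;
}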
388
389/***************************** Low Level I/O **********************************/
390uint8_t ahd_inb(struct ahd_softc * ahd, long port);
391void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
392void ahd_outw_atomic(struct ahd_softc * ahd,
393 long port, uint16_t val);
394void ahd_outsb(struct ahd_softc * ahd, long port,
395 uint8_t *, int count);
396void ahd_insb(struct ahd_softc * ahd, long port,
397 uint8_t *, int count);
398
399uint8_t
400ahd_inb(struct ahd_softc * ahd, long port)
401{
402 uint8_t x;
403
404 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
405 x = readb(ahd->bshs[0].maddr + port);
406 } else {
407 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
408 }
409 mb();
410 return (x);
411}
412
413#if 0 /* unused */
414static uint16_t
415ahd_inw_atomic(struct ahd_softc * ahd, long port)
416{
417 uint8_t x;
418
419 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
420 x = readw(ahd->bshs[0].maddr + port);
421 } else {
422 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
423 }
424 mb();
425 return (x);
426}
427#endif
428
429void
430ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
431{
432 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
433 writeb(val, ahd->bshs[0].maddr + port);
434 } else {
435 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
436 }
437 mb();
438}
439
440void
441ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
442{
443 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
444 writew(val, ahd->bshs[0].maddr + port);
445 } else {
446 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
447 }
448 mb();
449}
450
451void
452ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
453{
454 int i;
455
456 /*
457 * There is probably a more efficient way to do this on Linux
458 * but we don't use this for anything speed critical and this
459 * should work.
460 */
461 for (i = 0; i < count; i++)
462 ahd_outb(ahd, port, *array++);
463}
464
465void
466ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
467{
468 int i;
469
470 /*
471 * There is probably a more efficient way to do this on Linux
472 * but we don't use this for anything speed critical and this
473 * should work.
474 */
475 for (i = 0; i < count; i++)
476 *array++ = ahd_inb(ahd, port);
477}
478
479/******************************* PCI Routines *********************************/
480uint32_t
481ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
482{
483 switch (width) {
484 case 1:
485 {
486 uint8_t retval;
487
488 pci_read_config_byte(pci, reg, &retval);
489 return (retval);
490 }
491 case 2:
492 {
493 uint16_t retval;
494 pci_read_config_word(pci, reg, &retval);
495 return (retval);
496 }
497 case 4:
498 {
499 uint32_t retval;
500 pci_read_config_dword(pci, reg, &retval);
501 return (retval);
502 }
503 default:
504 panic("ahd_pci_read_config: Read size too big");
505 /* NOTREACHED */
506 return (0);
507 }
508}
509
510void
511ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
512{
513 switch (width) {
514 case 1:
515 pci_write_config_byte(pci, reg, value);
516 break;
517 case 2:
518 pci_write_config_word(pci, reg, value);
519 break;
520 case 4:
521 pci_write_config_dword(pci, reg, value);
522 break;
523 default:
524 panic("ahd_pci_write_config: Write size too big");
525 /* NOTREACHED */
526 }
527}
528
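ahd_pci_read_config()/ahd_pci_write_config() above are thin width-dispatch wrappers over the byte/word/dword config accessors. A user-space imitation of the same dispatch follows, driven from a fake config space; the offsets and values are arbitrary and the byte composition simply assumes little-endian config layout, as PCI defines.

#include <stdint.h>
#include <stdio.h>

static uint8_t cfg[64];	/* pretend PCI config space */

/* Width-dispatching read in the same shape as the driver wrapper. */
static uint32_t cfg_read(int reg, int width)
{
	uint32_t v = 0;

	switch (width) {
	case 1:
		v = cfg[reg];
		break;
	case 2:
		v = cfg[reg] | ((uint32_t)cfg[reg + 1] << 8);
		break;
	case 4:
		v = cfg[reg] | ((uint32_t)cfg[reg + 1] << 8)
		    | ((uint32_t)cfg[reg + 2] << 16)
		    | ((uint32_t)cfg[reg + 3] << 24);
		break;
	default:
		fprintf(stderr, "unsupported width %d\n", width);
	}
	return v;
}

int main(void)
{
	/* A 16-bit id at offset 0 and a 32-bit BAR-like value at 0x10. */
	cfg[0] = 0x05; cfg[1] = 0x90;
	cfg[0x10] = 0x00; cfg[0x11] = 0x00; cfg[0x12] = 0x40; cfg[0x13] = 0xF0;

	printf("id   = 0x%04x\n", cfg_read(0, 2));
	printf("bar0 = 0x%08x\n", cfg_read(0x10, 4));
	return 0;
}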
372/****************************** Inlines ***************************************/ 529/****************************** Inlines ***************************************/
373static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*); 530static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
374 531
375static __inline void 532static void
376ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) 533ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
377{ 534{
378 struct scsi_cmnd *cmd; 535 struct scsi_cmnd *cmd;
@@ -400,13 +557,11 @@ ahd_linux_info(struct Scsi_Host *host)
400 bp = &buffer[0]; 557 bp = &buffer[0];
401 ahd = *(struct ahd_softc **)host->hostdata; 558 ahd = *(struct ahd_softc **)host->hostdata;
402 memset(bp, 0, sizeof(buffer)); 559 memset(bp, 0, sizeof(buffer));
403 strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev "); 560 strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
404 strcat(bp, AIC79XX_DRIVER_VERSION); 561 " <");
405 strcat(bp, "\n");
406 strcat(bp, " <");
407 strcat(bp, ahd->description); 562 strcat(bp, ahd->description);
408 strcat(bp, ">\n"); 563 strcat(bp, ">\n"
409 strcat(bp, " "); 564 " ");
410 ahd_controller_info(ahd, ahd_info); 565 ahd_controller_info(ahd, ahd_info);
411 strcat(bp, ahd_info); 566 strcat(bp, ahd_info);
412 567
@@ -432,7 +587,7 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
432 return rtn; 587 return rtn;
433} 588}
434 589
435static inline struct scsi_target ** 590static struct scsi_target **
436ahd_linux_target_in_softc(struct scsi_target *starget) 591ahd_linux_target_in_softc(struct scsi_target *starget)
437{ 592{
438 struct ahd_softc *ahd = 593 struct ahd_softc *ahd =
@@ -991,7 +1146,7 @@ aic79xx_setup(char *s)
991 char *p; 1146 char *p;
992 char *end; 1147 char *end;
993 1148
994 static struct { 1149 static const struct {
995 const char *name; 1150 const char *name;
996 uint32_t *flag; 1151 uint32_t *flag;
997 } options[] = { 1152 } options[] = {
@@ -1223,7 +1378,7 @@ ahd_platform_init(struct ahd_softc *ahd)
1223 * Lookup and commit any modified IO Cell options. 1378 * Lookup and commit any modified IO Cell options.
1224 */ 1379 */
1225 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { 1380 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
1226 struct ahd_linux_iocell_opts *iocell_opts; 1381 const struct ahd_linux_iocell_opts *iocell_opts;
1227 1382
1228 iocell_opts = &aic79xx_iocell_info[ahd->unit]; 1383 iocell_opts = &aic79xx_iocell_info[ahd->unit];
1229 if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP) 1384 if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
@@ -2613,7 +2768,7 @@ static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
2613 uint8_t precomp; 2768 uint8_t precomp;
2614 2769
2615 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { 2770 if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
2616 struct ahd_linux_iocell_opts *iocell_opts; 2771 const struct ahd_linux_iocell_opts *iocell_opts;
2617 2772
2618 iocell_opts = &aic79xx_iocell_info[ahd->unit]; 2773 iocell_opts = &aic79xx_iocell_info[ahd->unit];
2619 precomp = iocell_opts->precomp; 2774 precomp = iocell_opts->precomp;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 853998be1474..8d6612c19922 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -222,22 +222,6 @@ typedef struct timer_list ahd_timer_t;
222/***************************** Timer Facilities *******************************/ 222/***************************** Timer Facilities *******************************/
223#define ahd_timer_init init_timer 223#define ahd_timer_init init_timer
224#define ahd_timer_stop del_timer_sync 224#define ahd_timer_stop del_timer_sync
225typedef void ahd_linux_callback_t (u_long);
226static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
227 ahd_callback_t *func, void *arg);
228
229static __inline void
230ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
231{
232 struct ahd_softc *ahd;
233
234 ahd = (struct ahd_softc *)arg;
235 del_timer(timer);
236 timer->data = (u_long)arg;
237 timer->expires = jiffies + (usec * HZ)/1000000;
238 timer->function = (ahd_linux_callback_t*)func;
239 add_timer(timer);
240}
241 225
242/***************************** SMP support ************************************/ 226/***************************** SMP support ************************************/
243#include <linux/spinlock.h> 227#include <linux/spinlock.h>
@@ -376,7 +360,7 @@ struct ahd_platform_data {
376#define AHD_LINUX_NOIRQ ((uint32_t)~0) 360#define AHD_LINUX_NOIRQ ((uint32_t)~0)
377 uint32_t irq; /* IRQ for this adapter */ 361 uint32_t irq; /* IRQ for this adapter */
378 uint32_t bios_address; 362 uint32_t bios_address;
379 uint32_t mem_busaddr; /* Mem Base Addr */ 363 resource_size_t mem_busaddr; /* Mem Base Addr */
380}; 364};
381 365
382/************************** OS Utility Wrappers *******************************/ 366/************************** OS Utility Wrappers *******************************/
@@ -386,111 +370,18 @@ struct ahd_platform_data {
386#define malloc(size, type, flags) kmalloc(size, flags) 370#define malloc(size, type, flags) kmalloc(size, flags)
387#define free(ptr, type) kfree(ptr) 371#define free(ptr, type) kfree(ptr)
388 372
389static __inline void ahd_delay(long); 373void ahd_delay(long);
390static __inline void
391ahd_delay(long usec)
392{
393 /*
394 * udelay on Linux can have problems for
395 * multi-millisecond waits. Wait at most
396 * 1024us per call.
397 */
398 while (usec > 0) {
399 udelay(usec % 1024);
400 usec -= 1024;
401 }
402}
403
404 374
405/***************************** Low Level I/O **********************************/ 375/***************************** Low Level I/O **********************************/
406static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port); 376uint8_t ahd_inb(struct ahd_softc * ahd, long port);
407static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port); 377void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
408static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val); 378void ahd_outw_atomic(struct ahd_softc * ahd,
409static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
410 long port, uint16_t val); 379 long port, uint16_t val);
411static __inline void ahd_outsb(struct ahd_softc * ahd, long port, 380void ahd_outsb(struct ahd_softc * ahd, long port,
412 uint8_t *, int count); 381 uint8_t *, int count);
413static __inline void ahd_insb(struct ahd_softc * ahd, long port, 382void ahd_insb(struct ahd_softc * ahd, long port,
414 uint8_t *, int count); 383 uint8_t *, int count);
415 384
416static __inline uint8_t
417ahd_inb(struct ahd_softc * ahd, long port)
418{
419 uint8_t x;
420
421 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
422 x = readb(ahd->bshs[0].maddr + port);
423 } else {
424 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
425 }
426 mb();
427 return (x);
428}
429
430static __inline uint16_t
431ahd_inw_atomic(struct ahd_softc * ahd, long port)
432{
433 uint8_t x;
434
435 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
436 x = readw(ahd->bshs[0].maddr + port);
437 } else {
438 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
439 }
440 mb();
441 return (x);
442}
443
444static __inline void
445ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
446{
447 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
448 writeb(val, ahd->bshs[0].maddr + port);
449 } else {
450 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
451 }
452 mb();
453}
454
455static __inline void
456ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
457{
458 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
459 writew(val, ahd->bshs[0].maddr + port);
460 } else {
461 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
462 }
463 mb();
464}
465
466static __inline void
467ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
468{
469 int i;
470
471 /*
472 * There is probably a more efficient way to do this on Linux
473 * but we don't use this for anything speed critical and this
474 * should work.
475 */
476 for (i = 0; i < count; i++)
477 ahd_outb(ahd, port, *array++);
478}
479
480static __inline void
481ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
482{
483 int i;
484
485 /*
486 * There is probably a more efficient way to do this on Linux
487 * but we don't use this for anything speed critical and this
488 * should work.
489 */
490 for (i = 0; i < count; i++)
491 *array++ = ahd_inb(ahd, port);
492}
493
494/**************************** Initialization **********************************/ 385/**************************** Initialization **********************************/
495int ahd_linux_register_host(struct ahd_softc *, 386int ahd_linux_register_host(struct ahd_softc *,
496 struct scsi_host_template *); 387 struct scsi_host_template *);
@@ -593,62 +484,12 @@ void ahd_linux_pci_exit(void);
593int ahd_pci_map_registers(struct ahd_softc *ahd); 484int ahd_pci_map_registers(struct ahd_softc *ahd);
594int ahd_pci_map_int(struct ahd_softc *ahd); 485int ahd_pci_map_int(struct ahd_softc *ahd);
595 486
596static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci, 487uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
597 int reg, int width); 488 int reg, int width);
598 489void ahd_pci_write_config(ahd_dev_softc_t pci,
599static __inline uint32_t
600ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
601{
602 switch (width) {
603 case 1:
604 {
605 uint8_t retval;
606
607 pci_read_config_byte(pci, reg, &retval);
608 return (retval);
609 }
610 case 2:
611 {
612 uint16_t retval;
613 pci_read_config_word(pci, reg, &retval);
614 return (retval);
615 }
616 case 4:
617 {
618 uint32_t retval;
619 pci_read_config_dword(pci, reg, &retval);
620 return (retval);
621 }
622 default:
623 panic("ahd_pci_read_config: Read size too big");
624 /* NOTREACHED */
625 return (0);
626 }
627}
628
629static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
630 int reg, uint32_t value, 490 int reg, uint32_t value,
631 int width); 491 int width);
632 492
633static __inline void
634ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
635{
636 switch (width) {
637 case 1:
638 pci_write_config_byte(pci, reg, value);
639 break;
640 case 2:
641 pci_write_config_word(pci, reg, value);
642 break;
643 case 4:
644 pci_write_config_dword(pci, reg, value);
645 break;
646 default:
647 panic("ahd_pci_write_config: Write size too big");
648 /* NOTREACHED */
649 }
650}
651
652static __inline int ahd_get_pci_function(ahd_dev_softc_t); 493static __inline int ahd_get_pci_function(ahd_dev_softc_t);
653static __inline int 494static __inline int
654ahd_get_pci_function(ahd_dev_softc_t pci) 495ahd_get_pci_function(ahd_dev_softc_t pci)
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index dfaaae5e73ae..6593056867f6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -49,7 +49,7 @@
49 ID2C(x), \ 49 ID2C(x), \
50 ID2C(IDIROC(x)) 50 ID2C(IDIROC(x))
51 51
52static struct pci_device_id ahd_linux_pci_id_table[] = { 52static const struct pci_device_id ahd_linux_pci_id_table[] = {
53 /* aic7901 based controllers */ 53 /* aic7901 based controllers */
54 ID(ID_AHA_29320A), 54 ID(ID_AHA_29320A),
55 ID(ID_AHA_29320ALP), 55 ID(ID_AHA_29320ALP),
@@ -159,7 +159,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
159 char buf[80]; 159 char buf[80];
160 struct ahd_softc *ahd; 160 struct ahd_softc *ahd;
161 ahd_dev_softc_t pci; 161 ahd_dev_softc_t pci;
162 struct ahd_pci_identity *entry; 162 const struct ahd_pci_identity *entry;
163 char *name; 163 char *name;
164 int error; 164 int error;
165 struct device *dev = &pdev->dev; 165 struct device *dev = &pdev->dev;
@@ -249,8 +249,8 @@ ahd_linux_pci_exit(void)
249} 249}
250 250
251static int 251static int
252ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base, 252ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base,
253 u_long *base2) 253 resource_size_t *base2)
254{ 254{
255 *base = pci_resource_start(ahd->dev_softc, 0); 255 *base = pci_resource_start(ahd->dev_softc, 0);
256 /* 256 /*
@@ -272,11 +272,11 @@ ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
272 272
273static int 273static int
274ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, 274ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
275 u_long *bus_addr, 275 resource_size_t *bus_addr,
276 uint8_t __iomem **maddr) 276 uint8_t __iomem **maddr)
277{ 277{
278 u_long start; 278 resource_size_t start;
279 u_long base_page; 279 resource_size_t base_page;
280 u_long base_offset; 280 u_long base_offset;
281 int error = 0; 281 int error = 0;
282 282
@@ -310,7 +310,7 @@ int
310ahd_pci_map_registers(struct ahd_softc *ahd) 310ahd_pci_map_registers(struct ahd_softc *ahd)
311{ 311{
312 uint32_t command; 312 uint32_t command;
313 u_long base; 313 resource_size_t base;
314 uint8_t __iomem *maddr; 314 uint8_t __iomem *maddr;
315 int error; 315 int error;
316 316
@@ -346,31 +346,32 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
346 } else 346 } else
347 command |= PCIM_CMD_MEMEN; 347 command |= PCIM_CMD_MEMEN;
348 } else if (bootverbose) { 348 } else if (bootverbose) {
349 printf("aic79xx: PCI%d:%d:%d MEM region 0x%lx " 349 printf("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
350 "unavailable. Cannot memory map device.\n", 350 "unavailable. Cannot memory map device.\n",
351 ahd_get_pci_bus(ahd->dev_softc), 351 ahd_get_pci_bus(ahd->dev_softc),
352 ahd_get_pci_slot(ahd->dev_softc), 352 ahd_get_pci_slot(ahd->dev_softc),
353 ahd_get_pci_function(ahd->dev_softc), 353 ahd_get_pci_function(ahd->dev_softc),
354 base); 354 (unsigned long long)base);
355 } 355 }
356 356
357 if (maddr == NULL) { 357 if (maddr == NULL) {
358 u_long base2; 358 resource_size_t base2;
359 359
360 error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2); 360 error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
361 if (error == 0) { 361 if (error == 0) {
362 ahd->tags[0] = BUS_SPACE_PIO; 362 ahd->tags[0] = BUS_SPACE_PIO;
363 ahd->tags[1] = BUS_SPACE_PIO; 363 ahd->tags[1] = BUS_SPACE_PIO;
364 ahd->bshs[0].ioport = base; 364 ahd->bshs[0].ioport = (u_long)base;
365 ahd->bshs[1].ioport = base2; 365 ahd->bshs[1].ioport = (u_long)base2;
366 command |= PCIM_CMD_PORTEN; 366 command |= PCIM_CMD_PORTEN;
367 } else { 367 } else {
368 printf("aic79xx: PCI%d:%d:%d IO regions 0x%lx and 0x%lx" 368 printf("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
369 "unavailable. Cannot map device.\n", 369 "0x%llx unavailable. Cannot map device.\n",
370 ahd_get_pci_bus(ahd->dev_softc), 370 ahd_get_pci_bus(ahd->dev_softc),
371 ahd_get_pci_slot(ahd->dev_softc), 371 ahd_get_pci_slot(ahd->dev_softc),
372 ahd_get_pci_function(ahd->dev_softc), 372 ahd_get_pci_function(ahd->dev_softc),
373 base, base2); 373 (unsigned long long)base,
374 (unsigned long long)base2);
374 } 375 }
375 } 376 }
376 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4); 377 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
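The printf changes in this hunk exist because resource_size_t is 32-bit on some configurations and 64-bit on others, so each value is cast to unsigned long long and printed with %llx. The sketch below shows the same pattern against a width-varying typedef; the typedef selection is purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Pretend platform switch: a resource address type whose width varies. */
#ifdef SMALL_RESOURCES
typedef uint32_t res_size_t;
#else
typedef uint64_t res_size_t;
#endif

static void report_region(res_size_t base)
{
	/*
	 * Casting to unsigned long long and printing with %llx is correct
	 * for either width -- the same idiom the driver change adopts.
	 */
	printf("MEM region 0x%llx unavailable\n", (unsigned long long)base);
}

int main(void)
{
	report_region((res_size_t)0xF0400000u);
	return 0;
}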
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c9f79fdf9131..c25b6adffbf9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -97,7 +97,7 @@ static ahd_device_setup_t ahd_aic7901A_setup;
97static ahd_device_setup_t ahd_aic7902_setup; 97static ahd_device_setup_t ahd_aic7902_setup;
98static ahd_device_setup_t ahd_aic790X_setup; 98static ahd_device_setup_t ahd_aic790X_setup;
99 99
100static struct ahd_pci_identity ahd_pci_ident_table [] = 100static const struct ahd_pci_identity ahd_pci_ident_table[] =
101{ 101{
102 /* aic7901 based controllers */ 102 /* aic7901 based controllers */
103 { 103 {
@@ -253,7 +253,7 @@ static void ahd_configure_termination(struct ahd_softc *ahd,
253static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat); 253static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat);
254static void ahd_pci_intr(struct ahd_softc *ahd); 254static void ahd_pci_intr(struct ahd_softc *ahd);
255 255
256struct ahd_pci_identity * 256const struct ahd_pci_identity *
257ahd_find_pci_device(ahd_dev_softc_t pci) 257ahd_find_pci_device(ahd_dev_softc_t pci)
258{ 258{
259 uint64_t full_id; 259 uint64_t full_id;
@@ -261,7 +261,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
261 uint16_t vendor; 261 uint16_t vendor;
262 uint16_t subdevice; 262 uint16_t subdevice;
263 uint16_t subvendor; 263 uint16_t subvendor;
264 struct ahd_pci_identity *entry; 264 const struct ahd_pci_identity *entry;
265 u_int i; 265 u_int i;
266 266
267 vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); 267 vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -292,7 +292,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
292} 292}
293 293
294int 294int
295ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) 295ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
296{ 296{
297 struct scb_data *shared_scb_data; 297 struct scb_data *shared_scb_data;
298 u_int command; 298 u_int command;
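The aic79xx_pci.c hunks above are a pure constification: the identity table, the iterator that walks it, and the pointer returned by ahd_find_pci_device() all gain const, so the table can live in read-only data and callers cannot modify it by accident. A reduced sketch of the same lookup shape follows; the entry layout and the IDs are placeholders, not the real struct ahd_pci_identity:

#include <stddef.h>
#include <stdint.h>

/* Placeholder entry layout; the real table also carries per-chip data. */
struct pci_identity {
	uint64_t    full_id;	/* placeholder ID value */
	uint64_t    id_mask;
	const char *name;
};

static const struct pci_identity ident_table[] = {
	{ 0x0000000000000001ULL, 0x000000000000ffffULL, "example HBA A" },
	{ 0x0000000000000002ULL, 0x000000000000ffffULL, "example HBA B" },
};

/* Masked linear search, mirroring the shape of ahd_find_pci_device():
 * both the table and the returned pointer are const because nothing is
 * ever written through them. */
static const struct pci_identity *find_identity(uint64_t full_id)
{
	size_t i;

	for (i = 0; i < sizeof(ident_table) / sizeof(ident_table[0]); i++) {
		const struct pci_identity *entry = &ident_table[i];

		if ((full_id & entry->id_mask) == entry->full_id)
			return entry;
	}
	return NULL;
}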
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 6b28bebcbca0..014bed716e7c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -57,7 +57,7 @@ static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
57 * Table of syncrates that don't follow the "divisible by 4" 57 * Table of syncrates that don't follow the "divisible by 4"
58 * rule. This table will be expanded in future SCSI specs. 58 * rule. This table will be expanded in future SCSI specs.
59 */ 59 */
60static struct { 60static const struct {
61 u_int period_factor; 61 u_int period_factor;
62 u_int period; /* in 100ths of ns */ 62 u_int period; /* in 100ths of ns */
63} scsi_syncrates[] = { 63} scsi_syncrates[] = {
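Most of the remaining diffstat is aic79xx_reg.h_shipped below, a generated file (the shipped output of the driver's register definitions): register print handlers that no longer have users are dropped and the surviving entries are re-emitted in a new order. Every entry follows one fixed pattern, sketched here with an invented EXAMPLE register at an assumed address, so the deletions and reorderings below are mechanical:

/* Shape of one generated entry (EXAMPLE and 0x40 are invented values;
 * ahd_reg_print_t and ahd_print_register() are declared elsewhere in the
 * driver).  With AIC_DEBUG_REGISTERS a symbolic per-bit decoder is
 * declared; otherwise the macro falls back to a generic hex dump, so
 * call sites never need to change. */
#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_example_print;
#else
#define ahd_example_print(regvalue, cur_col, wrap) \
	ahd_print_register(NULL, 0, "EXAMPLE", 0x40, regvalue, cur_col, wrap)
#endif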
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index 2068e00d2c75..c21ceab8e913 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -48,13 +48,6 @@ ahd_reg_print_t ahd_error_print;
48#endif 48#endif
49 49
50#if AIC_DEBUG_REGISTERS 50#if AIC_DEBUG_REGISTERS
51ahd_reg_print_t ahd_clrerr_print;
52#else
53#define ahd_clrerr_print(regvalue, cur_col, wrap) \
54 ahd_print_register(NULL, 0, "CLRERR", 0x04, regvalue, cur_col, wrap)
55#endif
56
57#if AIC_DEBUG_REGISTERS
58ahd_reg_print_t ahd_hcntrl_print; 51ahd_reg_print_t ahd_hcntrl_print;
59#else 52#else
60#define ahd_hcntrl_print(regvalue, cur_col, wrap) \ 53#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
@@ -167,13 +160,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
167#endif 160#endif
168 161
169#if AIC_DEBUG_REGISTERS 162#if AIC_DEBUG_REGISTERS
170ahd_reg_print_t ahd_arbctl_print;
171#else
172#define ahd_arbctl_print(regvalue, cur_col, wrap) \
173 ahd_print_register(NULL, 0, "ARBCTL", 0x1b, regvalue, cur_col, wrap)
174#endif
175
176#if AIC_DEBUG_REGISTERS
177ahd_reg_print_t ahd_sg_cache_pre_print; 163ahd_reg_print_t ahd_sg_cache_pre_print;
178#else 164#else
179#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \ 165#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
@@ -188,20 +174,6 @@ ahd_reg_print_t ahd_lqin_print;
188#endif 174#endif
189 175
190#if AIC_DEBUG_REGISTERS 176#if AIC_DEBUG_REGISTERS
191ahd_reg_print_t ahd_typeptr_print;
192#else
193#define ahd_typeptr_print(regvalue, cur_col, wrap) \
194 ahd_print_register(NULL, 0, "TYPEPTR", 0x20, regvalue, cur_col, wrap)
195#endif
196
197#if AIC_DEBUG_REGISTERS
198ahd_reg_print_t ahd_tagptr_print;
199#else
200#define ahd_tagptr_print(regvalue, cur_col, wrap) \
201 ahd_print_register(NULL, 0, "TAGPTR", 0x21, regvalue, cur_col, wrap)
202#endif
203
204#if AIC_DEBUG_REGISTERS
205ahd_reg_print_t ahd_lunptr_print; 177ahd_reg_print_t ahd_lunptr_print;
206#else 178#else
207#define ahd_lunptr_print(regvalue, cur_col, wrap) \ 179#define ahd_lunptr_print(regvalue, cur_col, wrap) \
@@ -209,20 +181,6 @@ ahd_reg_print_t ahd_lunptr_print;
209#endif 181#endif
210 182
211#if AIC_DEBUG_REGISTERS 183#if AIC_DEBUG_REGISTERS
212ahd_reg_print_t ahd_datalenptr_print;
213#else
214#define ahd_datalenptr_print(regvalue, cur_col, wrap) \
215 ahd_print_register(NULL, 0, "DATALENPTR", 0x23, regvalue, cur_col, wrap)
216#endif
217
218#if AIC_DEBUG_REGISTERS
219ahd_reg_print_t ahd_statlenptr_print;
220#else
221#define ahd_statlenptr_print(regvalue, cur_col, wrap) \
222 ahd_print_register(NULL, 0, "STATLENPTR", 0x24, regvalue, cur_col, wrap)
223#endif
224
225#if AIC_DEBUG_REGISTERS
226ahd_reg_print_t ahd_cmdlenptr_print; 184ahd_reg_print_t ahd_cmdlenptr_print;
227#else 185#else
228#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \ 186#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
@@ -258,13 +216,6 @@ ahd_reg_print_t ahd_qnextptr_print;
258#endif 216#endif
259 217
260#if AIC_DEBUG_REGISTERS 218#if AIC_DEBUG_REGISTERS
261ahd_reg_print_t ahd_idptr_print;
262#else
263#define ahd_idptr_print(regvalue, cur_col, wrap) \
264 ahd_print_register(NULL, 0, "IDPTR", 0x2a, regvalue, cur_col, wrap)
265#endif
266
267#if AIC_DEBUG_REGISTERS
268ahd_reg_print_t ahd_abrtbyteptr_print; 219ahd_reg_print_t ahd_abrtbyteptr_print;
269#else 220#else
270#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \ 221#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
@@ -279,27 +230,6 @@ ahd_reg_print_t ahd_abrtbitptr_print;
279#endif 230#endif
280 231
281#if AIC_DEBUG_REGISTERS 232#if AIC_DEBUG_REGISTERS
282ahd_reg_print_t ahd_maxcmdbytes_print;
283#else
284#define ahd_maxcmdbytes_print(regvalue, cur_col, wrap) \
285 ahd_print_register(NULL, 0, "MAXCMDBYTES", 0x2d, regvalue, cur_col, wrap)
286#endif
287
288#if AIC_DEBUG_REGISTERS
289ahd_reg_print_t ahd_maxcmd2rcv_print;
290#else
291#define ahd_maxcmd2rcv_print(regvalue, cur_col, wrap) \
292 ahd_print_register(NULL, 0, "MAXCMD2RCV", 0x2e, regvalue, cur_col, wrap)
293#endif
294
295#if AIC_DEBUG_REGISTERS
296ahd_reg_print_t ahd_shortthresh_print;
297#else
298#define ahd_shortthresh_print(regvalue, cur_col, wrap) \
299 ahd_print_register(NULL, 0, "SHORTTHRESH", 0x2f, regvalue, cur_col, wrap)
300#endif
301
302#if AIC_DEBUG_REGISTERS
303ahd_reg_print_t ahd_lunlen_print; 233ahd_reg_print_t ahd_lunlen_print;
304#else 234#else
305#define ahd_lunlen_print(regvalue, cur_col, wrap) \ 235#define ahd_lunlen_print(regvalue, cur_col, wrap) \
@@ -328,41 +258,6 @@ ahd_reg_print_t ahd_maxcmdcnt_print;
328#endif 258#endif
329 259
330#if AIC_DEBUG_REGISTERS 260#if AIC_DEBUG_REGISTERS
331ahd_reg_print_t ahd_lqrsvd01_print;
332#else
333#define ahd_lqrsvd01_print(regvalue, cur_col, wrap) \
334 ahd_print_register(NULL, 0, "LQRSVD01", 0x34, regvalue, cur_col, wrap)
335#endif
336
337#if AIC_DEBUG_REGISTERS
338ahd_reg_print_t ahd_lqrsvd16_print;
339#else
340#define ahd_lqrsvd16_print(regvalue, cur_col, wrap) \
341 ahd_print_register(NULL, 0, "LQRSVD16", 0x35, regvalue, cur_col, wrap)
342#endif
343
344#if AIC_DEBUG_REGISTERS
345ahd_reg_print_t ahd_lqrsvd17_print;
346#else
347#define ahd_lqrsvd17_print(regvalue, cur_col, wrap) \
348 ahd_print_register(NULL, 0, "LQRSVD17", 0x36, regvalue, cur_col, wrap)
349#endif
350
351#if AIC_DEBUG_REGISTERS
352ahd_reg_print_t ahd_cmdrsvd0_print;
353#else
354#define ahd_cmdrsvd0_print(regvalue, cur_col, wrap) \
355 ahd_print_register(NULL, 0, "CMDRSVD0", 0x37, regvalue, cur_col, wrap)
356#endif
357
358#if AIC_DEBUG_REGISTERS
359ahd_reg_print_t ahd_lqctl0_print;
360#else
361#define ahd_lqctl0_print(regvalue, cur_col, wrap) \
362 ahd_print_register(NULL, 0, "LQCTL0", 0x38, regvalue, cur_col, wrap)
363#endif
364
365#if AIC_DEBUG_REGISTERS
366ahd_reg_print_t ahd_lqctl1_print; 261ahd_reg_print_t ahd_lqctl1_print;
367#else 262#else
368#define ahd_lqctl1_print(regvalue, cur_col, wrap) \ 263#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
@@ -370,13 +265,6 @@ ahd_reg_print_t ahd_lqctl1_print;
370#endif 265#endif
371 266
372#if AIC_DEBUG_REGISTERS 267#if AIC_DEBUG_REGISTERS
373ahd_reg_print_t ahd_scsbist0_print;
374#else
375#define ahd_scsbist0_print(regvalue, cur_col, wrap) \
376 ahd_print_register(NULL, 0, "SCSBIST0", 0x39, regvalue, cur_col, wrap)
377#endif
378
379#if AIC_DEBUG_REGISTERS
380ahd_reg_print_t ahd_lqctl2_print; 268ahd_reg_print_t ahd_lqctl2_print;
381#else 269#else
382#define ahd_lqctl2_print(regvalue, cur_col, wrap) \ 270#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
@@ -384,13 +272,6 @@ ahd_reg_print_t ahd_lqctl2_print;
384#endif 272#endif
385 273
386#if AIC_DEBUG_REGISTERS 274#if AIC_DEBUG_REGISTERS
387ahd_reg_print_t ahd_scsbist1_print;
388#else
389#define ahd_scsbist1_print(regvalue, cur_col, wrap) \
390 ahd_print_register(NULL, 0, "SCSBIST1", 0x3a, regvalue, cur_col, wrap)
391#endif
392
393#if AIC_DEBUG_REGISTERS
394ahd_reg_print_t ahd_scsiseq0_print; 275ahd_reg_print_t ahd_scsiseq0_print;
395#else 276#else
396#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \ 277#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \
@@ -412,20 +293,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
412#endif 293#endif
413 294
414#if AIC_DEBUG_REGISTERS 295#if AIC_DEBUG_REGISTERS
415ahd_reg_print_t ahd_dlcount_print;
416#else
417#define ahd_dlcount_print(regvalue, cur_col, wrap) \
418 ahd_print_register(NULL, 0, "DLCOUNT", 0x3c, regvalue, cur_col, wrap)
419#endif
420
421#if AIC_DEBUG_REGISTERS
422ahd_reg_print_t ahd_businitid_print;
423#else
424#define ahd_businitid_print(regvalue, cur_col, wrap) \
425 ahd_print_register(NULL, 0, "BUSINITID", 0x3c, regvalue, cur_col, wrap)
426#endif
427
428#if AIC_DEBUG_REGISTERS
429ahd_reg_print_t ahd_sxfrctl1_print; 296ahd_reg_print_t ahd_sxfrctl1_print;
430#else 297#else
431#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \ 298#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
@@ -433,20 +300,6 @@ ahd_reg_print_t ahd_sxfrctl1_print;
433#endif 300#endif
434 301
435#if AIC_DEBUG_REGISTERS 302#if AIC_DEBUG_REGISTERS
436ahd_reg_print_t ahd_bustargid_print;
437#else
438#define ahd_bustargid_print(regvalue, cur_col, wrap) \
439 ahd_print_register(NULL, 0, "BUSTARGID", 0x3e, regvalue, cur_col, wrap)
440#endif
441
442#if AIC_DEBUG_REGISTERS
443ahd_reg_print_t ahd_sxfrctl2_print;
444#else
445#define ahd_sxfrctl2_print(regvalue, cur_col, wrap) \
446 ahd_print_register(NULL, 0, "SXFRCTL2", 0x3e, regvalue, cur_col, wrap)
447#endif
448
449#if AIC_DEBUG_REGISTERS
450ahd_reg_print_t ahd_dffstat_print; 303ahd_reg_print_t ahd_dffstat_print;
451#else 304#else
452#define ahd_dffstat_print(regvalue, cur_col, wrap) \ 305#define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -454,17 +307,17 @@ ahd_reg_print_t ahd_dffstat_print;
454#endif 307#endif
455 308
456#if AIC_DEBUG_REGISTERS 309#if AIC_DEBUG_REGISTERS
457ahd_reg_print_t ahd_scsisigo_print; 310ahd_reg_print_t ahd_multargid_print;
458#else 311#else
459#define ahd_scsisigo_print(regvalue, cur_col, wrap) \ 312#define ahd_multargid_print(regvalue, cur_col, wrap) \
460 ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap) 313 ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
461#endif 314#endif
462 315
463#if AIC_DEBUG_REGISTERS 316#if AIC_DEBUG_REGISTERS
464ahd_reg_print_t ahd_multargid_print; 317ahd_reg_print_t ahd_scsisigo_print;
465#else 318#else
466#define ahd_multargid_print(regvalue, cur_col, wrap) \ 319#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
467 ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap) 320 ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
468#endif 321#endif
469 322
470#if AIC_DEBUG_REGISTERS 323#if AIC_DEBUG_REGISTERS
@@ -482,13 +335,6 @@ ahd_reg_print_t ahd_scsiphase_print;
482#endif 335#endif
483 336
484#if AIC_DEBUG_REGISTERS 337#if AIC_DEBUG_REGISTERS
485ahd_reg_print_t ahd_scsidat0_img_print;
486#else
487#define ahd_scsidat0_img_print(regvalue, cur_col, wrap) \
488 ahd_print_register(NULL, 0, "SCSIDAT0_IMG", 0x43, regvalue, cur_col, wrap)
489#endif
490
491#if AIC_DEBUG_REGISTERS
492ahd_reg_print_t ahd_scsidat_print; 338ahd_reg_print_t ahd_scsidat_print;
493#else 339#else
494#define ahd_scsidat_print(regvalue, cur_col, wrap) \ 340#define ahd_scsidat_print(regvalue, cur_col, wrap) \
@@ -531,13 +377,6 @@ ahd_reg_print_t ahd_sblkctl_print;
531#endif 377#endif
532 378
533#if AIC_DEBUG_REGISTERS 379#if AIC_DEBUG_REGISTERS
534ahd_reg_print_t ahd_clrsint0_print;
535#else
536#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
537 ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
538#endif
539
540#if AIC_DEBUG_REGISTERS
541ahd_reg_print_t ahd_sstat0_print; 380ahd_reg_print_t ahd_sstat0_print;
542#else 381#else
543#define ahd_sstat0_print(regvalue, cur_col, wrap) \ 382#define ahd_sstat0_print(regvalue, cur_col, wrap) \
@@ -552,10 +391,10 @@ ahd_reg_print_t ahd_simode0_print;
552#endif 391#endif
553 392
554#if AIC_DEBUG_REGISTERS 393#if AIC_DEBUG_REGISTERS
555ahd_reg_print_t ahd_clrsint1_print; 394ahd_reg_print_t ahd_clrsint0_print;
556#else 395#else
557#define ahd_clrsint1_print(regvalue, cur_col, wrap) \ 396#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
558 ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap) 397 ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
559#endif 398#endif
560 399
561#if AIC_DEBUG_REGISTERS 400#if AIC_DEBUG_REGISTERS
@@ -566,17 +405,17 @@ ahd_reg_print_t ahd_sstat1_print;
566#endif 405#endif
567 406
568#if AIC_DEBUG_REGISTERS 407#if AIC_DEBUG_REGISTERS
569ahd_reg_print_t ahd_sstat2_print; 408ahd_reg_print_t ahd_clrsint1_print;
570#else 409#else
571#define ahd_sstat2_print(regvalue, cur_col, wrap) \ 410#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
572 ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap) 411 ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
573#endif 412#endif
574 413
575#if AIC_DEBUG_REGISTERS 414#if AIC_DEBUG_REGISTERS
576ahd_reg_print_t ahd_simode2_print; 415ahd_reg_print_t ahd_sstat2_print;
577#else 416#else
578#define ahd_simode2_print(regvalue, cur_col, wrap) \ 417#define ahd_sstat2_print(regvalue, cur_col, wrap) \
579 ahd_print_register(NULL, 0, "SIMODE2", 0x4d, regvalue, cur_col, wrap) 418 ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
580#endif 419#endif
581 420
582#if AIC_DEBUG_REGISTERS 421#if AIC_DEBUG_REGISTERS
@@ -622,17 +461,17 @@ ahd_reg_print_t ahd_lqistat0_print;
622#endif 461#endif
623 462
624#if AIC_DEBUG_REGISTERS 463#if AIC_DEBUG_REGISTERS
625ahd_reg_print_t ahd_clrlqiint0_print; 464ahd_reg_print_t ahd_lqimode0_print;
626#else 465#else
627#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \ 466#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
628 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap) 467 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
629#endif 468#endif
630 469
631#if AIC_DEBUG_REGISTERS 470#if AIC_DEBUG_REGISTERS
632ahd_reg_print_t ahd_lqimode0_print; 471ahd_reg_print_t ahd_clrlqiint0_print;
633#else 472#else
634#define ahd_lqimode0_print(regvalue, cur_col, wrap) \ 473#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
635 ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap) 474 ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
636#endif 475#endif
637 476
638#if AIC_DEBUG_REGISTERS 477#if AIC_DEBUG_REGISTERS
@@ -790,13 +629,6 @@ ahd_reg_print_t ahd_seqintsrc_print;
790#endif 629#endif
791 630
792#if AIC_DEBUG_REGISTERS 631#if AIC_DEBUG_REGISTERS
793ahd_reg_print_t ahd_currscb_print;
794#else
795#define ahd_currscb_print(regvalue, cur_col, wrap) \
796 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
797#endif
798
799#if AIC_DEBUG_REGISTERS
800ahd_reg_print_t ahd_seqimode_print; 632ahd_reg_print_t ahd_seqimode_print;
801#else 633#else
802#define ahd_seqimode_print(regvalue, cur_col, wrap) \ 634#define ahd_seqimode_print(regvalue, cur_col, wrap) \
@@ -804,24 +636,17 @@ ahd_reg_print_t ahd_seqimode_print;
804#endif 636#endif
805 637
806#if AIC_DEBUG_REGISTERS 638#if AIC_DEBUG_REGISTERS
807ahd_reg_print_t ahd_mdffstat_print; 639ahd_reg_print_t ahd_currscb_print;
808#else
809#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
810 ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
811#endif
812
813#if AIC_DEBUG_REGISTERS
814ahd_reg_print_t ahd_crccontrol_print;
815#else 640#else
816#define ahd_crccontrol_print(regvalue, cur_col, wrap) \ 641#define ahd_currscb_print(regvalue, cur_col, wrap) \
817 ahd_print_register(NULL, 0, "CRCCONTROL", 0x5d, regvalue, cur_col, wrap) 642 ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
818#endif 643#endif
819 644
820#if AIC_DEBUG_REGISTERS 645#if AIC_DEBUG_REGISTERS
821ahd_reg_print_t ahd_dfftag_print; 646ahd_reg_print_t ahd_mdffstat_print;
822#else 647#else
823#define ahd_dfftag_print(regvalue, cur_col, wrap) \ 648#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
824 ahd_print_register(NULL, 0, "DFFTAG", 0x5e, regvalue, cur_col, wrap) 649 ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
825#endif 650#endif
826 651
827#if AIC_DEBUG_REGISTERS 652#if AIC_DEBUG_REGISTERS
@@ -832,20 +657,6 @@ ahd_reg_print_t ahd_lastscb_print;
832#endif 657#endif
833 658
834#if AIC_DEBUG_REGISTERS 659#if AIC_DEBUG_REGISTERS
835ahd_reg_print_t ahd_scsitest_print;
836#else
837#define ahd_scsitest_print(regvalue, cur_col, wrap) \
838 ahd_print_register(NULL, 0, "SCSITEST", 0x5e, regvalue, cur_col, wrap)
839#endif
840
841#if AIC_DEBUG_REGISTERS
842ahd_reg_print_t ahd_iopdnctl_print;
843#else
844#define ahd_iopdnctl_print(regvalue, cur_col, wrap) \
845 ahd_print_register(NULL, 0, "IOPDNCTL", 0x5f, regvalue, cur_col, wrap)
846#endif
847
848#if AIC_DEBUG_REGISTERS
849ahd_reg_print_t ahd_shaddr_print; 660ahd_reg_print_t ahd_shaddr_print;
850#else 661#else
851#define ahd_shaddr_print(regvalue, cur_col, wrap) \ 662#define ahd_shaddr_print(regvalue, cur_col, wrap) \
@@ -860,13 +671,6 @@ ahd_reg_print_t ahd_negoaddr_print;
860#endif 671#endif
861 672
862#if AIC_DEBUG_REGISTERS 673#if AIC_DEBUG_REGISTERS
863ahd_reg_print_t ahd_dgrpcrci_print;
864#else
865#define ahd_dgrpcrci_print(regvalue, cur_col, wrap) \
866 ahd_print_register(NULL, 0, "DGRPCRCI", 0x60, regvalue, cur_col, wrap)
867#endif
868
869#if AIC_DEBUG_REGISTERS
870ahd_reg_print_t ahd_negperiod_print; 674ahd_reg_print_t ahd_negperiod_print;
871#else 675#else
872#define ahd_negperiod_print(regvalue, cur_col, wrap) \ 676#define ahd_negperiod_print(regvalue, cur_col, wrap) \
@@ -874,13 +678,6 @@ ahd_reg_print_t ahd_negperiod_print;
874#endif 678#endif
875 679
876#if AIC_DEBUG_REGISTERS 680#if AIC_DEBUG_REGISTERS
877ahd_reg_print_t ahd_packcrci_print;
878#else
879#define ahd_packcrci_print(regvalue, cur_col, wrap) \
880 ahd_print_register(NULL, 0, "PACKCRCI", 0x62, regvalue, cur_col, wrap)
881#endif
882
883#if AIC_DEBUG_REGISTERS
884ahd_reg_print_t ahd_negoffset_print; 681ahd_reg_print_t ahd_negoffset_print;
885#else 682#else
886#define ahd_negoffset_print(regvalue, cur_col, wrap) \ 683#define ahd_negoffset_print(regvalue, cur_col, wrap) \
@@ -930,13 +727,6 @@ ahd_reg_print_t ahd_iownid_print;
930#endif 727#endif
931 728
932#if AIC_DEBUG_REGISTERS 729#if AIC_DEBUG_REGISTERS
933ahd_reg_print_t ahd_pll960ctl0_print;
934#else
935#define ahd_pll960ctl0_print(regvalue, cur_col, wrap) \
936 ahd_print_register(NULL, 0, "PLL960CTL0", 0x68, regvalue, cur_col, wrap)
937#endif
938
939#if AIC_DEBUG_REGISTERS
940ahd_reg_print_t ahd_shcnt_print; 730ahd_reg_print_t ahd_shcnt_print;
941#else 731#else
942#define ahd_shcnt_print(regvalue, cur_col, wrap) \ 732#define ahd_shcnt_print(regvalue, cur_col, wrap) \
@@ -951,27 +741,6 @@ ahd_reg_print_t ahd_townid_print;
951#endif 741#endif
952 742
953#if AIC_DEBUG_REGISTERS 743#if AIC_DEBUG_REGISTERS
954ahd_reg_print_t ahd_pll960ctl1_print;
955#else
956#define ahd_pll960ctl1_print(regvalue, cur_col, wrap) \
957 ahd_print_register(NULL, 0, "PLL960CTL1", 0x69, regvalue, cur_col, wrap)
958#endif
959
960#if AIC_DEBUG_REGISTERS
961ahd_reg_print_t ahd_pll960cnt0_print;
962#else
963#define ahd_pll960cnt0_print(regvalue, cur_col, wrap) \
964 ahd_print_register(NULL, 0, "PLL960CNT0", 0x6a, regvalue, cur_col, wrap)
965#endif
966
967#if AIC_DEBUG_REGISTERS
968ahd_reg_print_t ahd_xsig_print;
969#else
970#define ahd_xsig_print(regvalue, cur_col, wrap) \
971 ahd_print_register(NULL, 0, "XSIG", 0x6a, regvalue, cur_col, wrap)
972#endif
973
974#if AIC_DEBUG_REGISTERS
975ahd_reg_print_t ahd_seloid_print; 744ahd_reg_print_t ahd_seloid_print;
976#else 745#else
977#define ahd_seloid_print(regvalue, cur_col, wrap) \ 746#define ahd_seloid_print(regvalue, cur_col, wrap) \
@@ -979,41 +748,6 @@ ahd_reg_print_t ahd_seloid_print;
979#endif 748#endif
980 749
981#if AIC_DEBUG_REGISTERS 750#if AIC_DEBUG_REGISTERS
982ahd_reg_print_t ahd_pll400ctl0_print;
983#else
984#define ahd_pll400ctl0_print(regvalue, cur_col, wrap) \
985 ahd_print_register(NULL, 0, "PLL400CTL0", 0x6c, regvalue, cur_col, wrap)
986#endif
987
988#if AIC_DEBUG_REGISTERS
989ahd_reg_print_t ahd_fairness_print;
990#else
991#define ahd_fairness_print(regvalue, cur_col, wrap) \
992 ahd_print_register(NULL, 0, "FAIRNESS", 0x6c, regvalue, cur_col, wrap)
993#endif
994
995#if AIC_DEBUG_REGISTERS
996ahd_reg_print_t ahd_pll400ctl1_print;
997#else
998#define ahd_pll400ctl1_print(regvalue, cur_col, wrap) \
999 ahd_print_register(NULL, 0, "PLL400CTL1", 0x6d, regvalue, cur_col, wrap)
1000#endif
1001
1002#if AIC_DEBUG_REGISTERS
1003ahd_reg_print_t ahd_unfairness_print;
1004#else
1005#define ahd_unfairness_print(regvalue, cur_col, wrap) \
1006 ahd_print_register(NULL, 0, "UNFAIRNESS", 0x6e, regvalue, cur_col, wrap)
1007#endif
1008
1009#if AIC_DEBUG_REGISTERS
1010ahd_reg_print_t ahd_pll400cnt0_print;
1011#else
1012#define ahd_pll400cnt0_print(regvalue, cur_col, wrap) \
1013 ahd_print_register(NULL, 0, "PLL400CNT0", 0x6e, regvalue, cur_col, wrap)
1014#endif
1015
1016#if AIC_DEBUG_REGISTERS
1017ahd_reg_print_t ahd_haddr_print; 751ahd_reg_print_t ahd_haddr_print;
1018#else 752#else
1019#define ahd_haddr_print(regvalue, cur_col, wrap) \ 753#define ahd_haddr_print(regvalue, cur_col, wrap) \
@@ -1021,27 +755,6 @@ ahd_reg_print_t ahd_haddr_print;
1021#endif 755#endif
1022 756
1023#if AIC_DEBUG_REGISTERS 757#if AIC_DEBUG_REGISTERS
1024ahd_reg_print_t ahd_plldelay_print;
1025#else
1026#define ahd_plldelay_print(regvalue, cur_col, wrap) \
1027 ahd_print_register(NULL, 0, "PLLDELAY", 0x70, regvalue, cur_col, wrap)
1028#endif
1029
1030#if AIC_DEBUG_REGISTERS
1031ahd_reg_print_t ahd_hodmaadr_print;
1032#else
1033#define ahd_hodmaadr_print(regvalue, cur_col, wrap) \
1034 ahd_print_register(NULL, 0, "HODMAADR", 0x70, regvalue, cur_col, wrap)
1035#endif
1036
1037#if AIC_DEBUG_REGISTERS
1038ahd_reg_print_t ahd_hodmacnt_print;
1039#else
1040#define ahd_hodmacnt_print(regvalue, cur_col, wrap) \
1041 ahd_print_register(NULL, 0, "HODMACNT", 0x78, regvalue, cur_col, wrap)
1042#endif
1043
1044#if AIC_DEBUG_REGISTERS
1045ahd_reg_print_t ahd_hcnt_print; 758ahd_reg_print_t ahd_hcnt_print;
1046#else 759#else
1047#define ahd_hcnt_print(regvalue, cur_col, wrap) \ 760#define ahd_hcnt_print(regvalue, cur_col, wrap) \
@@ -1049,10 +762,10 @@ ahd_reg_print_t ahd_hcnt_print;
1049#endif 762#endif
1050 763
1051#if AIC_DEBUG_REGISTERS 764#if AIC_DEBUG_REGISTERS
1052ahd_reg_print_t ahd_hodmaen_print; 765ahd_reg_print_t ahd_sghaddr_print;
1053#else 766#else
1054#define ahd_hodmaen_print(regvalue, cur_col, wrap) \ 767#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
1055 ahd_print_register(NULL, 0, "HODMAEN", 0x7a, regvalue, cur_col, wrap) 768 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
1056#endif 769#endif
1057 770
1058#if AIC_DEBUG_REGISTERS 771#if AIC_DEBUG_REGISTERS
@@ -1063,10 +776,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
1063#endif 776#endif
1064 777
1065#if AIC_DEBUG_REGISTERS 778#if AIC_DEBUG_REGISTERS
1066ahd_reg_print_t ahd_sghaddr_print; 779ahd_reg_print_t ahd_sghcnt_print;
1067#else 780#else
1068#define ahd_sghaddr_print(regvalue, cur_col, wrap) \ 781#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
1069 ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap) 782 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
1070#endif 783#endif
1071 784
1072#if AIC_DEBUG_REGISTERS 785#if AIC_DEBUG_REGISTERS
@@ -1077,13 +790,6 @@ ahd_reg_print_t ahd_scbhcnt_print;
1077#endif 790#endif
1078 791
1079#if AIC_DEBUG_REGISTERS 792#if AIC_DEBUG_REGISTERS
1080ahd_reg_print_t ahd_sghcnt_print;
1081#else
1082#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
1083 ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
1084#endif
1085
1086#if AIC_DEBUG_REGISTERS
1087ahd_reg_print_t ahd_dff_thrsh_print; 793ahd_reg_print_t ahd_dff_thrsh_print;
1088#else 794#else
1089#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \ 795#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \
@@ -1091,132 +797,6 @@ ahd_reg_print_t ahd_dff_thrsh_print;
1091#endif 797#endif
1092 798
1093#if AIC_DEBUG_REGISTERS 799#if AIC_DEBUG_REGISTERS
1094ahd_reg_print_t ahd_romaddr_print;
1095#else
1096#define ahd_romaddr_print(regvalue, cur_col, wrap) \
1097 ahd_print_register(NULL, 0, "ROMADDR", 0x8a, regvalue, cur_col, wrap)
1098#endif
1099
1100#if AIC_DEBUG_REGISTERS
1101ahd_reg_print_t ahd_romcntrl_print;
1102#else
1103#define ahd_romcntrl_print(regvalue, cur_col, wrap) \
1104 ahd_print_register(NULL, 0, "ROMCNTRL", 0x8d, regvalue, cur_col, wrap)
1105#endif
1106
1107#if AIC_DEBUG_REGISTERS
1108ahd_reg_print_t ahd_romdata_print;
1109#else
1110#define ahd_romdata_print(regvalue, cur_col, wrap) \
1111 ahd_print_register(NULL, 0, "ROMDATA", 0x8e, regvalue, cur_col, wrap)
1112#endif
1113
1114#if AIC_DEBUG_REGISTERS
1115ahd_reg_print_t ahd_cmcrxmsg0_print;
1116#else
1117#define ahd_cmcrxmsg0_print(regvalue, cur_col, wrap) \
1118 ahd_print_register(NULL, 0, "CMCRXMSG0", 0x90, regvalue, cur_col, wrap)
1119#endif
1120
1121#if AIC_DEBUG_REGISTERS
1122ahd_reg_print_t ahd_roenable_print;
1123#else
1124#define ahd_roenable_print(regvalue, cur_col, wrap) \
1125 ahd_print_register(NULL, 0, "ROENABLE", 0x90, regvalue, cur_col, wrap)
1126#endif
1127
1128#if AIC_DEBUG_REGISTERS
1129ahd_reg_print_t ahd_ovlyrxmsg0_print;
1130#else
1131#define ahd_ovlyrxmsg0_print(regvalue, cur_col, wrap) \
1132 ahd_print_register(NULL, 0, "OVLYRXMSG0", 0x90, regvalue, cur_col, wrap)
1133#endif
1134
1135#if AIC_DEBUG_REGISTERS
1136ahd_reg_print_t ahd_dchrxmsg0_print;
1137#else
1138#define ahd_dchrxmsg0_print(regvalue, cur_col, wrap) \
1139 ahd_print_register(NULL, 0, "DCHRXMSG0", 0x90, regvalue, cur_col, wrap)
1140#endif
1141
1142#if AIC_DEBUG_REGISTERS
1143ahd_reg_print_t ahd_ovlyrxmsg1_print;
1144#else
1145#define ahd_ovlyrxmsg1_print(regvalue, cur_col, wrap) \
1146 ahd_print_register(NULL, 0, "OVLYRXMSG1", 0x91, regvalue, cur_col, wrap)
1147#endif
1148
1149#if AIC_DEBUG_REGISTERS
1150ahd_reg_print_t ahd_nsenable_print;
1151#else
1152#define ahd_nsenable_print(regvalue, cur_col, wrap) \
1153 ahd_print_register(NULL, 0, "NSENABLE", 0x91, regvalue, cur_col, wrap)
1154#endif
1155
1156#if AIC_DEBUG_REGISTERS
1157ahd_reg_print_t ahd_cmcrxmsg1_print;
1158#else
1159#define ahd_cmcrxmsg1_print(regvalue, cur_col, wrap) \
1160 ahd_print_register(NULL, 0, "CMCRXMSG1", 0x91, regvalue, cur_col, wrap)
1161#endif
1162
1163#if AIC_DEBUG_REGISTERS
1164ahd_reg_print_t ahd_dchrxmsg1_print;
1165#else
1166#define ahd_dchrxmsg1_print(regvalue, cur_col, wrap) \
1167 ahd_print_register(NULL, 0, "DCHRXMSG1", 0x91, regvalue, cur_col, wrap)
1168#endif
1169
1170#if AIC_DEBUG_REGISTERS
1171ahd_reg_print_t ahd_dchrxmsg2_print;
1172#else
1173#define ahd_dchrxmsg2_print(regvalue, cur_col, wrap) \
1174 ahd_print_register(NULL, 0, "DCHRXMSG2", 0x92, regvalue, cur_col, wrap)
1175#endif
1176
1177#if AIC_DEBUG_REGISTERS
1178ahd_reg_print_t ahd_cmcrxmsg2_print;
1179#else
1180#define ahd_cmcrxmsg2_print(regvalue, cur_col, wrap) \
1181 ahd_print_register(NULL, 0, "CMCRXMSG2", 0x92, regvalue, cur_col, wrap)
1182#endif
1183
1184#if AIC_DEBUG_REGISTERS
1185ahd_reg_print_t ahd_ost_print;
1186#else
1187#define ahd_ost_print(regvalue, cur_col, wrap) \
1188 ahd_print_register(NULL, 0, "OST", 0x92, regvalue, cur_col, wrap)
1189#endif
1190
1191#if AIC_DEBUG_REGISTERS
1192ahd_reg_print_t ahd_ovlyrxmsg2_print;
1193#else
1194#define ahd_ovlyrxmsg2_print(regvalue, cur_col, wrap) \
1195 ahd_print_register(NULL, 0, "OVLYRXMSG2", 0x92, regvalue, cur_col, wrap)
1196#endif
1197
1198#if AIC_DEBUG_REGISTERS
1199ahd_reg_print_t ahd_dchrxmsg3_print;
1200#else
1201#define ahd_dchrxmsg3_print(regvalue, cur_col, wrap) \
1202 ahd_print_register(NULL, 0, "DCHRXMSG3", 0x93, regvalue, cur_col, wrap)
1203#endif
1204
1205#if AIC_DEBUG_REGISTERS
1206ahd_reg_print_t ahd_ovlyrxmsg3_print;
1207#else
1208#define ahd_ovlyrxmsg3_print(regvalue, cur_col, wrap) \
1209 ahd_print_register(NULL, 0, "OVLYRXMSG3", 0x93, regvalue, cur_col, wrap)
1210#endif
1211
1212#if AIC_DEBUG_REGISTERS
1213ahd_reg_print_t ahd_cmcrxmsg3_print;
1214#else
1215#define ahd_cmcrxmsg3_print(regvalue, cur_col, wrap) \
1216 ahd_print_register(NULL, 0, "CMCRXMSG3", 0x93, regvalue, cur_col, wrap)
1217#endif
1218
1219#if AIC_DEBUG_REGISTERS
1220ahd_reg_print_t ahd_pcixctl_print; 800ahd_reg_print_t ahd_pcixctl_print;
1221#else 801#else
1222#define ahd_pcixctl_print(regvalue, cur_col, wrap) \ 802#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
@@ -1224,34 +804,6 @@ ahd_reg_print_t ahd_pcixctl_print;
1224#endif 804#endif
1225 805
1226#if AIC_DEBUG_REGISTERS 806#if AIC_DEBUG_REGISTERS
1227ahd_reg_print_t ahd_ovlyseqbcnt_print;
1228#else
1229#define ahd_ovlyseqbcnt_print(regvalue, cur_col, wrap) \
1230 ahd_print_register(NULL, 0, "OVLYSEQBCNT", 0x94, regvalue, cur_col, wrap)
1231#endif
1232
1233#if AIC_DEBUG_REGISTERS
1234ahd_reg_print_t ahd_dchseqbcnt_print;
1235#else
1236#define ahd_dchseqbcnt_print(regvalue, cur_col, wrap) \
1237 ahd_print_register(NULL, 0, "DCHSEQBCNT", 0x94, regvalue, cur_col, wrap)
1238#endif
1239
1240#if AIC_DEBUG_REGISTERS
1241ahd_reg_print_t ahd_cmcseqbcnt_print;
1242#else
1243#define ahd_cmcseqbcnt_print(regvalue, cur_col, wrap) \
1244 ahd_print_register(NULL, 0, "CMCSEQBCNT", 0x94, regvalue, cur_col, wrap)
1245#endif
1246
1247#if AIC_DEBUG_REGISTERS
1248ahd_reg_print_t ahd_cmcspltstat0_print;
1249#else
1250#define ahd_cmcspltstat0_print(regvalue, cur_col, wrap) \
1251 ahd_print_register(NULL, 0, "CMCSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
1252#endif
1253
1254#if AIC_DEBUG_REGISTERS
1255ahd_reg_print_t ahd_dchspltstat0_print; 807ahd_reg_print_t ahd_dchspltstat0_print;
1256#else 808#else
1257#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \ 809#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
@@ -1259,27 +811,6 @@ ahd_reg_print_t ahd_dchspltstat0_print;
1259#endif 811#endif
1260 812
1261#if AIC_DEBUG_REGISTERS 813#if AIC_DEBUG_REGISTERS
1262ahd_reg_print_t ahd_ovlyspltstat0_print;
1263#else
1264#define ahd_ovlyspltstat0_print(regvalue, cur_col, wrap) \
1265 ahd_print_register(NULL, 0, "OVLYSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
1266#endif
1267
1268#if AIC_DEBUG_REGISTERS
1269ahd_reg_print_t ahd_cmcspltstat1_print;
1270#else
1271#define ahd_cmcspltstat1_print(regvalue, cur_col, wrap) \
1272 ahd_print_register(NULL, 0, "CMCSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
1273#endif
1274
1275#if AIC_DEBUG_REGISTERS
1276ahd_reg_print_t ahd_ovlyspltstat1_print;
1277#else
1278#define ahd_ovlyspltstat1_print(regvalue, cur_col, wrap) \
1279 ahd_print_register(NULL, 0, "OVLYSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
1280#endif
1281
1282#if AIC_DEBUG_REGISTERS
1283ahd_reg_print_t ahd_dchspltstat1_print; 814ahd_reg_print_t ahd_dchspltstat1_print;
1284#else 815#else
1285#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \ 816#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
@@ -1287,90 +818,6 @@ ahd_reg_print_t ahd_dchspltstat1_print;
1287#endif 818#endif
1288 819
1289#if AIC_DEBUG_REGISTERS 820#if AIC_DEBUG_REGISTERS
1290ahd_reg_print_t ahd_sgrxmsg0_print;
1291#else
1292#define ahd_sgrxmsg0_print(regvalue, cur_col, wrap) \
1293 ahd_print_register(NULL, 0, "SGRXMSG0", 0x98, regvalue, cur_col, wrap)
1294#endif
1295
1296#if AIC_DEBUG_REGISTERS
1297ahd_reg_print_t ahd_slvspltoutadr0_print;
1298#else
1299#define ahd_slvspltoutadr0_print(regvalue, cur_col, wrap) \
1300 ahd_print_register(NULL, 0, "SLVSPLTOUTADR0", 0x98, regvalue, cur_col, wrap)
1301#endif
1302
1303#if AIC_DEBUG_REGISTERS
1304ahd_reg_print_t ahd_sgrxmsg1_print;
1305#else
1306#define ahd_sgrxmsg1_print(regvalue, cur_col, wrap) \
1307 ahd_print_register(NULL, 0, "SGRXMSG1", 0x99, regvalue, cur_col, wrap)
1308#endif
1309
1310#if AIC_DEBUG_REGISTERS
1311ahd_reg_print_t ahd_slvspltoutadr1_print;
1312#else
1313#define ahd_slvspltoutadr1_print(regvalue, cur_col, wrap) \
1314 ahd_print_register(NULL, 0, "SLVSPLTOUTADR1", 0x99, regvalue, cur_col, wrap)
1315#endif
1316
1317#if AIC_DEBUG_REGISTERS
1318ahd_reg_print_t ahd_sgrxmsg2_print;
1319#else
1320#define ahd_sgrxmsg2_print(regvalue, cur_col, wrap) \
1321 ahd_print_register(NULL, 0, "SGRXMSG2", 0x9a, regvalue, cur_col, wrap)
1322#endif
1323
1324#if AIC_DEBUG_REGISTERS
1325ahd_reg_print_t ahd_slvspltoutadr2_print;
1326#else
1327#define ahd_slvspltoutadr2_print(regvalue, cur_col, wrap) \
1328 ahd_print_register(NULL, 0, "SLVSPLTOUTADR2", 0x9a, regvalue, cur_col, wrap)
1329#endif
1330
1331#if AIC_DEBUG_REGISTERS
1332ahd_reg_print_t ahd_sgrxmsg3_print;
1333#else
1334#define ahd_sgrxmsg3_print(regvalue, cur_col, wrap) \
1335 ahd_print_register(NULL, 0, "SGRXMSG3", 0x9b, regvalue, cur_col, wrap)
1336#endif
1337
1338#if AIC_DEBUG_REGISTERS
1339ahd_reg_print_t ahd_slvspltoutadr3_print;
1340#else
1341#define ahd_slvspltoutadr3_print(regvalue, cur_col, wrap) \
1342 ahd_print_register(NULL, 0, "SLVSPLTOUTADR3", 0x9b, regvalue, cur_col, wrap)
1343#endif
1344
1345#if AIC_DEBUG_REGISTERS
1346ahd_reg_print_t ahd_sgseqbcnt_print;
1347#else
1348#define ahd_sgseqbcnt_print(regvalue, cur_col, wrap) \
1349 ahd_print_register(NULL, 0, "SGSEQBCNT", 0x9c, regvalue, cur_col, wrap)
1350#endif
1351
1352#if AIC_DEBUG_REGISTERS
1353ahd_reg_print_t ahd_slvspltoutattr0_print;
1354#else
1355#define ahd_slvspltoutattr0_print(regvalue, cur_col, wrap) \
1356 ahd_print_register(NULL, 0, "SLVSPLTOUTATTR0", 0x9c, regvalue, cur_col, wrap)
1357#endif
1358
1359#if AIC_DEBUG_REGISTERS
1360ahd_reg_print_t ahd_slvspltoutattr1_print;
1361#else
1362#define ahd_slvspltoutattr1_print(regvalue, cur_col, wrap) \
1363 ahd_print_register(NULL, 0, "SLVSPLTOUTATTR1", 0x9d, regvalue, cur_col, wrap)
1364#endif
1365
1366#if AIC_DEBUG_REGISTERS
1367ahd_reg_print_t ahd_slvspltoutattr2_print;
1368#else
1369#define ahd_slvspltoutattr2_print(regvalue, cur_col, wrap) \
1370 ahd_print_register(NULL, 0, "SLVSPLTOUTATTR2", 0x9e, regvalue, cur_col, wrap)
1371#endif
1372
1373#if AIC_DEBUG_REGISTERS
1374ahd_reg_print_t ahd_sgspltstat0_print; 821ahd_reg_print_t ahd_sgspltstat0_print;
1375#else 822#else
1376#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \ 823#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
@@ -1385,13 +832,6 @@ ahd_reg_print_t ahd_sgspltstat1_print;
1385#endif 832#endif
1386 833
1387#if AIC_DEBUG_REGISTERS 834#if AIC_DEBUG_REGISTERS
1388ahd_reg_print_t ahd_sfunct_print;
1389#else
1390#define ahd_sfunct_print(regvalue, cur_col, wrap) \
1391 ahd_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
1392#endif
1393
1394#if AIC_DEBUG_REGISTERS
1395ahd_reg_print_t ahd_df0pcistat_print; 835ahd_reg_print_t ahd_df0pcistat_print;
1396#else 836#else
1397#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \ 837#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
@@ -1406,41 +846,6 @@ ahd_reg_print_t ahd_reg0_print;
1406#endif 846#endif
1407 847
1408#if AIC_DEBUG_REGISTERS 848#if AIC_DEBUG_REGISTERS
1409ahd_reg_print_t ahd_df1pcistat_print;
1410#else
1411#define ahd_df1pcistat_print(regvalue, cur_col, wrap) \
1412 ahd_print_register(NULL, 0, "DF1PCISTAT", 0xa1, regvalue, cur_col, wrap)
1413#endif
1414
1415#if AIC_DEBUG_REGISTERS
1416ahd_reg_print_t ahd_sgpcistat_print;
1417#else
1418#define ahd_sgpcistat_print(regvalue, cur_col, wrap) \
1419 ahd_print_register(NULL, 0, "SGPCISTAT", 0xa2, regvalue, cur_col, wrap)
1420#endif
1421
1422#if AIC_DEBUG_REGISTERS
1423ahd_reg_print_t ahd_reg1_print;
1424#else
1425#define ahd_reg1_print(regvalue, cur_col, wrap) \
1426 ahd_print_register(NULL, 0, "REG1", 0xa2, regvalue, cur_col, wrap)
1427#endif
1428
1429#if AIC_DEBUG_REGISTERS
1430ahd_reg_print_t ahd_cmcpcistat_print;
1431#else
1432#define ahd_cmcpcistat_print(regvalue, cur_col, wrap) \
1433 ahd_print_register(NULL, 0, "CMCPCISTAT", 0xa3, regvalue, cur_col, wrap)
1434#endif
1435
1436#if AIC_DEBUG_REGISTERS
1437ahd_reg_print_t ahd_ovlypcistat_print;
1438#else
1439#define ahd_ovlypcistat_print(regvalue, cur_col, wrap) \
1440 ahd_print_register(NULL, 0, "OVLYPCISTAT", 0xa4, regvalue, cur_col, wrap)
1441#endif
1442
1443#if AIC_DEBUG_REGISTERS
1444ahd_reg_print_t ahd_reg_isr_print; 849ahd_reg_print_t ahd_reg_isr_print;
1445#else 850#else
1446#define ahd_reg_isr_print(regvalue, cur_col, wrap) \ 851#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
@@ -1455,13 +860,6 @@ ahd_reg_print_t ahd_sg_state_print;
1455#endif 860#endif
1456 861
1457#if AIC_DEBUG_REGISTERS 862#if AIC_DEBUG_REGISTERS
1458ahd_reg_print_t ahd_msipcistat_print;
1459#else
1460#define ahd_msipcistat_print(regvalue, cur_col, wrap) \
1461 ahd_print_register(NULL, 0, "MSIPCISTAT", 0xa6, regvalue, cur_col, wrap)
1462#endif
1463
1464#if AIC_DEBUG_REGISTERS
1465ahd_reg_print_t ahd_targpcistat_print; 863ahd_reg_print_t ahd_targpcistat_print;
1466#else 864#else
1467#define ahd_targpcistat_print(regvalue, cur_col, wrap) \ 865#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
@@ -1469,13 +867,6 @@ ahd_reg_print_t ahd_targpcistat_print;
1469#endif 867#endif
1470 868
1471#if AIC_DEBUG_REGISTERS 869#if AIC_DEBUG_REGISTERS
1472ahd_reg_print_t ahd_data_count_odd_print;
1473#else
1474#define ahd_data_count_odd_print(regvalue, cur_col, wrap) \
1475 ahd_print_register(NULL, 0, "DATA_COUNT_ODD", 0xa7, regvalue, cur_col, wrap)
1476#endif
1477
1478#if AIC_DEBUG_REGISTERS
1479ahd_reg_print_t ahd_scbptr_print; 870ahd_reg_print_t ahd_scbptr_print;
1480#else 871#else
1481#define ahd_scbptr_print(regvalue, cur_col, wrap) \ 872#define ahd_scbptr_print(regvalue, cur_col, wrap) \
@@ -1483,13 +874,6 @@ ahd_reg_print_t ahd_scbptr_print;
1483#endif 874#endif
1484 875
1485#if AIC_DEBUG_REGISTERS 876#if AIC_DEBUG_REGISTERS
1486ahd_reg_print_t ahd_ccscbacnt_print;
1487#else
1488#define ahd_ccscbacnt_print(regvalue, cur_col, wrap) \
1489 ahd_print_register(NULL, 0, "CCSCBACNT", 0xab, regvalue, cur_col, wrap)
1490#endif
1491
1492#if AIC_DEBUG_REGISTERS
1493ahd_reg_print_t ahd_scbautoptr_print; 877ahd_reg_print_t ahd_scbautoptr_print;
1494#else 878#else
1495#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \ 879#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
@@ -1504,13 +888,6 @@ ahd_reg_print_t ahd_ccsgaddr_print;
1504#endif 888#endif
1505 889
1506#if AIC_DEBUG_REGISTERS 890#if AIC_DEBUG_REGISTERS
1507ahd_reg_print_t ahd_ccscbadr_bk_print;
1508#else
1509#define ahd_ccscbadr_bk_print(regvalue, cur_col, wrap) \
1510 ahd_print_register(NULL, 0, "CCSCBADR_BK", 0xac, regvalue, cur_col, wrap)
1511#endif
1512
1513#if AIC_DEBUG_REGISTERS
1514ahd_reg_print_t ahd_ccscbaddr_print; 891ahd_reg_print_t ahd_ccscbaddr_print;
1515#else 892#else
1516#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \ 893#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
@@ -1518,13 +895,6 @@ ahd_reg_print_t ahd_ccscbaddr_print;
1518#endif 895#endif
1519 896
1520#if AIC_DEBUG_REGISTERS 897#if AIC_DEBUG_REGISTERS
1521ahd_reg_print_t ahd_cmc_rambist_print;
1522#else
1523#define ahd_cmc_rambist_print(regvalue, cur_col, wrap) \
1524 ahd_print_register(NULL, 0, "CMC_RAMBIST", 0xad, regvalue, cur_col, wrap)
1525#endif
1526
1527#if AIC_DEBUG_REGISTERS
1528ahd_reg_print_t ahd_ccscbctl_print; 898ahd_reg_print_t ahd_ccscbctl_print;
1529#else 899#else
1530#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \ 900#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
@@ -1546,13 +916,6 @@ ahd_reg_print_t ahd_ccsgram_print;
1546#endif 916#endif
1547 917
1548#if AIC_DEBUG_REGISTERS 918#if AIC_DEBUG_REGISTERS
1549ahd_reg_print_t ahd_flexadr_print;
1550#else
1551#define ahd_flexadr_print(regvalue, cur_col, wrap) \
1552 ahd_print_register(NULL, 0, "FLEXADR", 0xb0, regvalue, cur_col, wrap)
1553#endif
1554
1555#if AIC_DEBUG_REGISTERS
1556ahd_reg_print_t ahd_ccscbram_print; 919ahd_reg_print_t ahd_ccscbram_print;
1557#else 920#else
1558#define ahd_ccscbram_print(regvalue, cur_col, wrap) \ 921#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
@@ -1560,27 +923,6 @@ ahd_reg_print_t ahd_ccscbram_print;
1560#endif 923#endif
1561 924
1562#if AIC_DEBUG_REGISTERS 925#if AIC_DEBUG_REGISTERS
1563ahd_reg_print_t ahd_flexcnt_print;
1564#else
1565#define ahd_flexcnt_print(regvalue, cur_col, wrap) \
1566 ahd_print_register(NULL, 0, "FLEXCNT", 0xb3, regvalue, cur_col, wrap)
1567#endif
1568
1569#if AIC_DEBUG_REGISTERS
1570ahd_reg_print_t ahd_flexdmastat_print;
1571#else
1572#define ahd_flexdmastat_print(regvalue, cur_col, wrap) \
1573 ahd_print_register(NULL, 0, "FLEXDMASTAT", 0xb5, regvalue, cur_col, wrap)
1574#endif
1575
1576#if AIC_DEBUG_REGISTERS
1577ahd_reg_print_t ahd_flexdata_print;
1578#else
1579#define ahd_flexdata_print(regvalue, cur_col, wrap) \
1580 ahd_print_register(NULL, 0, "FLEXDATA", 0xb6, regvalue, cur_col, wrap)
1581#endif
1582
1583#if AIC_DEBUG_REGISTERS
1584ahd_reg_print_t ahd_brddat_print; 926ahd_reg_print_t ahd_brddat_print;
1585#else 927#else
1586#define ahd_brddat_print(regvalue, cur_col, wrap) \ 928#define ahd_brddat_print(regvalue, cur_col, wrap) \
@@ -1623,27 +965,6 @@ ahd_reg_print_t ahd_seestat_print;
1623#endif 965#endif
1624 966
1625#if AIC_DEBUG_REGISTERS 967#if AIC_DEBUG_REGISTERS
1626ahd_reg_print_t ahd_scbcnt_print;
1627#else
1628#define ahd_scbcnt_print(regvalue, cur_col, wrap) \
1629 ahd_print_register(NULL, 0, "SCBCNT", 0xbf, regvalue, cur_col, wrap)
1630#endif
1631
1632#if AIC_DEBUG_REGISTERS
1633ahd_reg_print_t ahd_dfwaddr_print;
1634#else
1635#define ahd_dfwaddr_print(regvalue, cur_col, wrap) \
1636 ahd_print_register(NULL, 0, "DFWADDR", 0xc0, regvalue, cur_col, wrap)
1637#endif
1638
1639#if AIC_DEBUG_REGISTERS
1640ahd_reg_print_t ahd_dspfltrctl_print;
1641#else
1642#define ahd_dspfltrctl_print(regvalue, cur_col, wrap) \
1643 ahd_print_register(NULL, 0, "DSPFLTRCTL", 0xc0, regvalue, cur_col, wrap)
1644#endif
1645
1646#if AIC_DEBUG_REGISTERS
1647ahd_reg_print_t ahd_dspdatactl_print; 968ahd_reg_print_t ahd_dspdatactl_print;
1648#else 969#else
1649#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \ 970#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
@@ -1651,27 +972,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
1651#endif 972#endif
1652 973
1653#if AIC_DEBUG_REGISTERS 974#if AIC_DEBUG_REGISTERS
1654ahd_reg_print_t ahd_dfraddr_print;
1655#else
1656#define ahd_dfraddr_print(regvalue, cur_col, wrap) \
1657 ahd_print_register(NULL, 0, "DFRADDR", 0xc2, regvalue, cur_col, wrap)
1658#endif
1659
1660#if AIC_DEBUG_REGISTERS
1661ahd_reg_print_t ahd_dspreqctl_print;
1662#else
1663#define ahd_dspreqctl_print(regvalue, cur_col, wrap) \
1664 ahd_print_register(NULL, 0, "DSPREQCTL", 0xc2, regvalue, cur_col, wrap)
1665#endif
1666
1667#if AIC_DEBUG_REGISTERS
1668ahd_reg_print_t ahd_dspackctl_print;
1669#else
1670#define ahd_dspackctl_print(regvalue, cur_col, wrap) \
1671 ahd_print_register(NULL, 0, "DSPACKCTL", 0xc3, regvalue, cur_col, wrap)
1672#endif
1673
1674#if AIC_DEBUG_REGISTERS
1675ahd_reg_print_t ahd_dfdat_print; 975ahd_reg_print_t ahd_dfdat_print;
1676#else 976#else
1677#define ahd_dfdat_print(regvalue, cur_col, wrap) \ 977#define ahd_dfdat_print(regvalue, cur_col, wrap) \
@@ -1693,76 +993,6 @@ ahd_reg_print_t ahd_wrtbiasctl_print;
1693#endif 993#endif
1694 994
1695#if AIC_DEBUG_REGISTERS 995#if AIC_DEBUG_REGISTERS
1696ahd_reg_print_t ahd_rcvrbiosctl_print;
1697#else
1698#define ahd_rcvrbiosctl_print(regvalue, cur_col, wrap) \
1699 ahd_print_register(NULL, 0, "RCVRBIOSCTL", 0xc6, regvalue, cur_col, wrap)
1700#endif
1701
1702#if AIC_DEBUG_REGISTERS
1703ahd_reg_print_t ahd_wrtbiascalc_print;
1704#else
1705#define ahd_wrtbiascalc_print(regvalue, cur_col, wrap) \
1706 ahd_print_register(NULL, 0, "WRTBIASCALC", 0xc7, regvalue, cur_col, wrap)
1707#endif
1708
1709#if AIC_DEBUG_REGISTERS
1710ahd_reg_print_t ahd_rcvrbiascalc_print;
1711#else
1712#define ahd_rcvrbiascalc_print(regvalue, cur_col, wrap) \
1713 ahd_print_register(NULL, 0, "RCVRBIASCALC", 0xc8, regvalue, cur_col, wrap)
1714#endif
1715
1716#if AIC_DEBUG_REGISTERS
1717ahd_reg_print_t ahd_dfptrs_print;
1718#else
1719#define ahd_dfptrs_print(regvalue, cur_col, wrap) \
1720 ahd_print_register(NULL, 0, "DFPTRS", 0xc8, regvalue, cur_col, wrap)
1721#endif
1722
1723#if AIC_DEBUG_REGISTERS
1724ahd_reg_print_t ahd_skewcalc_print;
1725#else
1726#define ahd_skewcalc_print(regvalue, cur_col, wrap) \
1727 ahd_print_register(NULL, 0, "SKEWCALC", 0xc9, regvalue, cur_col, wrap)
1728#endif
1729
1730#if AIC_DEBUG_REGISTERS
1731ahd_reg_print_t ahd_dfbkptr_print;
1732#else
1733#define ahd_dfbkptr_print(regvalue, cur_col, wrap) \
1734 ahd_print_register(NULL, 0, "DFBKPTR", 0xc9, regvalue, cur_col, wrap)
1735#endif
1736
1737#if AIC_DEBUG_REGISTERS
1738ahd_reg_print_t ahd_dfdbctl_print;
1739#else
1740#define ahd_dfdbctl_print(regvalue, cur_col, wrap) \
1741 ahd_print_register(NULL, 0, "DFDBCTL", 0xcb, regvalue, cur_col, wrap)
1742#endif
1743
1744#if AIC_DEBUG_REGISTERS
1745ahd_reg_print_t ahd_dfscnt_print;
1746#else
1747#define ahd_dfscnt_print(regvalue, cur_col, wrap) \
1748 ahd_print_register(NULL, 0, "DFSCNT", 0xcc, regvalue, cur_col, wrap)
1749#endif
1750
1751#if AIC_DEBUG_REGISTERS
1752ahd_reg_print_t ahd_dfbcnt_print;
1753#else
1754#define ahd_dfbcnt_print(regvalue, cur_col, wrap) \
1755 ahd_print_register(NULL, 0, "DFBCNT", 0xce, regvalue, cur_col, wrap)
1756#endif
1757
1758#if AIC_DEBUG_REGISTERS
1759ahd_reg_print_t ahd_ovlyaddr_print;
1760#else
1761#define ahd_ovlyaddr_print(regvalue, cur_col, wrap) \
1762 ahd_print_register(NULL, 0, "OVLYADDR", 0xd4, regvalue, cur_col, wrap)
1763#endif
1764
1765#if AIC_DEBUG_REGISTERS
1766ahd_reg_print_t ahd_seqctl0_print; 996ahd_reg_print_t ahd_seqctl0_print;
1767#else 997#else
1768#define ahd_seqctl0_print(regvalue, cur_col, wrap) \ 998#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
@@ -1770,13 +1000,6 @@ ahd_reg_print_t ahd_seqctl0_print;
1770#endif 1000#endif
1771 1001
1772#if AIC_DEBUG_REGISTERS 1002#if AIC_DEBUG_REGISTERS
1773ahd_reg_print_t ahd_seqctl1_print;
1774#else
1775#define ahd_seqctl1_print(regvalue, cur_col, wrap) \
1776 ahd_print_register(NULL, 0, "SEQCTL1", 0xd7, regvalue, cur_col, wrap)
1777#endif
1778
1779#if AIC_DEBUG_REGISTERS
1780ahd_reg_print_t ahd_flags_print; 1003ahd_reg_print_t ahd_flags_print;
1781#else 1004#else
1782#define ahd_flags_print(regvalue, cur_col, wrap) \ 1005#define ahd_flags_print(regvalue, cur_col, wrap) \
@@ -1826,20 +1049,6 @@ ahd_reg_print_t ahd_dindex_print;
1826#endif 1049#endif
1827 1050
1828#if AIC_DEBUG_REGISTERS 1051#if AIC_DEBUG_REGISTERS
1829ahd_reg_print_t ahd_brkaddr0_print;
1830#else
1831#define ahd_brkaddr0_print(regvalue, cur_col, wrap) \
1832 ahd_print_register(NULL, 0, "BRKADDR0", 0xe6, regvalue, cur_col, wrap)
1833#endif
1834
1835#if AIC_DEBUG_REGISTERS
1836ahd_reg_print_t ahd_brkaddr1_print;
1837#else
1838#define ahd_brkaddr1_print(regvalue, cur_col, wrap) \
1839 ahd_print_register(NULL, 0, "BRKADDR1", 0xe6, regvalue, cur_col, wrap)
1840#endif
1841
1842#if AIC_DEBUG_REGISTERS
1843ahd_reg_print_t ahd_allones_print; 1052ahd_reg_print_t ahd_allones_print;
1844#else 1053#else
1845#define ahd_allones_print(regvalue, cur_col, wrap) \ 1054#define ahd_allones_print(regvalue, cur_col, wrap) \
@@ -1875,13 +1084,6 @@ ahd_reg_print_t ahd_dindir_print;
1875#endif 1084#endif
1876 1085
1877#if AIC_DEBUG_REGISTERS 1086#if AIC_DEBUG_REGISTERS
1878ahd_reg_print_t ahd_function1_print;
1879#else
1880#define ahd_function1_print(regvalue, cur_col, wrap) \
1881 ahd_print_register(NULL, 0, "FUNCTION1", 0xf0, regvalue, cur_col, wrap)
1882#endif
1883
1884#if AIC_DEBUG_REGISTERS
1885ahd_reg_print_t ahd_stack_print; 1087ahd_reg_print_t ahd_stack_print;
1886#else 1088#else
1887#define ahd_stack_print(regvalue, cur_col, wrap) \ 1089#define ahd_stack_print(regvalue, cur_col, wrap) \
@@ -1903,13 +1105,6 @@ ahd_reg_print_t ahd_curaddr_print;
1903#endif 1105#endif
1904 1106
1905#if AIC_DEBUG_REGISTERS 1107#if AIC_DEBUG_REGISTERS
1906ahd_reg_print_t ahd_lastaddr_print;
1907#else
1908#define ahd_lastaddr_print(regvalue, cur_col, wrap) \
1909 ahd_print_register(NULL, 0, "LASTADDR", 0xf6, regvalue, cur_col, wrap)
1910#endif
1911
1912#if AIC_DEBUG_REGISTERS
1913ahd_reg_print_t ahd_intvec2_addr_print; 1108ahd_reg_print_t ahd_intvec2_addr_print;
1914#else 1109#else
1915#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \ 1110#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
@@ -1931,24 +1126,17 @@ ahd_reg_print_t ahd_accum_save_print;
1931#endif 1126#endif
1932 1127
1933#if AIC_DEBUG_REGISTERS 1128#if AIC_DEBUG_REGISTERS
1934ahd_reg_print_t ahd_waiting_scb_tails_print; 1129ahd_reg_print_t ahd_sram_base_print;
1935#else
1936#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
1937 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
1938#endif
1939
1940#if AIC_DEBUG_REGISTERS
1941ahd_reg_print_t ahd_ahd_pci_config_base_print;
1942#else 1130#else
1943#define ahd_ahd_pci_config_base_print(regvalue, cur_col, wrap) \ 1131#define ahd_sram_base_print(regvalue, cur_col, wrap) \
1944 ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE", 0x100, regvalue, cur_col, wrap) 1132 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
1945#endif 1133#endif
1946 1134
1947#if AIC_DEBUG_REGISTERS 1135#if AIC_DEBUG_REGISTERS
1948ahd_reg_print_t ahd_sram_base_print; 1136ahd_reg_print_t ahd_waiting_scb_tails_print;
1949#else 1137#else
1950#define ahd_sram_base_print(regvalue, cur_col, wrap) \ 1138#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
1951 ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap) 1139 ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
1952#endif 1140#endif
1953 1141
1954#if AIC_DEBUG_REGISTERS 1142#if AIC_DEBUG_REGISTERS
@@ -2218,17 +1406,17 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
2218#endif 1406#endif
2219 1407
2220#if AIC_DEBUG_REGISTERS 1408#if AIC_DEBUG_REGISTERS
2221ahd_reg_print_t ahd_scb_base_print; 1409ahd_reg_print_t ahd_scb_residual_datacnt_print;
2222#else 1410#else
2223#define ahd_scb_base_print(regvalue, cur_col, wrap) \ 1411#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
2224 ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap) 1412 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
2225#endif 1413#endif
2226 1414
2227#if AIC_DEBUG_REGISTERS 1415#if AIC_DEBUG_REGISTERS
2228ahd_reg_print_t ahd_scb_residual_datacnt_print; 1416ahd_reg_print_t ahd_scb_base_print;
2229#else 1417#else
2230#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \ 1418#define ahd_scb_base_print(regvalue, cur_col, wrap) \
2231 ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap) 1419 ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
2232#endif 1420#endif
2233 1421
2234#if AIC_DEBUG_REGISTERS 1422#if AIC_DEBUG_REGISTERS
@@ -2246,27 +1434,6 @@ ahd_reg_print_t ahd_scb_scsi_status_print;
2246#endif 1434#endif
2247 1435
2248#if AIC_DEBUG_REGISTERS 1436#if AIC_DEBUG_REGISTERS
2249ahd_reg_print_t ahd_scb_target_phases_print;
2250#else
2251#define ahd_scb_target_phases_print(regvalue, cur_col, wrap) \
2252 ahd_print_register(NULL, 0, "SCB_TARGET_PHASES", 0x189, regvalue, cur_col, wrap)
2253#endif
2254
2255#if AIC_DEBUG_REGISTERS
2256ahd_reg_print_t ahd_scb_target_data_dir_print;
2257#else
2258#define ahd_scb_target_data_dir_print(regvalue, cur_col, wrap) \
2259 ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0x18a, regvalue, cur_col, wrap)
2260#endif
2261
2262#if AIC_DEBUG_REGISTERS
2263ahd_reg_print_t ahd_scb_target_itag_print;
2264#else
2265#define ahd_scb_target_itag_print(regvalue, cur_col, wrap) \
2266 ahd_print_register(NULL, 0, "SCB_TARGET_ITAG", 0x18b, regvalue, cur_col, wrap)
2267#endif
2268
2269#if AIC_DEBUG_REGISTERS
2270ahd_reg_print_t ahd_scb_sense_busaddr_print; 1437ahd_reg_print_t ahd_scb_sense_busaddr_print;
2271#else 1438#else
2272#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \ 1439#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
@@ -2365,13 +1532,6 @@ ahd_reg_print_t ahd_scb_next2_print;
2365#endif 1532#endif
2366 1533
2367#if AIC_DEBUG_REGISTERS 1534#if AIC_DEBUG_REGISTERS
2368ahd_reg_print_t ahd_scb_spare_print;
2369#else
2370#define ahd_scb_spare_print(regvalue, cur_col, wrap) \
2371 ahd_print_register(NULL, 0, "SCB_SPARE", 0x1b0, regvalue, cur_col, wrap)
2372#endif
2373
2374#if AIC_DEBUG_REGISTERS
2375ahd_reg_print_t ahd_scb_disconnected_lists_print; 1535ahd_reg_print_t ahd_scb_disconnected_lists_print;
2376#else 1536#else
2377#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \ 1537#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
@@ -2557,10 +1717,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2557 1717
2558#define SG_CACHE_PRE 0x1b 1718#define SG_CACHE_PRE 0x1b
2559 1719
2560#define LQIN 0x20
2561
2562#define TYPEPTR 0x20 1720#define TYPEPTR 0x20
2563 1721
1722#define LQIN 0x20
1723
2564#define TAGPTR 0x21 1724#define TAGPTR 0x21
2565 1725
2566#define LUNPTR 0x22 1726#define LUNPTR 0x22
@@ -2620,14 +1780,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2620#define SINGLECMD 0x02 1780#define SINGLECMD 0x02
2621#define ABORTPENDING 0x01 1781#define ABORTPENDING 0x01
2622 1782
2623#define SCSBIST0 0x39
2624#define GSBISTERR 0x40
2625#define GSBISTDONE 0x20
2626#define GSBISTRUN 0x10
2627#define OSBISTERR 0x04
2628#define OSBISTDONE 0x02
2629#define OSBISTRUN 0x01
2630
2631#define LQCTL2 0x39 1783#define LQCTL2 0x39
2632#define LQIRETRY 0x80 1784#define LQIRETRY 0x80
2633#define LQICONTINUE 0x40 1785#define LQICONTINUE 0x40
@@ -2638,10 +1790,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2638#define LQOTOIDLE 0x02 1790#define LQOTOIDLE 0x02
2639#define LQOPAUSE 0x01 1791#define LQOPAUSE 0x01
2640 1792
2641#define SCSBIST1 0x3a 1793#define SCSBIST0 0x39
2642#define NTBISTERR 0x04 1794#define GSBISTERR 0x40
2643#define NTBISTDONE 0x02 1795#define GSBISTDONE 0x20
2644#define NTBISTRUN 0x01 1796#define GSBISTRUN 0x10
1797#define OSBISTERR 0x04
1798#define OSBISTDONE 0x02
1799#define OSBISTRUN 0x01
2645 1800
2646#define SCSISEQ0 0x3a 1801#define SCSISEQ0 0x3a
2647#define TEMODEO 0x80 1802#define TEMODEO 0x80
@@ -2650,8 +1805,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2650#define FORCEBUSFREE 0x10 1805#define FORCEBUSFREE 0x10
2651#define SCSIRSTO 0x01 1806#define SCSIRSTO 0x01
2652 1807
1808#define SCSBIST1 0x3a
1809#define NTBISTERR 0x04
1810#define NTBISTDONE 0x02
1811#define NTBISTRUN 0x01
1812
2653#define SCSISEQ1 0x3b 1813#define SCSISEQ1 0x3b
2654 1814
1815#define BUSINITID 0x3c
1816
2655#define SXFRCTL0 0x3c 1817#define SXFRCTL0 0x3c
2656#define DFON 0x80 1818#define DFON 0x80
2657#define DFPEXP 0x40 1819#define DFPEXP 0x40
@@ -2660,8 +1822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2660 1822
2661#define DLCOUNT 0x3c 1823#define DLCOUNT 0x3c
2662 1824
2663#define BUSINITID 0x3c
2664
2665#define SXFRCTL1 0x3d 1825#define SXFRCTL1 0x3d
2666#define BITBUCKET 0x80 1826#define BITBUCKET 0x80
2667#define ENSACHK 0x40 1827#define ENSACHK 0x40
@@ -2686,6 +1846,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2686#define CURRFIFO_1 0x01 1846#define CURRFIFO_1 0x01
2687#define CURRFIFO_0 0x00 1847#define CURRFIFO_0 0x00
2688 1848
1849#define MULTARGID 0x40
1850
2689#define SCSISIGO 0x40 1851#define SCSISIGO 0x40
2690#define CDO 0x80 1852#define CDO 0x80
2691#define IOO 0x40 1853#define IOO 0x40
@@ -2696,8 +1858,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2696#define REQO 0x02 1858#define REQO 0x02
2697#define ACKO 0x01 1859#define ACKO 0x01
2698 1860
2699#define MULTARGID 0x40
2700
2701#define SCSISIGI 0x41 1861#define SCSISIGI 0x41
2702#define ATNI 0x10 1862#define ATNI 0x10
2703#define SELI 0x08 1863#define SELI 0x08
@@ -2744,15 +1904,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2744#define ENAB20 0x04 1904#define ENAB20 0x04
2745#define SELWIDE 0x02 1905#define SELWIDE 0x02
2746 1906
2747#define CLRSINT0 0x4b
2748#define CLRSELDO 0x40
2749#define CLRSELDI 0x20
2750#define CLRSELINGO 0x10
2751#define CLRIOERR 0x08
2752#define CLROVERRUN 0x04
2753#define CLRSPIORDY 0x02
2754#define CLRARBDO 0x01
2755
2756#define SSTAT0 0x4b 1907#define SSTAT0 0x4b
2757#define TARGET 0x80 1908#define TARGET 0x80
2758#define SELDO 0x40 1909#define SELDO 0x40
@@ -2772,14 +1923,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2772#define ENSPIORDY 0x02 1923#define ENSPIORDY 0x02
2773#define ENARBDO 0x01 1924#define ENARBDO 0x01
2774 1925
2775#define CLRSINT1 0x4c 1926#define CLRSINT0 0x4b
2776#define CLRSELTIMEO 0x80 1927#define CLRSELDO 0x40
2777#define CLRATNO 0x40 1928#define CLRSELDI 0x20
2778#define CLRSCSIRSTI 0x20 1929#define CLRSELINGO 0x10
2779#define CLRBUSFREE 0x08 1930#define CLRIOERR 0x08
2780#define CLRSCSIPERR 0x04 1931#define CLROVERRUN 0x04
2781#define CLRSTRB2FAST 0x02 1932#define CLRSPIORDY 0x02
2782#define CLRREQINIT 0x01 1933#define CLRARBDO 0x01
2783 1934
2784#define SSTAT1 0x4c 1935#define SSTAT1 0x4c
2785#define SELTO 0x80 1936#define SELTO 0x80
@@ -2791,6 +1942,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2791#define STRB2FAST 0x02 1942#define STRB2FAST 0x02
2792#define REQINIT 0x01 1943#define REQINIT 0x01
2793 1944
1945#define CLRSINT1 0x4c
1946#define CLRSELTIMEO 0x80
1947#define CLRATNO 0x40
1948#define CLRSCSIRSTI 0x20
1949#define CLRBUSFREE 0x08
1950#define CLRSCSIPERR 0x04
1951#define CLRSTRB2FAST 0x02
1952#define CLRREQINIT 0x01
1953
2794#define SSTAT2 0x4d 1954#define SSTAT2 0x4d
2795#define BUSFREETIME 0xc0 1955#define BUSFREETIME 0xc0
2796#define NONPACKREQ 0x20 1956#define NONPACKREQ 0x20
@@ -2838,14 +1998,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2838#define LQIATNLQ 0x02 1998#define LQIATNLQ 0x02
2839#define LQIATNCMD 0x01 1999#define LQIATNCMD 0x01
2840 2000
2841#define CLRLQIINT0 0x50
2842#define CLRLQIATNQAS 0x20
2843#define CLRLQICRCT1 0x10
2844#define CLRLQICRCT2 0x08
2845#define CLRLQIBADLQT 0x04
2846#define CLRLQIATNLQ 0x02
2847#define CLRLQIATNCMD 0x01
2848
2849#define LQIMODE0 0x50 2001#define LQIMODE0 0x50
2850#define ENLQIATNQASK 0x20 2002#define ENLQIATNQASK 0x20
2851#define ENLQICRCT1 0x10 2003#define ENLQICRCT1 0x10
@@ -2854,6 +2006,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2854#define ENLQIATNLQ 0x02 2006#define ENLQIATNLQ 0x02
2855#define ENLQIATNCMD 0x01 2007#define ENLQIATNCMD 0x01
2856 2008
2009#define CLRLQIINT0 0x50
2010#define CLRLQIATNQAS 0x20
2011#define CLRLQICRCT1 0x10
2012#define CLRLQICRCT2 0x08
2013#define CLRLQIBADLQT 0x04
2014#define CLRLQIATNLQ 0x02
2015#define CLRLQIATNCMD 0x01
2016
2857#define LQIMODE1 0x51 2017#define LQIMODE1 0x51
2858#define ENLQIPHASE_LQ 0x80 2018#define ENLQIPHASE_LQ 0x80
2859#define ENLQIPHASE_NLQ 0x40 2019#define ENLQIPHASE_NLQ 0x40
@@ -2976,6 +2136,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2976 2136
2977#define LQOSCSCTL 0x5a 2137#define LQOSCSCTL 0x5a
2978#define LQOH2A_VERSION 0x80 2138#define LQOH2A_VERSION 0x80
2139#define LQOBUSETDLY 0x40
2140#define LQONOHOLDLACK 0x02
2979#define LQONOCHKOVER 0x01 2141#define LQONOCHKOVER 0x01
2980 2142
2981#define NEXTSCB 0x5a 2143#define NEXTSCB 0x5a
@@ -2998,8 +2160,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
2998#define CFG4ICMD 0x02 2160#define CFG4ICMD 0x02
2999#define CFG4TCMD 0x01 2161#define CFG4TCMD 0x01
3000 2162
3001#define CURRSCB 0x5c
3002
3003#define SEQIMODE 0x5c 2163#define SEQIMODE 0x5c
3004#define ENCTXTDONE 0x40 2164#define ENCTXTDONE 0x40
3005#define ENSAVEPTRS 0x20 2165#define ENSAVEPTRS 0x20
@@ -3009,6 +2169,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3009#define ENCFG4ICMD 0x02 2169#define ENCFG4ICMD 0x02
3010#define ENCFG4TCMD 0x01 2170#define ENCFG4TCMD 0x01
3011 2171
2172#define CURRSCB 0x5c
2173
3012#define MDFFSTAT 0x5d 2174#define MDFFSTAT 0x5d
3013#define SHCNTNEGATIVE 0x40 2175#define SHCNTNEGATIVE 0x40
3014#define SHCNTMINUS1 0x20 2176#define SHCNTMINUS1 0x20
@@ -3023,29 +2185,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3023 2185
3024#define DFFTAG 0x5e 2186#define DFFTAG 0x5e
3025 2187
3026#define LASTSCB 0x5e
3027
3028#define SCSITEST 0x5e 2188#define SCSITEST 0x5e
3029#define CNTRTEST 0x08 2189#define CNTRTEST 0x08
3030#define SEL_TXPLL_DEBUG 0x04 2190#define SEL_TXPLL_DEBUG 0x04
3031 2191
2192#define LASTSCB 0x5e
2193
3032#define IOPDNCTL 0x5f 2194#define IOPDNCTL 0x5f
3033#define DISABLE_OE 0x80 2195#define DISABLE_OE 0x80
3034#define PDN_IDIST 0x04 2196#define PDN_IDIST 0x04
3035#define PDN_DIFFSENSE 0x01 2197#define PDN_DIFFSENSE 0x01
3036 2198
2199#define DGRPCRCI 0x60
2200
3037#define SHADDR 0x60 2201#define SHADDR 0x60
3038 2202
3039#define NEGOADDR 0x60 2203#define NEGOADDR 0x60
3040 2204
3041#define DGRPCRCI 0x60
3042
3043#define NEGPERIOD 0x61 2205#define NEGPERIOD 0x61
3044 2206
3045#define PACKCRCI 0x62
3046
3047#define NEGOFFSET 0x62 2207#define NEGOFFSET 0x62
3048 2208
2209#define PACKCRCI 0x62
2210
3049#define NEGPPROPTS 0x63 2211#define NEGPPROPTS 0x63
3050#define PPROPT_PACE 0x08 2212#define PPROPT_PACE 0x08
3051#define PPROPT_QAS 0x04 2213#define PPROPT_QAS 0x04
@@ -3066,6 +2228,7 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3066#define ANNEXDAT 0x66 2228#define ANNEXDAT 0x66
3067 2229
3068#define SCSCHKN 0x66 2230#define SCSCHKN 0x66
2231#define BIDICHKDIS 0x80
3069#define STSELSKIDDIS 0x40 2232#define STSELSKIDDIS 0x40
3070#define CURRFIFODEF 0x20 2233#define CURRFIFODEF 0x20
3071#define WIDERESEN 0x10 2234#define WIDERESEN 0x10
@@ -3090,6 +2253,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3090 2253
3091#define SELOID 0x6b 2254#define SELOID 0x6b
3092 2255
2256#define FAIRNESS 0x6c
2257
3093#define PLL400CTL0 0x6c 2258#define PLL400CTL0 0x6c
3094#define PLL_VCOSEL 0x80 2259#define PLL_VCOSEL 0x80
3095#define PLL_PWDN 0x40 2260#define PLL_PWDN 0x40
@@ -3099,8 +2264,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3099#define PLL_DLPF 0x02 2264#define PLL_DLPF 0x02
3100#define PLL_ENFBM 0x01 2265#define PLL_ENFBM 0x01
3101 2266
3102#define FAIRNESS 0x6c
3103
3104#define PLL400CTL1 0x6d 2267#define PLL400CTL1 0x6d
3105#define PLL_CNTEN 0x80 2268#define PLL_CNTEN 0x80
3106#define PLL_CNTCLR 0x40 2269#define PLL_CNTCLR 0x40
@@ -3112,25 +2275,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3112 2275
3113#define HADDR 0x70 2276#define HADDR 0x70
3114 2277
2278#define HODMAADR 0x70
2279
3115#define PLLDELAY 0x70 2280#define PLLDELAY 0x70
3116#define SPLIT_DROP_REQ 0x80 2281#define SPLIT_DROP_REQ 0x80
3117 2282
3118#define HODMAADR 0x70 2283#define HCNT 0x78
3119 2284
3120#define HODMACNT 0x78 2285#define HODMACNT 0x78
3121 2286
3122#define HCNT 0x78
3123
3124#define HODMAEN 0x7a 2287#define HODMAEN 0x7a
3125 2288
3126#define SCBHADDR 0x7c
3127
3128#define SGHADDR 0x7c 2289#define SGHADDR 0x7c
3129 2290
3130#define SCBHCNT 0x84 2291#define SCBHADDR 0x7c
3131 2292
3132#define SGHCNT 0x84 2293#define SGHCNT 0x84
3133 2294
2295#define SCBHCNT 0x84
2296
3134#define DFF_THRSH 0x88 2297#define DFF_THRSH 0x88
3135#define WR_DFTHRSH 0x70 2298#define WR_DFTHRSH 0x70
3136#define RD_DFTHRSH 0x07 2299#define RD_DFTHRSH 0x07
@@ -3163,6 +2326,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3163 2326
3164#define CMCRXMSG0 0x90 2327#define CMCRXMSG0 0x90
3165 2328
2329#define OVLYRXMSG0 0x90
2330
2331#define DCHRXMSG0 0x90
2332
3166#define ROENABLE 0x90 2333#define ROENABLE 0x90
3167#define MSIROEN 0x20 2334#define MSIROEN 0x20
3168#define OVLYROEN 0x10 2335#define OVLYROEN 0x10
@@ -3171,11 +2338,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3171#define DCH1ROEN 0x02 2338#define DCH1ROEN 0x02
3172#define DCH0ROEN 0x01 2339#define DCH0ROEN 0x01
3173 2340
3174#define OVLYRXMSG0 0x90 2341#define OVLYRXMSG1 0x91
3175 2342
3176#define DCHRXMSG0 0x90 2343#define CMCRXMSG1 0x91
3177 2344
3178#define OVLYRXMSG1 0x91 2345#define DCHRXMSG1 0x91
3179 2346
3180#define NSENABLE 0x91 2347#define NSENABLE 0x91
3181#define MSINSEN 0x20 2348#define MSINSEN 0x20
@@ -3185,10 +2352,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3185#define DCH1NSEN 0x02 2352#define DCH1NSEN 0x02
3186#define DCH0NSEN 0x01 2353#define DCH0NSEN 0x01
3187 2354
3188#define CMCRXMSG1 0x91
3189
3190#define DCHRXMSG1 0x91
3191
3192#define DCHRXMSG2 0x92 2355#define DCHRXMSG2 0x92
3193 2356
3194#define CMCRXMSG2 0x92 2357#define CMCRXMSG2 0x92
@@ -3212,24 +2375,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3212#define TSCSERREN 0x02 2375#define TSCSERREN 0x02
3213#define CMPABCDIS 0x01 2376#define CMPABCDIS 0x01
3214 2377
2378#define CMCSEQBCNT 0x94
2379
3215#define OVLYSEQBCNT 0x94 2380#define OVLYSEQBCNT 0x94
3216 2381
3217#define DCHSEQBCNT 0x94 2382#define DCHSEQBCNT 0x94
3218 2383
3219#define CMCSEQBCNT 0x94
3220
3221#define CMCSPLTSTAT0 0x96
3222
3223#define DCHSPLTSTAT0 0x96 2384#define DCHSPLTSTAT0 0x96
3224 2385
3225#define OVLYSPLTSTAT0 0x96 2386#define OVLYSPLTSTAT0 0x96
3226 2387
3227#define CMCSPLTSTAT1 0x97 2388#define CMCSPLTSTAT0 0x96
3228 2389
3229#define OVLYSPLTSTAT1 0x97 2390#define OVLYSPLTSTAT1 0x97
3230 2391
3231#define DCHSPLTSTAT1 0x97 2392#define DCHSPLTSTAT1 0x97
3232 2393
2394#define CMCSPLTSTAT1 0x97
2395
3233#define SGRXMSG0 0x98 2396#define SGRXMSG0 0x98
3234#define CDNUM 0xf8 2397#define CDNUM 0xf8
3235#define CFNUM 0x07 2398#define CFNUM 0x07
@@ -3257,18 +2420,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3257#define TAG_NUM 0x1f 2420#define TAG_NUM 0x1f
3258#define RLXORD 0x10 2421#define RLXORD 0x10
3259 2422
3260#define SGSEQBCNT 0x9c
3261
3262#define SLVSPLTOUTATTR0 0x9c 2423#define SLVSPLTOUTATTR0 0x9c
3263#define LOWER_BCNT 0xff 2424#define LOWER_BCNT 0xff
3264 2425
2426#define SGSEQBCNT 0x9c
2427
3265#define SLVSPLTOUTATTR1 0x9d 2428#define SLVSPLTOUTATTR1 0x9d
3266#define CMPLT_DNUM 0xf8 2429#define CMPLT_DNUM 0xf8
3267#define CMPLT_FNUM 0x07 2430#define CMPLT_FNUM 0x07
3268 2431
3269#define SLVSPLTOUTATTR2 0x9e
3270#define CMPLT_BNUM 0xff
3271
3272#define SGSPLTSTAT0 0x9e 2432#define SGSPLTSTAT0 0x9e
3273#define STAETERM 0x80 2433#define STAETERM 0x80
3274#define SCBCERR 0x40 2434#define SCBCERR 0x40
@@ -3279,6 +2439,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3279#define RXSCEMSG 0x02 2439#define RXSCEMSG 0x02
3280#define RXSPLTRSP 0x01 2440#define RXSPLTRSP 0x01
3281 2441
2442#define SLVSPLTOUTATTR2 0x9e
2443#define CMPLT_BNUM 0xff
2444
3282#define SGSPLTSTAT1 0x9f 2445#define SGSPLTSTAT1 0x9f
3283#define RXDATABUCKET 0x01 2446#define RXDATABUCKET 0x01
3284 2447
@@ -3334,10 +2497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3334 2497
3335#define CCSGADDR 0xac 2498#define CCSGADDR 0xac
3336 2499
3337#define CCSCBADR_BK 0xac
3338
3339#define CCSCBADDR 0xac 2500#define CCSCBADDR 0xac
3340 2501
2502#define CCSCBADR_BK 0xac
2503
3341#define CMC_RAMBIST 0xad 2504#define CMC_RAMBIST 0xad
3342#define SG_ELEMENT_SIZE 0x80 2505#define SG_ELEMENT_SIZE 0x80
3343#define SCBRAMBIST_FAIL 0x40 2506#define SCBRAMBIST_FAIL 0x40
@@ -3391,9 +2554,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3391#define SEEDAT 0xbc 2554#define SEEDAT 0xbc
3392 2555
3393#define SEECTL 0xbe 2556#define SEECTL 0xbe
2557#define SEEOP_EWDS 0x40
3394#define SEEOP_WALL 0x40 2558#define SEEOP_WALL 0x40
3395#define SEEOP_EWEN 0x40 2559#define SEEOP_EWEN 0x40
3396#define SEEOP_EWDS 0x40
3397#define SEEOPCODE 0x70 2560#define SEEOPCODE 0x70
3398#define SEERST 0x02 2561#define SEERST 0x02
3399#define SEESTART 0x01 2562#define SEESTART 0x01
@@ -3410,25 +2573,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3410 2573
3411#define SCBCNT 0xbf 2574#define SCBCNT 0xbf
3412 2575
3413#define DFWADDR 0xc0
3414
3415#define DSPFLTRCTL 0xc0 2576#define DSPFLTRCTL 0xc0
3416#define FLTRDISABLE 0x20 2577#define FLTRDISABLE 0x20
3417#define EDGESENSE 0x10 2578#define EDGESENSE 0x10
3418#define DSPFCNTSEL 0x0f 2579#define DSPFCNTSEL 0x0f
3419 2580
2581#define DFWADDR 0xc0
2582
3420#define DSPDATACTL 0xc1 2583#define DSPDATACTL 0xc1
3421#define BYPASSENAB 0x80 2584#define BYPASSENAB 0x80
3422#define DESQDIS 0x10 2585#define DESQDIS 0x10
3423#define RCVROFFSTDIS 0x04 2586#define RCVROFFSTDIS 0x04
3424#define XMITOFFSTDIS 0x02 2587#define XMITOFFSTDIS 0x02
3425 2588
3426#define DFRADDR 0xc2
3427
3428#define DSPREQCTL 0xc2 2589#define DSPREQCTL 0xc2
3429#define MANREQCTL 0xc0 2590#define MANREQCTL 0xc0
3430#define MANREQDLY 0x3f 2591#define MANREQDLY 0x3f
3431 2592
2593#define DFRADDR 0xc2
2594
3432#define DSPACKCTL 0xc3 2595#define DSPACKCTL 0xc3
3433#define MANACKCTL 0xc0 2596#define MANACKCTL 0xc0
3434#define MANACKDLY 0x3f 2597#define MANACKDLY 0x3f
@@ -3449,14 +2612,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3449 2612
3450#define WRTBIASCALC 0xc7 2613#define WRTBIASCALC 0xc7
3451 2614
3452#define RCVRBIASCALC 0xc8
3453
3454#define DFPTRS 0xc8 2615#define DFPTRS 0xc8
3455 2616
3456#define SKEWCALC 0xc9 2617#define RCVRBIASCALC 0xc8
3457 2618
3458#define DFBKPTR 0xc9 2619#define DFBKPTR 0xc9
3459 2620
2621#define SKEWCALC 0xc9
2622
3460#define DFDBCTL 0xcb 2623#define DFDBCTL 0xcb
3461#define DFF_CIO_WR_RDY 0x20 2624#define DFF_CIO_WR_RDY 0x20
3462#define DFF_CIO_RD_RDY 0x10 2625#define DFF_CIO_RD_RDY 0x10
@@ -3541,12 +2704,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3541 2704
3542#define ACCUM_SAVE 0xfa 2705#define ACCUM_SAVE 0xfa
3543 2706
3544#define WAITING_SCB_TAILS 0x100
3545
3546#define AHD_PCI_CONFIG_BASE 0x100 2707#define AHD_PCI_CONFIG_BASE 0x100
3547 2708
3548#define SRAM_BASE 0x100 2709#define SRAM_BASE 0x100
3549 2710
2711#define WAITING_SCB_TAILS 0x100
2712
3550#define WAITING_TID_HEAD 0x120 2713#define WAITING_TID_HEAD 0x120
3551 2714
3552#define WAITING_TID_TAIL 0x122 2715#define WAITING_TID_TAIL 0x122
@@ -3575,8 +2738,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3575#define PRELOADEN 0x80 2738#define PRELOADEN 0x80
3576#define WIDEODD 0x40 2739#define WIDEODD 0x40
3577#define SCSIEN 0x20 2740#define SCSIEN 0x20
3578#define SDMAEN 0x10
3579#define SDMAENACK 0x10 2741#define SDMAENACK 0x10
2742#define SDMAEN 0x10
3580#define HDMAEN 0x08 2743#define HDMAEN 0x08
3581#define HDMAENACK 0x08 2744#define HDMAENACK 0x08
3582#define DIRECTION 0x04 2745#define DIRECTION 0x04
@@ -3674,12 +2837,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3674 2837
3675#define MK_MESSAGE_SCSIID 0x162 2838#define MK_MESSAGE_SCSIID 0x162
3676 2839
3677#define SCB_BASE 0x180
3678
3679#define SCB_RESIDUAL_DATACNT 0x180 2840#define SCB_RESIDUAL_DATACNT 0x180
3680#define SCB_CDB_STORE 0x180 2841#define SCB_CDB_STORE 0x180
3681#define SCB_HOST_CDB_PTR 0x180 2842#define SCB_HOST_CDB_PTR 0x180
3682 2843
2844#define SCB_BASE 0x180
2845
3683#define SCB_RESIDUAL_SGPTR 0x184 2846#define SCB_RESIDUAL_SGPTR 0x184
3684#define SG_ADDR_MASK 0xf8 2847#define SG_ADDR_MASK 0xf8
3685#define SG_OVERRUN_RESID 0x02 2848#define SG_OVERRUN_RESID 0x02
@@ -3747,6 +2910,17 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3747#define SCB_DISCONNECTED_LISTS 0x1b8 2910#define SCB_DISCONNECTED_LISTS 0x1b8
3748 2911
3749 2912
2913#define CMD_GROUP_CODE_SHIFT 0x05
2914#define STIMESEL_MIN 0x18
2915#define STIMESEL_SHIFT 0x03
2916#define INVALID_ADDR 0x80
2917#define AHD_PRECOMP_MASK 0x07
2918#define TARGET_DATA_IN 0x01
2919#define CCSCBADDR_MAX 0x80
2920#define NUMDSPS 0x14
2921#define SEEOP_EWEN_ADDR 0xc0
2922#define AHD_ANNEXCOL_PER_DEV0 0x04
2923#define DST_MODE_SHIFT 0x04
3750#define AHD_TIMER_MAX_US 0x18ffe7 2924#define AHD_TIMER_MAX_US 0x18ffe7
3751#define AHD_TIMER_MAX_TICKS 0xffff 2925#define AHD_TIMER_MAX_TICKS 0xffff
3752#define AHD_SENSE_BUFSIZE 0x100 2926#define AHD_SENSE_BUFSIZE 0x100
@@ -3781,43 +2955,32 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
3781#define LUNLEN_SINGLE_LEVEL_LUN 0x0f 2955#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
3782#define NVRAM_SCB_OFFSET 0x2c 2956#define NVRAM_SCB_OFFSET 0x2c
3783#define STATUS_PKT_SENSE 0xff 2957#define STATUS_PKT_SENSE 0xff
3784#define CMD_GROUP_CODE_SHIFT 0x05
3785#define MAX_OFFSET_PACED_BUG 0x7f 2958#define MAX_OFFSET_PACED_BUG 0x7f
3786#define STIMESEL_BUG_ADJ 0x08 2959#define STIMESEL_BUG_ADJ 0x08
3787#define STIMESEL_MIN 0x18
3788#define STIMESEL_SHIFT 0x03
3789#define CCSGRAM_MAXSEGS 0x10 2960#define CCSGRAM_MAXSEGS 0x10
3790#define INVALID_ADDR 0x80
3791#define SEEOP_ERAL_ADDR 0x80 2961#define SEEOP_ERAL_ADDR 0x80
3792#define AHD_SLEWRATE_DEF_REVB 0x08 2962#define AHD_SLEWRATE_DEF_REVB 0x08
3793#define AHD_PRECOMP_CUTBACK_17 0x04 2963#define AHD_PRECOMP_CUTBACK_17 0x04
3794#define AHD_PRECOMP_MASK 0x07
3795#define SRC_MODE_SHIFT 0x00 2964#define SRC_MODE_SHIFT 0x00
3796#define PKT_OVERRUN_BUFSIZE 0x200 2965#define PKT_OVERRUN_BUFSIZE 0x200
3797#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30 2966#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
3798#define TARGET_DATA_IN 0x01
3799#define HOST_MSG 0xff 2967#define HOST_MSG 0xff
3800#define MAX_OFFSET 0xfe 2968#define MAX_OFFSET 0xfe
3801#define BUS_16_BIT 0x01 2969#define BUS_16_BIT 0x01
3802#define CCSCBADDR_MAX 0x80
3803#define NUMDSPS 0x14
3804#define SEEOP_EWEN_ADDR 0xc0
3805#define AHD_ANNEXCOL_PER_DEV0 0x04
3806#define DST_MODE_SHIFT 0x04
3807 2970
3808 2971
3809/* Downloaded Constant Definitions */ 2972/* Downloaded Constant Definitions */
2973#define SG_SIZEOF 0x04
2974#define SG_PREFETCH_ALIGN_MASK 0x02
2975#define SG_PREFETCH_CNT_LIMIT 0x01
3810#define CACHELINE_MASK 0x07 2976#define CACHELINE_MASK 0x07
3811#define SCB_TRANSFER_SIZE 0x06 2977#define SCB_TRANSFER_SIZE 0x06
3812#define PKT_OVERRUN_BUFOFFSET 0x05 2978#define PKT_OVERRUN_BUFOFFSET 0x05
3813#define SG_SIZEOF 0x04
3814#define SG_PREFETCH_ADDR_MASK 0x03 2979#define SG_PREFETCH_ADDR_MASK 0x03
3815#define SG_PREFETCH_ALIGN_MASK 0x02
3816#define SG_PREFETCH_CNT_LIMIT 0x01
3817#define SG_PREFETCH_CNT 0x00 2980#define SG_PREFETCH_CNT 0x00
3818#define DOWNLOAD_CONST_COUNT 0x08 2981#define DOWNLOAD_CONST_COUNT 0x08
3819 2982
3820 2983
3821/* Exported Labels */ 2984/* Exported Labels */
3822#define LABEL_seq_isr 0x28f
3823#define LABEL_timer_isr 0x28b 2985#define LABEL_timer_isr 0x28b
2986#define LABEL_seq_isr 0x28f
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index db38a61a8cb4..c4c8a96bf5a3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -8,7 +8,7 @@
8 8
9#include "aic79xx_osm.h" 9#include "aic79xx_osm.h"
10 10
11static ahd_reg_parse_entry_t MODE_PTR_parse_table[] = { 11static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
12 { "SRC_MODE", 0x07, 0x07 }, 12 { "SRC_MODE", 0x07, 0x07 },
13 { "DST_MODE", 0x70, 0x70 } 13 { "DST_MODE", 0x70, 0x70 }
14}; 14};
@@ -20,7 +20,7 @@ ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
20 0x00, regvalue, cur_col, wrap)); 20 0x00, regvalue, cur_col, wrap));
21} 21}
22 22
23static ahd_reg_parse_entry_t INTSTAT_parse_table[] = { 23static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
24 { "SPLTINT", 0x01, 0x01 }, 24 { "SPLTINT", 0x01, 0x01 },
25 { "CMDCMPLT", 0x02, 0x02 }, 25 { "CMDCMPLT", 0x02, 0x02 },
26 { "SEQINT", 0x04, 0x04 }, 26 { "SEQINT", 0x04, 0x04 },
@@ -39,7 +39,7 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
39 0x01, regvalue, cur_col, wrap)); 39 0x01, regvalue, cur_col, wrap));
40} 40}
41 41
42static ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = { 42static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
43 { "NO_SEQINT", 0x00, 0xff }, 43 { "NO_SEQINT", 0x00, 0xff },
44 { "BAD_PHASE", 0x01, 0xff }, 44 { "BAD_PHASE", 0x01, 0xff },
45 { "SEND_REJECT", 0x02, 0xff }, 45 { "SEND_REJECT", 0x02, 0xff },
@@ -76,7 +76,7 @@ ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
76 0x02, regvalue, cur_col, wrap)); 76 0x02, regvalue, cur_col, wrap));
77} 77}
78 78
79static ahd_reg_parse_entry_t CLRINT_parse_table[] = { 79static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
80 { "CLRSPLTINT", 0x01, 0x01 }, 80 { "CLRSPLTINT", 0x01, 0x01 },
81 { "CLRCMDINT", 0x02, 0x02 }, 81 { "CLRCMDINT", 0x02, 0x02 },
82 { "CLRSEQINT", 0x04, 0x04 }, 82 { "CLRSEQINT", 0x04, 0x04 },
@@ -94,7 +94,7 @@ ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
94 0x03, regvalue, cur_col, wrap)); 94 0x03, regvalue, cur_col, wrap));
95} 95}
96 96
97static ahd_reg_parse_entry_t ERROR_parse_table[] = { 97static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
98 { "DSCTMOUT", 0x02, 0x02 }, 98 { "DSCTMOUT", 0x02, 0x02 },
99 { "ILLOPCODE", 0x04, 0x04 }, 99 { "ILLOPCODE", 0x04, 0x04 },
100 { "SQPARERR", 0x08, 0x08 }, 100 { "SQPARERR", 0x08, 0x08 },
@@ -111,24 +111,7 @@ ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
111 0x04, regvalue, cur_col, wrap)); 111 0x04, regvalue, cur_col, wrap));
112} 112}
113 113
114static ahd_reg_parse_entry_t CLRERR_parse_table[] = { 114static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
115 { "CLRDSCTMOUT", 0x02, 0x02 },
116 { "CLRILLOPCODE", 0x04, 0x04 },
117 { "CLRSQPARERR", 0x08, 0x08 },
118 { "CLRDPARERR", 0x10, 0x10 },
119 { "CLRMPARERR", 0x20, 0x20 },
120 { "CLRCIOACCESFAIL", 0x40, 0x40 },
121 { "CLRCIOPARERR", 0x80, 0x80 }
122};
123
124int
125ahd_clrerr_print(u_int regvalue, u_int *cur_col, u_int wrap)
126{
127 return (ahd_print_register(CLRERR_parse_table, 7, "CLRERR",
128 0x04, regvalue, cur_col, wrap));
129}
130
131static ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
132 { "CHIPRST", 0x01, 0x01 }, 115 { "CHIPRST", 0x01, 0x01 },
133 { "CHIPRSTACK", 0x01, 0x01 }, 116 { "CHIPRSTACK", 0x01, 0x01 },
134 { "INTEN", 0x02, 0x02 }, 117 { "INTEN", 0x02, 0x02 },
@@ -160,7 +143,7 @@ ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
160 0x08, regvalue, cur_col, wrap)); 143 0x08, regvalue, cur_col, wrap));
161} 144}
162 145
163static ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = { 146static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
164 { "ENINT_COALESCE", 0x40, 0x40 }, 147 { "ENINT_COALESCE", 0x40, 0x40 },
165 { "HOST_TQINPOS", 0x80, 0x80 } 148 { "HOST_TQINPOS", 0x80, 0x80 }
166}; 149};
@@ -172,7 +155,7 @@ ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
172 0x0b, regvalue, cur_col, wrap)); 155 0x0b, regvalue, cur_col, wrap));
173} 156}
174 157
175static ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = { 158static const ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
176 { "SEQ_SPLTINT", 0x01, 0x01 }, 159 { "SEQ_SPLTINT", 0x01, 0x01 },
177 { "SEQ_PCIINT", 0x02, 0x02 }, 160 { "SEQ_PCIINT", 0x02, 0x02 },
178 { "SEQ_SCSIINT", 0x04, 0x04 }, 161 { "SEQ_SCSIINT", 0x04, 0x04 },
@@ -187,7 +170,7 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
187 0x0c, regvalue, cur_col, wrap)); 170 0x0c, regvalue, cur_col, wrap));
188} 171}
189 172
190static ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = { 173static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
191 { "CLRSEQ_SPLTINT", 0x01, 0x01 }, 174 { "CLRSEQ_SPLTINT", 0x01, 0x01 },
192 { "CLRSEQ_PCIINT", 0x02, 0x02 }, 175 { "CLRSEQ_PCIINT", 0x02, 0x02 },
193 { "CLRSEQ_SCSIINT", 0x04, 0x04 }, 176 { "CLRSEQ_SCSIINT", 0x04, 0x04 },
@@ -230,7 +213,7 @@ ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
230 0x14, regvalue, cur_col, wrap)); 213 0x14, regvalue, cur_col, wrap));
231} 214}
232 215
233static ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = { 216static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
234 { "SCB_QSIZE_4", 0x00, 0x0f }, 217 { "SCB_QSIZE_4", 0x00, 0x0f },
235 { "SCB_QSIZE_8", 0x01, 0x0f }, 218 { "SCB_QSIZE_8", 0x01, 0x0f },
236 { "SCB_QSIZE_16", 0x02, 0x0f }, 219 { "SCB_QSIZE_16", 0x02, 0x0f },
@@ -258,7 +241,7 @@ ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
258 0x16, regvalue, cur_col, wrap)); 241 0x16, regvalue, cur_col, wrap));
259} 242}
260 243
261static ahd_reg_parse_entry_t INTCTL_parse_table[] = { 244static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
262 { "SPLTINTEN", 0x01, 0x01 }, 245 { "SPLTINTEN", 0x01, 0x01 },
263 { "SEQINTEN", 0x02, 0x02 }, 246 { "SEQINTEN", 0x02, 0x02 },
264 { "SCSIINTEN", 0x04, 0x04 }, 247 { "SCSIINTEN", 0x04, 0x04 },
@@ -276,7 +259,7 @@ ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
276 0x18, regvalue, cur_col, wrap)); 259 0x18, regvalue, cur_col, wrap));
277} 260}
278 261
279static ahd_reg_parse_entry_t DFCNTRL_parse_table[] = { 262static const ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
280 { "DIRECTIONEN", 0x01, 0x01 }, 263 { "DIRECTIONEN", 0x01, 0x01 },
281 { "FIFOFLUSH", 0x02, 0x02 }, 264 { "FIFOFLUSH", 0x02, 0x02 },
282 { "FIFOFLUSHACK", 0x02, 0x02 }, 265 { "FIFOFLUSHACK", 0x02, 0x02 },
@@ -297,7 +280,7 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
297 0x19, regvalue, cur_col, wrap)); 280 0x19, regvalue, cur_col, wrap));
298} 281}
299 282
300static ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = { 283static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
301 { "CIOPARCKEN", 0x01, 0x01 }, 284 { "CIOPARCKEN", 0x01, 0x01 },
302 { "DISABLE_TWATE", 0x02, 0x02 }, 285 { "DISABLE_TWATE", 0x02, 0x02 },
303 { "EXTREQLCK", 0x10, 0x10 }, 286 { "EXTREQLCK", 0x10, 0x10 },
@@ -313,7 +296,7 @@ ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
313 0x19, regvalue, cur_col, wrap)); 296 0x19, regvalue, cur_col, wrap));
314} 297}
315 298
316static ahd_reg_parse_entry_t DFSTATUS_parse_table[] = { 299static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
317 { "FIFOEMP", 0x01, 0x01 }, 300 { "FIFOEMP", 0x01, 0x01 },
318 { "FIFOFULL", 0x02, 0x02 }, 301 { "FIFOFULL", 0x02, 0x02 },
319 { "DFTHRESH", 0x04, 0x04 }, 302 { "DFTHRESH", 0x04, 0x04 },
@@ -330,7 +313,7 @@ ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
330 0x1a, regvalue, cur_col, wrap)); 313 0x1a, regvalue, cur_col, wrap));
331} 314}
332 315
333static ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = { 316static const ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
334 { "LAST_SEG_DONE", 0x01, 0x01 }, 317 { "LAST_SEG_DONE", 0x01, 0x01 },
335 { "LAST_SEG", 0x02, 0x02 }, 318 { "LAST_SEG", 0x02, 0x02 },
336 { "ODD_SEG", 0x04, 0x04 }, 319 { "ODD_SEG", 0x04, 0x04 },
@@ -344,20 +327,7 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
344 0x1b, regvalue, cur_col, wrap)); 327 0x1b, regvalue, cur_col, wrap));
345} 328}
346 329
347static ahd_reg_parse_entry_t ARBCTL_parse_table[] = { 330static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
348 { "USE_TIME", 0x07, 0x07 },
349 { "RETRY_SWEN", 0x08, 0x08 },
350 { "RESET_HARB", 0x80, 0x80 }
351};
352
353int
354ahd_arbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
355{
356 return (ahd_print_register(ARBCTL_parse_table, 3, "ARBCTL",
357 0x1b, regvalue, cur_col, wrap));
358}
359
360static ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
361 { "LAST_SEG", 0x02, 0x02 }, 331 { "LAST_SEG", 0x02, 0x02 },
362 { "ODD_SEG", 0x04, 0x04 }, 332 { "ODD_SEG", 0x04, 0x04 },
363 { "SG_ADDR_MASK", 0xf8, 0xf8 } 333 { "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -378,20 +348,6 @@ ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
378} 348}
379 349
380int 350int
381ahd_typeptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
382{
383 return (ahd_print_register(NULL, 0, "TYPEPTR",
384 0x20, regvalue, cur_col, wrap));
385}
386
387int
388ahd_tagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
389{
390 return (ahd_print_register(NULL, 0, "TAGPTR",
391 0x21, regvalue, cur_col, wrap));
392}
393
394int
395ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap) 351ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
396{ 352{
397 return (ahd_print_register(NULL, 0, "LUNPTR", 353 return (ahd_print_register(NULL, 0, "LUNPTR",
@@ -399,20 +355,6 @@ ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
399} 355}
400 356
401int 357int
402ahd_datalenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
403{
404 return (ahd_print_register(NULL, 0, "DATALENPTR",
405 0x23, regvalue, cur_col, wrap));
406}
407
408int
409ahd_statlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
410{
411 return (ahd_print_register(NULL, 0, "STATLENPTR",
412 0x24, regvalue, cur_col, wrap));
413}
414
415int
416ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap) 358ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
417{ 359{
418 return (ahd_print_register(NULL, 0, "CMDLENPTR", 360 return (ahd_print_register(NULL, 0, "CMDLENPTR",
@@ -448,13 +390,6 @@ ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
448} 390}
449 391
450int 392int
451ahd_idptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
452{
453 return (ahd_print_register(NULL, 0, "IDPTR",
454 0x2a, regvalue, cur_col, wrap));
455}
456
457int
458ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap) 393ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
459{ 394{
460 return (ahd_print_register(NULL, 0, "ABRTBYTEPTR", 395 return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
@@ -468,28 +403,7 @@ ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
468 0x2c, regvalue, cur_col, wrap)); 403 0x2c, regvalue, cur_col, wrap));
469} 404}
470 405
471int 406static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
472ahd_maxcmdbytes_print(u_int regvalue, u_int *cur_col, u_int wrap)
473{
474 return (ahd_print_register(NULL, 0, "MAXCMDBYTES",
475 0x2d, regvalue, cur_col, wrap));
476}
477
478int
479ahd_maxcmd2rcv_print(u_int regvalue, u_int *cur_col, u_int wrap)
480{
481 return (ahd_print_register(NULL, 0, "MAXCMD2RCV",
482 0x2e, regvalue, cur_col, wrap));
483}
484
485int
486ahd_shortthresh_print(u_int regvalue, u_int *cur_col, u_int wrap)
487{
488 return (ahd_print_register(NULL, 0, "SHORTTHRESH",
489 0x2f, regvalue, cur_col, wrap));
490}
491
492static ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
493 { "ILUNLEN", 0x0f, 0x0f }, 407 { "ILUNLEN", 0x0f, 0x0f },
494 { "TLUNLEN", 0xf0, 0xf0 } 408 { "TLUNLEN", 0xf0, 0xf0 }
495}; 409};
@@ -522,49 +436,7 @@ ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
522 0x33, regvalue, cur_col, wrap)); 436 0x33, regvalue, cur_col, wrap));
523} 437}
524 438
525int 439static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
526ahd_lqrsvd01_print(u_int regvalue, u_int *cur_col, u_int wrap)
527{
528 return (ahd_print_register(NULL, 0, "LQRSVD01",
529 0x34, regvalue, cur_col, wrap));
530}
531
532int
533ahd_lqrsvd16_print(u_int regvalue, u_int *cur_col, u_int wrap)
534{
535 return (ahd_print_register(NULL, 0, "LQRSVD16",
536 0x35, regvalue, cur_col, wrap));
537}
538
539int
540ahd_lqrsvd17_print(u_int regvalue, u_int *cur_col, u_int wrap)
541{
542 return (ahd_print_register(NULL, 0, "LQRSVD17",
543 0x36, regvalue, cur_col, wrap));
544}
545
546int
547ahd_cmdrsvd0_print(u_int regvalue, u_int *cur_col, u_int wrap)
548{
549 return (ahd_print_register(NULL, 0, "CMDRSVD0",
550 0x37, regvalue, cur_col, wrap));
551}
552
553static ahd_reg_parse_entry_t LQCTL0_parse_table[] = {
554 { "LQ0INITGCLT", 0x03, 0x03 },
555 { "LQ0TARGCLT", 0x0c, 0x0c },
556 { "LQIINITGCLT", 0x30, 0x30 },
557 { "LQITARGCLT", 0xc0, 0xc0 }
558};
559
560int
561ahd_lqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
562{
563 return (ahd_print_register(LQCTL0_parse_table, 4, "LQCTL0",
564 0x38, regvalue, cur_col, wrap));
565}
566
567static ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
568 { "ABORTPENDING", 0x01, 0x01 }, 440 { "ABORTPENDING", 0x01, 0x01 },
569 { "SINGLECMD", 0x02, 0x02 }, 441 { "SINGLECMD", 0x02, 0x02 },
570 { "PCI2PCI", 0x04, 0x04 } 442 { "PCI2PCI", 0x04, 0x04 }
@@ -577,23 +449,7 @@ ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
577 0x38, regvalue, cur_col, wrap)); 449 0x38, regvalue, cur_col, wrap));
578} 450}
579 451
580static ahd_reg_parse_entry_t SCSBIST0_parse_table[] = { 452static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
581 { "OSBISTRUN", 0x01, 0x01 },
582 { "OSBISTDONE", 0x02, 0x02 },
583 { "OSBISTERR", 0x04, 0x04 },
584 { "GSBISTRUN", 0x10, 0x10 },
585 { "GSBISTDONE", 0x20, 0x20 },
586 { "GSBISTERR", 0x40, 0x40 }
587};
588
589int
590ahd_scsbist0_print(u_int regvalue, u_int *cur_col, u_int wrap)
591{
592 return (ahd_print_register(SCSBIST0_parse_table, 6, "SCSBIST0",
593 0x39, regvalue, cur_col, wrap));
594}
595
596static ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
597 { "LQOPAUSE", 0x01, 0x01 }, 453 { "LQOPAUSE", 0x01, 0x01 },
598 { "LQOTOIDLE", 0x02, 0x02 }, 454 { "LQOTOIDLE", 0x02, 0x02 },
599 { "LQOCONTINUE", 0x04, 0x04 }, 455 { "LQOCONTINUE", 0x04, 0x04 },
@@ -611,20 +467,7 @@ ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
611 0x39, regvalue, cur_col, wrap)); 467 0x39, regvalue, cur_col, wrap));
612} 468}
613 469
614static ahd_reg_parse_entry_t SCSBIST1_parse_table[] = { 470static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
615 { "NTBISTRUN", 0x01, 0x01 },
616 { "NTBISTDONE", 0x02, 0x02 },
617 { "NTBISTERR", 0x04, 0x04 }
618};
619
620int
621ahd_scsbist1_print(u_int regvalue, u_int *cur_col, u_int wrap)
622{
623 return (ahd_print_register(SCSBIST1_parse_table, 3, "SCSBIST1",
624 0x3a, regvalue, cur_col, wrap));
625}
626
627static ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
628 { "SCSIRSTO", 0x01, 0x01 }, 471 { "SCSIRSTO", 0x01, 0x01 },
629 { "FORCEBUSFREE", 0x10, 0x10 }, 472 { "FORCEBUSFREE", 0x10, 0x10 },
630 { "ENARBO", 0x20, 0x20 }, 473 { "ENARBO", 0x20, 0x20 },
@@ -639,7 +482,7 @@ ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap)
639 0x3a, regvalue, cur_col, wrap)); 482 0x3a, regvalue, cur_col, wrap));
640} 483}
641 484
642static ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = { 485static const ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
643 { "ALTSTIM", 0x01, 0x01 }, 486 { "ALTSTIM", 0x01, 0x01 },
644 { "ENAUTOATNP", 0x02, 0x02 }, 487 { "ENAUTOATNP", 0x02, 0x02 },
645 { "MANUALP", 0x0c, 0x0c }, 488 { "MANUALP", 0x0c, 0x0c },
@@ -655,7 +498,7 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
655 0x3b, regvalue, cur_col, wrap)); 498 0x3b, regvalue, cur_col, wrap));
656} 499}
657 500
658static ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = { 501static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
659 { "SPIOEN", 0x08, 0x08 }, 502 { "SPIOEN", 0x08, 0x08 },
660 { "BIOSCANCELEN", 0x10, 0x10 }, 503 { "BIOSCANCELEN", 0x10, 0x10 },
661 { "DFPEXP", 0x40, 0x40 }, 504 { "DFPEXP", 0x40, 0x40 },
@@ -669,21 +512,7 @@ ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
669 0x3c, regvalue, cur_col, wrap)); 512 0x3c, regvalue, cur_col, wrap));
670} 513}
671 514
672int 515static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
673ahd_dlcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
674{
675 return (ahd_print_register(NULL, 0, "DLCOUNT",
676 0x3c, regvalue, cur_col, wrap));
677}
678
679int
680ahd_businitid_print(u_int regvalue, u_int *cur_col, u_int wrap)
681{
682 return (ahd_print_register(NULL, 0, "BUSINITID",
683 0x3c, regvalue, cur_col, wrap));
684}
685
686static ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
687 { "STPWEN", 0x01, 0x01 }, 516 { "STPWEN", 0x01, 0x01 },
688 { "ACTNEGEN", 0x02, 0x02 }, 517 { "ACTNEGEN", 0x02, 0x02 },
689 { "ENSTIMER", 0x04, 0x04 }, 518 { "ENSTIMER", 0x04, 0x04 },
@@ -700,27 +529,7 @@ ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
700 0x3d, regvalue, cur_col, wrap)); 529 0x3d, regvalue, cur_col, wrap));
701} 530}
702 531
703int 532static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
704ahd_bustargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
705{
706 return (ahd_print_register(NULL, 0, "BUSTARGID",
707 0x3e, regvalue, cur_col, wrap));
708}
709
710static ahd_reg_parse_entry_t SXFRCTL2_parse_table[] = {
711 { "ASU", 0x07, 0x07 },
712 { "CMDDMAEN", 0x08, 0x08 },
713 { "AUTORSTDIS", 0x10, 0x10 }
714};
715
716int
717ahd_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
718{
719 return (ahd_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
720 0x3e, regvalue, cur_col, wrap));
721}
722
723static ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
724 { "CURRFIFO_0", 0x00, 0x03 }, 533 { "CURRFIFO_0", 0x00, 0x03 },
725 { "CURRFIFO_1", 0x01, 0x03 }, 534 { "CURRFIFO_1", 0x01, 0x03 },
726 { "CURRFIFO_NONE", 0x03, 0x03 }, 535 { "CURRFIFO_NONE", 0x03, 0x03 },
@@ -736,7 +545,14 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
736 0x3f, regvalue, cur_col, wrap)); 545 0x3f, regvalue, cur_col, wrap));
737} 546}
738 547
739static ahd_reg_parse_entry_t SCSISIGO_parse_table[] = { 548int
549ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
550{
551 return (ahd_print_register(NULL, 0, "MULTARGID",
552 0x40, regvalue, cur_col, wrap));
553}
554
555static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
740 { "P_DATAOUT", 0x00, 0xe0 }, 556 { "P_DATAOUT", 0x00, 0xe0 },
741 { "P_DATAOUT_DT", 0x20, 0xe0 }, 557 { "P_DATAOUT_DT", 0x20, 0xe0 },
742 { "P_DATAIN", 0x40, 0xe0 }, 558 { "P_DATAIN", 0x40, 0xe0 },
@@ -763,14 +579,7 @@ ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
763 0x40, regvalue, cur_col, wrap)); 579 0x40, regvalue, cur_col, wrap));
764} 580}
765 581
766int 582static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
767ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
768{
769 return (ahd_print_register(NULL, 0, "MULTARGID",
770 0x40, regvalue, cur_col, wrap));
771}
772
773static ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
774 { "P_DATAOUT", 0x00, 0xe0 }, 583 { "P_DATAOUT", 0x00, 0xe0 },
775 { "P_DATAOUT_DT", 0x20, 0xe0 }, 584 { "P_DATAOUT_DT", 0x20, 0xe0 },
776 { "P_DATAIN", 0x40, 0xe0 }, 585 { "P_DATAIN", 0x40, 0xe0 },
@@ -797,7 +606,7 @@ ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
797 0x41, regvalue, cur_col, wrap)); 606 0x41, regvalue, cur_col, wrap));
798} 607}
799 608
800static ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = { 609static const ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
801 { "DATA_OUT_PHASE", 0x01, 0x03 }, 610 { "DATA_OUT_PHASE", 0x01, 0x03 },
802 { "DATA_IN_PHASE", 0x02, 0x03 }, 611 { "DATA_IN_PHASE", 0x02, 0x03 },
803 { "DATA_PHASE_MASK", 0x03, 0x03 }, 612 { "DATA_PHASE_MASK", 0x03, 0x03 },
@@ -815,13 +624,6 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
815} 624}
816 625
817int 626int
818ahd_scsidat0_img_print(u_int regvalue, u_int *cur_col, u_int wrap)
819{
820 return (ahd_print_register(NULL, 0, "SCSIDAT0_IMG",
821 0x43, regvalue, cur_col, wrap));
822}
823
824int
825ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap) 627ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
826{ 628{
827 return (ahd_print_register(NULL, 0, "SCSIDAT", 629 return (ahd_print_register(NULL, 0, "SCSIDAT",
@@ -835,7 +637,7 @@ ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
835 0x46, regvalue, cur_col, wrap)); 637 0x46, regvalue, cur_col, wrap));
836} 638}
837 639
838static ahd_reg_parse_entry_t TARGIDIN_parse_table[] = { 640static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
839 { "TARGID", 0x0f, 0x0f }, 641 { "TARGID", 0x0f, 0x0f },
840 { "CLKOUT", 0x80, 0x80 } 642 { "CLKOUT", 0x80, 0x80 }
841}; 643};
@@ -847,7 +649,7 @@ ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
847 0x48, regvalue, cur_col, wrap)); 649 0x48, regvalue, cur_col, wrap));
848} 650}
849 651
850static ahd_reg_parse_entry_t SELID_parse_table[] = { 652static const ahd_reg_parse_entry_t SELID_parse_table[] = {
851 { "ONEBIT", 0x08, 0x08 }, 653 { "ONEBIT", 0x08, 0x08 },
852 { "SELID_MASK", 0xf0, 0xf0 } 654 { "SELID_MASK", 0xf0, 0xf0 }
853}; 655};
@@ -859,7 +661,7 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
859 0x49, regvalue, cur_col, wrap)); 661 0x49, regvalue, cur_col, wrap));
860} 662}
861 663
862static ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = { 664static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
863 { "AUTO_MSGOUT_DE", 0x02, 0x02 }, 665 { "AUTO_MSGOUT_DE", 0x02, 0x02 },
864 { "ENDGFORMCHK", 0x04, 0x04 }, 666 { "ENDGFORMCHK", 0x04, 0x04 },
865 { "BUSFREEREV", 0x10, 0x10 }, 667 { "BUSFREEREV", 0x10, 0x10 },
@@ -876,7 +678,7 @@ ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
876 0x4a, regvalue, cur_col, wrap)); 678 0x4a, regvalue, cur_col, wrap));
877} 679}
878 680
879static ahd_reg_parse_entry_t SBLKCTL_parse_table[] = { 681static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
880 { "SELWIDE", 0x02, 0x02 }, 682 { "SELWIDE", 0x02, 0x02 },
881 { "ENAB20", 0x04, 0x04 }, 683 { "ENAB20", 0x04, 0x04 },
882 { "ENAB40", 0x08, 0x08 }, 684 { "ENAB40", 0x08, 0x08 },
@@ -891,24 +693,7 @@ ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
891 0x4a, regvalue, cur_col, wrap)); 693 0x4a, regvalue, cur_col, wrap));
892} 694}
893 695
894static ahd_reg_parse_entry_t CLRSINT0_parse_table[] = { 696static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
895 { "CLRARBDO", 0x01, 0x01 },
896 { "CLRSPIORDY", 0x02, 0x02 },
897 { "CLROVERRUN", 0x04, 0x04 },
898 { "CLRIOERR", 0x08, 0x08 },
899 { "CLRSELINGO", 0x10, 0x10 },
900 { "CLRSELDI", 0x20, 0x20 },
901 { "CLRSELDO", 0x40, 0x40 }
902};
903
904int
905ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
906{
907 return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
908 0x4b, regvalue, cur_col, wrap));
909}
910
911static ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
912 { "ARBDO", 0x01, 0x01 }, 697 { "ARBDO", 0x01, 0x01 },
913 { "SPIORDY", 0x02, 0x02 }, 698 { "SPIORDY", 0x02, 0x02 },
914 { "OVERRUN", 0x04, 0x04 }, 699 { "OVERRUN", 0x04, 0x04 },
@@ -926,7 +711,7 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
926 0x4b, regvalue, cur_col, wrap)); 711 0x4b, regvalue, cur_col, wrap));
927} 712}
928 713
929static ahd_reg_parse_entry_t SIMODE0_parse_table[] = { 714static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
930 { "ENARBDO", 0x01, 0x01 }, 715 { "ENARBDO", 0x01, 0x01 },
931 { "ENSPIORDY", 0x02, 0x02 }, 716 { "ENSPIORDY", 0x02, 0x02 },
932 { "ENOVERRUN", 0x04, 0x04 }, 717 { "ENOVERRUN", 0x04, 0x04 },
@@ -943,24 +728,24 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
943 0x4b, regvalue, cur_col, wrap)); 728 0x4b, regvalue, cur_col, wrap));
944} 729}
945 730
946static ahd_reg_parse_entry_t CLRSINT1_parse_table[] = { 731static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
947 { "CLRREQINIT", 0x01, 0x01 }, 732 { "CLRARBDO", 0x01, 0x01 },
948 { "CLRSTRB2FAST", 0x02, 0x02 }, 733 { "CLRSPIORDY", 0x02, 0x02 },
949 { "CLRSCSIPERR", 0x04, 0x04 }, 734 { "CLROVERRUN", 0x04, 0x04 },
950 { "CLRBUSFREE", 0x08, 0x08 }, 735 { "CLRIOERR", 0x08, 0x08 },
951 { "CLRSCSIRSTI", 0x20, 0x20 }, 736 { "CLRSELINGO", 0x10, 0x10 },
952 { "CLRATNO", 0x40, 0x40 }, 737 { "CLRSELDI", 0x20, 0x20 },
953 { "CLRSELTIMEO", 0x80, 0x80 } 738 { "CLRSELDO", 0x40, 0x40 }
954}; 739};
955 740
956int 741int
957ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap) 742ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
958{ 743{
959 return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1", 744 return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
960 0x4c, regvalue, cur_col, wrap)); 745 0x4b, regvalue, cur_col, wrap));
961} 746}
962 747
963static ahd_reg_parse_entry_t SSTAT1_parse_table[] = { 748static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
964 { "REQINIT", 0x01, 0x01 }, 749 { "REQINIT", 0x01, 0x01 },
965 { "STRB2FAST", 0x02, 0x02 }, 750 { "STRB2FAST", 0x02, 0x02 },
966 { "SCSIPERR", 0x04, 0x04 }, 751 { "SCSIPERR", 0x04, 0x04 },
@@ -978,7 +763,24 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
978 0x4c, regvalue, cur_col, wrap)); 763 0x4c, regvalue, cur_col, wrap));
979} 764}
980 765
981static ahd_reg_parse_entry_t SSTAT2_parse_table[] = { 766static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
767 { "CLRREQINIT", 0x01, 0x01 },
768 { "CLRSTRB2FAST", 0x02, 0x02 },
769 { "CLRSCSIPERR", 0x04, 0x04 },
770 { "CLRBUSFREE", 0x08, 0x08 },
771 { "CLRSCSIRSTI", 0x20, 0x20 },
772 { "CLRATNO", 0x40, 0x40 },
773 { "CLRSELTIMEO", 0x80, 0x80 }
774};
775
776int
777ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
778{
779 return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
780 0x4c, regvalue, cur_col, wrap));
781}
782
783static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
982 { "BUSFREE_LQO", 0x40, 0xc0 }, 784 { "BUSFREE_LQO", 0x40, 0xc0 },
983 { "BUSFREE_DFF0", 0x80, 0xc0 }, 785 { "BUSFREE_DFF0", 0x80, 0xc0 },
984 { "BUSFREE_DFF1", 0xc0, 0xc0 }, 786 { "BUSFREE_DFF1", 0xc0, 0xc0 },
@@ -998,20 +800,7 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
998 0x4d, regvalue, cur_col, wrap)); 800 0x4d, regvalue, cur_col, wrap));
999} 801}
1000 802
1001static ahd_reg_parse_entry_t SIMODE2_parse_table[] = { 803static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
1002 { "ENDMADONE", 0x01, 0x01 },
1003 { "ENSDONE", 0x02, 0x02 },
1004 { "ENWIDE_RES", 0x04, 0x04 }
1005};
1006
1007int
1008ahd_simode2_print(u_int regvalue, u_int *cur_col, u_int wrap)
1009{
1010 return (ahd_print_register(SIMODE2_parse_table, 3, "SIMODE2",
1011 0x4d, regvalue, cur_col, wrap));
1012}
1013
1014static ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
1015 { "CLRDMADONE", 0x01, 0x01 }, 804 { "CLRDMADONE", 0x01, 0x01 },
1016 { "CLRSDONE", 0x02, 0x02 }, 805 { "CLRSDONE", 0x02, 0x02 },
1017 { "CLRWIDE_RES", 0x04, 0x04 }, 806 { "CLRWIDE_RES", 0x04, 0x04 },
@@ -1025,7 +814,7 @@ ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
1025 0x4d, regvalue, cur_col, wrap)); 814 0x4d, regvalue, cur_col, wrap));
1026} 815}
1027 816
1028static ahd_reg_parse_entry_t PERRDIAG_parse_table[] = { 817static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
1029 { "DTERR", 0x01, 0x01 }, 818 { "DTERR", 0x01, 0x01 },
1030 { "DGFORMERR", 0x02, 0x02 }, 819 { "DGFORMERR", 0x02, 0x02 },
1031 { "CRCERR", 0x04, 0x04 }, 820 { "CRCERR", 0x04, 0x04 },
@@ -1064,7 +853,7 @@ ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
1064 0x4f, regvalue, cur_col, wrap)); 853 0x4f, regvalue, cur_col, wrap));
1065} 854}
1066 855
1067static ahd_reg_parse_entry_t LQISTAT0_parse_table[] = { 856static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
1068 { "LQIATNCMD", 0x01, 0x01 }, 857 { "LQIATNCMD", 0x01, 0x01 },
1069 { "LQIATNLQ", 0x02, 0x02 }, 858 { "LQIATNLQ", 0x02, 0x02 },
1070 { "LQIBADLQT", 0x04, 0x04 }, 859 { "LQIBADLQT", 0x04, 0x04 },
@@ -1080,23 +869,7 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1080 0x50, regvalue, cur_col, wrap)); 869 0x50, regvalue, cur_col, wrap));
1081} 870}
1082 871
1083static ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = { 872static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
1084 { "CLRLQIATNCMD", 0x01, 0x01 },
1085 { "CLRLQIATNLQ", 0x02, 0x02 },
1086 { "CLRLQIBADLQT", 0x04, 0x04 },
1087 { "CLRLQICRCT2", 0x08, 0x08 },
1088 { "CLRLQICRCT1", 0x10, 0x10 },
1089 { "CLRLQIATNQAS", 0x20, 0x20 }
1090};
1091
1092int
1093ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1094{
1095 return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
1096 0x50, regvalue, cur_col, wrap));
1097}
1098
1099static ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
1100 { "ENLQIATNCMD", 0x01, 0x01 }, 873 { "ENLQIATNCMD", 0x01, 0x01 },
1101 { "ENLQIATNLQ", 0x02, 0x02 }, 874 { "ENLQIATNLQ", 0x02, 0x02 },
1102 { "ENLQIBADLQT", 0x04, 0x04 }, 875 { "ENLQIBADLQT", 0x04, 0x04 },
@@ -1112,7 +885,23 @@ ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1112 0x50, regvalue, cur_col, wrap)); 885 0x50, regvalue, cur_col, wrap));
1113} 886}
1114 887
1115static ahd_reg_parse_entry_t LQIMODE1_parse_table[] = { 888static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
889 { "CLRLQIATNCMD", 0x01, 0x01 },
890 { "CLRLQIATNLQ", 0x02, 0x02 },
891 { "CLRLQIBADLQT", 0x04, 0x04 },
892 { "CLRLQICRCT2", 0x08, 0x08 },
893 { "CLRLQICRCT1", 0x10, 0x10 },
894 { "CLRLQIATNQAS", 0x20, 0x20 }
895};
896
897int
898ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
899{
900 return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
901 0x50, regvalue, cur_col, wrap));
902}
903
904static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
1116 { "ENLQIOVERI_NLQ", 0x01, 0x01 }, 905 { "ENLQIOVERI_NLQ", 0x01, 0x01 },
1117 { "ENLQIOVERI_LQ", 0x02, 0x02 }, 906 { "ENLQIOVERI_LQ", 0x02, 0x02 },
1118 { "ENLQIBADLQI", 0x04, 0x04 }, 907 { "ENLQIBADLQI", 0x04, 0x04 },
@@ -1130,7 +919,7 @@ ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1130 0x51, regvalue, cur_col, wrap)); 919 0x51, regvalue, cur_col, wrap));
1131} 920}
1132 921
1133static ahd_reg_parse_entry_t LQISTAT1_parse_table[] = { 922static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
1134 { "LQIOVERI_NLQ", 0x01, 0x01 }, 923 { "LQIOVERI_NLQ", 0x01, 0x01 },
1135 { "LQIOVERI_LQ", 0x02, 0x02 }, 924 { "LQIOVERI_LQ", 0x02, 0x02 },
1136 { "LQIBADLQI", 0x04, 0x04 }, 925 { "LQIBADLQI", 0x04, 0x04 },
@@ -1148,7 +937,7 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1148 0x51, regvalue, cur_col, wrap)); 937 0x51, regvalue, cur_col, wrap));
1149} 938}
1150 939
1151static ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = { 940static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
1152 { "CLRLQIOVERI_NLQ", 0x01, 0x01 }, 941 { "CLRLQIOVERI_NLQ", 0x01, 0x01 },
1153 { "CLRLQIOVERI_LQ", 0x02, 0x02 }, 942 { "CLRLQIOVERI_LQ", 0x02, 0x02 },
1154 { "CLRLQIBADLQI", 0x04, 0x04 }, 943 { "CLRLQIBADLQI", 0x04, 0x04 },
@@ -1166,7 +955,7 @@ ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1166 0x51, regvalue, cur_col, wrap)); 955 0x51, regvalue, cur_col, wrap));
1167} 956}
1168 957
1169static ahd_reg_parse_entry_t LQISTAT2_parse_table[] = { 958static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
1170 { "LQIGSAVAIL", 0x01, 0x01 }, 959 { "LQIGSAVAIL", 0x01, 0x01 },
1171 { "LQISTOPCMD", 0x02, 0x02 }, 960 { "LQISTOPCMD", 0x02, 0x02 },
1172 { "LQISTOPLQ", 0x04, 0x04 }, 961 { "LQISTOPLQ", 0x04, 0x04 },
@@ -1184,7 +973,7 @@ ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
1184 0x52, regvalue, cur_col, wrap)); 973 0x52, regvalue, cur_col, wrap));
1185} 974}
1186 975
1187static ahd_reg_parse_entry_t SSTAT3_parse_table[] = { 976static const ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
1188 { "OSRAMPERR", 0x01, 0x01 }, 977 { "OSRAMPERR", 0x01, 0x01 },
1189 { "NTRAMPERR", 0x02, 0x02 } 978 { "NTRAMPERR", 0x02, 0x02 }
1190}; 979};
@@ -1196,7 +985,7 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
1196 0x53, regvalue, cur_col, wrap)); 985 0x53, regvalue, cur_col, wrap));
1197} 986}
1198 987
1199static ahd_reg_parse_entry_t SIMODE3_parse_table[] = { 988static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
1200 { "ENOSRAMPERR", 0x01, 0x01 }, 989 { "ENOSRAMPERR", 0x01, 0x01 },
1201 { "ENNTRAMPERR", 0x02, 0x02 } 990 { "ENNTRAMPERR", 0x02, 0x02 }
1202}; 991};
@@ -1208,7 +997,7 @@ ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
1208 0x53, regvalue, cur_col, wrap)); 997 0x53, regvalue, cur_col, wrap));
1209} 998}
1210 999
1211static ahd_reg_parse_entry_t CLRSINT3_parse_table[] = { 1000static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
1212 { "CLROSRAMPERR", 0x01, 0x01 }, 1001 { "CLROSRAMPERR", 0x01, 0x01 },
1213 { "CLRNTRAMPERR", 0x02, 0x02 } 1002 { "CLRNTRAMPERR", 0x02, 0x02 }
1214}; 1003};
@@ -1220,7 +1009,7 @@ ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
1220 0x53, regvalue, cur_col, wrap)); 1009 0x53, regvalue, cur_col, wrap));
1221} 1010}
1222 1011
1223static ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = { 1012static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
1224 { "LQOTCRC", 0x01, 0x01 }, 1013 { "LQOTCRC", 0x01, 0x01 },
1225 { "LQOATNPKT", 0x02, 0x02 }, 1014 { "LQOATNPKT", 0x02, 0x02 },
1226 { "LQOATNLQ", 0x04, 0x04 }, 1015 { "LQOATNLQ", 0x04, 0x04 },
@@ -1235,7 +1024,7 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1235 0x54, regvalue, cur_col, wrap)); 1024 0x54, regvalue, cur_col, wrap));
1236} 1025}
1237 1026
1238static ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = { 1027static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
1239 { "CLRLQOTCRC", 0x01, 0x01 }, 1028 { "CLRLQOTCRC", 0x01, 0x01 },
1240 { "CLRLQOATNPKT", 0x02, 0x02 }, 1029 { "CLRLQOATNPKT", 0x02, 0x02 },
1241 { "CLRLQOATNLQ", 0x04, 0x04 }, 1030 { "CLRLQOATNLQ", 0x04, 0x04 },
@@ -1250,7 +1039,7 @@ ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1250 0x54, regvalue, cur_col, wrap)); 1039 0x54, regvalue, cur_col, wrap));
1251} 1040}
1252 1041
1253static ahd_reg_parse_entry_t LQOMODE0_parse_table[] = { 1042static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
1254 { "ENLQOTCRC", 0x01, 0x01 }, 1043 { "ENLQOTCRC", 0x01, 0x01 },
1255 { "ENLQOATNPKT", 0x02, 0x02 }, 1044 { "ENLQOATNPKT", 0x02, 0x02 },
1256 { "ENLQOATNLQ", 0x04, 0x04 }, 1045 { "ENLQOATNLQ", 0x04, 0x04 },
@@ -1265,7 +1054,7 @@ ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1265 0x54, regvalue, cur_col, wrap)); 1054 0x54, regvalue, cur_col, wrap));
1266} 1055}
1267 1056
1268static ahd_reg_parse_entry_t LQOMODE1_parse_table[] = { 1057static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
1269 { "ENLQOPHACHGINPKT", 0x01, 0x01 }, 1058 { "ENLQOPHACHGINPKT", 0x01, 0x01 },
1270 { "ENLQOBUSFREE", 0x02, 0x02 }, 1059 { "ENLQOBUSFREE", 0x02, 0x02 },
1271 { "ENLQOBADQAS", 0x04, 0x04 }, 1060 { "ENLQOBADQAS", 0x04, 0x04 },
@@ -1280,7 +1069,7 @@ ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1280 0x55, regvalue, cur_col, wrap)); 1069 0x55, regvalue, cur_col, wrap));
1281} 1070}
1282 1071
1283static ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = { 1072static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
1284 { "LQOPHACHGINPKT", 0x01, 0x01 }, 1073 { "LQOPHACHGINPKT", 0x01, 0x01 },
1285 { "LQOBUSFREE", 0x02, 0x02 }, 1074 { "LQOBUSFREE", 0x02, 0x02 },
1286 { "LQOBADQAS", 0x04, 0x04 }, 1075 { "LQOBADQAS", 0x04, 0x04 },
@@ -1295,7 +1084,7 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1295 0x55, regvalue, cur_col, wrap)); 1084 0x55, regvalue, cur_col, wrap));
1296} 1085}
1297 1086
1298static ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = { 1087static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
1299 { "CLRLQOPHACHGINPKT", 0x01, 0x01 }, 1088 { "CLRLQOPHACHGINPKT", 0x01, 0x01 },
1300 { "CLRLQOBUSFREE", 0x02, 0x02 }, 1089 { "CLRLQOBUSFREE", 0x02, 0x02 },
1301 { "CLRLQOBADQAS", 0x04, 0x04 }, 1090 { "CLRLQOBADQAS", 0x04, 0x04 },
@@ -1310,7 +1099,7 @@ ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1310 0x55, regvalue, cur_col, wrap)); 1099 0x55, regvalue, cur_col, wrap));
1311} 1100}
1312 1101
1313static ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = { 1102static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
1314 { "LQOSTOP0", 0x01, 0x01 }, 1103 { "LQOSTOP0", 0x01, 0x01 },
1315 { "LQOPHACHGOUTPKT", 0x02, 0x02 }, 1104 { "LQOPHACHGOUTPKT", 0x02, 0x02 },
1316 { "LQOWAITFIFO", 0x10, 0x10 }, 1105 { "LQOWAITFIFO", 0x10, 0x10 },
@@ -1331,7 +1120,7 @@ ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1331 0x56, regvalue, cur_col, wrap)); 1120 0x56, regvalue, cur_col, wrap));
1332} 1121}
1333 1122
1334static ahd_reg_parse_entry_t SIMODE1_parse_table[] = { 1123static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
1335 { "ENREQINIT", 0x01, 0x01 }, 1124 { "ENREQINIT", 0x01, 0x01 },
1336 { "ENSTRB2FAST", 0x02, 0x02 }, 1125 { "ENSTRB2FAST", 0x02, 0x02 },
1337 { "ENSCSIPERR", 0x04, 0x04 }, 1126 { "ENSCSIPERR", 0x04, 0x04 },
@@ -1356,7 +1145,7 @@ ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1356 0x58, regvalue, cur_col, wrap)); 1145 0x58, regvalue, cur_col, wrap));
1357} 1146}
1358 1147
1359static ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = { 1148static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
1360 { "RSTCHN", 0x01, 0x01 }, 1149 { "RSTCHN", 0x01, 0x01 },
1361 { "CLRCHN", 0x02, 0x02 }, 1150 { "CLRCHN", 0x02, 0x02 },
1362 { "CLRSHCNT", 0x04, 0x04 }, 1151 { "CLRSHCNT", 0x04, 0x04 },
@@ -1370,15 +1159,17 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1370 0x5a, regvalue, cur_col, wrap)); 1159 0x5a, regvalue, cur_col, wrap));
1371} 1160}
1372 1161
1373static ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = { 1162static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
1374 { "LQONOCHKOVER", 0x01, 0x01 }, 1163 { "LQONOCHKOVER", 0x01, 0x01 },
1164 { "LQONOHOLDLACK", 0x02, 0x02 },
1165 { "LQOBUSETDLY", 0x40, 0x40 },
1375 { "LQOH2A_VERSION", 0x80, 0x80 } 1166 { "LQOH2A_VERSION", 0x80, 0x80 }
1376}; 1167};
1377 1168
1378int 1169int
1379ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap) 1170ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1380{ 1171{
1381 return (ahd_print_register(LQOSCSCTL_parse_table, 2, "LQOSCSCTL", 1172 return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
1382 0x5a, regvalue, cur_col, wrap)); 1173 0x5a, regvalue, cur_col, wrap));
1383} 1174}
1384 1175
@@ -1389,7 +1180,7 @@ ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1389 0x5a, regvalue, cur_col, wrap)); 1180 0x5a, regvalue, cur_col, wrap));
1390} 1181}
1391 1182
1392static ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = { 1183static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
1393 { "CLRCFG4TCMD", 0x01, 0x01 }, 1184 { "CLRCFG4TCMD", 0x01, 0x01 },
1394 { "CLRCFG4ICMD", 0x02, 0x02 }, 1185 { "CLRCFG4ICMD", 0x02, 0x02 },
1395 { "CLRCFG4TSTAT", 0x04, 0x04 }, 1186 { "CLRCFG4TSTAT", 0x04, 0x04 },
@@ -1406,7 +1197,7 @@ ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
1406 0x5b, regvalue, cur_col, wrap)); 1197 0x5b, regvalue, cur_col, wrap));
1407} 1198}
1408 1199
1409static ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = { 1200static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
1410 { "CFG4TCMD", 0x01, 0x01 }, 1201 { "CFG4TCMD", 0x01, 0x01 },
1411 { "CFG4ICMD", 0x02, 0x02 }, 1202 { "CFG4ICMD", 0x02, 0x02 },
1412 { "CFG4TSTAT", 0x04, 0x04 }, 1203 { "CFG4TSTAT", 0x04, 0x04 },
@@ -1423,14 +1214,7 @@ ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
1423 0x5b, regvalue, cur_col, wrap)); 1214 0x5b, regvalue, cur_col, wrap));
1424} 1215}
1425 1216
-int
-ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-	return (ahd_print_register(NULL, 0, "CURRSCB",
-		    0x5c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
1434 { "ENCFG4TCMD", 0x01, 0x01 }, 1218 { "ENCFG4TCMD", 0x01, 0x01 },
1435 { "ENCFG4ICMD", 0x02, 0x02 }, 1219 { "ENCFG4ICMD", 0x02, 0x02 },
1436 { "ENCFG4TSTAT", 0x04, 0x04 }, 1220 { "ENCFG4TSTAT", 0x04, 0x04 },
@@ -1447,7 +1231,14 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
1447 0x5c, regvalue, cur_col, wrap)); 1231 0x5c, regvalue, cur_col, wrap));
1448} 1232}
1449 1233
-static ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
+int
+ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CURRSCB",
+		    0x5c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
1451 { "FIFOFREE", 0x01, 0x01 }, 1242 { "FIFOFREE", 0x01, 0x01 },
1452 { "DATAINFIFO", 0x02, 0x02 }, 1243 { "DATAINFIFO", 0x02, 0x02 },
1453 { "DLZERO", 0x04, 0x04 }, 1244 { "DLZERO", 0x04, 0x04 },
@@ -1464,24 +1255,6 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1464 0x5d, regvalue, cur_col, wrap)); 1255 0x5d, regvalue, cur_col, wrap));
1465} 1256}
1466 1257
1467static ahd_reg_parse_entry_t CRCCONTROL_parse_table[] = {
1468 { "CRCVALCHKEN", 0x40, 0x40 }
1469};
1470
1471int
1472ahd_crccontrol_print(u_int regvalue, u_int *cur_col, u_int wrap)
1473{
1474 return (ahd_print_register(CRCCONTROL_parse_table, 1, "CRCCONTROL",
1475 0x5d, regvalue, cur_col, wrap));
1476}
1477
1478int
1479ahd_dfftag_print(u_int regvalue, u_int *cur_col, u_int wrap)
1480{
1481 return (ahd_print_register(NULL, 0, "DFFTAG",
1482 0x5e, regvalue, cur_col, wrap));
1483}
1484
1485int 1258int
1486ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap) 1259ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1487{ 1260{
@@ -1489,31 +1262,6 @@ ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
1489 0x5e, regvalue, cur_col, wrap)); 1262 0x5e, regvalue, cur_col, wrap));
1490} 1263}
1491 1264
1492static ahd_reg_parse_entry_t SCSITEST_parse_table[] = {
1493 { "SEL_TXPLL_DEBUG", 0x04, 0x04 },
1494 { "CNTRTEST", 0x08, 0x08 }
1495};
1496
1497int
1498ahd_scsitest_print(u_int regvalue, u_int *cur_col, u_int wrap)
1499{
1500 return (ahd_print_register(SCSITEST_parse_table, 2, "SCSITEST",
1501 0x5e, regvalue, cur_col, wrap));
1502}
1503
1504static ahd_reg_parse_entry_t IOPDNCTL_parse_table[] = {
1505 { "PDN_DIFFSENSE", 0x01, 0x01 },
1506 { "PDN_IDIST", 0x04, 0x04 },
1507 { "DISABLE_OE", 0x80, 0x80 }
1508};
1509
1510int
1511ahd_iopdnctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1512{
1513 return (ahd_print_register(IOPDNCTL_parse_table, 3, "IOPDNCTL",
1514 0x5f, regvalue, cur_col, wrap));
1515}
1516
1517int 1265int
1518ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap) 1266ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1519{ 1267{
@@ -1529,13 +1277,6 @@ ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1529} 1277}
1530 1278
1531int 1279int
1532ahd_dgrpcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
1533{
1534 return (ahd_print_register(NULL, 0, "DGRPCRCI",
1535 0x60, regvalue, cur_col, wrap));
1536}
1537
1538int
1539ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap) 1280ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
1540{ 1281{
1541 return (ahd_print_register(NULL, 0, "NEGPERIOD", 1282 return (ahd_print_register(NULL, 0, "NEGPERIOD",
@@ -1543,20 +1284,13 @@ ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
1543} 1284}
1544 1285
1545int 1286int
1546ahd_packcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
1547{
1548 return (ahd_print_register(NULL, 0, "PACKCRCI",
1549 0x62, regvalue, cur_col, wrap));
1550}
1551
1552int
1553ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap) 1287ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
1554{ 1288{
1555 return (ahd_print_register(NULL, 0, "NEGOFFSET", 1289 return (ahd_print_register(NULL, 0, "NEGOFFSET",
1556 0x62, regvalue, cur_col, wrap)); 1290 0x62, regvalue, cur_col, wrap));
1557} 1291}
1558 1292
1559static ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = { 1293static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
1560 { "PPROPT_IUT", 0x01, 0x01 }, 1294 { "PPROPT_IUT", 0x01, 0x01 },
1561 { "PPROPT_DT", 0x02, 0x02 }, 1295 { "PPROPT_DT", 0x02, 0x02 },
1562 { "PPROPT_QAS", 0x04, 0x04 }, 1296 { "PPROPT_QAS", 0x04, 0x04 },
@@ -1570,7 +1304,7 @@ ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
1570 0x63, regvalue, cur_col, wrap)); 1304 0x63, regvalue, cur_col, wrap));
1571} 1305}
1572 1306
1573static ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = { 1307static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
1574 { "WIDEXFER", 0x01, 0x01 }, 1308 { "WIDEXFER", 0x01, 0x01 },
1575 { "ENAUTOATNO", 0x02, 0x02 }, 1309 { "ENAUTOATNO", 0x02, 0x02 },
1576 { "ENAUTOATNI", 0x04, 0x04 }, 1310 { "ENAUTOATNI", 0x04, 0x04 },
@@ -1601,20 +1335,21 @@ ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1601 0x66, regvalue, cur_col, wrap)); 1335 0x66, regvalue, cur_col, wrap));
1602} 1336}
1603 1337
-static ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
 	{ "LSTSGCLRDIS",	0x01, 0x01 },
 	{ "SHVALIDSTDIS",	0x02, 0x02 },
 	{ "DFFACTCLR",		0x04, 0x04 },
 	{ "SDONEMSKDIS",	0x08, 0x08 },
 	{ "WIDERESEN",		0x10, 0x10 },
 	{ "CURRFIFODEF",	0x20, 0x20 },
-	{ "STSELSKIDDIS",	0x40, 0x40 }
+	{ "STSELSKIDDIS",	0x40, 0x40 },
+	{ "BIDICHKDIS",		0x80, 0x80 }
 };
 
 int
 ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-	return (ahd_print_register(SCSCHKN_parse_table, 7, "SCSCHKN",
+	return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
1618 0x66, regvalue, cur_col, wrap)); 1353 0x66, regvalue, cur_col, wrap));
1619} 1354}
1620 1355
@@ -1625,23 +1360,6 @@ ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1625 0x67, regvalue, cur_col, wrap)); 1360 0x67, regvalue, cur_col, wrap));
1626} 1361}
1627 1362
1628static ahd_reg_parse_entry_t PLL960CTL0_parse_table[] = {
1629 { "PLL_ENFBM", 0x01, 0x01 },
1630 { "PLL_DLPF", 0x02, 0x02 },
1631 { "PLL_ENLPF", 0x04, 0x04 },
1632 { "PLL_ENLUD", 0x08, 0x08 },
1633 { "PLL_NS", 0x30, 0x30 },
1634 { "PLL_PWDN", 0x40, 0x40 },
1635 { "PLL_VCOSEL", 0x80, 0x80 }
1636};
1637
1638int
1639ahd_pll960ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1640{
1641 return (ahd_print_register(PLL960CTL0_parse_table, 7, "PLL960CTL0",
1642 0x68, regvalue, cur_col, wrap));
1643}
1644
1645int 1363int
1646ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap) 1364ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1647{ 1365{
@@ -1656,33 +1374,6 @@ ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1656 0x69, regvalue, cur_col, wrap)); 1374 0x69, regvalue, cur_col, wrap));
1657} 1375}
1658 1376
1659static ahd_reg_parse_entry_t PLL960CTL1_parse_table[] = {
1660 { "PLL_RST", 0x01, 0x01 },
1661 { "PLL_CNTCLR", 0x40, 0x40 },
1662 { "PLL_CNTEN", 0x80, 0x80 }
1663};
1664
1665int
1666ahd_pll960ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1667{
1668 return (ahd_print_register(PLL960CTL1_parse_table, 3, "PLL960CTL1",
1669 0x69, regvalue, cur_col, wrap));
1670}
1671
1672int
1673ahd_pll960cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1674{
1675 return (ahd_print_register(NULL, 0, "PLL960CNT0",
1676 0x6a, regvalue, cur_col, wrap));
1677}
1678
1679int
1680ahd_xsig_print(u_int regvalue, u_int *cur_col, u_int wrap)
1681{
1682 return (ahd_print_register(NULL, 0, "XSIG",
1683 0x6a, regvalue, cur_col, wrap));
1684}
1685
1686int 1377int
1687ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap) 1378ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1688{ 1379{
@@ -1690,57 +1381,6 @@ ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1690 0x6b, regvalue, cur_col, wrap)); 1381 0x6b, regvalue, cur_col, wrap));
1691} 1382}
1692 1383
1693static ahd_reg_parse_entry_t PLL400CTL0_parse_table[] = {
1694 { "PLL_ENFBM", 0x01, 0x01 },
1695 { "PLL_DLPF", 0x02, 0x02 },
1696 { "PLL_ENLPF", 0x04, 0x04 },
1697 { "PLL_ENLUD", 0x08, 0x08 },
1698 { "PLL_NS", 0x30, 0x30 },
1699 { "PLL_PWDN", 0x40, 0x40 },
1700 { "PLL_VCOSEL", 0x80, 0x80 }
1701};
1702
1703int
1704ahd_pll400ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1705{
1706 return (ahd_print_register(PLL400CTL0_parse_table, 7, "PLL400CTL0",
1707 0x6c, regvalue, cur_col, wrap));
1708}
1709
1710int
1711ahd_fairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
1712{
1713 return (ahd_print_register(NULL, 0, "FAIRNESS",
1714 0x6c, regvalue, cur_col, wrap));
1715}
1716
1717static ahd_reg_parse_entry_t PLL400CTL1_parse_table[] = {
1718 { "PLL_RST", 0x01, 0x01 },
1719 { "PLL_CNTCLR", 0x40, 0x40 },
1720 { "PLL_CNTEN", 0x80, 0x80 }
1721};
1722
1723int
1724ahd_pll400ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1725{
1726 return (ahd_print_register(PLL400CTL1_parse_table, 3, "PLL400CTL1",
1727 0x6d, regvalue, cur_col, wrap));
1728}
1729
1730int
1731ahd_unfairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
1732{
1733 return (ahd_print_register(NULL, 0, "UNFAIRNESS",
1734 0x6e, regvalue, cur_col, wrap));
1735}
1736
1737int
1738ahd_pll400cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1739{
1740 return (ahd_print_register(NULL, 0, "PLL400CNT0",
1741 0x6e, regvalue, cur_col, wrap));
1742}
1743
1744int 1384int
1745ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap) 1385ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1746{ 1386{
@@ -1748,31 +1388,6 @@ ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1748 0x70, regvalue, cur_col, wrap)); 1388 0x70, regvalue, cur_col, wrap));
1749} 1389}
1750 1390
1751static ahd_reg_parse_entry_t PLLDELAY_parse_table[] = {
1752 { "SPLIT_DROP_REQ", 0x80, 0x80 }
1753};
1754
1755int
1756ahd_plldelay_print(u_int regvalue, u_int *cur_col, u_int wrap)
1757{
1758 return (ahd_print_register(PLLDELAY_parse_table, 1, "PLLDELAY",
1759 0x70, regvalue, cur_col, wrap));
1760}
1761
1762int
1763ahd_hodmaadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1764{
1765 return (ahd_print_register(NULL, 0, "HODMAADR",
1766 0x70, regvalue, cur_col, wrap));
1767}
1768
1769int
1770ahd_hodmacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1771{
1772 return (ahd_print_register(NULL, 0, "HODMACNT",
1773 0x78, regvalue, cur_col, wrap));
1774}
1775
1776int 1391int
1777ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap) 1392ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1778{ 1393{
@@ -1781,10 +1396,10 @@ ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1781} 1396}
1782 1397
1783int 1398int
-ahd_hodmaen_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-	return (ahd_print_register(NULL, 0, "HODMAEN",
-		    0x7a, regvalue, cur_col, wrap));
+	return (ahd_print_register(NULL, 0, "SGHADDR",
+		    0x7c, regvalue, cur_col, wrap));
1788} 1403}
1789 1404
1790int 1405int
@@ -1795,10 +1410,10 @@ ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1795} 1410}
1796 1411
1797int 1412int
-ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-	return (ahd_print_register(NULL, 0, "SGHADDR",
-		    0x7c, regvalue, cur_col, wrap));
+	return (ahd_print_register(NULL, 0, "SGHCNT",
+		    0x84, regvalue, cur_col, wrap));
1802} 1417}
1803 1418
1804int 1419int
@@ -1808,14 +1423,7 @@ ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1808 0x84, regvalue, cur_col, wrap)); 1423 0x84, regvalue, cur_col, wrap));
1809} 1424}
1810 1425
-int
-ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-	return (ahd_print_register(NULL, 0, "SGHCNT",
-		    0x84, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
1819 { "WR_DFTHRSH_MIN", 0x00, 0x70 }, 1427 { "WR_DFTHRSH_MIN", 0x00, 0x70 },
1820 { "RD_DFTHRSH_MIN", 0x00, 0x07 }, 1428 { "RD_DFTHRSH_MIN", 0x00, 0x07 },
1821 { "RD_DFTHRSH_25", 0x01, 0x07 }, 1429 { "RD_DFTHRSH_25", 0x01, 0x07 },
@@ -1843,209 +1451,7 @@ ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
1843 0x88, regvalue, cur_col, wrap)); 1451 0x88, regvalue, cur_col, wrap));
1844} 1452}
1845 1453
1846int 1454static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
1847ahd_romaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1848{
1849 return (ahd_print_register(NULL, 0, "ROMADDR",
1850 0x8a, regvalue, cur_col, wrap));
1851}
1852
1853static ahd_reg_parse_entry_t ROMCNTRL_parse_table[] = {
1854 { "RDY", 0x01, 0x01 },
1855 { "REPEAT", 0x02, 0x02 },
1856 { "ROMSPD", 0x18, 0x18 },
1857 { "ROMOP", 0xe0, 0xe0 }
1858};
1859
1860int
1861ahd_romcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1862{
1863 return (ahd_print_register(ROMCNTRL_parse_table, 4, "ROMCNTRL",
1864 0x8d, regvalue, cur_col, wrap));
1865}
1866
1867int
1868ahd_romdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
1869{
1870 return (ahd_print_register(NULL, 0, "ROMDATA",
1871 0x8e, regvalue, cur_col, wrap));
1872}
1873
1874static ahd_reg_parse_entry_t CMCRXMSG0_parse_table[] = {
1875 { "CFNUM", 0x07, 0x07 },
1876 { "CDNUM", 0xf8, 0xf8 }
1877};
1878
1879int
1880ahd_cmcrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1881{
1882 return (ahd_print_register(CMCRXMSG0_parse_table, 2, "CMCRXMSG0",
1883 0x90, regvalue, cur_col, wrap));
1884}
1885
1886static ahd_reg_parse_entry_t ROENABLE_parse_table[] = {
1887 { "DCH0ROEN", 0x01, 0x01 },
1888 { "DCH1ROEN", 0x02, 0x02 },
1889 { "SGROEN", 0x04, 0x04 },
1890 { "CMCROEN", 0x08, 0x08 },
1891 { "OVLYROEN", 0x10, 0x10 },
1892 { "MSIROEN", 0x20, 0x20 }
1893};
1894
1895int
1896ahd_roenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
1897{
1898 return (ahd_print_register(ROENABLE_parse_table, 6, "ROENABLE",
1899 0x90, regvalue, cur_col, wrap));
1900}
1901
1902static ahd_reg_parse_entry_t OVLYRXMSG0_parse_table[] = {
1903 { "CFNUM", 0x07, 0x07 },
1904 { "CDNUM", 0xf8, 0xf8 }
1905};
1906
1907int
1908ahd_ovlyrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1909{
1910 return (ahd_print_register(OVLYRXMSG0_parse_table, 2, "OVLYRXMSG0",
1911 0x90, regvalue, cur_col, wrap));
1912}
1913
1914static ahd_reg_parse_entry_t DCHRXMSG0_parse_table[] = {
1915 { "CFNUM", 0x07, 0x07 },
1916 { "CDNUM", 0xf8, 0xf8 }
1917};
1918
1919int
1920ahd_dchrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
1921{
1922 return (ahd_print_register(DCHRXMSG0_parse_table, 2, "DCHRXMSG0",
1923 0x90, regvalue, cur_col, wrap));
1924}
1925
1926static ahd_reg_parse_entry_t OVLYRXMSG1_parse_table[] = {
1927 { "CBNUM", 0xff, 0xff }
1928};
1929
1930int
1931ahd_ovlyrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1932{
1933 return (ahd_print_register(OVLYRXMSG1_parse_table, 1, "OVLYRXMSG1",
1934 0x91, regvalue, cur_col, wrap));
1935}
1936
1937static ahd_reg_parse_entry_t NSENABLE_parse_table[] = {
1938 { "DCH0NSEN", 0x01, 0x01 },
1939 { "DCH1NSEN", 0x02, 0x02 },
1940 { "SGNSEN", 0x04, 0x04 },
1941 { "CMCNSEN", 0x08, 0x08 },
1942 { "OVLYNSEN", 0x10, 0x10 },
1943 { "MSINSEN", 0x20, 0x20 }
1944};
1945
1946int
1947ahd_nsenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
1948{
1949 return (ahd_print_register(NSENABLE_parse_table, 6, "NSENABLE",
1950 0x91, regvalue, cur_col, wrap));
1951}
1952
1953static ahd_reg_parse_entry_t CMCRXMSG1_parse_table[] = {
1954 { "CBNUM", 0xff, 0xff }
1955};
1956
1957int
1958ahd_cmcrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1959{
1960 return (ahd_print_register(CMCRXMSG1_parse_table, 1, "CMCRXMSG1",
1961 0x91, regvalue, cur_col, wrap));
1962}
1963
1964static ahd_reg_parse_entry_t DCHRXMSG1_parse_table[] = {
1965 { "CBNUM", 0xff, 0xff }
1966};
1967
1968int
1969ahd_dchrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1970{
1971 return (ahd_print_register(DCHRXMSG1_parse_table, 1, "DCHRXMSG1",
1972 0x91, regvalue, cur_col, wrap));
1973}
1974
1975static ahd_reg_parse_entry_t DCHRXMSG2_parse_table[] = {
1976 { "MINDEX", 0xff, 0xff }
1977};
1978
1979int
1980ahd_dchrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
1981{
1982 return (ahd_print_register(DCHRXMSG2_parse_table, 1, "DCHRXMSG2",
1983 0x92, regvalue, cur_col, wrap));
1984}
1985
1986static ahd_reg_parse_entry_t CMCRXMSG2_parse_table[] = {
1987 { "MINDEX", 0xff, 0xff }
1988};
1989
1990int
1991ahd_cmcrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
1992{
1993 return (ahd_print_register(CMCRXMSG2_parse_table, 1, "CMCRXMSG2",
1994 0x92, regvalue, cur_col, wrap));
1995}
1996
1997int
1998ahd_ost_print(u_int regvalue, u_int *cur_col, u_int wrap)
1999{
2000 return (ahd_print_register(NULL, 0, "OST",
2001 0x92, regvalue, cur_col, wrap));
2002}
2003
2004static ahd_reg_parse_entry_t OVLYRXMSG2_parse_table[] = {
2005 { "MINDEX", 0xff, 0xff }
2006};
2007
2008int
2009ahd_ovlyrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2010{
2011 return (ahd_print_register(OVLYRXMSG2_parse_table, 1, "OVLYRXMSG2",
2012 0x92, regvalue, cur_col, wrap));
2013}
2014
2015static ahd_reg_parse_entry_t DCHRXMSG3_parse_table[] = {
2016 { "MCLASS", 0x0f, 0x0f }
2017};
2018
2019int
2020ahd_dchrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
2021{
2022 return (ahd_print_register(DCHRXMSG3_parse_table, 1, "DCHRXMSG3",
2023 0x93, regvalue, cur_col, wrap));
2024}
2025
2026static ahd_reg_parse_entry_t OVLYRXMSG3_parse_table[] = {
2027 { "MCLASS", 0x0f, 0x0f }
2028};
2029
2030int
2031ahd_ovlyrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
2032{
2033 return (ahd_print_register(OVLYRXMSG3_parse_table, 1, "OVLYRXMSG3",
2034 0x93, regvalue, cur_col, wrap));
2035}
2036
2037static ahd_reg_parse_entry_t CMCRXMSG3_parse_table[] = {
2038 { "MCLASS", 0x0f, 0x0f }
2039};
2040
2041int
2042ahd_cmcrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
2043{
2044 return (ahd_print_register(CMCRXMSG3_parse_table, 1, "CMCRXMSG3",
2045 0x93, regvalue, cur_col, wrap));
2046}
2047
2048static ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
2049 { "CMPABCDIS", 0x01, 0x01 }, 1455 { "CMPABCDIS", 0x01, 0x01 },
2050 { "TSCSERREN", 0x02, 0x02 }, 1456 { "TSCSERREN", 0x02, 0x02 },
2051 { "SRSPDPEEN", 0x04, 0x04 }, 1457 { "SRSPDPEEN", 0x04, 0x04 },
@@ -2062,46 +1468,7 @@ ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2062 0x93, regvalue, cur_col, wrap)); 1468 0x93, regvalue, cur_col, wrap));
2063} 1469}
2064 1470
2065int 1471static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
2066ahd_ovlyseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2067{
2068 return (ahd_print_register(NULL, 0, "OVLYSEQBCNT",
2069 0x94, regvalue, cur_col, wrap));
2070}
2071
2072int
2073ahd_dchseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2074{
2075 return (ahd_print_register(NULL, 0, "DCHSEQBCNT",
2076 0x94, regvalue, cur_col, wrap));
2077}
2078
2079int
2080ahd_cmcseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2081{
2082 return (ahd_print_register(NULL, 0, "CMCSEQBCNT",
2083 0x94, regvalue, cur_col, wrap));
2084}
2085
2086static ahd_reg_parse_entry_t CMCSPLTSTAT0_parse_table[] = {
2087 { "RXSPLTRSP", 0x01, 0x01 },
2088 { "RXSCEMSG", 0x02, 0x02 },
2089 { "RXOVRUN", 0x04, 0x04 },
2090 { "CNTNOTCMPLT", 0x08, 0x08 },
2091 { "SCDATBUCKET", 0x10, 0x10 },
2092 { "SCADERR", 0x20, 0x20 },
2093 { "SCBCERR", 0x40, 0x40 },
2094 { "STAETERM", 0x80, 0x80 }
2095};
2096
2097int
2098ahd_cmcspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2099{
2100 return (ahd_print_register(CMCSPLTSTAT0_parse_table, 8, "CMCSPLTSTAT0",
2101 0x96, regvalue, cur_col, wrap));
2102}
2103
2104static ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
2105 { "RXSPLTRSP", 0x01, 0x01 }, 1472 { "RXSPLTRSP", 0x01, 0x01 },
2106 { "RXSCEMSG", 0x02, 0x02 }, 1473 { "RXSCEMSG", 0x02, 0x02 },
2107 { "RXOVRUN", 0x04, 0x04 }, 1474 { "RXOVRUN", 0x04, 0x04 },
@@ -2119,47 +1486,7 @@ ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2119 0x96, regvalue, cur_col, wrap)); 1486 0x96, regvalue, cur_col, wrap));
2120} 1487}
2121 1488
2122static ahd_reg_parse_entry_t OVLYSPLTSTAT0_parse_table[] = { 1489static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
2123 { "RXSPLTRSP", 0x01, 0x01 },
2124 { "RXSCEMSG", 0x02, 0x02 },
2125 { "RXOVRUN", 0x04, 0x04 },
2126 { "CNTNOTCMPLT", 0x08, 0x08 },
2127 { "SCDATBUCKET", 0x10, 0x10 },
2128 { "SCADERR", 0x20, 0x20 },
2129 { "SCBCERR", 0x40, 0x40 },
2130 { "STAETERM", 0x80, 0x80 }
2131};
2132
2133int
2134ahd_ovlyspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2135{
2136 return (ahd_print_register(OVLYSPLTSTAT0_parse_table, 8, "OVLYSPLTSTAT0",
2137 0x96, regvalue, cur_col, wrap));
2138}
2139
2140static ahd_reg_parse_entry_t CMCSPLTSTAT1_parse_table[] = {
2141 { "RXDATABUCKET", 0x01, 0x01 }
2142};
2143
2144int
2145ahd_cmcspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2146{
2147 return (ahd_print_register(CMCSPLTSTAT1_parse_table, 1, "CMCSPLTSTAT1",
2148 0x97, regvalue, cur_col, wrap));
2149}
2150
2151static ahd_reg_parse_entry_t OVLYSPLTSTAT1_parse_table[] = {
2152 { "RXDATABUCKET", 0x01, 0x01 }
2153};
2154
2155int
2156ahd_ovlyspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2157{
2158 return (ahd_print_register(OVLYSPLTSTAT1_parse_table, 1, "OVLYSPLTSTAT1",
2159 0x97, regvalue, cur_col, wrap));
2160}
2161
2162static ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
2163 { "RXDATABUCKET", 0x01, 0x01 } 1490 { "RXDATABUCKET", 0x01, 0x01 }
2164}; 1491};
2165 1492
@@ -2170,139 +1497,7 @@ ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2170 0x97, regvalue, cur_col, wrap)); 1497 0x97, regvalue, cur_col, wrap));
2171} 1498}
2172 1499
2173static ahd_reg_parse_entry_t SGRXMSG0_parse_table[] = { 1500static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
2174 { "CFNUM", 0x07, 0x07 },
2175 { "CDNUM", 0xf8, 0xf8 }
2176};
2177
2178int
2179ahd_sgrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2180{
2181 return (ahd_print_register(SGRXMSG0_parse_table, 2, "SGRXMSG0",
2182 0x98, regvalue, cur_col, wrap));
2183}
2184
2185static ahd_reg_parse_entry_t SLVSPLTOUTADR0_parse_table[] = {
2186 { "LOWER_ADDR", 0x7f, 0x7f }
2187};
2188
2189int
2190ahd_slvspltoutadr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2191{
2192 return (ahd_print_register(SLVSPLTOUTADR0_parse_table, 1, "SLVSPLTOUTADR0",
2193 0x98, regvalue, cur_col, wrap));
2194}
2195
2196static ahd_reg_parse_entry_t SGRXMSG1_parse_table[] = {
2197 { "CBNUM", 0xff, 0xff }
2198};
2199
2200int
2201ahd_sgrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2202{
2203 return (ahd_print_register(SGRXMSG1_parse_table, 1, "SGRXMSG1",
2204 0x99, regvalue, cur_col, wrap));
2205}
2206
2207static ahd_reg_parse_entry_t SLVSPLTOUTADR1_parse_table[] = {
2208 { "REQ_FNUM", 0x07, 0x07 },
2209 { "REQ_DNUM", 0xf8, 0xf8 }
2210};
2211
2212int
2213ahd_slvspltoutadr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2214{
2215 return (ahd_print_register(SLVSPLTOUTADR1_parse_table, 2, "SLVSPLTOUTADR1",
2216 0x99, regvalue, cur_col, wrap));
2217}
2218
2219static ahd_reg_parse_entry_t SGRXMSG2_parse_table[] = {
2220 { "MINDEX", 0xff, 0xff }
2221};
2222
2223int
2224ahd_sgrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2225{
2226 return (ahd_print_register(SGRXMSG2_parse_table, 1, "SGRXMSG2",
2227 0x9a, regvalue, cur_col, wrap));
2228}
2229
2230static ahd_reg_parse_entry_t SLVSPLTOUTADR2_parse_table[] = {
2231 { "REQ_BNUM", 0xff, 0xff }
2232};
2233
2234int
2235ahd_slvspltoutadr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2236{
2237 return (ahd_print_register(SLVSPLTOUTADR2_parse_table, 1, "SLVSPLTOUTADR2",
2238 0x9a, regvalue, cur_col, wrap));
2239}
2240
2241static ahd_reg_parse_entry_t SGRXMSG3_parse_table[] = {
2242 { "MCLASS", 0x0f, 0x0f }
2243};
2244
2245int
2246ahd_sgrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
2247{
2248 return (ahd_print_register(SGRXMSG3_parse_table, 1, "SGRXMSG3",
2249 0x9b, regvalue, cur_col, wrap));
2250}
2251
2252static ahd_reg_parse_entry_t SLVSPLTOUTADR3_parse_table[] = {
2253 { "RLXORD", 0x10, 0x10 },
2254 { "TAG_NUM", 0x1f, 0x1f }
2255};
2256
2257int
2258ahd_slvspltoutadr3_print(u_int regvalue, u_int *cur_col, u_int wrap)
2259{
2260 return (ahd_print_register(SLVSPLTOUTADR3_parse_table, 2, "SLVSPLTOUTADR3",
2261 0x9b, regvalue, cur_col, wrap));
2262}
2263
2264int
2265ahd_sgseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2266{
2267 return (ahd_print_register(NULL, 0, "SGSEQBCNT",
2268 0x9c, regvalue, cur_col, wrap));
2269}
2270
2271static ahd_reg_parse_entry_t SLVSPLTOUTATTR0_parse_table[] = {
2272 { "LOWER_BCNT", 0xff, 0xff }
2273};
2274
2275int
2276ahd_slvspltoutattr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2277{
2278 return (ahd_print_register(SLVSPLTOUTATTR0_parse_table, 1, "SLVSPLTOUTATTR0",
2279 0x9c, regvalue, cur_col, wrap));
2280}
2281
2282static ahd_reg_parse_entry_t SLVSPLTOUTATTR1_parse_table[] = {
2283 { "CMPLT_FNUM", 0x07, 0x07 },
2284 { "CMPLT_DNUM", 0xf8, 0xf8 }
2285};
2286
2287int
2288ahd_slvspltoutattr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2289{
2290 return (ahd_print_register(SLVSPLTOUTATTR1_parse_table, 2, "SLVSPLTOUTATTR1",
2291 0x9d, regvalue, cur_col, wrap));
2292}
2293
2294static ahd_reg_parse_entry_t SLVSPLTOUTATTR2_parse_table[] = {
2295 { "CMPLT_BNUM", 0xff, 0xff }
2296};
2297
2298int
2299ahd_slvspltoutattr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
2300{
2301 return (ahd_print_register(SLVSPLTOUTATTR2_parse_table, 1, "SLVSPLTOUTATTR2",
2302 0x9e, regvalue, cur_col, wrap));
2303}
2304
2305static ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
2306 { "RXSPLTRSP", 0x01, 0x01 }, 1501 { "RXSPLTRSP", 0x01, 0x01 },
2307 { "RXSCEMSG", 0x02, 0x02 }, 1502 { "RXSCEMSG", 0x02, 0x02 },
2308 { "RXOVRUN", 0x04, 0x04 }, 1503 { "RXOVRUN", 0x04, 0x04 },
@@ -2320,7 +1515,7 @@ ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2320 0x9e, regvalue, cur_col, wrap)); 1515 0x9e, regvalue, cur_col, wrap));
2321} 1516}
2322 1517
2323static ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = { 1518static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
2324 { "RXDATABUCKET", 0x01, 0x01 } 1519 { "RXDATABUCKET", 0x01, 0x01 }
2325}; 1520};
2326 1521
@@ -2331,19 +1526,7 @@ ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2331 0x9f, regvalue, cur_col, wrap)); 1526 0x9f, regvalue, cur_col, wrap));
2332} 1527}
2333 1528
2334static ahd_reg_parse_entry_t SFUNCT_parse_table[] = { 1529static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
2335 { "TEST_NUM", 0x0f, 0x0f },
2336 { "TEST_GROUP", 0xf0, 0xf0 }
2337};
2338
2339int
2340ahd_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
2341{
2342 return (ahd_print_register(SFUNCT_parse_table, 2, "SFUNCT",
2343 0x9f, regvalue, cur_col, wrap));
2344}
2345
2346static ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
2347 { "DPR", 0x01, 0x01 }, 1530 { "DPR", 0x01, 0x01 },
2348 { "TWATERR", 0x02, 0x02 }, 1531 { "TWATERR", 0x02, 0x02 },
2349 { "RDPERR", 0x04, 0x04 }, 1532 { "RDPERR", 0x04, 0x04 },
@@ -2368,83 +1551,6 @@ ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2368 0xa0, regvalue, cur_col, wrap)); 1551 0xa0, regvalue, cur_col, wrap));
2369} 1552}
2370 1553
2371static ahd_reg_parse_entry_t DF1PCISTAT_parse_table[] = {
2372 { "DPR", 0x01, 0x01 },
2373 { "TWATERR", 0x02, 0x02 },
2374 { "RDPERR", 0x04, 0x04 },
2375 { "SCAAPERR", 0x08, 0x08 },
2376 { "RTA", 0x10, 0x10 },
2377 { "RMA", 0x20, 0x20 },
2378 { "SSE", 0x40, 0x40 },
2379 { "DPE", 0x80, 0x80 }
2380};
2381
2382int
2383ahd_df1pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2384{
2385 return (ahd_print_register(DF1PCISTAT_parse_table, 8, "DF1PCISTAT",
2386 0xa1, regvalue, cur_col, wrap));
2387}
2388
2389static ahd_reg_parse_entry_t SGPCISTAT_parse_table[] = {
2390 { "DPR", 0x01, 0x01 },
2391 { "RDPERR", 0x04, 0x04 },
2392 { "SCAAPERR", 0x08, 0x08 },
2393 { "RTA", 0x10, 0x10 },
2394 { "RMA", 0x20, 0x20 },
2395 { "SSE", 0x40, 0x40 },
2396 { "DPE", 0x80, 0x80 }
2397};
2398
2399int
2400ahd_sgpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2401{
2402 return (ahd_print_register(SGPCISTAT_parse_table, 7, "SGPCISTAT",
2403 0xa2, regvalue, cur_col, wrap));
2404}
2405
2406int
2407ahd_reg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2408{
2409 return (ahd_print_register(NULL, 0, "REG1",
2410 0xa2, regvalue, cur_col, wrap));
2411}
2412
2413static ahd_reg_parse_entry_t CMCPCISTAT_parse_table[] = {
2414 { "DPR", 0x01, 0x01 },
2415 { "TWATERR", 0x02, 0x02 },
2416 { "RDPERR", 0x04, 0x04 },
2417 { "SCAAPERR", 0x08, 0x08 },
2418 { "RTA", 0x10, 0x10 },
2419 { "RMA", 0x20, 0x20 },
2420 { "SSE", 0x40, 0x40 },
2421 { "DPE", 0x80, 0x80 }
2422};
2423
2424int
2425ahd_cmcpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2426{
2427 return (ahd_print_register(CMCPCISTAT_parse_table, 8, "CMCPCISTAT",
2428 0xa3, regvalue, cur_col, wrap));
2429}
2430
2431static ahd_reg_parse_entry_t OVLYPCISTAT_parse_table[] = {
2432 { "DPR", 0x01, 0x01 },
2433 { "RDPERR", 0x04, 0x04 },
2434 { "SCAAPERR", 0x08, 0x08 },
2435 { "RTA", 0x10, 0x10 },
2436 { "RMA", 0x20, 0x20 },
2437 { "SSE", 0x40, 0x40 },
2438 { "DPE", 0x80, 0x80 }
2439};
2440
2441int
2442ahd_ovlypcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2443{
2444 return (ahd_print_register(OVLYPCISTAT_parse_table, 7, "OVLYPCISTAT",
2445 0xa4, regvalue, cur_col, wrap));
2446}
2447
2448int 1554int
2449ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap) 1555ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2450{ 1556{
@@ -2452,7 +1558,7 @@ ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2452 0xa4, regvalue, cur_col, wrap)); 1558 0xa4, regvalue, cur_col, wrap));
2453} 1559}
2454 1560
2455static ahd_reg_parse_entry_t SG_STATE_parse_table[] = { 1561static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
2456 { "SEGS_AVAIL", 0x01, 0x01 }, 1562 { "SEGS_AVAIL", 0x01, 0x01 },
2457 { "LOADING_NEEDED", 0x02, 0x02 }, 1563 { "LOADING_NEEDED", 0x02, 0x02 },
2458 { "FETCH_INPROG", 0x04, 0x04 } 1564 { "FETCH_INPROG", 0x04, 0x04 }
@@ -2465,23 +1571,7 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
2465 0xa6, regvalue, cur_col, wrap)); 1571 0xa6, regvalue, cur_col, wrap));
2466} 1572}
2467 1573
2468static ahd_reg_parse_entry_t MSIPCISTAT_parse_table[] = { 1574static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
2469 { "DPR", 0x01, 0x01 },
2470 { "TWATERR", 0x02, 0x02 },
2471 { "CLRPENDMSI", 0x08, 0x08 },
2472 { "RTA", 0x10, 0x10 },
2473 { "RMA", 0x20, 0x20 },
2474 { "SSE", 0x40, 0x40 }
2475};
2476
2477int
2478ahd_msipcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2479{
2480 return (ahd_print_register(MSIPCISTAT_parse_table, 6, "MSIPCISTAT",
2481 0xa6, regvalue, cur_col, wrap));
2482}
2483
2484static ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
2485 { "TWATERR", 0x02, 0x02 }, 1575 { "TWATERR", 0x02, 0x02 },
2486 { "STA", 0x08, 0x08 }, 1576 { "STA", 0x08, 0x08 },
2487 { "SSE", 0x40, 0x40 }, 1577 { "SSE", 0x40, 0x40 },
@@ -2496,27 +1586,13 @@ ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2496} 1586}
2497 1587
2498int 1588int
2499ahd_data_count_odd_print(u_int regvalue, u_int *cur_col, u_int wrap)
2500{
2501 return (ahd_print_register(NULL, 0, "DATA_COUNT_ODD",
2502 0xa7, regvalue, cur_col, wrap));
2503}
2504
2505int
2506ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap) 1589ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2507{ 1590{
2508 return (ahd_print_register(NULL, 0, "SCBPTR", 1591 return (ahd_print_register(NULL, 0, "SCBPTR",
2509 0xa8, regvalue, cur_col, wrap)); 1592 0xa8, regvalue, cur_col, wrap));
2510} 1593}
2511 1594
2512int 1595static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
2513ahd_ccscbacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2514{
2515 return (ahd_print_register(NULL, 0, "CCSCBACNT",
2516 0xab, regvalue, cur_col, wrap));
2517}
2518
2519static ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
2520 { "SCBPTR_OFF", 0x07, 0x07 }, 1596 { "SCBPTR_OFF", 0x07, 0x07 },
2521 { "SCBPTR_ADDR", 0x38, 0x38 }, 1597 { "SCBPTR_ADDR", 0x38, 0x38 },
2522 { "AUSCBPTR_EN", 0x80, 0x80 } 1598 { "AUSCBPTR_EN", 0x80, 0x80 }
@@ -2537,36 +1613,13 @@ ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2537} 1613}
2538 1614
2539int 1615int
2540ahd_ccscbadr_bk_print(u_int regvalue, u_int *cur_col, u_int wrap)
2541{
2542 return (ahd_print_register(NULL, 0, "CCSCBADR_BK",
2543 0xac, regvalue, cur_col, wrap));
2544}
2545
2546int
2547ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap) 1616ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2548{ 1617{
2549 return (ahd_print_register(NULL, 0, "CCSCBADDR", 1618 return (ahd_print_register(NULL, 0, "CCSCBADDR",
2550 0xac, regvalue, cur_col, wrap)); 1619 0xac, regvalue, cur_col, wrap));
2551} 1620}
2552 1621
2553static ahd_reg_parse_entry_t CMC_RAMBIST_parse_table[] = { 1622static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
2554 { "CMC_BUFFER_BIST_EN", 0x01, 0x01 },
2555 { "CMC_BUFFER_BIST_FAIL",0x02, 0x02 },
2556 { "SG_BIST_EN", 0x10, 0x10 },
2557 { "SG_BIST_FAIL", 0x20, 0x20 },
2558 { "SCBRAMBIST_FAIL", 0x40, 0x40 },
2559 { "SG_ELEMENT_SIZE", 0x80, 0x80 }
2560};
2561
2562int
2563ahd_cmc_rambist_print(u_int regvalue, u_int *cur_col, u_int wrap)
2564{
2565 return (ahd_print_register(CMC_RAMBIST_parse_table, 6, "CMC_RAMBIST",
2566 0xad, regvalue, cur_col, wrap));
2567}
2568
2569static ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
2570 { "CCSCBRESET", 0x01, 0x01 }, 1623 { "CCSCBRESET", 0x01, 0x01 },
2571 { "CCSCBDIR", 0x04, 0x04 }, 1624 { "CCSCBDIR", 0x04, 0x04 },
2572 { "CCSCBEN", 0x08, 0x08 }, 1625 { "CCSCBEN", 0x08, 0x08 },
@@ -2582,7 +1635,7 @@ ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2582 0xad, regvalue, cur_col, wrap)); 1635 0xad, regvalue, cur_col, wrap));
2583} 1636}
2584 1637
2585static ahd_reg_parse_entry_t CCSGCTL_parse_table[] = { 1638static const ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
2586 { "CCSGRESET", 0x01, 0x01 }, 1639 { "CCSGRESET", 0x01, 0x01 },
2587 { "SG_FETCH_REQ", 0x02, 0x02 }, 1640 { "SG_FETCH_REQ", 0x02, 0x02 },
2588 { "CCSGENACK", 0x08, 0x08 }, 1641 { "CCSGENACK", 0x08, 0x08 },
@@ -2606,13 +1659,6 @@ ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
2606} 1659}
2607 1660
2608int 1661int
2609ahd_flexadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2610{
2611 return (ahd_print_register(NULL, 0, "FLEXADR",
2612 0xb0, regvalue, cur_col, wrap));
2613}
2614
2615int
2616ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap) 1662ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
2617{ 1663{
2618 return (ahd_print_register(NULL, 0, "CCSCBRAM", 1664 return (ahd_print_register(NULL, 0, "CCSCBRAM",
@@ -2620,39 +1666,13 @@ ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
2620} 1666}
2621 1667
2622int 1668int
2623ahd_flexcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2624{
2625 return (ahd_print_register(NULL, 0, "FLEXCNT",
2626 0xb3, regvalue, cur_col, wrap));
2627}
2628
2629static ahd_reg_parse_entry_t FLEXDMASTAT_parse_table[] = {
2630 { "FLEXDMADONE", 0x01, 0x01 },
2631 { "FLEXDMAERR", 0x02, 0x02 }
2632};
2633
2634int
2635ahd_flexdmastat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2636{
2637 return (ahd_print_register(FLEXDMASTAT_parse_table, 2, "FLEXDMASTAT",
2638 0xb5, regvalue, cur_col, wrap));
2639}
2640
2641int
2642ahd_flexdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
2643{
2644 return (ahd_print_register(NULL, 0, "FLEXDATA",
2645 0xb6, regvalue, cur_col, wrap));
2646}
2647
2648int
2649ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap) 1669ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2650{ 1670{
2651 return (ahd_print_register(NULL, 0, "BRDDAT", 1671 return (ahd_print_register(NULL, 0, "BRDDAT",
2652 0xb8, regvalue, cur_col, wrap)); 1672 0xb8, regvalue, cur_col, wrap));
2653} 1673}
2654 1674
2655static ahd_reg_parse_entry_t BRDCTL_parse_table[] = { 1675static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
2656 { "BRDSTB", 0x01, 0x01 }, 1676 { "BRDSTB", 0x01, 0x01 },
2657 { "BRDRW", 0x02, 0x02 }, 1677 { "BRDRW", 0x02, 0x02 },
2658 { "BRDEN", 0x04, 0x04 }, 1678 { "BRDEN", 0x04, 0x04 },
@@ -2682,7 +1702,7 @@ ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2682 0xbc, regvalue, cur_col, wrap)); 1702 0xbc, regvalue, cur_col, wrap));
2683} 1703}
2684 1704
2685static ahd_reg_parse_entry_t SEECTL_parse_table[] = { 1705static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
2686 { "SEEOP_ERAL", 0x40, 0x70 }, 1706 { "SEEOP_ERAL", 0x40, 0x70 },
2687 { "SEEOP_WRITE", 0x50, 0x70 }, 1707 { "SEEOP_WRITE", 0x50, 0x70 },
2688 { "SEEOP_READ", 0x60, 0x70 }, 1708 { "SEEOP_READ", 0x60, 0x70 },
@@ -2702,7 +1722,7 @@ ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2702 0xbe, regvalue, cur_col, wrap)); 1722 0xbe, regvalue, cur_col, wrap));
2703} 1723}
2704 1724
2705static ahd_reg_parse_entry_t SEESTAT_parse_table[] = { 1725static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
2706 { "SEESTART", 0x01, 0x01 }, 1726 { "SEESTART", 0x01, 0x01 },
2707 { "SEEBUSY", 0x02, 0x02 }, 1727 { "SEEBUSY", 0x02, 0x02 },
2708 { "SEEARBACK", 0x04, 0x04 }, 1728 { "SEEARBACK", 0x04, 0x04 },
@@ -2718,34 +1738,7 @@ ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2718 0xbe, regvalue, cur_col, wrap)); 1738 0xbe, regvalue, cur_col, wrap));
2719} 1739}
2720 1740
2721int 1741static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
2722ahd_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2723{
2724 return (ahd_print_register(NULL, 0, "SCBCNT",
2725 0xbf, regvalue, cur_col, wrap));
2726}
2727
2728int
2729ahd_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2730{
2731 return (ahd_print_register(NULL, 0, "DFWADDR",
2732 0xc0, regvalue, cur_col, wrap));
2733}
2734
2735static ahd_reg_parse_entry_t DSPFLTRCTL_parse_table[] = {
2736 { "DSPFCNTSEL", 0x0f, 0x0f },
2737 { "EDGESENSE", 0x10, 0x10 },
2738 { "FLTRDISABLE", 0x20, 0x20 }
2739};
2740
2741int
2742ahd_dspfltrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2743{
2744 return (ahd_print_register(DSPFLTRCTL_parse_table, 3, "DSPFLTRCTL",
2745 0xc0, regvalue, cur_col, wrap));
2746}
2747
2748static ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
2749 { "XMITOFFSTDIS", 0x02, 0x02 }, 1742 { "XMITOFFSTDIS", 0x02, 0x02 },
2750 { "RCVROFFSTDIS", 0x04, 0x04 }, 1743 { "RCVROFFSTDIS", 0x04, 0x04 },
2751 { "DESQDIS", 0x10, 0x10 }, 1744 { "DESQDIS", 0x10, 0x10 },
@@ -2760,44 +1753,13 @@ ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2760} 1753}
2761 1754
2762int 1755int
2763ahd_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2764{
2765 return (ahd_print_register(NULL, 0, "DFRADDR",
2766 0xc2, regvalue, cur_col, wrap));
2767}
2768
2769static ahd_reg_parse_entry_t DSPREQCTL_parse_table[] = {
2770 { "MANREQDLY", 0x3f, 0x3f },
2771 { "MANREQCTL", 0xc0, 0xc0 }
2772};
2773
2774int
2775ahd_dspreqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2776{
2777 return (ahd_print_register(DSPREQCTL_parse_table, 2, "DSPREQCTL",
2778 0xc2, regvalue, cur_col, wrap));
2779}
2780
2781static ahd_reg_parse_entry_t DSPACKCTL_parse_table[] = {
2782 { "MANACKDLY", 0x3f, 0x3f },
2783 { "MANACKCTL", 0xc0, 0xc0 }
2784};
2785
2786int
2787ahd_dspackctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2788{
2789 return (ahd_print_register(DSPACKCTL_parse_table, 2, "DSPACKCTL",
2790 0xc3, regvalue, cur_col, wrap));
2791}
2792
2793int
2794ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap) 1756ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
2795{ 1757{
2796 return (ahd_print_register(NULL, 0, "DFDAT", 1758 return (ahd_print_register(NULL, 0, "DFDAT",
2797 0xc4, regvalue, cur_col, wrap)); 1759 0xc4, regvalue, cur_col, wrap));
2798} 1760}
2799 1761
2800static ahd_reg_parse_entry_t DSPSELECT_parse_table[] = { 1762static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
2801 { "DSPSEL", 0x1f, 0x1f }, 1763 { "DSPSEL", 0x1f, 0x1f },
2802 { "AUTOINCEN", 0x80, 0x80 } 1764 { "AUTOINCEN", 0x80, 0x80 }
2803}; 1765};
@@ -2809,7 +1771,7 @@ ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
2809 0xc4, regvalue, cur_col, wrap)); 1771 0xc4, regvalue, cur_col, wrap));
2810} 1772}
2811 1773
2812static ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = { 1774static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
2813 { "XMITMANVAL", 0x3f, 0x3f }, 1775 { "XMITMANVAL", 0x3f, 0x3f },
2814 { "AUTOXBCDIS", 0x80, 0x80 } 1776 { "AUTOXBCDIS", 0x80, 0x80 }
2815}; 1777};
@@ -2821,91 +1783,7 @@ ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2821 0xc5, regvalue, cur_col, wrap)); 1783 0xc5, regvalue, cur_col, wrap));
2822} 1784}
2823 1785
2824static ahd_reg_parse_entry_t RCVRBIOSCTL_parse_table[] = { 1786static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
2825 { "RCVRMANVAL", 0x3f, 0x3f },
2826 { "AUTORBCDIS", 0x80, 0x80 }
2827};
2828
2829int
2830ahd_rcvrbiosctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2831{
2832 return (ahd_print_register(RCVRBIOSCTL_parse_table, 2, "RCVRBIOSCTL",
2833 0xc6, regvalue, cur_col, wrap));
2834}
2835
2836int
2837ahd_wrtbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
2838{
2839 return (ahd_print_register(NULL, 0, "WRTBIASCALC",
2840 0xc7, regvalue, cur_col, wrap));
2841}
2842
2843int
2844ahd_rcvrbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
2845{
2846 return (ahd_print_register(NULL, 0, "RCVRBIASCALC",
2847 0xc8, regvalue, cur_col, wrap));
2848}
2849
2850int
2851ahd_dfptrs_print(u_int regvalue, u_int *cur_col, u_int wrap)
2852{
2853 return (ahd_print_register(NULL, 0, "DFPTRS",
2854 0xc8, regvalue, cur_col, wrap));
2855}
2856
2857int
2858ahd_skewcalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
2859{
2860 return (ahd_print_register(NULL, 0, "SKEWCALC",
2861 0xc9, regvalue, cur_col, wrap));
2862}
2863
2864int
2865ahd_dfbkptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2866{
2867 return (ahd_print_register(NULL, 0, "DFBKPTR",
2868 0xc9, regvalue, cur_col, wrap));
2869}
2870
2871static ahd_reg_parse_entry_t DFDBCTL_parse_table[] = {
2872 { "DFF_RAMBIST_EN", 0x01, 0x01 },
2873 { "DFF_RAMBIST_DONE", 0x02, 0x02 },
2874 { "DFF_RAMBIST_FAIL", 0x04, 0x04 },
2875 { "DFF_DIR_ERR", 0x08, 0x08 },
2876 { "DFF_CIO_RD_RDY", 0x10, 0x10 },
2877 { "DFF_CIO_WR_RDY", 0x20, 0x20 }
2878};
2879
2880int
2881ahd_dfdbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
2882{
2883 return (ahd_print_register(DFDBCTL_parse_table, 6, "DFDBCTL",
2884 0xcb, regvalue, cur_col, wrap));
2885}
2886
2887int
2888ahd_dfscnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2889{
2890 return (ahd_print_register(NULL, 0, "DFSCNT",
2891 0xcc, regvalue, cur_col, wrap));
2892}
2893
2894int
2895ahd_dfbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
2896{
2897 return (ahd_print_register(NULL, 0, "DFBCNT",
2898 0xce, regvalue, cur_col, wrap));
2899}
2900
2901int
2902ahd_ovlyaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
2903{
2904 return (ahd_print_register(NULL, 0, "OVLYADDR",
2905 0xd4, regvalue, cur_col, wrap));
2906}
2907
2908static ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
2909 { "LOADRAM", 0x01, 0x01 }, 1787 { "LOADRAM", 0x01, 0x01 },
2910 { "SEQRESET", 0x02, 0x02 }, 1788 { "SEQRESET", 0x02, 0x02 },
2911 { "STEP", 0x04, 0x04 }, 1789 { "STEP", 0x04, 0x04 },
@@ -2923,21 +1801,7 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
2923 0xd6, regvalue, cur_col, wrap)); 1801 0xd6, regvalue, cur_col, wrap));
2924} 1802}
2925 1803
2926static ahd_reg_parse_entry_t SEQCTL1_parse_table[] = { 1804static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
2927 { "RAMBIST_EN", 0x01, 0x01 },
2928 { "RAMBIST_FAIL", 0x02, 0x02 },
2929 { "RAMBIST_DONE", 0x04, 0x04 },
2930 { "OVRLAY_DATA_CHK", 0x08, 0x08 }
2931};
2932
2933int
2934ahd_seqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
2935{
2936 return (ahd_print_register(SEQCTL1_parse_table, 4, "SEQCTL1",
2937 0xd7, regvalue, cur_col, wrap));
2938}
2939
2940static ahd_reg_parse_entry_t FLAGS_parse_table[] = {
2941 { "CARRY", 0x01, 0x01 }, 1805 { "CARRY", 0x01, 0x01 },
2942 { "ZERO", 0x02, 0x02 } 1806 { "ZERO", 0x02, 0x02 }
2943}; 1807};
@@ -2949,7 +1813,7 @@ ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
2949 0xd8, regvalue, cur_col, wrap)); 1813 0xd8, regvalue, cur_col, wrap));
2950} 1814}
2951 1815
2952static ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = { 1816static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
2953 { "IRET", 0x01, 0x01 }, 1817 { "IRET", 0x01, 0x01 },
2954 { "INTMASK1", 0x02, 0x02 }, 1818 { "INTMASK1", 0x02, 0x02 },
2955 { "INTMASK2", 0x04, 0x04 }, 1819 { "INTMASK2", 0x04, 0x04 },
@@ -3002,24 +1866,6 @@ ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
3002} 1866}
3003 1867
3004int 1868int
3005ahd_brkaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
3006{
3007 return (ahd_print_register(NULL, 0, "BRKADDR0",
3008 0xe6, regvalue, cur_col, wrap));
3009}
3010
3011static ahd_reg_parse_entry_t BRKADDR1_parse_table[] = {
3012 { "BRKDIS", 0x80, 0x80 }
3013};
3014
3015int
3016ahd_brkaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
3017{
3018 return (ahd_print_register(BRKADDR1_parse_table, 1, "BRKADDR1",
3019 0xe6, regvalue, cur_col, wrap));
3020}
3021
3022int
3023ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap) 1869ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
3024{ 1870{
3025 return (ahd_print_register(NULL, 0, "ALLONES", 1871 return (ahd_print_register(NULL, 0, "ALLONES",
@@ -3055,13 +1901,6 @@ ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
3055} 1901}
3056 1902
3057int 1903int
3058ahd_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
3059{
3060 return (ahd_print_register(NULL, 0, "FUNCTION1",
3061 0xf0, regvalue, cur_col, wrap));
3062}
3063
3064int
3065ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap) 1904ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
3066{ 1905{
3067 return (ahd_print_register(NULL, 0, "STACK", 1906 return (ahd_print_register(NULL, 0, "STACK",
@@ -3083,13 +1922,6 @@ ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3083} 1922}
3084 1923
3085int 1924int
3086ahd_lastaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3087{
3088 return (ahd_print_register(NULL, 0, "LASTADDR",
3089 0xf6, regvalue, cur_col, wrap));
3090}
3091
3092int
3093ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap) 1925ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3094{ 1926{
3095 return (ahd_print_register(NULL, 0, "INTVEC2_ADDR", 1927 return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
@@ -3111,23 +1943,16 @@ ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
3111} 1943}
3112 1944
3113int 1945int
-ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-	return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
-		    0x100, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ahd_pci_config_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-	return (ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE",
+	return (ahd_print_register(NULL, 0, "SRAM_BASE",
 		    0x100, regvalue, cur_col, wrap));
 }
 
 int
-ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-	return (ahd_print_register(NULL, 0, "SRAM_BASE",
+	return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
 		    0x100, regvalue, cur_col, wrap));
 }
3133 1958
@@ -3215,7 +2040,7 @@ ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
3215 0x137, regvalue, cur_col, wrap)); 2040 0x137, regvalue, cur_col, wrap));
3216} 2041}
3217 2042
3218static ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = { 2043static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
3219 { "FIFORESET", 0x01, 0x01 }, 2044 { "FIFORESET", 0x01, 0x01 },
3220 { "FIFOFLUSH", 0x02, 0x02 }, 2045 { "FIFOFLUSH", 0x02, 0x02 },
3221 { "DIRECTION", 0x04, 0x04 }, 2046 { "DIRECTION", 0x04, 0x04 },
@@ -3235,7 +2060,7 @@ ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
3235 0x138, regvalue, cur_col, wrap)); 2060 0x138, regvalue, cur_col, wrap));
3236} 2061}
3237 2062
3238static ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 2063static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
3239 { "NO_DISCONNECT", 0x01, 0x01 }, 2064 { "NO_DISCONNECT", 0x01, 0x01 },
3240 { "SPHASE_PENDING", 0x02, 0x02 }, 2065 { "SPHASE_PENDING", 0x02, 0x02 },
3241 { "DPHASE_PENDING", 0x04, 0x04 }, 2066 { "DPHASE_PENDING", 0x04, 0x04 },
@@ -3268,7 +2093,7 @@ ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
3268 0x13b, regvalue, cur_col, wrap)); 2093 0x13b, regvalue, cur_col, wrap));
3269} 2094}
3270 2095
3271static ahd_reg_parse_entry_t LASTPHASE_parse_table[] = { 2096static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
3272 { "P_DATAOUT", 0x00, 0xe0 }, 2097 { "P_DATAOUT", 0x00, 0xe0 },
3273 { "P_DATAOUT_DT", 0x20, 0xe0 }, 2098 { "P_DATAOUT_DT", 0x20, 0xe0 },
3274 { "P_DATAIN", 0x40, 0xe0 }, 2099 { "P_DATAIN", 0x40, 0xe0 },
@@ -3326,7 +2151,7 @@ ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3326 0x144, regvalue, cur_col, wrap)); 2151 0x144, regvalue, cur_col, wrap));
3327} 2152}
3328 2153
3329static ahd_reg_parse_entry_t ARG_1_parse_table[] = { 2154static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
3330 { "CONT_MSG_LOOP_TARG", 0x02, 0x02 }, 2155 { "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
3331 { "CONT_MSG_LOOP_READ", 0x03, 0x03 }, 2156 { "CONT_MSG_LOOP_READ", 0x03, 0x03 },
3332 { "CONT_MSG_LOOP_WRITE",0x04, 0x04 }, 2157 { "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
@@ -3358,7 +2183,7 @@ ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
3358 0x14a, regvalue, cur_col, wrap)); 2183 0x14a, regvalue, cur_col, wrap));
3359} 2184}
3360 2185
3361static ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = { 2186static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
3362 { "ALTSTIM", 0x01, 0x01 }, 2187 { "ALTSTIM", 0x01, 0x01 },
3363 { "ENAUTOATNP", 0x02, 0x02 }, 2188 { "ENAUTOATNP", 0x02, 0x02 },
3364 { "MANUALP", 0x0c, 0x0c }, 2189 { "MANUALP", 0x0c, 0x0c },
@@ -3381,7 +2206,7 @@ ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
3381 0x14c, regvalue, cur_col, wrap)); 2206 0x14c, regvalue, cur_col, wrap));
3382} 2207}
3383 2208
3384static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = { 2209static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
3385 { "PENDING_MK_MESSAGE", 0x01, 0x01 }, 2210 { "PENDING_MK_MESSAGE", 0x01, 0x01 },
3386 { "TARGET_MSG_PENDING", 0x02, 0x02 }, 2211 { "TARGET_MSG_PENDING", 0x02, 0x02 },
3387 { "SELECTOUT_QFROZEN", 0x04, 0x04 } 2212 { "SELECTOUT_QFROZEN", 0x04, 0x04 }
@@ -3465,20 +2290,20 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
3465} 2290}
3466 2291
3467int 2292int
-ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-	return (ahd_print_register(NULL, 0, "SCB_BASE",
+	return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
 		    0x180, regvalue, cur_col, wrap));
 }
 
 int
-ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-	return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
+	return (ahd_print_register(NULL, 0, "SCB_BASE",
 		    0x180, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
3482 { "SG_LIST_NULL", 0x01, 0x01 }, 2307 { "SG_LIST_NULL", 0x01, 0x01 },
3483 { "SG_OVERRUN_RESID", 0x02, 0x02 }, 2308 { "SG_OVERRUN_RESID", 0x02, 0x02 },
3484 { "SG_ADDR_MASK", 0xf8, 0xf8 } 2309 { "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -3499,27 +2324,6 @@ ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
3499} 2324}
3500 2325
3501int 2326int
3502ahd_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
3503{
3504 return (ahd_print_register(NULL, 0, "SCB_TARGET_PHASES",
3505 0x189, regvalue, cur_col, wrap));
3506}
3507
3508int
3509ahd_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
3510{
3511 return (ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
3512 0x18a, regvalue, cur_col, wrap));
3513}
3514
3515int
3516ahd_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
3517{
3518 return (ahd_print_register(NULL, 0, "SCB_TARGET_ITAG",
3519 0x18b, regvalue, cur_col, wrap));
3520}
3521
3522int
3523ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap) 2327ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3524{ 2328{
3525 return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR", 2329 return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
@@ -3533,7 +2337,7 @@ ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
3533 0x190, regvalue, cur_col, wrap)); 2337 0x190, regvalue, cur_col, wrap));
3534} 2338}
3535 2339
3536static ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 2340static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
3537 { "SCB_TAG_TYPE", 0x03, 0x03 }, 2341 { "SCB_TAG_TYPE", 0x03, 0x03 },
3538 { "DISCONNECTED", 0x04, 0x04 }, 2342 { "DISCONNECTED", 0x04, 0x04 },
3539 { "STATUS_RCVD", 0x08, 0x08 }, 2343 { "STATUS_RCVD", 0x08, 0x08 },
@@ -3550,7 +2354,7 @@ ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
3550 0x192, regvalue, cur_col, wrap)); 2354 0x192, regvalue, cur_col, wrap));
3551} 2355}
3552 2356
3553static ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = { 2357static const ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
3554 { "OID", 0x0f, 0x0f }, 2358 { "OID", 0x0f, 0x0f },
3555 { "TID", 0xf0, 0xf0 } 2359 { "TID", 0xf0, 0xf0 }
3556}; 2360};
@@ -3562,7 +2366,7 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
3562 0x193, regvalue, cur_col, wrap)); 2366 0x193, regvalue, cur_col, wrap));
3563} 2367}
3564 2368
3565static ahd_reg_parse_entry_t SCB_LUN_parse_table[] = { 2369static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
3566 { "LID", 0xff, 0xff } 2370 { "LID", 0xff, 0xff }
3567}; 2371};
3568 2372
@@ -3573,7 +2377,7 @@ ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
3573 0x194, regvalue, cur_col, wrap)); 2377 0x194, regvalue, cur_col, wrap));
3574} 2378}
3575 2379
3576static ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = { 2380static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
3577 { "SCB_XFERLEN_ODD", 0x01, 0x01 } 2381 { "SCB_XFERLEN_ODD", 0x01, 0x01 }
3578}; 2382};
3579 2383
@@ -3584,7 +2388,7 @@ ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
3584 0x195, regvalue, cur_col, wrap)); 2388 0x195, regvalue, cur_col, wrap));
3585} 2389}
3586 2390
3587static ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = { 2391static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
3588 { "SCB_CDB_LEN_PTR", 0x80, 0x80 } 2392 { "SCB_CDB_LEN_PTR", 0x80, 0x80 }
3589}; 2393};
3590 2394
@@ -3609,7 +2413,7 @@ ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
3609 0x198, regvalue, cur_col, wrap)); 2413 0x198, regvalue, cur_col, wrap));
3610} 2414}
3611 2415
3612static ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = { 2416static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
3613 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }, 2417 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f },
3614 { "SG_LAST_SEG", 0x80, 0x80 } 2418 { "SG_LAST_SEG", 0x80, 0x80 }
3615}; 2419};
@@ -3621,7 +2425,7 @@ ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
3621 0x1a0, regvalue, cur_col, wrap)); 2425 0x1a0, regvalue, cur_col, wrap));
3622} 2426}
3623 2427
3624static ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = { 2428static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
3625 { "SG_LIST_NULL", 0x01, 0x01 }, 2429 { "SG_LIST_NULL", 0x01, 0x01 },
3626 { "SG_FULL_RESID", 0x02, 0x02 }, 2430 { "SG_FULL_RESID", 0x02, 0x02 },
3627 { "SG_STATUS_VALID", 0x04, 0x04 } 2431 { "SG_STATUS_VALID", 0x04, 0x04 }
@@ -3656,13 +2460,6 @@ ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
3656} 2460}
3657 2461
3658int 2462int
3659ahd_scb_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
3660{
3661 return (ahd_print_register(NULL, 0, "SCB_SPARE",
3662 0x1b0, regvalue, cur_col, wrap));
3663}
3664
3665int
3666ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap) 2463ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
3667{ 2464{
3668 return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS", 2465 return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
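The *_parse_table arrays above pair a bit-field name with a mask and a match value, and the generated print helpers hand them to ahd_print_register() to turn a raw register byte into a readable dump. Below is a stand-alone sketch of that table-driven decoding; struct reg_bit and decode_register() are hypothetical stand-ins, not the driver's ahd_reg_parse_entry_t or ahd_print_register(), whose exact layout and output format live in the generated sources.

#include <stdio.h>

/* Hypothetical stand-ins for the generated parse-table rows. */
struct reg_bit {
        const char    *name;
        unsigned char  mask;
        unsigned char  value;
};

static const struct reg_bit scb_control_bits[] = {
        { "SCB_TAG_TYPE", 0x03, 0x03 },
        { "DISCONNECTED", 0x04, 0x04 },
        { "STATUS_RCVD",  0x08, 0x08 },
};

/* Print the register name, its raw value and every field whose masked
 * bits match the expected value. */
static void
decode_register(const char *regname, unsigned char regvalue,
                const struct reg_bit *table, unsigned entries)
{
        const char *sep = " ";
        unsigned i;

        printf("%s[0x%02x]", regname, regvalue);
        for (i = 0; i < entries; i++) {
                if ((regvalue & table[i].mask) == table[i].value) {
                        printf("%s%s", sep, table[i].name);
                        sep = "|";
                }
        }
        printf("\n");
}

int main(void)
{
        /* 0x0c decodes as DISCONNECTED|STATUS_RCVD with the table above. */
        decode_register("SCB_CONTROL", 0x0c, scb_control_bits, 3);
        return 0;
}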
diff --git a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
index 11bed07e90b7..4b51e232392f 100644
--- a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
@@ -5,7 +5,7 @@
5 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
7 */ 7 */
8static uint8_t seqprog[] = { 8static const uint8_t seqprog[] = {
9 0xff, 0x02, 0x06, 0x78, 9 0xff, 0x02, 0x06, 0x78,
10 0x00, 0xea, 0x6e, 0x59, 10 0x00, 0xea, 0x6e, 0x59,
11 0x01, 0xea, 0x04, 0x30, 11 0x01, 0xea, 0x04, 0x30,
@@ -1027,7 +1027,7 @@ ahd_patch0_func(struct ahd_softc *ahd)
1027 return (0); 1027 return (0);
1028} 1028}
1029 1029
1030static struct patch { 1030static const struct patch {
1031 ahd_patch_func_t *patch_func; 1031 ahd_patch_func_t *patch_func;
1032 uint32_t begin :10, 1032 uint32_t begin :10,
1033 skip_instr :10, 1033 skip_instr :10,
@@ -1166,7 +1166,7 @@ static struct patch {
1166 { ahd_patch23_func, 815, 11, 1 } 1166 { ahd_patch23_func, 815, 11, 1 }
1167}; 1167};
1168 1168
1169static struct cs { 1169static const struct cs {
1170 uint16_t begin; 1170 uint16_t begin;
1171 uint16_t end; 1171 uint16_t end;
1172} critical_sections[] = { 1172} critical_sections[] = {
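seqprog[] is the raw sequencer firmware, the patch table names, for selected instruction indices, a predicate plus how many instructions and subordinate patches to drop when that predicate fails, and critical_sections[] marks ranges the host must not single-step through. The sketch below is a schematic walk over a table of the same shape; struct fw_patch and fw_instr_wanted() are hypothetical names, and the driver's real bookkeeping (ahc_check_patch() and the download loop) is more involved.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical mirror of the shipped patch-table layout. */
struct fw_patch {
        int      (*patch_func)(void);   /* non-zero: the patch applies     */
        uint32_t begin      : 10;       /* first instruction it covers     */
        uint32_t skip_instr : 10;       /* instructions dropped on reject  */
        uint32_t skip_patch : 12;       /* patches dropped on reject       */
};

/*
 * Decide whether instruction 'instr' should be downloaded; advances the
 * patch cursor and widens the skip window as a side effect.
 */
int
fw_instr_wanted(const struct fw_patch *patches, size_t npatches,
                const struct fw_patch **cur, unsigned instr,
                unsigned *skip_end)
{
        while (*cur < &patches[npatches] && (*cur)->begin == instr) {
                if ((*cur)->patch_func()) {
                        (*cur)++;                      /* keep this range  */
                } else {
                        *skip_end = instr + (*cur)->skip_instr;
                        *cur += (*cur)->skip_patch;    /* and its children */
                }
        }
        return (instr >= *skip_end);
}

A download loop would call this once per instruction index, starting with the cursor at the first patch and skip_end at zero, and copy only the instructions reported as wanted into sequencer RAM.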
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index c0344e617651..e4e651cca3e4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -736,7 +736,7 @@ struct ahc_syncrate {
736#define ST_SXFR 0x010 /* Rate Single Transition Only */ 736#define ST_SXFR 0x010 /* Rate Single Transition Only */
737#define DT_SXFR 0x040 /* Rate Double Transition Only */ 737#define DT_SXFR 0x040 /* Rate Double Transition Only */
738 uint8_t period; /* Period to send to SCSI target */ 738 uint8_t period; /* Period to send to SCSI target */
739 char *rate; 739 const char *rate;
740}; 740};
741 741
742/* Safe and valid period for async negotiations. */ 742/* Safe and valid period for async negotiations. */
@@ -1114,7 +1114,7 @@ typedef int (ahc_device_setup_t)(struct ahc_softc *);
1114struct ahc_pci_identity { 1114struct ahc_pci_identity {
1115 uint64_t full_id; 1115 uint64_t full_id;
1116 uint64_t id_mask; 1116 uint64_t id_mask;
1117 char *name; 1117 const char *name;
1118 ahc_device_setup_t *setup; 1118 ahc_device_setup_t *setup;
1119}; 1119};
1120 1120
@@ -1133,15 +1133,11 @@ extern const int ahc_num_aic7770_devs;
1133 1133
1134/*************************** Function Declarations ****************************/ 1134/*************************** Function Declarations ****************************/
1135/******************************************************************************/ 1135/******************************************************************************/
1136u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
1137void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
1138void ahc_busy_tcl(struct ahc_softc *ahc,
1139 u_int tcl, u_int busyid);
1140 1136
1141/***************************** PCI Front End *********************************/ 1137/***************************** PCI Front End *********************************/
1142struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t); 1138const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
1143int ahc_pci_config(struct ahc_softc *, 1139int ahc_pci_config(struct ahc_softc *,
1144 struct ahc_pci_identity *); 1140 const struct ahc_pci_identity *);
1145int ahc_pci_test_register_access(struct ahc_softc *); 1141int ahc_pci_test_register_access(struct ahc_softc *);
1146#ifdef CONFIG_PM 1142#ifdef CONFIG_PM
1147void ahc_pci_resume(struct ahc_softc *ahc); 1143void ahc_pci_resume(struct ahc_softc *ahc);
@@ -1155,9 +1151,6 @@ int aic7770_config(struct ahc_softc *ahc,
1155 1151
1156/************************** SCB and SCB queue management **********************/ 1152/************************** SCB and SCB queue management **********************/
1157int ahc_probe_scbs(struct ahc_softc *); 1153int ahc_probe_scbs(struct ahc_softc *);
1158void ahc_run_untagged_queues(struct ahc_softc *ahc);
1159void ahc_run_untagged_queue(struct ahc_softc *ahc,
1160 struct scb_tailq *queue);
1161void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, 1154void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
1162 struct scb *scb); 1155 struct scb *scb);
1163int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, 1156int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
@@ -1178,22 +1171,8 @@ int ahc_resume(struct ahc_softc *ahc);
1178#endif 1171#endif
1179void ahc_set_unit(struct ahc_softc *, int); 1172void ahc_set_unit(struct ahc_softc *, int);
1180void ahc_set_name(struct ahc_softc *, char *); 1173void ahc_set_name(struct ahc_softc *, char *);
1181void ahc_alloc_scbs(struct ahc_softc *ahc);
1182void ahc_free(struct ahc_softc *ahc); 1174void ahc_free(struct ahc_softc *ahc);
1183int ahc_reset(struct ahc_softc *ahc, int reinit); 1175int ahc_reset(struct ahc_softc *ahc, int reinit);
1184void ahc_shutdown(void *arg);
1185
1186/*************************** Interrupt Services *******************************/
1187void ahc_clear_intstat(struct ahc_softc *ahc);
1188void ahc_run_qoutfifo(struct ahc_softc *ahc);
1189#ifdef AHC_TARGET_MODE
1190void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
1191#endif
1192void ahc_handle_brkadrint(struct ahc_softc *ahc);
1193void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
1194void ahc_handle_scsiint(struct ahc_softc *ahc,
1195 u_int intstat);
1196void ahc_clear_critical_section(struct ahc_softc *ahc);
1197 1176
1198/***************************** Error Recovery *********************************/ 1177/***************************** Error Recovery *********************************/
1199typedef enum { 1178typedef enum {
@@ -1214,36 +1193,19 @@ int ahc_search_disc_list(struct ahc_softc *ahc, int target,
1214 char channel, int lun, u_int tag, 1193 char channel, int lun, u_int tag,
1215 int stop_on_first, int remove, 1194 int stop_on_first, int remove,
1216 int save_state); 1195 int save_state);
1217void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
1218int ahc_reset_channel(struct ahc_softc *ahc, char channel, 1196int ahc_reset_channel(struct ahc_softc *ahc, char channel,
1219 int initiate_reset); 1197 int initiate_reset);
1220int ahc_abort_scbs(struct ahc_softc *ahc, int target, 1198
1221 char channel, int lun, u_int tag,
1222 role_t role, uint32_t status);
1223void ahc_restart(struct ahc_softc *ahc);
1224void ahc_calc_residual(struct ahc_softc *ahc,
1225 struct scb *scb);
1226/*************************** Utility Functions ********************************/ 1199/*************************** Utility Functions ********************************/
1227struct ahc_phase_table_entry*
1228 ahc_lookup_phase_entry(int phase);
1229void ahc_compile_devinfo(struct ahc_devinfo *devinfo, 1200void ahc_compile_devinfo(struct ahc_devinfo *devinfo,
1230 u_int our_id, u_int target, 1201 u_int our_id, u_int target,
1231 u_int lun, char channel, 1202 u_int lun, char channel,
1232 role_t role); 1203 role_t role);
1233/************************** Transfer Negotiation ******************************/ 1204/************************** Transfer Negotiation ******************************/
1234struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1205const struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1235 u_int *ppr_options, u_int maxsync); 1206 u_int *ppr_options, u_int maxsync);
1236u_int ahc_find_period(struct ahc_softc *ahc, 1207u_int ahc_find_period(struct ahc_softc *ahc,
1237 u_int scsirate, u_int maxsync); 1208 u_int scsirate, u_int maxsync);
1238void ahc_validate_offset(struct ahc_softc *ahc,
1239 struct ahc_initiator_tinfo *tinfo,
1240 struct ahc_syncrate *syncrate,
1241 u_int *offset, int wide,
1242 role_t role);
1243void ahc_validate_width(struct ahc_softc *ahc,
1244 struct ahc_initiator_tinfo *tinfo,
1245 u_int *bus_width,
1246 role_t role);
1247/* 1209/*
1248 * Negotiation types. These are used to qualify if we should renegotiate 1210 * Negotiation types. These are used to qualify if we should renegotiate
1249 * even if our goal and current transport parameters are identical. 1211 * even if our goal and current transport parameters are identical.
@@ -1263,7 +1225,7 @@ void ahc_set_width(struct ahc_softc *ahc,
1263 u_int width, u_int type, int paused); 1225 u_int width, u_int type, int paused);
1264void ahc_set_syncrate(struct ahc_softc *ahc, 1226void ahc_set_syncrate(struct ahc_softc *ahc,
1265 struct ahc_devinfo *devinfo, 1227 struct ahc_devinfo *devinfo,
1266 struct ahc_syncrate *syncrate, 1228 const struct ahc_syncrate *syncrate,
1267 u_int period, u_int offset, 1229 u_int period, u_int offset,
1268 u_int ppr_options, 1230 u_int ppr_options,
1269 u_int type, int paused); 1231 u_int type, int paused);
@@ -1305,11 +1267,10 @@ extern uint32_t ahc_debug;
1305#define AHC_SHOW_MASKED_ERRORS 0x1000 1267#define AHC_SHOW_MASKED_ERRORS 0x1000
1306#define AHC_DEBUG_SEQUENCER 0x2000 1268#define AHC_DEBUG_SEQUENCER 0x2000
1307#endif 1269#endif
1308void ahc_print_scb(struct scb *scb);
1309void ahc_print_devinfo(struct ahc_softc *ahc, 1270void ahc_print_devinfo(struct ahc_softc *ahc,
1310 struct ahc_devinfo *dev); 1271 struct ahc_devinfo *dev);
1311void ahc_dump_card_state(struct ahc_softc *ahc); 1272void ahc_dump_card_state(struct ahc_softc *ahc);
1312int ahc_print_register(ahc_reg_parse_entry_t *table, 1273int ahc_print_register(const ahc_reg_parse_entry_t *table,
1313 u_int num_entries, 1274 u_int num_entries,
1314 const char *name, 1275 const char *name,
1315 u_int address, 1276 u_int address,
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index e196d83b93c7..0d2f763c3427 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -238,6 +238,7 @@ register SXFRCTL2 {
238register OPTIONMODE { 238register OPTIONMODE {
239 address 0x008 239 address 0x008
240 access_mode RW 240 access_mode RW
241 count 2
241 field AUTORATEEN 0x80 242 field AUTORATEEN 0x80
242 field AUTOACKEN 0x40 243 field AUTOACKEN 0x40
243 field ATNMGMNTEN 0x20 244 field ATNMGMNTEN 0x20
@@ -254,6 +255,7 @@ register TARGCRCCNT {
254 address 0x00a 255 address 0x00a
255 size 2 256 size 2
256 access_mode RW 257 access_mode RW
258 count 2
257} 259}
258 260
259/* 261/*
@@ -344,6 +346,7 @@ register SSTAT2 {
344register SSTAT3 { 346register SSTAT3 {
345 address 0x00e 347 address 0x00e
346 access_mode RO 348 access_mode RO
349 count 2
347 mask SCSICNT 0xf0 350 mask SCSICNT 0xf0
348 mask OFFCNT 0x0f 351 mask OFFCNT 0x0f
349 mask U2OFFCNT 0x7f 352 mask U2OFFCNT 0x7f
@@ -367,6 +370,7 @@ register SCSIID_ULTRA2 {
367register SIMODE0 { 370register SIMODE0 {
368 address 0x010 371 address 0x010
369 access_mode RW 372 access_mode RW
373 count 2
370 field ENSELDO 0x40 374 field ENSELDO 0x40
371 field ENSELDI 0x20 375 field ENSELDI 0x20
372 field ENSELINGO 0x10 376 field ENSELINGO 0x10
@@ -429,6 +433,7 @@ register SHADDR {
429register SELTIMER { 433register SELTIMER {
430 address 0x018 434 address 0x018
431 access_mode RW 435 access_mode RW
436 count 1
432 field STAGE6 0x20 437 field STAGE6 0x20
433 field STAGE5 0x10 438 field STAGE5 0x10
434 field STAGE4 0x08 439 field STAGE4 0x08
@@ -467,6 +472,7 @@ register TARGID {
467 address 0x01b 472 address 0x01b
468 size 2 473 size 2
469 access_mode RW 474 access_mode RW
475 count 14
470} 476}
471 477
472/* 478/*
@@ -480,6 +486,7 @@ register TARGID {
480register SPIOCAP { 486register SPIOCAP {
481 address 0x01b 487 address 0x01b
482 access_mode RW 488 access_mode RW
489 count 10
483 field SOFT1 0x80 490 field SOFT1 0x80
484 field SOFT0 0x40 491 field SOFT0 0x40
485 field SOFTCMDEN 0x20 492 field SOFTCMDEN 0x20
@@ -492,6 +499,7 @@ register SPIOCAP {
492 499
493register BRDCTL { 500register BRDCTL {
494 address 0x01d 501 address 0x01d
502 count 11
495 field BRDDAT7 0x80 503 field BRDDAT7 0x80
496 field BRDDAT6 0x40 504 field BRDDAT6 0x40
497 field BRDDAT5 0x20 505 field BRDDAT5 0x20
@@ -534,6 +542,7 @@ register BRDCTL {
534 */ 542 */
535register SEECTL { 543register SEECTL {
536 address 0x01e 544 address 0x01e
545 count 11
537 field EXTARBACK 0x80 546 field EXTARBACK 0x80
538 field EXTARBREQ 0x40 547 field EXTARBREQ 0x40
539 field SEEMS 0x20 548 field SEEMS 0x20
@@ -570,6 +579,7 @@ register SBLKCTL {
570register SEQCTL { 579register SEQCTL {
571 address 0x060 580 address 0x060
572 access_mode RW 581 access_mode RW
582 count 15
573 field PERRORDIS 0x80 583 field PERRORDIS 0x80
574 field PAUSEDIS 0x40 584 field PAUSEDIS 0x40
575 field FAILDIS 0x20 585 field FAILDIS 0x20
@@ -590,6 +600,7 @@ register SEQCTL {
590register SEQRAM { 600register SEQRAM {
591 address 0x061 601 address 0x061
592 access_mode RW 602 access_mode RW
603 count 2
593} 604}
594 605
595/* 606/*
@@ -604,6 +615,7 @@ register SEQADDR0 {
604register SEQADDR1 { 615register SEQADDR1 {
605 address 0x063 616 address 0x063
606 access_mode RW 617 access_mode RW
618 count 8
607 mask SEQADDR1_MASK 0x01 619 mask SEQADDR1_MASK 0x01
608} 620}
609 621
@@ -649,6 +661,7 @@ register NONE {
649register FLAGS { 661register FLAGS {
650 address 0x06b 662 address 0x06b
651 access_mode RO 663 access_mode RO
664 count 18
652 field ZERO 0x02 665 field ZERO 0x02
653 field CARRY 0x01 666 field CARRY 0x01
654} 667}
@@ -671,6 +684,7 @@ register FUNCTION1 {
671register STACK { 684register STACK {
672 address 0x06f 685 address 0x06f
673 access_mode RO 686 access_mode RO
687 count 5
674} 688}
675 689
676const STACK_SIZE 4 690const STACK_SIZE 4
@@ -692,6 +706,7 @@ register BCTL {
692register DSCOMMAND0 { 706register DSCOMMAND0 {
693 address 0x084 707 address 0x084
694 access_mode RW 708 access_mode RW
709 count 7
695 field CACHETHEN 0x80 /* Cache Threshold enable */ 710 field CACHETHEN 0x80 /* Cache Threshold enable */
696 field DPARCKEN 0x40 /* Data Parity Check Enable */ 711 field DPARCKEN 0x40 /* Data Parity Check Enable */
697 field MPARCKEN 0x20 /* Memory Parity Check Enable */ 712 field MPARCKEN 0x20 /* Memory Parity Check Enable */
@@ -717,6 +732,7 @@ register DSCOMMAND1 {
717register BUSTIME { 732register BUSTIME {
718 address 0x085 733 address 0x085
719 access_mode RW 734 access_mode RW
735 count 2
720 mask BOFF 0xf0 736 mask BOFF 0xf0
721 mask BON 0x0f 737 mask BON 0x0f
722} 738}
@@ -727,6 +743,7 @@ register BUSTIME {
727register BUSSPD { 743register BUSSPD {
728 address 0x086 744 address 0x086
729 access_mode RW 745 access_mode RW
746 count 2
730 mask DFTHRSH 0xc0 747 mask DFTHRSH 0xc0
731 mask STBOFF 0x38 748 mask STBOFF 0x38
732 mask STBON 0x07 749 mask STBON 0x07
@@ -737,6 +754,7 @@ register BUSSPD {
737/* aic7850/55/60/70/80/95 only */ 754/* aic7850/55/60/70/80/95 only */
738register DSPCISTATUS { 755register DSPCISTATUS {
739 address 0x086 756 address 0x086
757 count 4
740 mask DFTHRSH_100 0xc0 758 mask DFTHRSH_100 0xc0
741} 759}
742 760
@@ -758,6 +776,7 @@ const SEQ_MAILBOX_SHIFT 0
758register HCNTRL { 776register HCNTRL {
759 address 0x087 777 address 0x087
760 access_mode RW 778 access_mode RW
779 count 14
761 field POWRDN 0x40 780 field POWRDN 0x40
762 field SWINT 0x10 781 field SWINT 0x10
763 field IRQMS 0x08 782 field IRQMS 0x08
@@ -869,6 +888,7 @@ register INTSTAT {
869register ERROR { 888register ERROR {
870 address 0x092 889 address 0x092
871 access_mode RO 890 access_mode RO
891 count 26
872 field CIOPARERR 0x80 /* Ultra2 only */ 892 field CIOPARERR 0x80 /* Ultra2 only */
873 field PCIERRSTAT 0x40 /* PCI only */ 893 field PCIERRSTAT 0x40 /* PCI only */
874 field MPARERR 0x20 /* PCI only */ 894 field MPARERR 0x20 /* PCI only */
@@ -885,6 +905,7 @@ register ERROR {
885register CLRINT { 905register CLRINT {
886 address 0x092 906 address 0x092
887 access_mode WO 907 access_mode WO
908 count 24
888 field CLRPARERR 0x10 /* PCI only */ 909 field CLRPARERR 0x10 /* PCI only */
889 field CLRBRKADRINT 0x08 910 field CLRBRKADRINT 0x08
890 field CLRSCSIINT 0x04 911 field CLRSCSIINT 0x04
@@ -943,6 +964,7 @@ register DFDAT {
943register SCBCNT { 964register SCBCNT {
944 address 0x09a 965 address 0x09a
945 access_mode RW 966 access_mode RW
967 count 1
946 field SCBAUTO 0x80 968 field SCBAUTO 0x80
947 mask SCBCNT_MASK 0x1f 969 mask SCBCNT_MASK 0x1f
948} 970}
@@ -954,6 +976,7 @@ register SCBCNT {
954register QINFIFO { 976register QINFIFO {
955 address 0x09b 977 address 0x09b
956 access_mode RW 978 access_mode RW
979 count 12
957} 980}
958 981
959/* 982/*
@@ -972,11 +995,13 @@ register QINCNT {
972register QOUTFIFO { 995register QOUTFIFO {
973 address 0x09d 996 address 0x09d
974 access_mode WO 997 access_mode WO
998 count 7
975} 999}
976 1000
977register CRCCONTROL1 { 1001register CRCCONTROL1 {
978 address 0x09d 1002 address 0x09d
979 access_mode RW 1003 access_mode RW
1004 count 3
980 field CRCONSEEN 0x80 1005 field CRCONSEEN 0x80
981 field CRCVALCHKEN 0x40 1006 field CRCVALCHKEN 0x40
982 field CRCENDCHKEN 0x20 1007 field CRCENDCHKEN 0x20
@@ -1013,6 +1038,7 @@ register SCSIPHASE {
1013register SFUNCT { 1038register SFUNCT {
1014 address 0x09f 1039 address 0x09f
1015 access_mode RW 1040 access_mode RW
1041 count 4
1016 field ALT_MODE 0x80 1042 field ALT_MODE 0x80
1017} 1043}
1018 1044
@@ -1095,6 +1121,7 @@ scb {
1095 } 1121 }
1096 SCB_SCSIOFFSET { 1122 SCB_SCSIOFFSET {
1097 size 1 1123 size 1
1124 count 1
1098 } 1125 }
1099 SCB_NEXT { 1126 SCB_NEXT {
1100 size 1 1127 size 1
@@ -1118,6 +1145,7 @@ const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */
1118register SEECTL_2840 { 1145register SEECTL_2840 {
1119 address 0x0c0 1146 address 0x0c0
1120 access_mode RW 1147 access_mode RW
1148 count 2
1121 field CS_2840 0x04 1149 field CS_2840 0x04
1122 field CK_2840 0x02 1150 field CK_2840 0x02
1123 field DO_2840 0x01 1151 field DO_2840 0x01
@@ -1126,6 +1154,7 @@ register SEECTL_2840 {
1126register STATUS_2840 { 1154register STATUS_2840 {
1127 address 0x0c1 1155 address 0x0c1
1128 access_mode RW 1156 access_mode RW
1157 count 4
1129 field EEPROM_TF 0x80 1158 field EEPROM_TF 0x80
1130 mask BIOS_SEL 0x60 1159 mask BIOS_SEL 0x60
1131 mask ADSEL 0x1e 1160 mask ADSEL 0x1e
@@ -1161,6 +1190,7 @@ register CCSGCTL {
1161 1190
1162register CCSCBCNT { 1191register CCSCBCNT {
1163 address 0xEF 1192 address 0xEF
1193 count 1
1164} 1194}
1165 1195
1166register CCSCBCTL { 1196register CCSCBCTL {
@@ -1187,6 +1217,7 @@ register CCSCBRAM {
1187register SCBBADDR { 1217register SCBBADDR {
1188 address 0x0F0 1218 address 0x0F0
1189 access_mode RW 1219 access_mode RW
1220 count 3
1190} 1221}
1191 1222
1192register CCSCBPTR { 1223register CCSCBPTR {
@@ -1195,6 +1226,7 @@ register CCSCBPTR {
1195 1226
1196register HNSCB_QOFF { 1227register HNSCB_QOFF {
1197 address 0x0F4 1228 address 0x0F4
1229 count 4
1198} 1230}
1199 1231
1200register SNSCB_QOFF { 1232register SNSCB_QOFF {
@@ -1234,6 +1266,7 @@ register DFF_THRSH {
1234 mask WR_DFTHRSH_85 0x50 1266 mask WR_DFTHRSH_85 0x50
1235 mask WR_DFTHRSH_90 0x60 1267 mask WR_DFTHRSH_90 0x60
1236 mask WR_DFTHRSH_MAX 0x70 1268 mask WR_DFTHRSH_MAX 0x70
1269 count 4
1237} 1270}
1238 1271
1239register SG_CACHE_PRE { 1272register SG_CACHE_PRE {
@@ -1287,6 +1320,7 @@ scratch_ram {
1287 ULTRA_ENB { 1320 ULTRA_ENB {
1288 alias CMDSIZE_TABLE 1321 alias CMDSIZE_TABLE
1289 size 2 1322 size 2
1323 count 2
1290 } 1324 }
1291 /* 1325 /*
1292 * Bit vector of targets that have disconnection disabled as set by 1326 * Bit vector of targets that have disconnection disabled as set by
@@ -1296,6 +1330,7 @@ scratch_ram {
1296 */ 1330 */
1297 DISC_DSB { 1331 DISC_DSB {
1298 size 2 1332 size 2
1333 count 6
1299 } 1334 }
1300 CMDSIZE_TABLE_TAIL { 1335 CMDSIZE_TABLE_TAIL {
1301 size 4 1336 size 4
@@ -1323,6 +1358,7 @@ scratch_ram {
1323 /* Parameters for DMA Logic */ 1358 /* Parameters for DMA Logic */
1324 DMAPARAMS { 1359 DMAPARAMS {
1325 size 1 1360 size 1
1361 count 12
1326 field PRELOADEN 0x80 1362 field PRELOADEN 0x80
1327 field WIDEODD 0x40 1363 field WIDEODD 0x40
1328 field SCSIEN 0x20 1364 field SCSIEN 0x20
@@ -1436,11 +1472,12 @@ scratch_ram {
1436 KERNEL_TQINPOS { 1472 KERNEL_TQINPOS {
1437 size 1 1473 size 1
1438 } 1474 }
1439 TQINPOS { 1475 TQINPOS {
1440 size 1 1476 size 1
1441 } 1477 }
1442 ARG_1 { 1478 ARG_1 {
1443 size 1 1479 size 1
1480 count 1
1444 mask SEND_MSG 0x80 1481 mask SEND_MSG 0x80
1445 mask SEND_SENSE 0x40 1482 mask SEND_SENSE 0x40
1446 mask SEND_REJ 0x20 1483 mask SEND_REJ 0x20
@@ -1495,6 +1532,7 @@ scratch_ram {
1495 size 1 1532 size 1
1496 field HA_274_EXTENDED_TRANS 0x01 1533 field HA_274_EXTENDED_TRANS 0x01
1497 alias INITIATOR_TAG 1534 alias INITIATOR_TAG
1535 count 1
1498 } 1536 }
1499 1537
1500 SEQ_FLAGS2 { 1538 SEQ_FLAGS2 {
@@ -1518,6 +1556,7 @@ scratch_ram {
1518 */ 1556 */
1519 SCSICONF { 1557 SCSICONF {
1520 size 1 1558 size 1
1559 count 12
1521 field TERM_ENB 0x80 1560 field TERM_ENB 0x80
1522 field RESET_SCSI 0x40 1561 field RESET_SCSI 0x40
1523 field ENSPCHK 0x20 1562 field ENSPCHK 0x20
@@ -1527,16 +1566,19 @@ scratch_ram {
1527 INTDEF { 1566 INTDEF {
1528 address 0x05c 1567 address 0x05c
1529 size 1 1568 size 1
1569 count 1
1530 field EDGE_TRIG 0x80 1570 field EDGE_TRIG 0x80
1531 mask VECTOR 0x0f 1571 mask VECTOR 0x0f
1532 } 1572 }
1533 HOSTCONF { 1573 HOSTCONF {
1534 address 0x05d 1574 address 0x05d
1535 size 1 1575 size 1
1576 count 1
1536 } 1577 }
1537 HA_274_BIOSCTRL { 1578 HA_274_BIOSCTRL {
1538 address 0x05f 1579 address 0x05f
1539 size 1 1580 size 1
1581 count 1
1540 mask BIOSMODE 0x30 1582 mask BIOSMODE 0x30
1541 mask BIOSDISABLED 0x30 1583 mask BIOSDISABLED 0x30
1542 field CHANNEL_B_PRIMARY 0x08 1584 field CHANNEL_B_PRIMARY 0x08
@@ -1552,6 +1594,7 @@ scratch_ram {
1552 */ 1594 */
1553 TARG_OFFSET { 1595 TARG_OFFSET {
1554 size 16 1596 size 16
1597 count 1
1555 } 1598 }
1556} 1599}
1557 1600
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 3cb07e114e89..dd11999b77b6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -84,16 +84,16 @@ struct seeprom_cmd {
84}; 84};
85 85
86/* Short opcodes for the c46 */ 86/* Short opcodes for the c46 */
87static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; 87static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
88static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; 88static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
89 89
90/* Long opcodes for the C56/C66 */ 90/* Long opcodes for the C56/C66 */
91static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; 91static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
92static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; 92static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
93 93
94/* Common opcodes */ 94/* Common opcodes */
95static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; 95static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
96static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; 96static const struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
97 97
98/* 98/*
99 * Wait for the SEERDY to go high; about 800 ns. 99 * Wait for the SEERDY to go high; about 800 ns.
@@ -108,7 +108,7 @@ static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
108 * Send a START condition and the given command 108 * Send a START condition and the given command
109 */ 109 */
110static void 110static void
111send_seeprom_cmd(struct seeprom_descriptor *sd, struct seeprom_cmd *cmd) 111send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd)
112{ 112{
113 uint8_t temp; 113 uint8_t temp;
114 int i = 0; 114 int i = 0;
@@ -227,7 +227,7 @@ int
227ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, 227ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
228 u_int start_addr, u_int count) 228 u_int start_addr, u_int count)
229{ 229{
230 struct seeprom_cmd *ewen, *ewds; 230 const struct seeprom_cmd *ewen, *ewds;
231 uint16_t v; 231 uint16_t v;
232 uint8_t temp; 232 uint8_t temp;
233 int i, k; 233 int i, k;
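Each opcode above is a bit count plus an array of single-bit values that send_seeprom_cmd() shifts out to the serial EEPROM one clock at a time. The fragment below is a runnable schematic of that bit-banging; the field names of struct seeprom_cmd and the seeprom_set_do()/seeprom_clock() helpers are assumptions standing in for the driver's SEECTL manipulation.

#include <stdio.h>

/* Field names are assumptions; only the initializers are shown above. */
struct seeprom_cmd {
        unsigned char len;              /* number of opcode bits          */
        unsigned char bits[11];         /* one bit per element, in order  */
};

static void seeprom_set_do(int bit) { printf("%d", bit); } /* stand-in for  */
static void seeprom_clock(void)     { }                    /* SEECTL wiggling */

static void
example_send_cmd(const struct seeprom_cmd *cmd)
{
        int i;

        for (i = 0; i < cmd->len; i++) {
                seeprom_set_do(cmd->bits[i]);   /* present the data bit */
                seeprom_clock();                /* one clock pulse      */
        }
        printf("\n");
}

int main(void)
{
        /* The short-opcode C46 "enable writes" command from the table above. */
        static const struct seeprom_cmd ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};

        example_send_cmd(&ewen);
        return 0;
}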
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64e62ce59c15..0ae2b4605d09 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -51,8 +51,7 @@
51#endif 51#endif
52 52
53/***************************** Lookup Tables **********************************/ 53/***************************** Lookup Tables **********************************/
54char *ahc_chip_names[] = 54static const char *const ahc_chip_names[] = {
55{
56 "NONE", 55 "NONE",
57 "aic7770", 56 "aic7770",
58 "aic7850", 57 "aic7850",
@@ -75,10 +74,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
75 */ 74 */
76struct ahc_hard_error_entry { 75struct ahc_hard_error_entry {
77 uint8_t errno; 76 uint8_t errno;
78 char *errmesg; 77 const char *errmesg;
79}; 78};
80 79
81static struct ahc_hard_error_entry ahc_hard_errors[] = { 80static const struct ahc_hard_error_entry ahc_hard_errors[] = {
82 { ILLHADDR, "Illegal Host Access" }, 81 { ILLHADDR, "Illegal Host Access" },
83 { ILLSADDR, "Illegal Sequencer Address referenced" }, 82 { ILLSADDR, "Illegal Sequencer Address referenced" },
84 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 83 { ILLOPCODE, "Illegal Opcode in sequencer program" },
@@ -90,7 +89,7 @@ static struct ahc_hard_error_entry ahc_hard_errors[] = {
90}; 89};
91static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors); 90static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
92 91
93static struct ahc_phase_table_entry ahc_phase_table[] = 92static const struct ahc_phase_table_entry ahc_phase_table[] =
94{ 93{
95 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 94 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
96 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 95 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -115,7 +114,7 @@ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;
115 * Provides a mapping of transfer periods in ns to the proper value to 114 * Provides a mapping of transfer periods in ns to the proper value to
116 * stick in the scsixfer reg. 115 * stick in the scsixfer reg.
117 */ 116 */
118static struct ahc_syncrate ahc_syncrates[] = 117static const struct ahc_syncrate ahc_syncrates[] =
119{ 118{
120 /* ultra2 fast/ultra period rate */ 119 /* ultra2 fast/ultra period rate */
121 { 0x42, 0x000, 9, "80.0" }, 120 { 0x42, 0x000, 9, "80.0" },
@@ -148,7 +147,7 @@ static struct ahc_tmode_tstate*
148static void ahc_free_tstate(struct ahc_softc *ahc, 147static void ahc_free_tstate(struct ahc_softc *ahc,
149 u_int scsi_id, char channel, int force); 148 u_int scsi_id, char channel, int force);
150#endif 149#endif
151static struct ahc_syncrate* 150static const struct ahc_syncrate*
152 ahc_devlimited_syncrate(struct ahc_softc *ahc, 151 ahc_devlimited_syncrate(struct ahc_softc *ahc,
153 struct ahc_initiator_tinfo *, 152 struct ahc_initiator_tinfo *,
154 u_int *period, 153 u_int *period,
@@ -204,9 +203,9 @@ static void ahc_setup_target_msgin(struct ahc_softc *ahc,
204#endif 203#endif
205 204
206static bus_dmamap_callback_t ahc_dmamap_cb; 205static bus_dmamap_callback_t ahc_dmamap_cb;
207static void ahc_build_free_scb_list(struct ahc_softc *ahc); 206static void ahc_build_free_scb_list(struct ahc_softc *ahc);
208static int ahc_init_scbdata(struct ahc_softc *ahc); 207static int ahc_init_scbdata(struct ahc_softc *ahc);
209static void ahc_fini_scbdata(struct ahc_softc *ahc); 208static void ahc_fini_scbdata(struct ahc_softc *ahc);
210static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 209static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
211 struct scb *prev_scb, 210 struct scb *prev_scb,
212 struct scb *scb); 211 struct scb *scb);
@@ -222,7 +221,7 @@ static void ahc_dumpseq(struct ahc_softc *ahc);
222#endif 221#endif
223static int ahc_loadseq(struct ahc_softc *ahc); 222static int ahc_loadseq(struct ahc_softc *ahc);
224static int ahc_check_patch(struct ahc_softc *ahc, 223static int ahc_check_patch(struct ahc_softc *ahc,
225 struct patch **start_patch, 224 const struct patch **start_patch,
226 u_int start_instr, u_int *skip_addr); 225 u_int start_instr, u_int *skip_addr);
227static void ahc_download_instr(struct ahc_softc *ahc, 226static void ahc_download_instr(struct ahc_softc *ahc,
228 u_int instrptr, uint8_t *dconsts); 227 u_int instrptr, uint8_t *dconsts);
@@ -237,11 +236,582 @@ static void ahc_update_scsiid(struct ahc_softc *ahc,
237static int ahc_handle_target_cmd(struct ahc_softc *ahc, 236static int ahc_handle_target_cmd(struct ahc_softc *ahc,
238 struct target_cmd *cmd); 237 struct target_cmd *cmd);
239#endif 238#endif
239
240static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
241static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
242static void ahc_busy_tcl(struct ahc_softc *ahc,
243 u_int tcl, u_int busyid);
244
245/************************** SCB and SCB queue management **********************/
246static void ahc_run_untagged_queues(struct ahc_softc *ahc);
247static void ahc_run_untagged_queue(struct ahc_softc *ahc,
248 struct scb_tailq *queue);
249
250/****************************** Initialization ********************************/
251static void ahc_alloc_scbs(struct ahc_softc *ahc);
252static void ahc_shutdown(void *arg);
253
254/*************************** Interrupt Services *******************************/
255static void ahc_clear_intstat(struct ahc_softc *ahc);
256static void ahc_run_qoutfifo(struct ahc_softc *ahc);
257#ifdef AHC_TARGET_MODE
258static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
259#endif
260static void ahc_handle_brkadrint(struct ahc_softc *ahc);
261static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
262static void ahc_handle_scsiint(struct ahc_softc *ahc,
263 u_int intstat);
264static void ahc_clear_critical_section(struct ahc_softc *ahc);
265
266/***************************** Error Recovery *********************************/
267static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
268static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
269 char channel, int lun, u_int tag,
270 role_t role, uint32_t status);
271static void ahc_calc_residual(struct ahc_softc *ahc,
272 struct scb *scb);
273
274/*********************** Untagged Transaction Routines ************************/
275static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
276static inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
277
278/*
279 * Block our completion routine from starting the next untagged
280 * transaction for this target or target lun.
281 */
282static inline void
283ahc_freeze_untagged_queues(struct ahc_softc *ahc)
284{
285 if ((ahc->flags & AHC_SCB_BTT) == 0)
286 ahc->untagged_queue_lock++;
287}
288
289/*
290 * Allow the next untagged transaction for this target or target lun
291 * to be executed. We use a counting semaphore to allow the lock
292 * to be acquired recursively. Once the count drops to zero, the
293 * transaction queues will be run.
294 */
295static inline void
296ahc_release_untagged_queues(struct ahc_softc *ahc)
297{
298 if ((ahc->flags & AHC_SCB_BTT) == 0) {
299 ahc->untagged_queue_lock--;
300 if (ahc->untagged_queue_lock == 0)
301 ahc_run_untagged_queues(ahc);
302 }
303}
304
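The freeze/release pair above is a recursive hold on the untagged queues: every freeze bumps untagged_queue_lock, every release drops it, and only the final release runs the queues again. A hypothetical caller bracketing queue surgery with the pair; ahc_example_requeue_untagged() and its elided body are illustrative only.

/*
 * Hypothetical caller (not in the driver): while the count is non-zero
 * the completion path will not start the next untagged transaction, so
 * the per-target list can be edited safely.
 */
static void
ahc_example_requeue_untagged(struct ahc_softc *ahc, struct scb *scb)
{
        ahc_freeze_untagged_queues(ahc);        /* count: 0 -> 1 */

        /* ... unlink/relink 'scb' on its untagged queue here ... */

        ahc_release_untagged_queues(ahc);       /* count: 1 -> 0, queues run */
}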
240/************************* Sequencer Execution Control ************************/ 305/************************* Sequencer Execution Control ************************/
241/* 306/*
242 * Restart the sequencer program from address zero 307 * Work around any chip bugs related to halting sequencer execution.
308 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
309 * reading a register that will set this signal and deassert it.
310 * Without this workaround, if the chip is paused, by an interrupt or
311 * manual pause while accessing scb ram, accesses to certain registers
312 * will hang the system (infinite pci retries).
313 */
314static void
315ahc_pause_bug_fix(struct ahc_softc *ahc)
316{
317 if ((ahc->features & AHC_ULTRA2) != 0)
318 (void)ahc_inb(ahc, CCSCBCTL);
319}
320
321/*
322 * Determine whether the sequencer has halted code execution.
323 * Returns non-zero status if the sequencer is stopped.
324 */
325int
326ahc_is_paused(struct ahc_softc *ahc)
327{
328 return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
329}
330
331/*
332 * Request that the sequencer stop and wait, indefinitely, for it
333 * to stop. The sequencer will only acknowledge that it is paused
334 * once it has reached an instruction boundary and PAUSEDIS is
335 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
336 * for critical sections.
337 */
338void
339ahc_pause(struct ahc_softc *ahc)
340{
341 ahc_outb(ahc, HCNTRL, ahc->pause);
342
343 /*
344 * Since the sequencer can disable pausing in a critical section, we
345 * must loop until it actually stops.
346 */
347 while (ahc_is_paused(ahc) == 0)
348 ;
349
350 ahc_pause_bug_fix(ahc);
351}
352
353/*
354 * Allow the sequencer to continue program execution.
355 * We check here to ensure that no additional interrupt
356 * sources that would cause the sequencer to halt have been
357 * asserted. If, for example, a SCSI bus reset is detected
358 * while we are fielding a different, pausing, interrupt type,
359 * we don't want to release the sequencer before going back
360 * into our interrupt handler and dealing with this new
361 * condition.
362 */
363void
364ahc_unpause(struct ahc_softc *ahc)
365{
366 if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
367 ahc_outb(ahc, HCNTRL, ahc->unpause);
368}
369
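ahc_pause() spins until the sequencer acknowledges the stop (and then applies the Ultra2 CIOBUS workaround), while ahc_unpause() restarts it only when no pausing interrupt is outstanding. A hypothetical helper showing the usual pause-around-register-access pattern; the SEQADDR read is just an example of work that needs the sequencer halted, and the helper itself is not part of the driver.

/*
 * Hypothetical helper: sample the sequencer program counter, pausing
 * only if the caller has not already done so.
 */
static u_int
ahc_example_read_seqaddr(struct ahc_softc *ahc)
{
        int     was_paused = ahc_is_paused(ahc);
        u_int   seqaddr;

        if (!was_paused)
                ahc_pause(ahc);         /* spins until acknowledged            */

        seqaddr = ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8);

        if (!was_paused)
                ahc_unpause(ahc);       /* declines if an interrupt is pending */

        return (seqaddr);
}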
370/************************** Memory mapping routines ***************************/
371static struct ahc_dma_seg *
372ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
373{
374 int sg_index;
375
376 sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
377 /* sg_list_phys points to entry 1, not 0 */
378 sg_index++;
379
380 return (&scb->sg_list[sg_index]);
381}
382
383static uint32_t
384ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
385{
386 int sg_index;
387
388 /* sg_list_phys points to entry 1, not 0 */
389 sg_index = sg - &scb->sg_list[1];
390
391 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
392}
393
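Both converters above rely on the convention spelled out in their comments: scb->sg_list_phys is the bus address of sg_list[1], not sg_list[0]. A hypothetical self-check that makes the resulting round-trip invariant explicit:

/*
 * Hypothetical self-check (not in the driver): for any segment after the
 * first, converting virtual -> bus -> virtual must return the original
 * pointer, since both helpers share the "entry 1" convention.
 */
static void
ahc_example_check_sg_map(struct scb *scb, u_int sg_index)
{
        struct ahc_dma_seg *sg;
        uint32_t busaddr;

        sg = &scb->sg_list[sg_index];           /* requires sg_index >= 1 */
        busaddr = ahc_sg_virt_to_bus(scb, sg);
        if (ahc_sg_bus_to_virt(scb, busaddr) != sg)
                panic("ahc: inconsistent SG bus/virtual mapping");
}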
394static uint32_t
395ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
396{
397 return (ahc->scb_data->hscb_busaddr
398 + (sizeof(struct hardware_scb) * index));
399}
400
401static void
402ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
403{
404 ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
405 ahc->scb_data->hscb_dmamap,
406 /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
407 /*len*/sizeof(*scb->hscb), op);
408}
409
410void
411ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
412{
413 if (scb->sg_count == 0)
414 return;
415
416 ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
417 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
418 * sizeof(struct ahc_dma_seg),
419 /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
420}
421
422#ifdef AHC_TARGET_MODE
423static uint32_t
424ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
425{
426 return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
427}
428#endif
429
430/*********************** Miscellaneous Support Functions ***********************/
431/*
432 * Determine whether the sequencer reported a residual
433 * for this SCB/transaction.
434 */
435static void
436ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
437{
438 uint32_t sgptr;
439
440 sgptr = ahc_le32toh(scb->hscb->sgptr);
441 if ((sgptr & SG_RESID_VALID) != 0)
442 ahc_calc_residual(ahc, scb);
443}
444
445/*
446 * Return pointers to the transfer negotiation information
447 * for the specified our_id/remote_id pair.
448 */
449struct ahc_initiator_tinfo *
450ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
451 u_int remote_id, struct ahc_tmode_tstate **tstate)
452{
453 /*
454 * Transfer data structures are stored from the perspective
455 * of the target role. Since the parameters for a connection
456 * in the initiator role to a given target are the same as
457 * when the roles are reversed, we pretend we are the target.
458 */
459 if (channel == 'B')
460 our_id += 8;
461 *tstate = ahc->enabled_targets[our_id];
462 return (&(*tstate)->transinfo[remote_id]);
463}
464
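Since negotiation data is kept from the target's point of view, a single enabled_targets[] slot per "our" SCSI ID suffices, with channel B IDs folded in at offset 8. A hypothetical lookup illustrating the indexing:

/*
 * Hypothetical lookup: negotiation data for remote ID 3 on channel B as
 * seen from our initiator ID 7.  Inside the call, 'B' adds 8 to our_id,
 * so ahc->enabled_targets[15] is the slot consulted.
 */
static struct ahc_initiator_tinfo *
ahc_example_lookup_tinfo(struct ahc_softc *ahc)
{
        struct ahc_tmode_tstate *tstate;

        return (ahc_fetch_transinfo(ahc, 'B', /*our_id*/7,
                                    /*remote_id*/3, &tstate));
}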
465uint16_t
466ahc_inw(struct ahc_softc *ahc, u_int port)
467{
468 uint16_t r = ahc_inb(ahc, port+1) << 8;
469 return r | ahc_inb(ahc, port);
470}
471
472void
473ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
474{
475 ahc_outb(ahc, port, value & 0xFF);
476 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
477}
478
479uint32_t
480ahc_inl(struct ahc_softc *ahc, u_int port)
481{
482 return ((ahc_inb(ahc, port))
483 | (ahc_inb(ahc, port+1) << 8)
484 | (ahc_inb(ahc, port+2) << 16)
485 | (ahc_inb(ahc, port+3) << 24));
486}
487
488void
489ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
490{
491 ahc_outb(ahc, port, (value) & 0xFF);
492 ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
493 ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
494 ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
495}
496
497uint64_t
498ahc_inq(struct ahc_softc *ahc, u_int port)
499{
500 return ((ahc_inb(ahc, port))
501 | (ahc_inb(ahc, port+1) << 8)
502 | (ahc_inb(ahc, port+2) << 16)
503 | (ahc_inb(ahc, port+3) << 24)
504 | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
505 | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
506 | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
507 | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
508}
509
510void
511ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
512{
513 ahc_outb(ahc, port, value & 0xFF);
514 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
515 ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
516 ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
517 ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
518 ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
519 ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
520 ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
521}
522
523/*
524 * Get a free scb. If there are none, see if we can allocate a new SCB.
525 */
526struct scb *
527ahc_get_scb(struct ahc_softc *ahc)
528{
529 struct scb *scb;
530
531 if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
532 ahc_alloc_scbs(ahc);
533 scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
534 if (scb == NULL)
535 return (NULL);
536 }
537 SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
538 return (scb);
539}
540
541/*
542 * Return an SCB resource to the free list.
543 */
544void
545ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
546{
547 struct hardware_scb *hscb;
548
549 hscb = scb->hscb;
550 /* Clean up for the next user */
551 ahc->scb_data->scbindex[hscb->tag] = NULL;
552 scb->flags = SCB_FREE;
553 hscb->control = 0;
554
555 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
556
557 /* Notify the OSM that a resource is now available. */
558 ahc_platform_scb_free(ahc, scb);
559}
560
561struct scb *
562ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
563{
564 struct scb* scb;
565
566 scb = ahc->scb_data->scbindex[tag];
567 if (scb != NULL)
568 ahc_sync_scb(ahc, scb,
569 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
570 return (scb);
571}
572
573static void
574ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
575{
576 struct hardware_scb *q_hscb;
577 u_int saved_tag;
578
579 /*
580 * Our queuing method is a bit tricky. The card
581 * knows in advance which HSCB to download, and we
582 * can't disappoint it. To achieve this, the next
583 * SCB to download is saved off in ahc->next_queued_scb.
584 * When we are called to queue "an arbitrary scb",
585 * we copy the contents of the incoming HSCB to the one
586 * the sequencer knows about, swap HSCB pointers and
587 * finally assign the SCB to the tag indexed location
588 * in the scb_array. This makes sure that we can still
589 * locate the correct SCB by SCB_TAG.
590 */
591 q_hscb = ahc->next_queued_scb->hscb;
592 saved_tag = q_hscb->tag;
593 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
594 if ((scb->flags & SCB_CDB32_PTR) != 0) {
595 q_hscb->shared_data.cdb_ptr =
596 ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
597 + offsetof(struct hardware_scb, cdb32));
598 }
599 q_hscb->tag = saved_tag;
600 q_hscb->next = scb->hscb->tag;
601
602 /* Now swap HSCB pointers. */
603 ahc->next_queued_scb->hscb = scb->hscb;
604 scb->hscb = q_hscb;
605
606 /* Now define the mapping from tag to SCB in the scbindex */
607 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
608}
609
610/*
611 * Tell the sequencer about a new transaction to execute.
243 */ 612 */
244void 613void
614ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
615{
616 ahc_swap_with_next_hscb(ahc, scb);
617
618 if (scb->hscb->tag == SCB_LIST_NULL
619 || scb->hscb->next == SCB_LIST_NULL)
620 panic("Attempt to queue invalid SCB tag %x:%x\n",
621 scb->hscb->tag, scb->hscb->next);
622
623 /*
624 * Setup data "oddness".
625 */
626 scb->hscb->lun &= LID;
627 if (ahc_get_transfer_length(scb) & 0x1)
628 scb->hscb->lun |= SCB_XFERLEN_ODD;
629
630 /*
631 * Keep a history of SCBs we've downloaded in the qinfifo.
632 */
633 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
634
635 /*
636 * Make sure our data is consistent from the
637 * perspective of the adapter.
638 */
639 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
640
641 /* Tell the adapter about the newly queued SCB */
642 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
643 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
644 } else {
645 if ((ahc->features & AHC_AUTOPAUSE) == 0)
646 ahc_pause(ahc);
647 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
648 if ((ahc->features & AHC_AUTOPAUSE) == 0)
649 ahc_unpause(ahc);
650 }
651}
652
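Together, ahc_get_scb(), ahc_queue_scb() and ahc_free_scb() form the SCB life cycle: allocate (growing the pool on demand), hand the tag to the sequencer through the qinfifo, and return the SCB to the free list once the completion path is finished with it. A deliberately skeletal submission sketch; a real path also builds the CDB, the S/G list and the transaction flags before queuing, and ahc_example_submit() is not a driver entry point.

/*
 * Skeletal illustration only; the setup step is elided.
 */
static int
ahc_example_submit(struct ahc_softc *ahc)
{
        struct scb *scb;

        scb = ahc_get_scb(ahc);         /* may allocate a fresh batch   */
        if (scb == NULL)
                return (-1);            /* out of SCBs; retry later     */

        /* ... fill in scb->hscb, scb->sg_list and scb->flags here ... */

        ahc_queue_scb(ahc, scb);        /* swap HSCBs, bump the qinfifo */
        return (0);
}

On the completion side, ahc_run_qoutfifo() looks the tag back up with ahc_lookup_scb(), and the finished SCB is eventually recycled through ahc_free_scb().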
653struct scsi_sense_data *
654ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
655{
656 int offset;
657
658 offset = scb - ahc->scb_data->scbarray;
659 return (&ahc->scb_data->sense[offset]);
660}
661
662static uint32_t
663ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
664{
665 int offset;
666
667 offset = scb - ahc->scb_data->scbarray;
668 return (ahc->scb_data->sense_busaddr
669 + (offset * sizeof(struct scsi_sense_data)));
670}
671
672/************************** Interrupt Processing ******************************/
673static void
674ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
675{
676 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
677 /*offset*/0, /*len*/256, op);
678}
679
680static void
681ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
682{
683#ifdef AHC_TARGET_MODE
684 if ((ahc->flags & AHC_TARGETROLE) != 0) {
685 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
686 ahc->shared_data_dmamap,
687 ahc_targetcmd_offset(ahc, 0),
688 sizeof(struct target_cmd) * AHC_TMODE_CMDS,
689 op);
690 }
691#endif
692}
693
694/*
695 * See if the firmware has posted any completed commands
696 * into our in-core command complete fifos.
697 */
698#define AHC_RUN_QOUTFIFO 0x1
699#define AHC_RUN_TQINFIFO 0x2
700static u_int
701ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
702{
703 u_int retval;
704
705 retval = 0;
706 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
707 /*offset*/ahc->qoutfifonext, /*len*/1,
708 BUS_DMASYNC_POSTREAD);
709 if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
710 retval |= AHC_RUN_QOUTFIFO;
711#ifdef AHC_TARGET_MODE
712 if ((ahc->flags & AHC_TARGETROLE) != 0
713 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
714 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
715 ahc->shared_data_dmamap,
716 ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
717 /*len*/sizeof(struct target_cmd),
718 BUS_DMASYNC_POSTREAD);
719 if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
720 retval |= AHC_RUN_TQINFIFO;
721 }
722#endif
723 return (retval);
724}
725
726/*
727 * Catch an interrupt from the adapter
728 */
729int
730ahc_intr(struct ahc_softc *ahc)
731{
732 u_int intstat;
733
734 if ((ahc->pause & INTEN) == 0) {
735 /*
736 * Our interrupt is not enabled on the chip
737 * and may be disabled for re-entrancy reasons,
738 * so just return. This is likely just a shared
739 * interrupt.
740 */
741 return (0);
742 }
743 /*
744 * Instead of directly reading the interrupt status register,
745 * infer the cause of the interrupt by checking our in-core
746 * completion queues. This avoids a costly PCI bus read in
747 * most cases.
748 */
749 if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
750 && (ahc_check_cmdcmpltqueues(ahc) != 0))
751 intstat = CMDCMPLT;
752 else {
753 intstat = ahc_inb(ahc, INTSTAT);
754 }
755
756 if ((intstat & INT_PEND) == 0) {
757#if AHC_PCI_CONFIG > 0
758 if (ahc->unsolicited_ints > 500) {
759 ahc->unsolicited_ints = 0;
760 if ((ahc->chip & AHC_PCI) != 0
761 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
762 ahc->bus_intr(ahc);
763 }
764#endif
765 ahc->unsolicited_ints++;
766 return (0);
767 }
768 ahc->unsolicited_ints = 0;
769
770 if (intstat & CMDCMPLT) {
771 ahc_outb(ahc, CLRINT, CLRCMDINT);
772
773 /*
774 * Ensure that the chip sees that we've cleared
775 * this interrupt before we walk the output fifo.
776 * Otherwise, we may, due to posted bus writes,
777 * clear the interrupt after we finish the scan,
778 * and after the sequencer has added new entries
779 * and asserted the interrupt again.
780 */
781 ahc_flush_device_writes(ahc);
782 ahc_run_qoutfifo(ahc);
783#ifdef AHC_TARGET_MODE
784 if ((ahc->flags & AHC_TARGETROLE) != 0)
785 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
786#endif
787 }
788
789 /*
790 * Handle statuses that may invalidate our cached
791 * copy of INTSTAT separately.
792 */
793 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
794 /* Hot eject. Do nothing */
795 } else if (intstat & BRKADRINT) {
796 ahc_handle_brkadrint(ahc);
797 } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
798
799 ahc_pause_bug_fix(ahc);
800
801 if ((intstat & SEQINT) != 0)
802 ahc_handle_seqint(ahc, intstat);
803
804 if ((intstat & SCSIINT) != 0)
805 ahc_handle_scsiint(ahc, intstat);
806 }
807 return (1);
808}
809
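ahc_intr() returns 0 when the interrupt was not ours, which matters on shared PCI interrupt lines, and it leaves locking to the platform wrapper. A rough sketch of such a wrapper on the Linux side; ahc_lock()/ahc_unlock() are assumed to be the OSM's spinlock helpers, and the in-tree handler in aic7xxx_osm.c need not match this exactly.

#include <linux/interrupt.h>

/*
 * Sketch of a shared-IRQ wrapper around ahc_intr(); simplified.
 */
static irqreturn_t
ahc_example_isr(int irq, void *dev_id)
{
        struct ahc_softc *ahc = dev_id;
        unsigned long flags;
        int ours;

        ahc_lock(ahc, &flags);
        ours = ahc_intr(ahc);           /* 0: probably a shared interrupt */
        ahc_unlock(ahc, &flags);

        return IRQ_RETVAL(ours);        /* IRQ_HANDLED iff ours != 0      */
}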
810/************************* Sequencer Execution Control ************************/
811/*
812 * Restart the sequencer program from address zero
813 */
814static void
245ahc_restart(struct ahc_softc *ahc) 815ahc_restart(struct ahc_softc *ahc)
246{ 816{
247 817
@@ -302,7 +872,7 @@ ahc_restart(struct ahc_softc *ahc)
302} 872}
303 873
304/************************* Input/Output Queues ********************************/ 874/************************* Input/Output Queues ********************************/
305void 875static void
306ahc_run_qoutfifo(struct ahc_softc *ahc) 876ahc_run_qoutfifo(struct ahc_softc *ahc)
307{ 877{
308 struct scb *scb; 878 struct scb *scb;
@@ -349,7 +919,7 @@ ahc_run_qoutfifo(struct ahc_softc *ahc)
349 } 919 }
350} 920}
351 921
352void 922static void
353ahc_run_untagged_queues(struct ahc_softc *ahc) 923ahc_run_untagged_queues(struct ahc_softc *ahc)
354{ 924{
355 int i; 925 int i;
@@ -358,7 +928,7 @@ ahc_run_untagged_queues(struct ahc_softc *ahc)
358 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 928 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
359} 929}
360 930
361void 931static void
362ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 932ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
363{ 933{
364 struct scb *scb; 934 struct scb *scb;
@@ -374,7 +944,7 @@ ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
374} 944}
375 945
376/************************* Interrupt Handling *********************************/ 946/************************* Interrupt Handling *********************************/
377void 947static void
378ahc_handle_brkadrint(struct ahc_softc *ahc) 948ahc_handle_brkadrint(struct ahc_softc *ahc)
379{ 949{
380 /* 950 /*
@@ -403,7 +973,7 @@ ahc_handle_brkadrint(struct ahc_softc *ahc)
403 ahc_shutdown(ahc); 973 ahc_shutdown(ahc);
404} 974}
405 975
406void 976static void
407ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 977ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
408{ 978{
409 struct scb *scb; 979 struct scb *scb;
@@ -954,7 +1524,7 @@ unpause:
954 ahc_unpause(ahc); 1524 ahc_unpause(ahc);
955} 1525}
956 1526
957void 1527static void
958ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 1528ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
959{ 1529{
960 u_int scb_index; 1530 u_int scb_index;
@@ -1407,7 +1977,7 @@ ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1407} 1977}
1408 1978
1409#define AHC_MAX_STEPS 2000 1979#define AHC_MAX_STEPS 2000
1410void 1980static void
1411ahc_clear_critical_section(struct ahc_softc *ahc) 1981ahc_clear_critical_section(struct ahc_softc *ahc)
1412{ 1982{
1413 int stepping; 1983 int stepping;
@@ -1500,7 +2070,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
1500/* 2070/*
1501 * Clear any pending interrupt status. 2071 * Clear any pending interrupt status.
1502 */ 2072 */
1503void 2073static void
1504ahc_clear_intstat(struct ahc_softc *ahc) 2074ahc_clear_intstat(struct ahc_softc *ahc)
1505{ 2075{
1506 /* Clear any interrupt conditions this may have caused */ 2076 /* Clear any interrupt conditions this may have caused */
@@ -1519,7 +2089,8 @@ ahc_clear_intstat(struct ahc_softc *ahc)
1519uint32_t ahc_debug = AHC_DEBUG_OPTS; 2089uint32_t ahc_debug = AHC_DEBUG_OPTS;
1520#endif 2090#endif
1521 2091
1522void 2092#if 0 /* unused */
2093static void
1523ahc_print_scb(struct scb *scb) 2094ahc_print_scb(struct scb *scb)
1524{ 2095{
1525 int i; 2096 int i;
@@ -1551,6 +2122,7 @@ ahc_print_scb(struct scb *scb)
1551 } 2122 }
1552 } 2123 }
1553} 2124}
2125#endif
1554 2126
1555/************************* Transfer Negotiation *******************************/ 2127/************************* Transfer Negotiation *******************************/
1556/* 2128/*
@@ -1634,7 +2206,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1634 * by the capabilities of the bus connectivity of and sync settings for 2206 * by the capabilities of the bus connectivity of and sync settings for
1635 * the target. 2207 * the target.
1636 */ 2208 */
1637struct ahc_syncrate * 2209const struct ahc_syncrate *
1638ahc_devlimited_syncrate(struct ahc_softc *ahc, 2210ahc_devlimited_syncrate(struct ahc_softc *ahc,
1639 struct ahc_initiator_tinfo *tinfo, 2211 struct ahc_initiator_tinfo *tinfo,
1640 u_int *period, u_int *ppr_options, role_t role) 2212 u_int *period, u_int *ppr_options, role_t role)
@@ -1689,11 +2261,11 @@ ahc_devlimited_syncrate(struct ahc_softc *ahc,
1689 * Return the period and offset that should be sent to the target 2261 * Return the period and offset that should be sent to the target
1690 * if this was the beginning of an SDTR. 2262 * if this was the beginning of an SDTR.
1691 */ 2263 */
1692struct ahc_syncrate * 2264const struct ahc_syncrate *
1693ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 2265ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1694 u_int *ppr_options, u_int maxsync) 2266 u_int *ppr_options, u_int maxsync)
1695{ 2267{
1696 struct ahc_syncrate *syncrate; 2268 const struct ahc_syncrate *syncrate;
1697 2269
1698 if ((ahc->features & AHC_DT) == 0) 2270 if ((ahc->features & AHC_DT) == 0)
1699 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 2271 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
@@ -1768,7 +2340,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1768u_int 2340u_int
1769ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 2341ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1770{ 2342{
1771 struct ahc_syncrate *syncrate; 2343 const struct ahc_syncrate *syncrate;
1772 2344
1773 if ((ahc->features & AHC_ULTRA2) != 0) 2345 if ((ahc->features & AHC_ULTRA2) != 0)
1774 scsirate &= SXFR_ULTRA2; 2346 scsirate &= SXFR_ULTRA2;
@@ -1806,10 +2378,10 @@ ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1806 * Truncate the given synchronous offset to a value the 2378 * Truncate the given synchronous offset to a value the
1807 * current adapter type and syncrate are capable of. 2379 * current adapter type and syncrate are capable of.
1808 */ 2380 */
1809void 2381static void
1810ahc_validate_offset(struct ahc_softc *ahc, 2382ahc_validate_offset(struct ahc_softc *ahc,
1811 struct ahc_initiator_tinfo *tinfo, 2383 struct ahc_initiator_tinfo *tinfo,
1812 struct ahc_syncrate *syncrate, 2384 const struct ahc_syncrate *syncrate,
1813 u_int *offset, int wide, role_t role) 2385 u_int *offset, int wide, role_t role)
1814{ 2386{
1815 u_int maxoffset; 2387 u_int maxoffset;
@@ -1838,7 +2410,7 @@ ahc_validate_offset(struct ahc_softc *ahc,
1838 * Truncate the given transfer width parameter to a value the 2410 * Truncate the given transfer width parameter to a value the
1839 * current adapter type is capable of. 2411 * current adapter type is capable of.
1840 */ 2412 */
1841void 2413static void
1842ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 2414ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1843 u_int *bus_width, role_t role) 2415 u_int *bus_width, role_t role)
1844{ 2416{
@@ -1913,7 +2485,7 @@ ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1913 */ 2485 */
1914void 2486void
1915ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2487ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1916 struct ahc_syncrate *syncrate, u_int period, 2488 const struct ahc_syncrate *syncrate, u_int period,
1917 u_int offset, u_int ppr_options, u_int type, int paused) 2489 u_int offset, u_int ppr_options, u_int type, int paused)
1918{ 2490{
1919 struct ahc_initiator_tinfo *tinfo; 2491 struct ahc_initiator_tinfo *tinfo;
@@ -2220,11 +2792,11 @@ ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2220 role); 2792 role);
2221} 2793}
2222 2794
2223struct ahc_phase_table_entry* 2795static const struct ahc_phase_table_entry*
2224ahc_lookup_phase_entry(int phase) 2796ahc_lookup_phase_entry(int phase)
2225{ 2797{
2226 struct ahc_phase_table_entry *entry; 2798 const struct ahc_phase_table_entry *entry;
2227 struct ahc_phase_table_entry *last_entry; 2799 const struct ahc_phase_table_entry *last_entry;
2228 2800
2229 /* 2801 /*
2230 * num_phases doesn't include the default entry which 2802 * num_phases doesn't include the default entry which
@@ -2390,7 +2962,7 @@ ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2390 */ 2962 */
2391 struct ahc_initiator_tinfo *tinfo; 2963 struct ahc_initiator_tinfo *tinfo;
2392 struct ahc_tmode_tstate *tstate; 2964 struct ahc_tmode_tstate *tstate;
2393 struct ahc_syncrate *rate; 2965 const struct ahc_syncrate *rate;
2394 int dowide; 2966 int dowide;
2395 int dosync; 2967 int dosync;
2396 int doppr; 2968 int doppr;
@@ -2655,7 +3227,7 @@ proto_violation_reset:
2655 */ 3227 */
2656static void 3228static void
2657ahc_handle_message_phase(struct ahc_softc *ahc) 3229ahc_handle_message_phase(struct ahc_softc *ahc)
2658{ 3230{
2659 struct ahc_devinfo devinfo; 3231 struct ahc_devinfo devinfo;
2660 u_int bus_phase; 3232 u_int bus_phase;
2661 int end_session; 3233 int end_session;
@@ -3056,7 +3628,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3056 switch (ahc->msgin_buf[2]) { 3628 switch (ahc->msgin_buf[2]) {
3057 case MSG_EXT_SDTR: 3629 case MSG_EXT_SDTR:
3058 { 3630 {
3059 struct ahc_syncrate *syncrate; 3631 const struct ahc_syncrate *syncrate;
3060 u_int period; 3632 u_int period;
3061 u_int ppr_options; 3633 u_int ppr_options;
3062 u_int offset; 3634 u_int offset;
@@ -3231,7 +3803,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3231 } 3803 }
3232 case MSG_EXT_PPR: 3804 case MSG_EXT_PPR:
3233 { 3805 {
3234 struct ahc_syncrate *syncrate; 3806 const struct ahc_syncrate *syncrate;
3235 u_int period; 3807 u_int period;
3236 u_int offset; 3808 u_int offset;
3237 u_int bus_width; 3809 u_int bus_width;
@@ -3984,7 +4556,7 @@ ahc_free(struct ahc_softc *ahc)
3984 return; 4556 return;
3985} 4557}
3986 4558
3987void 4559static void
3988ahc_shutdown(void *arg) 4560ahc_shutdown(void *arg)
3989{ 4561{
3990 struct ahc_softc *ahc; 4562 struct ahc_softc *ahc;
@@ -4388,7 +4960,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
4388 free(scb_data->scbarray, M_DEVBUF); 4960 free(scb_data->scbarray, M_DEVBUF);
4389} 4961}
4390 4962
4391void 4963static void
4392ahc_alloc_scbs(struct ahc_softc *ahc) 4964ahc_alloc_scbs(struct ahc_softc *ahc)
4393{ 4965{
4394 struct scb_data *scb_data; 4966 struct scb_data *scb_data;
@@ -5121,7 +5693,7 @@ ahc_resume(struct ahc_softc *ahc)
5121 * Return the untagged transaction id for a given target/channel lun. 5693 * Return the untagged transaction id for a given target/channel lun.
5122 * Optionally, clear the entry. 5694 * Optionally, clear the entry.
5123 */ 5695 */
5124u_int 5696static u_int
5125ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 5697ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5126{ 5698{
5127 u_int scbid; 5699 u_int scbid;
@@ -5142,7 +5714,7 @@ ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5142 return (scbid); 5714 return (scbid);
5143} 5715}
5144 5716
5145void 5717static void
5146ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 5718ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5147{ 5719{
5148 u_int target_offset; 5720 u_int target_offset;
@@ -5160,7 +5732,7 @@ ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5160 } 5732 }
5161} 5733}
5162 5734
5163void 5735static void
5164ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 5736ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
5165{ 5737{
5166 u_int target_offset; 5738 u_int target_offset;
@@ -5215,7 +5787,7 @@ ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
5215 return match; 5787 return match;
5216} 5788}
5217 5789
5218void 5790static void
5219ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 5791ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
5220{ 5792{
5221 int target; 5793 int target;
@@ -5707,7 +6279,7 @@ ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5707 */ 6279 */
5708static u_int 6280static u_int
5709ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 6281ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5710{ 6282{
5711 u_int curscb, next; 6283 u_int curscb, next;
5712 6284
5713 /* 6285 /*
@@ -5756,7 +6328,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5756 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 6328 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
5757 * is paused before it is called. 6329 * is paused before it is called.
5758 */ 6330 */
5759int 6331static int
5760ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 6332ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5761 int lun, u_int tag, role_t role, uint32_t status) 6333 int lun, u_int tag, role_t role, uint32_t status)
5762{ 6334{
@@ -6078,7 +6650,7 @@ ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
6078/* 6650/*
6079 * Calculate the residual for a just completed SCB. 6651 * Calculate the residual for a just completed SCB.
6080 */ 6652 */
6081void 6653static void
6082ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) 6654ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6083{ 6655{
6084 struct hardware_scb *hscb; 6656 struct hardware_scb *hscb;
@@ -6279,7 +6851,7 @@ ahc_loadseq(struct ahc_softc *ahc)
6279 struct cs cs_table[num_critical_sections]; 6851 struct cs cs_table[num_critical_sections];
6280 u_int begin_set[num_critical_sections]; 6852 u_int begin_set[num_critical_sections];
6281 u_int end_set[num_critical_sections]; 6853 u_int end_set[num_critical_sections];
6282 struct patch *cur_patch; 6854 const struct patch *cur_patch;
6283 u_int cs_count; 6855 u_int cs_count;
6284 u_int cur_cs; 6856 u_int cur_cs;
6285 u_int i; 6857 u_int i;
@@ -6384,11 +6956,11 @@ ahc_loadseq(struct ahc_softc *ahc)
6384} 6956}
6385 6957
6386static int 6958static int
6387ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, 6959ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
6388 u_int start_instr, u_int *skip_addr) 6960 u_int start_instr, u_int *skip_addr)
6389{ 6961{
6390 struct patch *cur_patch; 6962 const struct patch *cur_patch;
6391 struct patch *last_patch; 6963 const struct patch *last_patch;
6392 u_int num_patches; 6964 u_int num_patches;
6393 6965
6394 num_patches = ARRAY_SIZE(patches); 6966 num_patches = ARRAY_SIZE(patches);
@@ -6447,7 +7019,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6447 case AIC_OP_JE: 7019 case AIC_OP_JE:
6448 case AIC_OP_JZ: 7020 case AIC_OP_JZ:
6449 { 7021 {
6450 struct patch *cur_patch; 7022 const struct patch *cur_patch;
6451 int address_offset; 7023 int address_offset;
6452 u_int address; 7024 u_int address;
6453 u_int skip_addr; 7025 u_int skip_addr;
@@ -6545,7 +7117,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6545} 7117}
6546 7118
6547int 7119int
6548ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 7120ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
6549 const char *name, u_int address, u_int value, 7121 const char *name, u_int address, u_int value,
6550 u_int *cur_column, u_int wrap_point) 7122 u_int *cur_column, u_int wrap_point)
6551{ 7123{
@@ -7229,7 +7801,7 @@ ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
7229 ahc_outb(ahc, SCSIID, scsiid); 7801 ahc_outb(ahc, SCSIID, scsiid);
7230} 7802}
7231 7803
7232void 7804static void
7233ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 7805ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
7234{ 7806{
7235 struct target_cmd *cmd; 7807 struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
index cba2f23bbe79..09bf2f4d78d5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_inline.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -46,179 +46,13 @@
46#define _AIC7XXX_INLINE_H_ 46#define _AIC7XXX_INLINE_H_
47 47
48/************************* Sequencer Execution Control ************************/ 48/************************* Sequencer Execution Control ************************/
49static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc); 49int ahc_is_paused(struct ahc_softc *ahc);
50static __inline int ahc_is_paused(struct ahc_softc *ahc); 50void ahc_pause(struct ahc_softc *ahc);
51static __inline void ahc_pause(struct ahc_softc *ahc); 51void ahc_unpause(struct ahc_softc *ahc);
52static __inline void ahc_unpause(struct ahc_softc *ahc);
53
54/*
55 * Work around any chip bugs related to halting sequencer execution.
56 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
57 * reading a register that will set this signal and deassert it.
58 * Without this workaround, if the chip is paused, by an interrupt or
59 * manual pause while accessing scb ram, accesses to certain registers
60 * will hang the system (infinite pci retries).
61 */
62static __inline void
63ahc_pause_bug_fix(struct ahc_softc *ahc)
64{
65 if ((ahc->features & AHC_ULTRA2) != 0)
66 (void)ahc_inb(ahc, CCSCBCTL);
67}
68
69/*
70 * Determine whether the sequencer has halted code execution.
71 * Returns non-zero status if the sequencer is stopped.
72 */
73static __inline int
74ahc_is_paused(struct ahc_softc *ahc)
75{
76 return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
77}
78
79/*
80 * Request that the sequencer stop and wait, indefinitely, for it
81 * to stop. The sequencer will only acknowledge that it is paused
82 * once it has reached an instruction boundary and PAUSEDIS is
83 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
84 * for critical sections.
85 */
86static __inline void
87ahc_pause(struct ahc_softc *ahc)
88{
89 ahc_outb(ahc, HCNTRL, ahc->pause);
90
91 /*
92 * Since the sequencer can disable pausing in a critical section, we
93 * must loop until it actually stops.
94 */
95 while (ahc_is_paused(ahc) == 0)
96 ;
97
98 ahc_pause_bug_fix(ahc);
99}
100
101/*
102 * Allow the sequencer to continue program execution.
103 * We check here to ensure that no additional interrupt
104 * sources that would cause the sequencer to halt have been
105 * asserted. If, for example, a SCSI bus reset is detected
106 * while we are fielding a different, pausing, interrupt type,
107 * we don't want to release the sequencer before going back
108 * into our interrupt handler and dealing with this new
109 * condition.
110 */
111static __inline void
112ahc_unpause(struct ahc_softc *ahc)
113{
114 if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
115 ahc_outb(ahc, HCNTRL, ahc->unpause);
116}
117
118/*********************** Untagged Transaction Routines ************************/
119static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
120static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
121
122/*
123 * Block our completion routine from starting the next untagged
124 * transaction for this target or target lun.
125 */
126static __inline void
127ahc_freeze_untagged_queues(struct ahc_softc *ahc)
128{
129 if ((ahc->flags & AHC_SCB_BTT) == 0)
130 ahc->untagged_queue_lock++;
131}
132
133/*
134 * Allow the next untagged transaction for this target or target lun
135 * to be executed. We use a counting semaphore to allow the lock
136 * to be acquired recursively. Once the count drops to zero, the
137 * transaction queues will be run.
138 */
139static __inline void
140ahc_release_untagged_queues(struct ahc_softc *ahc)
141{
142 if ((ahc->flags & AHC_SCB_BTT) == 0) {
143 ahc->untagged_queue_lock--;
144 if (ahc->untagged_queue_lock == 0)
145 ahc_run_untagged_queues(ahc);
146 }
147}
148 52
149/************************** Memory mapping routines ***************************/ 53/************************** Memory mapping routines ***************************/
150static __inline struct ahc_dma_seg * 54void ahc_sync_sglist(struct ahc_softc *ahc,
151 ahc_sg_bus_to_virt(struct scb *scb, 55 struct scb *scb, int op);
152 uint32_t sg_busaddr);
153static __inline uint32_t
154 ahc_sg_virt_to_bus(struct scb *scb,
155 struct ahc_dma_seg *sg);
156static __inline uint32_t
157 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
158static __inline void ahc_sync_scb(struct ahc_softc *ahc,
159 struct scb *scb, int op);
160static __inline void ahc_sync_sglist(struct ahc_softc *ahc,
161 struct scb *scb, int op);
162static __inline uint32_t
163 ahc_targetcmd_offset(struct ahc_softc *ahc,
164 u_int index);
165
166static __inline struct ahc_dma_seg *
167ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
168{
169 int sg_index;
170
171 sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
172 /* sg_list_phys points to entry 1, not 0 */
173 sg_index++;
174
175 return (&scb->sg_list[sg_index]);
176}
177
178static __inline uint32_t
179ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
180{
181 int sg_index;
182
183 /* sg_list_phys points to entry 1, not 0 */
184 sg_index = sg - &scb->sg_list[1];
185
186 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
187}
188
189static __inline uint32_t
190ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
191{
192 return (ahc->scb_data->hscb_busaddr
193 + (sizeof(struct hardware_scb) * index));
194}
195
196static __inline void
197ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
198{
199 ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
200 ahc->scb_data->hscb_dmamap,
201 /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
202 /*len*/sizeof(*scb->hscb), op);
203}
204
205static __inline void
206ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
207{
208 if (scb->sg_count == 0)
209 return;
210
211 ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
212 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
213 * sizeof(struct ahc_dma_seg),
214 /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
215}
216
217static __inline uint32_t
218ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
219{
220 return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
221}
222 56
223/******************************** Debugging ***********************************/ 57/******************************** Debugging ***********************************/
224static __inline char *ahc_name(struct ahc_softc *ahc); 58static __inline char *ahc_name(struct ahc_softc *ahc);
@@ -231,420 +65,34 @@ ahc_name(struct ahc_softc *ahc)
231 65
232/*********************** Miscellaneous Support Functions ***********************/ 66/*********************** Miscellaneous Support Functions ***********************/
233 67
234static __inline void ahc_update_residual(struct ahc_softc *ahc, 68struct ahc_initiator_tinfo *
235 struct scb *scb); 69 ahc_fetch_transinfo(struct ahc_softc *ahc,
236static __inline struct ahc_initiator_tinfo * 70 char channel, u_int our_id,
237 ahc_fetch_transinfo(struct ahc_softc *ahc, 71 u_int remote_id,
238 char channel, u_int our_id, 72 struct ahc_tmode_tstate **tstate);
239 u_int remote_id, 73uint16_t
240 struct ahc_tmode_tstate **tstate); 74 ahc_inw(struct ahc_softc *ahc, u_int port);
241static __inline uint16_t 75void ahc_outw(struct ahc_softc *ahc, u_int port,
242 ahc_inw(struct ahc_softc *ahc, u_int port); 76 u_int value);
243static __inline void ahc_outw(struct ahc_softc *ahc, u_int port, 77uint32_t
244 u_int value); 78 ahc_inl(struct ahc_softc *ahc, u_int port);
245static __inline uint32_t 79void ahc_outl(struct ahc_softc *ahc, u_int port,
246 ahc_inl(struct ahc_softc *ahc, u_int port); 80 uint32_t value);
247static __inline void ahc_outl(struct ahc_softc *ahc, u_int port, 81uint64_t
248 uint32_t value); 82 ahc_inq(struct ahc_softc *ahc, u_int port);
249static __inline uint64_t 83void ahc_outq(struct ahc_softc *ahc, u_int port,
250 ahc_inq(struct ahc_softc *ahc, u_int port); 84 uint64_t value);
251static __inline void ahc_outq(struct ahc_softc *ahc, u_int port, 85struct scb*
252 uint64_t value); 86 ahc_get_scb(struct ahc_softc *ahc);
253static __inline struct scb* 87void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
254 ahc_get_scb(struct ahc_softc *ahc); 88struct scb *
255static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb); 89 ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
256static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, 90void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
257 struct scb *scb); 91struct scsi_sense_data *
258static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb); 92 ahc_get_sense_buf(struct ahc_softc *ahc,
259static __inline struct scsi_sense_data * 93 struct scb *scb);
260 ahc_get_sense_buf(struct ahc_softc *ahc,
261 struct scb *scb);
262static __inline uint32_t
263 ahc_get_sense_bufaddr(struct ahc_softc *ahc,
264 struct scb *scb);
265
266/*
267 * Determine whether the sequencer reported a residual
268 * for this SCB/transaction.
269 */
270static __inline void
271ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
272{
273 uint32_t sgptr;
274
275 sgptr = ahc_le32toh(scb->hscb->sgptr);
276 if ((sgptr & SG_RESID_VALID) != 0)
277 ahc_calc_residual(ahc, scb);
278}
279
280/*
281 * Return pointers to the transfer negotiation information
282 * for the specified our_id/remote_id pair.
283 */
284static __inline struct ahc_initiator_tinfo *
285ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
286 u_int remote_id, struct ahc_tmode_tstate **tstate)
287{
288 /*
289 * Transfer data structures are stored from the perspective
290 * of the target role. Since the parameters for a connection
291 * in the initiator role to a given target are the same as
292 * when the roles are reversed, we pretend we are the target.
293 */
294 if (channel == 'B')
295 our_id += 8;
296 *tstate = ahc->enabled_targets[our_id];
297 return (&(*tstate)->transinfo[remote_id]);
298}
299
300static __inline uint16_t
301ahc_inw(struct ahc_softc *ahc, u_int port)
302{
303 uint16_t r = ahc_inb(ahc, port+1) << 8;
304 return r | ahc_inb(ahc, port);
305}
306
307static __inline void
308ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
309{
310 ahc_outb(ahc, port, value & 0xFF);
311 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
312}
313
314static __inline uint32_t
315ahc_inl(struct ahc_softc *ahc, u_int port)
316{
317 return ((ahc_inb(ahc, port))
318 | (ahc_inb(ahc, port+1) << 8)
319 | (ahc_inb(ahc, port+2) << 16)
320 | (ahc_inb(ahc, port+3) << 24));
321}
322
323static __inline void
324ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
325{
326 ahc_outb(ahc, port, (value) & 0xFF);
327 ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
328 ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
329 ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
330}
331
332static __inline uint64_t
333ahc_inq(struct ahc_softc *ahc, u_int port)
334{
335 return ((ahc_inb(ahc, port))
336 | (ahc_inb(ahc, port+1) << 8)
337 | (ahc_inb(ahc, port+2) << 16)
338 | (ahc_inb(ahc, port+3) << 24)
339 | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
340 | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
341 | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
342 | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
343}
344
345static __inline void
346ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
347{
348 ahc_outb(ahc, port, value & 0xFF);
349 ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
350 ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
351 ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
352 ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
353 ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
354 ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
355 ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
356}
357
358/*
359 * Get a free scb. If there are none, see if we can allocate a new SCB.
360 */
361static __inline struct scb *
362ahc_get_scb(struct ahc_softc *ahc)
363{
364 struct scb *scb;
365
366 if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
367 ahc_alloc_scbs(ahc);
368 scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
369 if (scb == NULL)
370 return (NULL);
371 }
372 SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
373 return (scb);
374}
375
376/*
377 * Return an SCB resource to the free list.
378 */
379static __inline void
380ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
381{
382 struct hardware_scb *hscb;
383
384 hscb = scb->hscb;
385 /* Clean up for the next user */
386 ahc->scb_data->scbindex[hscb->tag] = NULL;
387 scb->flags = SCB_FREE;
388 hscb->control = 0;
389
390 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
391
392 /* Notify the OSM that a resource is now available. */
393 ahc_platform_scb_free(ahc, scb);
394}
395
396static __inline struct scb *
397ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
398{
399 struct scb* scb;
400
401 scb = ahc->scb_data->scbindex[tag];
402 if (scb != NULL)
403 ahc_sync_scb(ahc, scb,
404 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
405 return (scb);
406}
407
408static __inline void
409ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
410{
411 struct hardware_scb *q_hscb;
412 u_int saved_tag;
413
414 /*
415 * Our queuing method is a bit tricky. The card
416 * knows in advance which HSCB to download, and we
417 * can't disappoint it. To achieve this, the next
418 * SCB to download is saved off in ahc->next_queued_scb.
419 * When we are called to queue "an arbitrary scb",
420 * we copy the contents of the incoming HSCB to the one
421 * the sequencer knows about, swap HSCB pointers and
422 * finally assign the SCB to the tag indexed location
423 * in the scb_array. This makes sure that we can still
424 * locate the correct SCB by SCB_TAG.
425 */
426 q_hscb = ahc->next_queued_scb->hscb;
427 saved_tag = q_hscb->tag;
428 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
429 if ((scb->flags & SCB_CDB32_PTR) != 0) {
430 q_hscb->shared_data.cdb_ptr =
431 ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
432 + offsetof(struct hardware_scb, cdb32));
433 }
434 q_hscb->tag = saved_tag;
435 q_hscb->next = scb->hscb->tag;
436
437 /* Now swap HSCB pointers. */
438 ahc->next_queued_scb->hscb = scb->hscb;
439 scb->hscb = q_hscb;
440
441 /* Now define the mapping from tag to SCB in the scbindex */
442 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
443}
444
445/*
446 * Tell the sequencer about a new transaction to execute.
447 */
448static __inline void
449ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
450{
451 ahc_swap_with_next_hscb(ahc, scb);
452
453 if (scb->hscb->tag == SCB_LIST_NULL
454 || scb->hscb->next == SCB_LIST_NULL)
455 panic("Attempt to queue invalid SCB tag %x:%x\n",
456 scb->hscb->tag, scb->hscb->next);
457
458 /*
459 * Setup data "oddness".
460 */
461 scb->hscb->lun &= LID;
462 if (ahc_get_transfer_length(scb) & 0x1)
463 scb->hscb->lun |= SCB_XFERLEN_ODD;
464
465 /*
466 * Keep a history of SCBs we've downloaded in the qinfifo.
467 */
468 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
469
470 /*
471 * Make sure our data is consistent from the
472 * perspective of the adapter.
473 */
474 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
475
476 /* Tell the adapter about the newly queued SCB */
477 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
478 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
479 } else {
480 if ((ahc->features & AHC_AUTOPAUSE) == 0)
481 ahc_pause(ahc);
482 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
483 if ((ahc->features & AHC_AUTOPAUSE) == 0)
484 ahc_unpause(ahc);
485 }
486}
487
488static __inline struct scsi_sense_data *
489ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
490{
491 int offset;
492
493 offset = scb - ahc->scb_data->scbarray;
494 return (&ahc->scb_data->sense[offset]);
495}
496
497static __inline uint32_t
498ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
499{
500 int offset;
501
502 offset = scb - ahc->scb_data->scbarray;
503 return (ahc->scb_data->sense_busaddr
504 + (offset * sizeof(struct scsi_sense_data)));
505}
506 94
507/************************** Interrupt Processing ******************************/ 95/************************** Interrupt Processing ******************************/
508static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op); 96int ahc_intr(struct ahc_softc *ahc);
509static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
510static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
511static __inline int ahc_intr(struct ahc_softc *ahc);
512
513static __inline void
514ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
515{
516 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
517 /*offset*/0, /*len*/256, op);
518}
519
520static __inline void
521ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
522{
523#ifdef AHC_TARGET_MODE
524 if ((ahc->flags & AHC_TARGETROLE) != 0) {
525 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
526 ahc->shared_data_dmamap,
527 ahc_targetcmd_offset(ahc, 0),
528 sizeof(struct target_cmd) * AHC_TMODE_CMDS,
529 op);
530 }
531#endif
532}
533
534/*
535 * See if the firmware has posted any completed commands
536 * into our in-core command complete fifos.
537 */
538#define AHC_RUN_QOUTFIFO 0x1
539#define AHC_RUN_TQINFIFO 0x2
540static __inline u_int
541ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
542{
543 u_int retval;
544
545 retval = 0;
546 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
547 /*offset*/ahc->qoutfifonext, /*len*/1,
548 BUS_DMASYNC_POSTREAD);
549 if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
550 retval |= AHC_RUN_QOUTFIFO;
551#ifdef AHC_TARGET_MODE
552 if ((ahc->flags & AHC_TARGETROLE) != 0
553 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
554 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
555 ahc->shared_data_dmamap,
556 ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
557 /*len*/sizeof(struct target_cmd),
558 BUS_DMASYNC_POSTREAD);
559 if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
560 retval |= AHC_RUN_TQINFIFO;
561 }
562#endif
563 return (retval);
564}
565
566/*
567 * Catch an interrupt from the adapter
568 */
569static __inline int
570ahc_intr(struct ahc_softc *ahc)
571{
572 u_int intstat;
573
574 if ((ahc->pause & INTEN) == 0) {
575 /*
576 * Our interrupt is not enabled on the chip
577 * and may be disabled for re-entrancy reasons,
578 * so just return. This is likely just a shared
579 * interrupt.
580 */
581 return (0);
582 }
583 /*
584 * Instead of directly reading the interrupt status register,
585 * infer the cause of the interrupt by checking our in-core
586 * completion queues. This avoids a costly PCI bus read in
587 * most cases.
588 */
589 if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
590 && (ahc_check_cmdcmpltqueues(ahc) != 0))
591 intstat = CMDCMPLT;
592 else {
593 intstat = ahc_inb(ahc, INTSTAT);
594 }
595
596 if ((intstat & INT_PEND) == 0) {
597#if AHC_PCI_CONFIG > 0
598 if (ahc->unsolicited_ints > 500) {
599 ahc->unsolicited_ints = 0;
600 if ((ahc->chip & AHC_PCI) != 0
601 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
602 ahc->bus_intr(ahc);
603 }
604#endif
605 ahc->unsolicited_ints++;
606 return (0);
607 }
608 ahc->unsolicited_ints = 0;
609
610 if (intstat & CMDCMPLT) {
611 ahc_outb(ahc, CLRINT, CLRCMDINT);
612
613 /*
614 * Ensure that the chip sees that we've cleared
615 * this interrupt before we walk the output fifo.
616 * Otherwise, we may, due to posted bus writes,
617 * clear the interrupt after we finish the scan,
618 * and after the sequencer has added new entries
619 * and asserted the interrupt again.
620 */
621 ahc_flush_device_writes(ahc);
622 ahc_run_qoutfifo(ahc);
623#ifdef AHC_TARGET_MODE
624 if ((ahc->flags & AHC_TARGETROLE) != 0)
625 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
626#endif
627 }
628
629 /*
630 * Handle statuses that may invalidate our cached
631 * copy of INTSTAT separately.
632 */
633 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
634 /* Hot eject. Do nothing */
635 } else if (intstat & BRKADRINT) {
636 ahc_handle_brkadrint(ahc);
637 } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
638
639 ahc_pause_bug_fix(ahc);
640
641 if ((intstat & SEQINT) != 0)
642 ahc_handle_seqint(ahc, intstat);
643
644 if ((intstat & SCSIINT) != 0)
645 ahc_handle_scsiint(ahc, intstat);
646 }
647 return (1);
648}
649 97
650#endif /* _AIC7XXX_INLINE_H_ */ 98#endif /* _AIC7XXX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 42ad48e09f02..fd2b9785ff4f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -388,14 +388,83 @@ static int aic7xxx_setup(char *s);
388static int ahc_linux_unit; 388static int ahc_linux_unit;
389 389
390 390
391/************************** OS Utility Wrappers *******************************/
392void
393ahc_delay(long usec)
394{
395 /*
396 * udelay on Linux can have problems for
397 * multi-millisecond waits. Wait at most
398 * 1024us per call.
399 */
400 while (usec > 0) {
401 udelay(usec % 1024);
402 usec -= 1024;
403 }
404}
405
406/***************************** Low Level I/O **********************************/
407uint8_t
408ahc_inb(struct ahc_softc * ahc, long port)
409{
410 uint8_t x;
411
412 if (ahc->tag == BUS_SPACE_MEMIO) {
413 x = readb(ahc->bsh.maddr + port);
414 } else {
415 x = inb(ahc->bsh.ioport + port);
416 }
417 mb();
418 return (x);
419}
420
421void
422ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
423{
424 if (ahc->tag == BUS_SPACE_MEMIO) {
425 writeb(val, ahc->bsh.maddr + port);
426 } else {
427 outb(val, ahc->bsh.ioport + port);
428 }
429 mb();
430}
431
432void
433ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
434{
435 int i;
436
437 /*
438 * There is probably a more efficient way to do this on Linux
439 * but we don't use this for anything speed critical and this
440 * should work.
441 */
442 for (i = 0; i < count; i++)
443 ahc_outb(ahc, port, *array++);
444}
445
446void
447ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
448{
449 int i;
450
451 /*
452 * There is probably a more efficient way to do this on Linux
453 * but we don't use this for anything speed critical and this
454 * should work.
455 */
456 for (i = 0; i < count; i++)
457 *array++ = ahc_inb(ahc, port);
458}
459
391/********************************* Inlines ************************************/ 460/********************************* Inlines ************************************/
392static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); 461static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
393 462
394static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 463static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
395 struct ahc_dma_seg *sg, 464 struct ahc_dma_seg *sg,
396 dma_addr_t addr, bus_size_t len); 465 dma_addr_t addr, bus_size_t len);
397 466
398static __inline void 467static void
399ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) 468ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
400{ 469{
401 struct scsi_cmnd *cmd; 470 struct scsi_cmnd *cmd;
@@ -406,7 +475,7 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
406 scsi_dma_unmap(cmd); 475 scsi_dma_unmap(cmd);
407} 476}
408 477
409static __inline int 478static int
410ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 479ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
411 struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len) 480 struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
412{ 481{
@@ -442,13 +511,11 @@ ahc_linux_info(struct Scsi_Host *host)
442 bp = &buffer[0]; 511 bp = &buffer[0];
443 ahc = *(struct ahc_softc **)host->hostdata; 512 ahc = *(struct ahc_softc **)host->hostdata;
444 memset(bp, 0, sizeof(buffer)); 513 memset(bp, 0, sizeof(buffer));
445 strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev "); 514 strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
446 strcat(bp, AIC7XXX_DRIVER_VERSION); 515 " <");
447 strcat(bp, "\n");
448 strcat(bp, " <");
449 strcat(bp, ahc->description); 516 strcat(bp, ahc->description);
450 strcat(bp, ">\n"); 517 strcat(bp, ">\n"
451 strcat(bp, " "); 518 " ");
452 ahc_controller_info(ahc, ahc_info); 519 ahc_controller_info(ahc, ahc_info);
453 strcat(bp, ahc_info); 520 strcat(bp, ahc_info);
454 strcat(bp, "\n"); 521 strcat(bp, "\n");
@@ -964,7 +1031,7 @@ aic7xxx_setup(char *s)
964 char *p; 1031 char *p;
965 char *end; 1032 char *end;
966 1033
967 static struct { 1034 static const struct {
968 const char *name; 1035 const char *name;
969 uint32_t *flag; 1036 uint32_t *flag;
970 } options[] = { 1037 } options[] = {
@@ -2317,7 +2384,7 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
2317 unsigned int ppr_options = tinfo->goal.ppr_options; 2384 unsigned int ppr_options = tinfo->goal.ppr_options;
2318 unsigned long flags; 2385 unsigned long flags;
2319 unsigned long offset = tinfo->goal.offset; 2386 unsigned long offset = tinfo->goal.offset;
2320 struct ahc_syncrate *syncrate; 2387 const struct ahc_syncrate *syncrate;
2321 2388
2322 if (offset == 0) 2389 if (offset == 0)
2323 offset = MAX_OFFSET; 2390 offset = MAX_OFFSET;
@@ -2361,7 +2428,7 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
2361 unsigned int ppr_options = 0; 2428 unsigned int ppr_options = 0;
2362 unsigned int period = 0; 2429 unsigned int period = 0;
2363 unsigned long flags; 2430 unsigned long flags;
2364 struct ahc_syncrate *syncrate = NULL; 2431 const struct ahc_syncrate *syncrate = NULL;
2365 2432
2366 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, 2433 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2367 starget->channel + 'A', ROLE_INITIATOR); 2434 starget->channel + 'A', ROLE_INITIATOR);
@@ -2391,7 +2458,7 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
2391 unsigned int period = tinfo->goal.period; 2458 unsigned int period = tinfo->goal.period;
2392 unsigned int width = tinfo->goal.width; 2459 unsigned int width = tinfo->goal.width;
2393 unsigned long flags; 2460 unsigned long flags;
2394 struct ahc_syncrate *syncrate; 2461 const struct ahc_syncrate *syncrate;
2395 2462
2396 if (dt && spi_max_width(starget)) { 2463 if (dt && spi_max_width(starget)) {
2397 ppr_options |= MSG_EXT_PPR_DT_REQ; 2464 ppr_options |= MSG_EXT_PPR_DT_REQ;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index b48dab447bde..3f7238db35e5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -365,7 +365,7 @@ struct ahc_platform_data {
365#define AHC_LINUX_NOIRQ ((uint32_t)~0) 365#define AHC_LINUX_NOIRQ ((uint32_t)~0)
366 uint32_t irq; /* IRQ for this adapter */ 366 uint32_t irq; /* IRQ for this adapter */
367 uint32_t bios_address; 367 uint32_t bios_address;
368 uint32_t mem_busaddr; /* Mem Base Addr */ 368 resource_size_t mem_busaddr; /* Mem Base Addr */
369}; 369};
370 370
371/************************** OS Utility Wrappers *******************************/ 371/************************** OS Utility Wrappers *******************************/
@@ -375,82 +375,16 @@ struct ahc_platform_data {
375#define malloc(size, type, flags) kmalloc(size, flags) 375#define malloc(size, type, flags) kmalloc(size, flags)
376#define free(ptr, type) kfree(ptr) 376#define free(ptr, type) kfree(ptr)
377 377
378static __inline void ahc_delay(long); 378void ahc_delay(long);
379static __inline void
380ahc_delay(long usec)
381{
382 /*
383 * udelay on Linux can have problems for
384 * multi-millisecond waits. Wait at most
385 * 1024us per call.
386 */
387 while (usec > 0) {
388 udelay(usec % 1024);
389 usec -= 1024;
390 }
391}
392 379
393 380
394/***************************** Low Level I/O **********************************/ 381/***************************** Low Level I/O **********************************/
395static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port); 382uint8_t ahc_inb(struct ahc_softc * ahc, long port);
396static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val); 383void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
397static __inline void ahc_outsb(struct ahc_softc * ahc, long port, 384void ahc_outsb(struct ahc_softc * ahc, long port,
398 uint8_t *, int count); 385 uint8_t *, int count);
399static __inline void ahc_insb(struct ahc_softc * ahc, long port, 386void ahc_insb(struct ahc_softc * ahc, long port,
400 uint8_t *, int count); 387 uint8_t *, int count);
401
402static __inline uint8_t
403ahc_inb(struct ahc_softc * ahc, long port)
404{
405 uint8_t x;
406
407 if (ahc->tag == BUS_SPACE_MEMIO) {
408 x = readb(ahc->bsh.maddr + port);
409 } else {
410 x = inb(ahc->bsh.ioport + port);
411 }
412 mb();
413 return (x);
414}
415
416static __inline void
417ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
418{
419 if (ahc->tag == BUS_SPACE_MEMIO) {
420 writeb(val, ahc->bsh.maddr + port);
421 } else {
422 outb(val, ahc->bsh.ioport + port);
423 }
424 mb();
425}
426
427static __inline void
428ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
429{
430 int i;
431
432 /*
433 * There is probably a more efficient way to do this on Linux
434 * but we don't use this for anything speed critical and this
435 * should work.
436 */
437 for (i = 0; i < count; i++)
438 ahc_outb(ahc, port, *array++);
439}
440
441static __inline void
442ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
443{
444 int i;
445
446 /*
447 * There is probably a more efficient way to do this on Linux
448 * but we don't use this for anything speed critical and this
449 * should work.
450 */
451 for (i = 0; i < count; i++)
452 *array++ = ahc_inb(ahc, port);
453}
454 388
455/**************************** Initialization **********************************/ 389/**************************** Initialization **********************************/
456int ahc_linux_register_host(struct ahc_softc *, 390int ahc_linux_register_host(struct ahc_softc *,
@@ -464,9 +398,6 @@ struct info_str {
464 int pos; 398 int pos;
465}; 399};
466 400
467void ahc_format_transinfo(struct info_str *info,
468 struct ahc_transinfo *tinfo);
469
470/******************************** Locking *************************************/ 401/******************************** Locking *************************************/
471/* Lock protecting internal data structures */ 402/* Lock protecting internal data structures */
472 403
@@ -555,61 +486,12 @@ void ahc_linux_pci_exit(void);
555int ahc_pci_map_registers(struct ahc_softc *ahc); 486int ahc_pci_map_registers(struct ahc_softc *ahc);
556int ahc_pci_map_int(struct ahc_softc *ahc); 487int ahc_pci_map_int(struct ahc_softc *ahc);
557 488
558static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci, 489uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
559 int reg, int width); 490 int reg, int width);
560 491
561static __inline uint32_t 492void ahc_pci_write_config(ahc_dev_softc_t pci,
562ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width) 493 int reg, uint32_t value,
563{ 494 int width);
564 switch (width) {
565 case 1:
566 {
567 uint8_t retval;
568
569 pci_read_config_byte(pci, reg, &retval);
570 return (retval);
571 }
572 case 2:
573 {
574 uint16_t retval;
575 pci_read_config_word(pci, reg, &retval);
576 return (retval);
577 }
578 case 4:
579 {
580 uint32_t retval;
581 pci_read_config_dword(pci, reg, &retval);
582 return (retval);
583 }
584 default:
585 panic("ahc_pci_read_config: Read size too big");
586 /* NOTREACHED */
587 return (0);
588 }
589}
590
591static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
592 int reg, uint32_t value,
593 int width);
594
595static __inline void
596ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
597{
598 switch (width) {
599 case 1:
600 pci_write_config_byte(pci, reg, value);
601 break;
602 case 2:
603 pci_write_config_word(pci, reg, value);
604 break;
605 case 4:
606 pci_write_config_dword(pci, reg, value);
607 break;
608 default:
609 panic("ahc_pci_write_config: Write size too big");
610 /* NOTREACHED */
611 }
612}
613 495
614static __inline int ahc_get_pci_function(ahc_dev_softc_t); 496static __inline int ahc_get_pci_function(ahc_dev_softc_t);
615static __inline int 497static __inline int
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 3d3eaef65fb3..0d7628f1f1ef 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -46,7 +46,7 @@
46*/ 46*/
47#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI) 47#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI)
48 48
49static struct pci_device_id ahc_linux_pci_id_table[] = { 49static const struct pci_device_id ahc_linux_pci_id_table[] = {
50 /* aic7850 based controllers */ 50 /* aic7850 based controllers */
51 ID(ID_AHA_2902_04_10_15_20C_30C), 51 ID(ID_AHA_2902_04_10_15_20C_30C),
52 /* aic7860 based controllers */ 52 /* aic7860 based controllers */
@@ -206,7 +206,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
206 const uint64_t mask_39bit = 0x7FFFFFFFFFULL; 206 const uint64_t mask_39bit = 0x7FFFFFFFFFULL;
207 struct ahc_softc *ahc; 207 struct ahc_softc *ahc;
208 ahc_dev_softc_t pci; 208 ahc_dev_softc_t pci;
209 struct ahc_pci_identity *entry; 209 const struct ahc_pci_identity *entry;
210 char *name; 210 char *name;
211 int error; 211 int error;
212 struct device *dev = &pdev->dev; 212 struct device *dev = &pdev->dev;
@@ -269,6 +269,57 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
269 return (0); 269 return (0);
270} 270}
271 271
272/******************************* PCI Routines *********************************/
273uint32_t
274ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
275{
276 switch (width) {
277 case 1:
278 {
279 uint8_t retval;
280
281 pci_read_config_byte(pci, reg, &retval);
282 return (retval);
283 }
284 case 2:
285 {
286 uint16_t retval;
287 pci_read_config_word(pci, reg, &retval);
288 return (retval);
289 }
290 case 4:
291 {
292 uint32_t retval;
293 pci_read_config_dword(pci, reg, &retval);
294 return (retval);
295 }
296 default:
297 panic("ahc_pci_read_config: Read size too big");
298 /* NOTREACHED */
299 return (0);
300 }
301}
302
303void
304ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
305{
306 switch (width) {
307 case 1:
308 pci_write_config_byte(pci, reg, value);
309 break;
310 case 2:
311 pci_write_config_word(pci, reg, value);
312 break;
313 case 4:
314 pci_write_config_dword(pci, reg, value);
315 break;
316 default:
317 panic("ahc_pci_write_config: Write size too big");
318 /* NOTREACHED */
319 }
320}
321
322
272static struct pci_driver aic7xxx_pci_driver = { 323static struct pci_driver aic7xxx_pci_driver = {
273 .name = "aic7xxx", 324 .name = "aic7xxx",
274 .probe = ahc_linux_pci_dev_probe, 325 .probe = ahc_linux_pci_dev_probe,
@@ -293,7 +344,7 @@ ahc_linux_pci_exit(void)
293} 344}
294 345
295static int 346static int
296ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base) 347ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
297{ 348{
298 if (aic7xxx_allow_memio == 0) 349 if (aic7xxx_allow_memio == 0)
299 return (ENOMEM); 350 return (ENOMEM);
@@ -308,10 +359,10 @@ ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
308 359
309static int 360static int
310ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, 361ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
311 u_long *bus_addr, 362 resource_size_t *bus_addr,
312 uint8_t __iomem **maddr) 363 uint8_t __iomem **maddr)
313{ 364{
314 u_long start; 365 resource_size_t start;
315 int error; 366 int error;
316 367
317 error = 0; 368 error = 0;
@@ -336,7 +387,7 @@ int
336ahc_pci_map_registers(struct ahc_softc *ahc) 387ahc_pci_map_registers(struct ahc_softc *ahc)
337{ 388{
338 uint32_t command; 389 uint32_t command;
339 u_long base; 390 resource_size_t base;
340 uint8_t __iomem *maddr; 391 uint8_t __iomem *maddr;
341 int error; 392 int error;
342 393
@@ -374,12 +425,12 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
374 } else 425 } else
375 command |= PCIM_CMD_MEMEN; 426 command |= PCIM_CMD_MEMEN;
376 } else { 427 } else {
377 printf("aic7xxx: PCI%d:%d:%d MEM region 0x%lx " 428 printf("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
378 "unavailable. Cannot memory map device.\n", 429 "unavailable. Cannot memory map device.\n",
379 ahc_get_pci_bus(ahc->dev_softc), 430 ahc_get_pci_bus(ahc->dev_softc),
380 ahc_get_pci_slot(ahc->dev_softc), 431 ahc_get_pci_slot(ahc->dev_softc),
381 ahc_get_pci_function(ahc->dev_softc), 432 ahc_get_pci_function(ahc->dev_softc),
382 base); 433 (unsigned long long)base);
383 } 434 }
384 435
385 /* 436 /*
@@ -390,15 +441,15 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
390 error = ahc_linux_pci_reserve_io_region(ahc, &base); 441 error = ahc_linux_pci_reserve_io_region(ahc, &base);
391 if (error == 0) { 442 if (error == 0) {
392 ahc->tag = BUS_SPACE_PIO; 443 ahc->tag = BUS_SPACE_PIO;
393 ahc->bsh.ioport = base; 444 ahc->bsh.ioport = (u_long)base;
394 command |= PCIM_CMD_PORTEN; 445 command |= PCIM_CMD_PORTEN;
395 } else { 446 } else {
396 printf("aic7xxx: PCI%d:%d:%d IO region 0x%lx[0..255] " 447 printf("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
397 "unavailable. Cannot map device.\n", 448 "unavailable. Cannot map device.\n",
398 ahc_get_pci_bus(ahc->dev_softc), 449 ahc_get_pci_bus(ahc->dev_softc),
399 ahc_get_pci_slot(ahc->dev_softc), 450 ahc_get_pci_slot(ahc->dev_softc),
400 ahc_get_pci_function(ahc->dev_softc), 451 ahc_get_pci_function(ahc->dev_softc),
401 base); 452 (unsigned long long)base);
402 } 453 }
403 } 454 }
404 ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4); 455 ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 56848f41e4f9..c07cb6eebb02 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -168,8 +168,7 @@ static ahc_device_setup_t ahc_aha394XX_setup;
168static ahc_device_setup_t ahc_aha494XX_setup; 168static ahc_device_setup_t ahc_aha494XX_setup;
169static ahc_device_setup_t ahc_aha398XX_setup; 169static ahc_device_setup_t ahc_aha398XX_setup;
170 170
171static struct ahc_pci_identity ahc_pci_ident_table [] = 171static const struct ahc_pci_identity ahc_pci_ident_table[] = {
172{
173 /* aic7850 based controllers */ 172 /* aic7850 based controllers */
174 { 173 {
175 ID_AHA_2902_04_10_15_20C_30C, 174 ID_AHA_2902_04_10_15_20C_30C,
@@ -668,7 +667,7 @@ ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
668 return (result); 667 return (result);
669} 668}
670 669
671struct ahc_pci_identity * 670const struct ahc_pci_identity *
672ahc_find_pci_device(ahc_dev_softc_t pci) 671ahc_find_pci_device(ahc_dev_softc_t pci)
673{ 672{
674 uint64_t full_id; 673 uint64_t full_id;
@@ -676,7 +675,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
676 uint16_t vendor; 675 uint16_t vendor;
677 uint16_t subdevice; 676 uint16_t subdevice;
678 uint16_t subvendor; 677 uint16_t subvendor;
679 struct ahc_pci_identity *entry; 678 const struct ahc_pci_identity *entry;
680 u_int i; 679 u_int i;
681 680
682 vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); 681 vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -710,7 +709,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
710} 709}
711 710
712int 711int
713ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry) 712ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
714{ 713{
715 u_int command; 714 u_int command;
716 u_int our_id; 715 u_int our_id;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 99e5443e7535..e92991a7c485 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -58,7 +58,7 @@ static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
58 * Table of syncrates that don't follow the "divisible by 4" 58 * Table of syncrates that don't follow the "divisible by 4"
59 * rule. This table will be expanded in future SCSI specs. 59 * rule. This table will be expanded in future SCSI specs.
60 */ 60 */
61static struct { 61static const struct {
62 u_int period_factor; 62 u_int period_factor;
63 u_int period; /* in 100ths of ns */ 63 u_int period; /* in 100ths of ns */
64} scsi_syncrates[] = { 64} scsi_syncrates[] = {
@@ -137,7 +137,7 @@ copy_info(struct info_str *info, char *fmt, ...)
137 return (len); 137 return (len);
138} 138}
139 139
140void 140static void
141ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo) 141ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
142{ 142{
143 u_int speed; 143 u_int speed;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 88bfd767c51c..309a562b009e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -8,7 +8,7 @@
8 8
9#include "aic7xxx_osm.h" 9#include "aic7xxx_osm.h"
10 10
11static ahc_reg_parse_entry_t SCSISEQ_parse_table[] = { 11static const ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
12 { "SCSIRSTO", 0x01, 0x01 }, 12 { "SCSIRSTO", 0x01, 0x01 },
13 { "ENAUTOATNP", 0x02, 0x02 }, 13 { "ENAUTOATNP", 0x02, 0x02 },
14 { "ENAUTOATNI", 0x04, 0x04 }, 14 { "ENAUTOATNI", 0x04, 0x04 },
@@ -26,7 +26,7 @@ ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap)
26 0x00, regvalue, cur_col, wrap)); 26 0x00, regvalue, cur_col, wrap));
27} 27}
28 28
29static ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = { 29static const ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
30 { "CLRCHN", 0x02, 0x02 }, 30 { "CLRCHN", 0x02, 0x02 },
31 { "SCAMEN", 0x04, 0x04 }, 31 { "SCAMEN", 0x04, 0x04 },
32 { "SPIOEN", 0x08, 0x08 }, 32 { "SPIOEN", 0x08, 0x08 },
@@ -43,7 +43,7 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
43 0x01, regvalue, cur_col, wrap)); 43 0x01, regvalue, cur_col, wrap));
44} 44}
45 45
46static ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = { 46static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
47 { "STPWEN", 0x01, 0x01 }, 47 { "STPWEN", 0x01, 0x01 },
48 { "ACTNEGEN", 0x02, 0x02 }, 48 { "ACTNEGEN", 0x02, 0x02 },
49 { "ENSTIMER", 0x04, 0x04 }, 49 { "ENSTIMER", 0x04, 0x04 },
@@ -60,7 +60,7 @@ ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
60 0x02, regvalue, cur_col, wrap)); 60 0x02, regvalue, cur_col, wrap));
61} 61}
62 62
63static ahc_reg_parse_entry_t SCSISIGO_parse_table[] = { 63static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
64 { "ACKO", 0x01, 0x01 }, 64 { "ACKO", 0x01, 0x01 },
65 { "REQO", 0x02, 0x02 }, 65 { "REQO", 0x02, 0x02 },
66 { "BSYO", 0x04, 0x04 }, 66 { "BSYO", 0x04, 0x04 },
@@ -85,7 +85,7 @@ ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
85 0x03, regvalue, cur_col, wrap)); 85 0x03, regvalue, cur_col, wrap));
86} 86}
87 87
88static ahc_reg_parse_entry_t SCSISIGI_parse_table[] = { 88static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
89 { "ACKI", 0x01, 0x01 }, 89 { "ACKI", 0x01, 0x01 },
90 { "REQI", 0x02, 0x02 }, 90 { "REQI", 0x02, 0x02 },
91 { "BSYI", 0x04, 0x04 }, 91 { "BSYI", 0x04, 0x04 },
@@ -112,7 +112,7 @@ ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
112 0x03, regvalue, cur_col, wrap)); 112 0x03, regvalue, cur_col, wrap));
113} 113}
114 114
115static ahc_reg_parse_entry_t SCSIRATE_parse_table[] = { 115static const ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
116 { "SINGLE_EDGE", 0x10, 0x10 }, 116 { "SINGLE_EDGE", 0x10, 0x10 },
117 { "ENABLE_CRC", 0x40, 0x40 }, 117 { "ENABLE_CRC", 0x40, 0x40 },
118 { "WIDEXFER", 0x80, 0x80 }, 118 { "WIDEXFER", 0x80, 0x80 },
@@ -128,7 +128,7 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
128 0x04, regvalue, cur_col, wrap)); 128 0x04, regvalue, cur_col, wrap));
129} 129}
130 130
131static ahc_reg_parse_entry_t SCSIID_parse_table[] = { 131static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
132 { "TWIN_CHNLB", 0x80, 0x80 }, 132 { "TWIN_CHNLB", 0x80, 0x80 },
133 { "OID", 0x0f, 0x0f }, 133 { "OID", 0x0f, 0x0f },
134 { "TWIN_TID", 0x70, 0x70 }, 134 { "TWIN_TID", 0x70, 0x70 },
@@ -151,20 +151,13 @@ ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
151} 151}
152 152
153int 153int
154ahc_scsidath_print(u_int regvalue, u_int *cur_col, u_int wrap)
155{
156 return (ahc_print_register(NULL, 0, "SCSIDATH",
157 0x07, regvalue, cur_col, wrap));
158}
159
160int
161ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap) 154ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
162{ 155{
163 return (ahc_print_register(NULL, 0, "STCNT", 156 return (ahc_print_register(NULL, 0, "STCNT",
164 0x08, regvalue, cur_col, wrap)); 157 0x08, regvalue, cur_col, wrap));
165} 158}
166 159
167static ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = { 160static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
168 { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 }, 161 { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
169 { "AUTO_MSGOUT_DE", 0x02, 0x02 }, 162 { "AUTO_MSGOUT_DE", 0x02, 0x02 },
170 { "SCSIDATL_IMGEN", 0x04, 0x04 }, 163 { "SCSIDATL_IMGEN", 0x04, 0x04 },
@@ -190,7 +183,7 @@ ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
190 0x0a, regvalue, cur_col, wrap)); 183 0x0a, regvalue, cur_col, wrap));
191} 184}
192 185
193static ahc_reg_parse_entry_t CLRSINT0_parse_table[] = { 186static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
194 { "CLRSPIORDY", 0x02, 0x02 }, 187 { "CLRSPIORDY", 0x02, 0x02 },
195 { "CLRSWRAP", 0x08, 0x08 }, 188 { "CLRSWRAP", 0x08, 0x08 },
196 { "CLRIOERR", 0x08, 0x08 }, 189 { "CLRIOERR", 0x08, 0x08 },
@@ -206,7 +199,7 @@ ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
206 0x0b, regvalue, cur_col, wrap)); 199 0x0b, regvalue, cur_col, wrap));
207} 200}
208 201
209static ahc_reg_parse_entry_t SSTAT0_parse_table[] = { 202static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
210 { "DMADONE", 0x01, 0x01 }, 203 { "DMADONE", 0x01, 0x01 },
211 { "SPIORDY", 0x02, 0x02 }, 204 { "SPIORDY", 0x02, 0x02 },
212 { "SDONE", 0x04, 0x04 }, 205 { "SDONE", 0x04, 0x04 },
@@ -225,7 +218,7 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
225 0x0b, regvalue, cur_col, wrap)); 218 0x0b, regvalue, cur_col, wrap));
226} 219}
227 220
228static ahc_reg_parse_entry_t CLRSINT1_parse_table[] = { 221static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
229 { "CLRREQINIT", 0x01, 0x01 }, 222 { "CLRREQINIT", 0x01, 0x01 },
230 { "CLRPHASECHG", 0x02, 0x02 }, 223 { "CLRPHASECHG", 0x02, 0x02 },
231 { "CLRSCSIPERR", 0x04, 0x04 }, 224 { "CLRSCSIPERR", 0x04, 0x04 },
@@ -242,7 +235,7 @@ ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
242 0x0c, regvalue, cur_col, wrap)); 235 0x0c, regvalue, cur_col, wrap));
243} 236}
244 237
245static ahc_reg_parse_entry_t SSTAT1_parse_table[] = { 238static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
246 { "REQINIT", 0x01, 0x01 }, 239 { "REQINIT", 0x01, 0x01 },
247 { "PHASECHG", 0x02, 0x02 }, 240 { "PHASECHG", 0x02, 0x02 },
248 { "SCSIPERR", 0x04, 0x04 }, 241 { "SCSIPERR", 0x04, 0x04 },
@@ -260,7 +253,7 @@ ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
260 0x0c, regvalue, cur_col, wrap)); 253 0x0c, regvalue, cur_col, wrap));
261} 254}
262 255
263static ahc_reg_parse_entry_t SSTAT2_parse_table[] = { 256static const ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
264 { "DUAL_EDGE_ERR", 0x01, 0x01 }, 257 { "DUAL_EDGE_ERR", 0x01, 0x01 },
265 { "CRCREQERR", 0x02, 0x02 }, 258 { "CRCREQERR", 0x02, 0x02 },
266 { "CRCENDERR", 0x04, 0x04 }, 259 { "CRCENDERR", 0x04, 0x04 },
@@ -278,7 +271,7 @@ ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
278 0x0d, regvalue, cur_col, wrap)); 271 0x0d, regvalue, cur_col, wrap));
279} 272}
280 273
281static ahc_reg_parse_entry_t SSTAT3_parse_table[] = { 274static const ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
282 { "OFFCNT", 0x0f, 0x0f }, 275 { "OFFCNT", 0x0f, 0x0f },
283 { "U2OFFCNT", 0x7f, 0x7f }, 276 { "U2OFFCNT", 0x7f, 0x7f },
284 { "SCSICNT", 0xf0, 0xf0 } 277 { "SCSICNT", 0xf0, 0xf0 }
@@ -291,7 +284,7 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
291 0x0e, regvalue, cur_col, wrap)); 284 0x0e, regvalue, cur_col, wrap));
292} 285}
293 286
294static ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = { 287static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
295 { "OID", 0x0f, 0x0f }, 288 { "OID", 0x0f, 0x0f },
296 { "TID", 0xf0, 0xf0 } 289 { "TID", 0xf0, 0xf0 }
297}; 290};
@@ -303,7 +296,7 @@ ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
303 0x0f, regvalue, cur_col, wrap)); 296 0x0f, regvalue, cur_col, wrap));
304} 297}
305 298
306static ahc_reg_parse_entry_t SIMODE0_parse_table[] = { 299static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
307 { "ENDMADONE", 0x01, 0x01 }, 300 { "ENDMADONE", 0x01, 0x01 },
308 { "ENSPIORDY", 0x02, 0x02 }, 301 { "ENSPIORDY", 0x02, 0x02 },
309 { "ENSDONE", 0x04, 0x04 }, 302 { "ENSDONE", 0x04, 0x04 },
@@ -321,7 +314,7 @@ ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
321 0x10, regvalue, cur_col, wrap)); 314 0x10, regvalue, cur_col, wrap));
322} 315}
323 316
324static ahc_reg_parse_entry_t SIMODE1_parse_table[] = { 317static const ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
325 { "ENREQINIT", 0x01, 0x01 }, 318 { "ENREQINIT", 0x01, 0x01 },
326 { "ENPHASECHG", 0x02, 0x02 }, 319 { "ENPHASECHG", 0x02, 0x02 },
327 { "ENSCSIPERR", 0x04, 0x04 }, 320 { "ENSCSIPERR", 0x04, 0x04 },
@@ -347,33 +340,13 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
347} 340}
348 341
349int 342int
350ahc_scsibush_print(u_int regvalue, u_int *cur_col, u_int wrap)
351{
352 return (ahc_print_register(NULL, 0, "SCSIBUSH",
353 0x13, regvalue, cur_col, wrap));
354}
355
356static ahc_reg_parse_entry_t SXFRCTL2_parse_table[] = {
357 { "CMDDMAEN", 0x08, 0x08 },
358 { "AUTORSTDIS", 0x10, 0x10 },
359 { "ASYNC_SETUP", 0x07, 0x07 }
360};
361
362int
363ahc_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
364{
365 return (ahc_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
366 0x13, regvalue, cur_col, wrap));
367}
368
369int
370ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap) 343ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
371{ 344{
372 return (ahc_print_register(NULL, 0, "SHADDR", 345 return (ahc_print_register(NULL, 0, "SHADDR",
373 0x14, regvalue, cur_col, wrap)); 346 0x14, regvalue, cur_col, wrap));
374} 347}
375 348
376static ahc_reg_parse_entry_t SELTIMER_parse_table[] = { 349static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
377 { "STAGE1", 0x01, 0x01 }, 350 { "STAGE1", 0x01, 0x01 },
378 { "STAGE2", 0x02, 0x02 }, 351 { "STAGE2", 0x02, 0x02 },
379 { "STAGE3", 0x04, 0x04 }, 352 { "STAGE3", 0x04, 0x04 },
@@ -389,7 +362,7 @@ ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
389 0x18, regvalue, cur_col, wrap)); 362 0x18, regvalue, cur_col, wrap));
390} 363}
391 364
392static ahc_reg_parse_entry_t SELID_parse_table[] = { 365static const ahc_reg_parse_entry_t SELID_parse_table[] = {
393 { "ONEBIT", 0x08, 0x08 }, 366 { "ONEBIT", 0x08, 0x08 },
394 { "SELID_MASK", 0xf0, 0xf0 } 367 { "SELID_MASK", 0xf0, 0xf0 }
395}; 368};
@@ -401,21 +374,6 @@ ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
401 0x19, regvalue, cur_col, wrap)); 374 0x19, regvalue, cur_col, wrap));
402} 375}
403 376
404static ahc_reg_parse_entry_t SCAMCTL_parse_table[] = {
405 { "DFLTTID", 0x10, 0x10 },
406 { "ALTSTIM", 0x20, 0x20 },
407 { "CLRSCAMSELID", 0x40, 0x40 },
408 { "ENSCAMSELO", 0x80, 0x80 },
409 { "SCAMLVL", 0x03, 0x03 }
410};
411
412int
413ahc_scamctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
414{
415 return (ahc_print_register(SCAMCTL_parse_table, 5, "SCAMCTL",
416 0x1a, regvalue, cur_col, wrap));
417}
418
419int 377int
420ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap) 378ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
421{ 379{
@@ -423,7 +381,7 @@ ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
423 0x1b, regvalue, cur_col, wrap)); 381 0x1b, regvalue, cur_col, wrap));
424} 382}
425 383
426static ahc_reg_parse_entry_t SPIOCAP_parse_table[] = { 384static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
427 { "SSPIOCPS", 0x01, 0x01 }, 385 { "SSPIOCPS", 0x01, 0x01 },
428 { "ROM", 0x02, 0x02 }, 386 { "ROM", 0x02, 0x02 },
429 { "EEPROM", 0x04, 0x04 }, 387 { "EEPROM", 0x04, 0x04 },
@@ -441,7 +399,7 @@ ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
441 0x1b, regvalue, cur_col, wrap)); 399 0x1b, regvalue, cur_col, wrap));
442} 400}
443 401
444static ahc_reg_parse_entry_t BRDCTL_parse_table[] = { 402static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
445 { "BRDCTL0", 0x01, 0x01 }, 403 { "BRDCTL0", 0x01, 0x01 },
446 { "BRDSTB_ULTRA2", 0x01, 0x01 }, 404 { "BRDSTB_ULTRA2", 0x01, 0x01 },
447 { "BRDCTL1", 0x02, 0x02 }, 405 { "BRDCTL1", 0x02, 0x02 },
@@ -464,7 +422,7 @@ ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
464 0x1d, regvalue, cur_col, wrap)); 422 0x1d, regvalue, cur_col, wrap));
465} 423}
466 424
467static ahc_reg_parse_entry_t SEECTL_parse_table[] = { 425static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
468 { "SEEDI", 0x01, 0x01 }, 426 { "SEEDI", 0x01, 0x01 },
469 { "SEEDO", 0x02, 0x02 }, 427 { "SEEDO", 0x02, 0x02 },
470 { "SEECK", 0x04, 0x04 }, 428 { "SEECK", 0x04, 0x04 },
@@ -482,7 +440,7 @@ ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
482 0x1e, regvalue, cur_col, wrap)); 440 0x1e, regvalue, cur_col, wrap));
483} 441}
484 442
485static ahc_reg_parse_entry_t SBLKCTL_parse_table[] = { 443static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
486 { "XCVR", 0x01, 0x01 }, 444 { "XCVR", 0x01, 0x01 },
487 { "SELWIDE", 0x02, 0x02 }, 445 { "SELWIDE", 0x02, 0x02 },
488 { "ENAB20", 0x04, 0x04 }, 446 { "ENAB20", 0x04, 0x04 },
@@ -522,13 +480,6 @@ ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
522} 480}
523 481
524int 482int
525ahc_cmdsize_table_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
526{
527 return (ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL",
528 0x34, regvalue, cur_col, wrap));
529}
530
531int
532ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap) 483ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
533{ 484{
534 return (ahc_print_register(NULL, 0, "MWI_RESIDUAL", 485 return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
@@ -549,7 +500,7 @@ ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
549 0x3a, regvalue, cur_col, wrap)); 500 0x3a, regvalue, cur_col, wrap));
550} 501}
551 502
552static ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = { 503static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
553 { "FIFORESET", 0x01, 0x01 }, 504 { "FIFORESET", 0x01, 0x01 },
554 { "FIFOFLUSH", 0x02, 0x02 }, 505 { "FIFOFLUSH", 0x02, 0x02 },
555 { "DIRECTION", 0x04, 0x04 }, 506 { "DIRECTION", 0x04, 0x04 },
@@ -569,7 +520,7 @@ ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
569 0x3b, regvalue, cur_col, wrap)); 520 0x3b, regvalue, cur_col, wrap));
570} 521}
571 522
572static ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = { 523static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
573 { "NO_DISCONNECT", 0x01, 0x01 }, 524 { "NO_DISCONNECT", 0x01, 0x01 },
574 { "SPHASE_PENDING", 0x02, 0x02 }, 525 { "SPHASE_PENDING", 0x02, 0x02 },
575 { "DPHASE_PENDING", 0x04, 0x04 }, 526 { "DPHASE_PENDING", 0x04, 0x04 },
@@ -602,7 +553,7 @@ ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
602 0x3e, regvalue, cur_col, wrap)); 553 0x3e, regvalue, cur_col, wrap));
603} 554}
604 555
605static ahc_reg_parse_entry_t LASTPHASE_parse_table[] = { 556static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
606 { "MSGI", 0x20, 0x20 }, 557 { "MSGI", 0x20, 0x20 },
607 { "IOI", 0x40, 0x40 }, 558 { "IOI", 0x40, 0x40 },
608 { "CDI", 0x80, 0x80 }, 559 { "CDI", 0x80, 0x80 },
@@ -645,13 +596,6 @@ ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
645} 596}
646 597
647int 598int
648ahc_complete_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
649{
650 return (ahc_print_register(NULL, 0, "COMPLETE_SCBH",
651 0x43, regvalue, cur_col, wrap));
652}
653
654int
655ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap) 599ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
656{ 600{
657 return (ahc_print_register(NULL, 0, "HSCB_ADDR", 601 return (ahc_print_register(NULL, 0, "HSCB_ADDR",
@@ -700,7 +644,7 @@ ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
700 0x50, regvalue, cur_col, wrap)); 644 0x50, regvalue, cur_col, wrap));
701} 645}
702 646
703static ahc_reg_parse_entry_t ARG_1_parse_table[] = { 647static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
704 { "CONT_TARG_SESSION", 0x02, 0x02 }, 648 { "CONT_TARG_SESSION", 0x02, 0x02 },
705 { "CONT_MSG_LOOP", 0x04, 0x04 }, 649 { "CONT_MSG_LOOP", 0x04, 0x04 },
706 { "EXIT_MSG_LOOP", 0x08, 0x08 }, 650 { "EXIT_MSG_LOOP", 0x08, 0x08 },
@@ -731,7 +675,7 @@ ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
731 0x53, regvalue, cur_col, wrap)); 675 0x53, regvalue, cur_col, wrap));
732} 676}
733 677
734static ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = { 678static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
735 { "ENAUTOATNP", 0x02, 0x02 }, 679 { "ENAUTOATNP", 0x02, 0x02 },
736 { "ENAUTOATNI", 0x04, 0x04 }, 680 { "ENAUTOATNI", 0x04, 0x04 },
737 { "ENAUTOATNO", 0x08, 0x08 }, 681 { "ENAUTOATNO", 0x08, 0x08 },
@@ -747,7 +691,7 @@ ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
747 0x54, regvalue, cur_col, wrap)); 691 0x54, regvalue, cur_col, wrap));
748} 692}
749 693
750static ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = { 694static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
751 { "HA_274_EXTENDED_TRANS",0x01, 0x01 } 695 { "HA_274_EXTENDED_TRANS",0x01, 0x01 }
752}; 696};
753 697
@@ -758,7 +702,7 @@ ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
758 0x56, regvalue, cur_col, wrap)); 702 0x56, regvalue, cur_col, wrap));
759} 703}
760 704
761static ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = { 705static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
762 { "SCB_DMA", 0x01, 0x01 }, 706 { "SCB_DMA", 0x01, 0x01 },
763 { "TARGET_MSG_PENDING", 0x02, 0x02 } 707 { "TARGET_MSG_PENDING", 0x02, 0x02 }
764}; 708};
@@ -770,7 +714,7 @@ ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
770 0x57, regvalue, cur_col, wrap)); 714 0x57, regvalue, cur_col, wrap));
771} 715}
772 716
773static ahc_reg_parse_entry_t SCSICONF_parse_table[] = { 717static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
774 { "ENSPCHK", 0x20, 0x20 }, 718 { "ENSPCHK", 0x20, 0x20 },
775 { "RESET_SCSI", 0x40, 0x40 }, 719 { "RESET_SCSI", 0x40, 0x40 },
776 { "TERM_ENB", 0x80, 0x80 }, 720 { "TERM_ENB", 0x80, 0x80 },
@@ -785,7 +729,7 @@ ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
785 0x5a, regvalue, cur_col, wrap)); 729 0x5a, regvalue, cur_col, wrap));
786} 730}
787 731
788static ahc_reg_parse_entry_t INTDEF_parse_table[] = { 732static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
789 { "EDGE_TRIG", 0x80, 0x80 }, 733 { "EDGE_TRIG", 0x80, 0x80 },
790 { "VECTOR", 0x0f, 0x0f } 734 { "VECTOR", 0x0f, 0x0f }
791}; 735};
@@ -804,7 +748,7 @@ ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
804 0x5d, regvalue, cur_col, wrap)); 748 0x5d, regvalue, cur_col, wrap));
805} 749}
806 750
807static ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = { 751static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
808 { "CHANNEL_B_PRIMARY", 0x08, 0x08 }, 752 { "CHANNEL_B_PRIMARY", 0x08, 0x08 },
809 { "BIOSMODE", 0x30, 0x30 }, 753 { "BIOSMODE", 0x30, 0x30 },
810 { "BIOSDISABLED", 0x30, 0x30 } 754 { "BIOSDISABLED", 0x30, 0x30 }
@@ -817,7 +761,7 @@ ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
817 0x5f, regvalue, cur_col, wrap)); 761 0x5f, regvalue, cur_col, wrap));
818} 762}
819 763
820static ahc_reg_parse_entry_t SEQCTL_parse_table[] = { 764static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
821 { "LOADRAM", 0x01, 0x01 }, 765 { "LOADRAM", 0x01, 0x01 },
822 { "SEQRESET", 0x02, 0x02 }, 766 { "SEQRESET", 0x02, 0x02 },
823 { "STEP", 0x04, 0x04 }, 767 { "STEP", 0x04, 0x04 },
@@ -849,7 +793,7 @@ ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
849 0x62, regvalue, cur_col, wrap)); 793 0x62, regvalue, cur_col, wrap));
850} 794}
851 795
852static ahc_reg_parse_entry_t SEQADDR1_parse_table[] = { 796static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
853 { "SEQADDR1_MASK", 0x01, 0x01 } 797 { "SEQADDR1_MASK", 0x01, 0x01 }
854}; 798};
855 799
@@ -902,7 +846,7 @@ ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
902 0x6a, regvalue, cur_col, wrap)); 846 0x6a, regvalue, cur_col, wrap));
903} 847}
904 848
905static ahc_reg_parse_entry_t FLAGS_parse_table[] = { 849static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
906 { "CARRY", 0x01, 0x01 }, 850 { "CARRY", 0x01, 0x01 },
907 { "ZERO", 0x02, 0x02 } 851 { "ZERO", 0x02, 0x02 }
908}; 852};
@@ -929,13 +873,6 @@ ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
929} 873}
930 874
931int 875int
932ahc_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
933{
934 return (ahc_print_register(NULL, 0, "FUNCTION1",
935 0x6e, regvalue, cur_col, wrap));
936}
937
938int
939ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap) 876ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
940{ 877{
941 return (ahc_print_register(NULL, 0, "STACK", 878 return (ahc_print_register(NULL, 0, "STACK",
@@ -956,19 +893,7 @@ ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
956 0x70, regvalue, cur_col, wrap)); 893 0x70, regvalue, cur_col, wrap));
957} 894}
958 895
959static ahc_reg_parse_entry_t BCTL_parse_table[] = { 896static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
960 { "ENABLE", 0x01, 0x01 },
961 { "ACE", 0x08, 0x08 }
962};
963
964int
965ahc_bctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
966{
967 return (ahc_print_register(BCTL_parse_table, 2, "BCTL",
968 0x84, regvalue, cur_col, wrap));
969}
970
971static ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
972 { "CIOPARCKEN", 0x01, 0x01 }, 897 { "CIOPARCKEN", 0x01, 0x01 },
973 { "USCBSIZE32", 0x02, 0x02 }, 898 { "USCBSIZE32", 0x02, 0x02 },
974 { "RAMPS", 0x04, 0x04 }, 899 { "RAMPS", 0x04, 0x04 },
@@ -986,7 +911,7 @@ ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
986 0x84, regvalue, cur_col, wrap)); 911 0x84, regvalue, cur_col, wrap));
987} 912}
988 913
989static ahc_reg_parse_entry_t BUSTIME_parse_table[] = { 914static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
990 { "BON", 0x0f, 0x0f }, 915 { "BON", 0x0f, 0x0f },
991 { "BOFF", 0xf0, 0xf0 } 916 { "BOFF", 0xf0, 0xf0 }
992}; 917};
@@ -998,7 +923,7 @@ ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
998 0x85, regvalue, cur_col, wrap)); 923 0x85, regvalue, cur_col, wrap));
999} 924}
1000 925
1001static ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = { 926static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
1002 { "HADDLDSEL0", 0x01, 0x01 }, 927 { "HADDLDSEL0", 0x01, 0x01 },
1003 { "HADDLDSEL1", 0x02, 0x02 }, 928 { "HADDLDSEL1", 0x02, 0x02 },
1004 { "DSLATT", 0xfc, 0xfc } 929 { "DSLATT", 0xfc, 0xfc }
@@ -1011,7 +936,7 @@ ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1011 0x85, regvalue, cur_col, wrap)); 936 0x85, regvalue, cur_col, wrap));
1012} 937}
1013 938
1014static ahc_reg_parse_entry_t BUSSPD_parse_table[] = { 939static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
1015 { "STBON", 0x07, 0x07 }, 940 { "STBON", 0x07, 0x07 },
1016 { "STBOFF", 0x38, 0x38 }, 941 { "STBOFF", 0x38, 0x38 },
1017 { "DFTHRSH_75", 0x80, 0x80 }, 942 { "DFTHRSH_75", 0x80, 0x80 },
@@ -1026,7 +951,7 @@ ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
1026 0x86, regvalue, cur_col, wrap)); 951 0x86, regvalue, cur_col, wrap));
1027} 952}
1028 953
1029static ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = { 954static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
1030 { "SEQ_MAILBOX", 0x0f, 0x0f }, 955 { "SEQ_MAILBOX", 0x0f, 0x0f },
1031 { "HOST_TQINPOS", 0x80, 0x80 }, 956 { "HOST_TQINPOS", 0x80, 0x80 },
1032 { "HOST_MAILBOX", 0xf0, 0xf0 } 957 { "HOST_MAILBOX", 0xf0, 0xf0 }
@@ -1039,7 +964,7 @@ ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
1039 0x86, regvalue, cur_col, wrap)); 964 0x86, regvalue, cur_col, wrap));
1040} 965}
1041 966
1042static ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = { 967static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
1043 { "DFTHRSH_100", 0xc0, 0xc0 } 968 { "DFTHRSH_100", 0xc0, 0xc0 }
1044}; 969};
1045 970
@@ -1050,7 +975,7 @@ ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
1050 0x86, regvalue, cur_col, wrap)); 975 0x86, regvalue, cur_col, wrap));
1051} 976}
1052 977
1053static ahc_reg_parse_entry_t HCNTRL_parse_table[] = { 978static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
1054 { "CHIPRST", 0x01, 0x01 }, 979 { "CHIPRST", 0x01, 0x01 },
1055 { "CHIPRSTACK", 0x01, 0x01 }, 980 { "CHIPRSTACK", 0x01, 0x01 },
1056 { "INTEN", 0x02, 0x02 }, 981 { "INTEN", 0x02, 0x02 },
@@ -1088,7 +1013,7 @@ ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1088 0x90, regvalue, cur_col, wrap)); 1013 0x90, regvalue, cur_col, wrap));
1089} 1014}
1090 1015
1091static ahc_reg_parse_entry_t INTSTAT_parse_table[] = { 1016static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
1092 { "SEQINT", 0x01, 0x01 }, 1017 { "SEQINT", 0x01, 0x01 },
1093 { "CMDCMPLT", 0x02, 0x02 }, 1018 { "CMDCMPLT", 0x02, 0x02 },
1094 { "SCSIINT", 0x04, 0x04 }, 1019 { "SCSIINT", 0x04, 0x04 },
@@ -1119,7 +1044,7 @@ ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1119 0x91, regvalue, cur_col, wrap)); 1044 0x91, regvalue, cur_col, wrap));
1120} 1045}
1121 1046
1122static ahc_reg_parse_entry_t CLRINT_parse_table[] = { 1047static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
1123 { "CLRSEQINT", 0x01, 0x01 }, 1048 { "CLRSEQINT", 0x01, 0x01 },
1124 { "CLRCMDINT", 0x02, 0x02 }, 1049 { "CLRCMDINT", 0x02, 0x02 },
1125 { "CLRSCSIINT", 0x04, 0x04 }, 1050 { "CLRSCSIINT", 0x04, 0x04 },
@@ -1134,7 +1059,7 @@ ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
1134 0x92, regvalue, cur_col, wrap)); 1059 0x92, regvalue, cur_col, wrap));
1135} 1060}
1136 1061
1137static ahc_reg_parse_entry_t ERROR_parse_table[] = { 1062static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
1138 { "ILLHADDR", 0x01, 0x01 }, 1063 { "ILLHADDR", 0x01, 0x01 },
1139 { "ILLSADDR", 0x02, 0x02 }, 1064 { "ILLSADDR", 0x02, 0x02 },
1140 { "ILLOPCODE", 0x04, 0x04 }, 1065 { "ILLOPCODE", 0x04, 0x04 },
@@ -1152,7 +1077,7 @@ ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
1152 0x92, regvalue, cur_col, wrap)); 1077 0x92, regvalue, cur_col, wrap));
1153} 1078}
1154 1079
1155static ahc_reg_parse_entry_t DFCNTRL_parse_table[] = { 1080static const ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
1156 { "FIFORESET", 0x01, 0x01 }, 1081 { "FIFORESET", 0x01, 0x01 },
1157 { "FIFOFLUSH", 0x02, 0x02 }, 1082 { "FIFOFLUSH", 0x02, 0x02 },
1158 { "DIRECTION", 0x04, 0x04 }, 1083 { "DIRECTION", 0x04, 0x04 },
@@ -1172,7 +1097,7 @@ ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
1172 0x93, regvalue, cur_col, wrap)); 1097 0x93, regvalue, cur_col, wrap));
1173} 1098}
1174 1099
1175static ahc_reg_parse_entry_t DFSTATUS_parse_table[] = { 1100static const ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
1176 { "FIFOEMP", 0x01, 0x01 }, 1101 { "FIFOEMP", 0x01, 0x01 },
1177 { "FIFOFULL", 0x02, 0x02 }, 1102 { "FIFOFULL", 0x02, 0x02 },
1178 { "DFTHRESH", 0x04, 0x04 }, 1103 { "DFTHRESH", 0x04, 0x04 },
@@ -1198,20 +1123,13 @@ ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1198} 1123}
1199 1124
1200int 1125int
1201ahc_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1202{
1203 return (ahc_print_register(NULL, 0, "DFRADDR",
1204 0x97, regvalue, cur_col, wrap));
1205}
1206
1207int
1208ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap) 1126ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
1209{ 1127{
1210 return (ahc_print_register(NULL, 0, "DFDAT", 1128 return (ahc_print_register(NULL, 0, "DFDAT",
1211 0x99, regvalue, cur_col, wrap)); 1129 0x99, regvalue, cur_col, wrap));
1212} 1130}
1213 1131
1214static ahc_reg_parse_entry_t SCBCNT_parse_table[] = { 1132static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
1215 { "SCBAUTO", 0x80, 0x80 }, 1133 { "SCBAUTO", 0x80, 0x80 },
1216 { "SCBCNT_MASK", 0x1f, 0x1f } 1134 { "SCBCNT_MASK", 0x1f, 0x1f }
1217}; 1135};
@@ -1231,20 +1149,13 @@ ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1231} 1149}
1232 1150
1233int 1151int
1234ahc_qincnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1235{
1236 return (ahc_print_register(NULL, 0, "QINCNT",
1237 0x9c, regvalue, cur_col, wrap));
1238}
1239
1240int
1241ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap) 1152ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
1242{ 1153{
1243 return (ahc_print_register(NULL, 0, "QOUTFIFO", 1154 return (ahc_print_register(NULL, 0, "QOUTFIFO",
1244 0x9d, regvalue, cur_col, wrap)); 1155 0x9d, regvalue, cur_col, wrap));
1245} 1156}
1246 1157
1247static ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = { 1158static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
1248 { "TARGCRCCNTEN", 0x04, 0x04 }, 1159 { "TARGCRCCNTEN", 0x04, 0x04 },
1249 { "TARGCRCENDEN", 0x08, 0x08 }, 1160 { "TARGCRCENDEN", 0x08, 0x08 },
1250 { "CRCREQCHKEN", 0x10, 0x10 }, 1161 { "CRCREQCHKEN", 0x10, 0x10 },
@@ -1260,14 +1171,7 @@ ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
1260 0x9d, regvalue, cur_col, wrap)); 1171 0x9d, regvalue, cur_col, wrap));
1261} 1172}
1262 1173
1263int 1174static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
1264ahc_qoutcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1265{
1266 return (ahc_print_register(NULL, 0, "QOUTCNT",
1267 0x9e, regvalue, cur_col, wrap));
1268}
1269
1270static ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
1271 { "DATA_OUT_PHASE", 0x01, 0x01 }, 1175 { "DATA_OUT_PHASE", 0x01, 0x01 },
1272 { "DATA_IN_PHASE", 0x02, 0x02 }, 1176 { "DATA_IN_PHASE", 0x02, 0x02 },
1273 { "MSG_OUT_PHASE", 0x04, 0x04 }, 1177 { "MSG_OUT_PHASE", 0x04, 0x04 },
@@ -1284,7 +1188,7 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
1284 0x9e, regvalue, cur_col, wrap)); 1188 0x9e, regvalue, cur_col, wrap));
1285} 1189}
1286 1190
1287static ahc_reg_parse_entry_t SFUNCT_parse_table[] = { 1191static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
1288 { "ALT_MODE", 0x80, 0x80 } 1192 { "ALT_MODE", 0x80, 0x80 }
1289}; 1193};
1290 1194
@@ -1351,7 +1255,7 @@ ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1351 0xac, regvalue, cur_col, wrap)); 1255 0xac, regvalue, cur_col, wrap));
1352} 1256}
1353 1257
1354static ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = { 1258static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
1355 { "SG_LAST_SEG", 0x80, 0x80 }, 1259 { "SG_LAST_SEG", 0x80, 0x80 },
1356 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f } 1260 { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }
1357}; 1261};
@@ -1363,7 +1267,7 @@ ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
1363 0xb0, regvalue, cur_col, wrap)); 1267 0xb0, regvalue, cur_col, wrap));
1364} 1268}
1365 1269
1366static ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = { 1270static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
1367 { "SG_LIST_NULL", 0x01, 0x01 }, 1271 { "SG_LIST_NULL", 0x01, 0x01 },
1368 { "SG_FULL_RESID", 0x02, 0x02 }, 1272 { "SG_FULL_RESID", 0x02, 0x02 },
1369 { "SG_RESID_VALID", 0x04, 0x04 } 1273 { "SG_RESID_VALID", 0x04, 0x04 }
@@ -1376,7 +1280,7 @@ ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1376 0xb4, regvalue, cur_col, wrap)); 1280 0xb4, regvalue, cur_col, wrap));
1377} 1281}
1378 1282
1379static ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = { 1283static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
1380 { "DISCONNECTED", 0x04, 0x04 }, 1284 { "DISCONNECTED", 0x04, 0x04 },
1381 { "ULTRAENB", 0x08, 0x08 }, 1285 { "ULTRAENB", 0x08, 0x08 },
1382 { "MK_MESSAGE", 0x10, 0x10 }, 1286 { "MK_MESSAGE", 0x10, 0x10 },
@@ -1394,7 +1298,7 @@ ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
1394 0xb8, regvalue, cur_col, wrap)); 1298 0xb8, regvalue, cur_col, wrap));
1395} 1299}
1396 1300
1397static ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = { 1301static const ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
1398 { "TWIN_CHNLB", 0x80, 0x80 }, 1302 { "TWIN_CHNLB", 0x80, 0x80 },
1399 { "OID", 0x0f, 0x0f }, 1303 { "OID", 0x0f, 0x0f },
1400 { "TWIN_TID", 0x70, 0x70 }, 1304 { "TWIN_TID", 0x70, 0x70 },
@@ -1408,7 +1312,7 @@ ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
1408 0xb9, regvalue, cur_col, wrap)); 1312 0xb9, regvalue, cur_col, wrap));
1409} 1313}
1410 1314
1411static ahc_reg_parse_entry_t SCB_LUN_parse_table[] = { 1315static const ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
1412 { "SCB_XFERLEN_ODD", 0x80, 0x80 }, 1316 { "SCB_XFERLEN_ODD", 0x80, 0x80 },
1413 { "LID", 0x3f, 0x3f } 1317 { "LID", 0x3f, 0x3f }
1414}; 1318};
@@ -1455,14 +1359,7 @@ ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
1455 0xbf, regvalue, cur_col, wrap)); 1359 0xbf, regvalue, cur_col, wrap));
1456} 1360}
1457 1361
1458int 1362static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
1459ahc_scb_64_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
1460{
1461 return (ahc_print_register(NULL, 0, "SCB_64_SPARE",
1462 0xc0, regvalue, cur_col, wrap));
1463}
1464
1465static ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
1466 { "DO_2840", 0x01, 0x01 }, 1363 { "DO_2840", 0x01, 0x01 },
1467 { "CK_2840", 0x02, 0x02 }, 1364 { "CK_2840", 0x02, 0x02 },
1468 { "CS_2840", 0x04, 0x04 } 1365 { "CS_2840", 0x04, 0x04 }
@@ -1475,7 +1372,7 @@ ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
1475 0xc0, regvalue, cur_col, wrap)); 1372 0xc0, regvalue, cur_col, wrap));
1476} 1373}
1477 1374
1478static ahc_reg_parse_entry_t STATUS_2840_parse_table[] = { 1375static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
1479 { "DI_2840", 0x01, 0x01 }, 1376 { "DI_2840", 0x01, 0x01 },
1480 { "EEPROM_TF", 0x80, 0x80 }, 1377 { "EEPROM_TF", 0x80, 0x80 },
1481 { "ADSEL", 0x1e, 0x1e }, 1378 { "ADSEL", 0x1e, 0x1e },
@@ -1524,7 +1421,7 @@ ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1524 0xea, regvalue, cur_col, wrap)); 1421 0xea, regvalue, cur_col, wrap));
1525} 1422}
1526 1423
1527static ahc_reg_parse_entry_t CCSGCTL_parse_table[] = { 1424static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
1528 { "CCSGRESET", 0x01, 0x01 }, 1425 { "CCSGRESET", 0x01, 0x01 },
1529 { "SG_FETCH_NEEDED", 0x02, 0x02 }, 1426 { "SG_FETCH_NEEDED", 0x02, 0x02 },
1530 { "CCSGEN", 0x08, 0x08 }, 1427 { "CCSGEN", 0x08, 0x08 },
@@ -1552,7 +1449,7 @@ ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
1552 0xed, regvalue, cur_col, wrap)); 1449 0xed, regvalue, cur_col, wrap));
1553} 1450}
1554 1451
1555static ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = { 1452static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
1556 { "CCSCBRESET", 0x01, 0x01 }, 1453 { "CCSCBRESET", 0x01, 0x01 },
1557 { "CCSCBDIR", 0x04, 0x04 }, 1454 { "CCSCBDIR", 0x04, 0x04 },
1558 { "CCSCBEN", 0x08, 0x08 }, 1455 { "CCSCBEN", 0x08, 0x08 },
@@ -1610,7 +1507,7 @@ ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
1610 0xf8, regvalue, cur_col, wrap)); 1507 0xf8, regvalue, cur_col, wrap));
1611} 1508}
1612 1509
1613static ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = { 1510static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
1614 { "SDSCB_ROLLOVER", 0x10, 0x10 }, 1511 { "SDSCB_ROLLOVER", 0x10, 0x10 },
1615 { "SNSCB_ROLLOVER", 0x20, 0x20 }, 1512 { "SNSCB_ROLLOVER", 0x20, 0x20 },
1616 { "SCB_AVAIL", 0x40, 0x40 }, 1513 { "SCB_AVAIL", 0x40, 0x40 },
@@ -1625,7 +1522,7 @@ ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
1625 0xfa, regvalue, cur_col, wrap)); 1522 0xfa, regvalue, cur_col, wrap));
1626} 1523}
1627 1524
1628static ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = { 1525static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
1629 { "RD_DFTHRSH_MIN", 0x00, 0x00 }, 1526 { "RD_DFTHRSH_MIN", 0x00, 0x00 },
1630 { "WR_DFTHRSH_MIN", 0x00, 0x00 }, 1527 { "WR_DFTHRSH_MIN", 0x00, 0x00 },
1631 { "RD_DFTHRSH_25", 0x01, 0x01 }, 1528 { "RD_DFTHRSH_25", 0x01, 0x01 },
@@ -1653,7 +1550,7 @@ ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
1653 0xfb, regvalue, cur_col, wrap)); 1550 0xfb, regvalue, cur_col, wrap));
1654} 1551}
1655 1552
1656static ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = { 1553static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
1657 { "LAST_SEG_DONE", 0x01, 0x01 }, 1554 { "LAST_SEG_DONE", 0x01, 0x01 },
1658 { "LAST_SEG", 0x02, 0x02 }, 1555 { "LAST_SEG", 0x02, 0x02 },
1659 { "SG_ADDR_MASK", 0xf8, 0xf8 } 1556 { "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -1666,7 +1563,7 @@ ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
1666 0xfc, regvalue, cur_col, wrap)); 1563 0xfc, regvalue, cur_col, wrap));
1667} 1564}
1668 1565
1669static ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = { 1566static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
1670 { "LAST_SEG_DONE", 0x01, 0x01 }, 1567 { "LAST_SEG_DONE", 0x01, 0x01 },
1671 { "LAST_SEG", 0x02, 0x02 }, 1568 { "LAST_SEG", 0x02, 0x02 },
1672 { "SG_ADDR_MASK", 0xf8, 0xf8 } 1569 { "SG_ADDR_MASK", 0xf8, 0xf8 }
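
Editorial note: the aic7xxx_reg_print.c_shipped hunks above are pure const-qualification; each generated *_parse_table[] is read-only data that ahc_print_register() walks to decode a register value, so marking the tables const moves them out of writable memory without changing behaviour. A minimal, self-contained sketch of that decode pattern follows; the field and function names are simplified stand-ins, not the exact kernel declarations.

#include <stdio.h>

/* Simplified stand-in for ahc_reg_parse_entry_t: name, field value, field mask. */
struct reg_field {
	const char *name;
	unsigned char value;
	unsigned char mask;
};

/* Read-only table, hence 'const' as in the patch above. */
static const struct reg_field sstat1_fields[] = {
	{ "REQINIT",  0x01, 0x01 },
	{ "PHASECHG", 0x02, 0x02 },
	{ "SCSIPERR", 0x04, 0x04 },
};

/* Print the fields present in regvalue; entries are treated as simple flags here. */
static void print_register(const struct reg_field *table, int entries,
			   const char *name, unsigned char regvalue)
{
	int i;

	printf("%s = 0x%02x", name, regvalue);
	for (i = 0; i < entries; i++)
		if (regvalue & table[i].mask)
			printf(" %s", table[i].name);
	printf("\n");
}

int main(void)
{
	print_register(sstat1_fields, 3, "SSTAT1", 0x05);
	return 0;
}
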
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
index 4cee08521e75..07e93fbae706 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -5,7 +5,7 @@
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8static uint8_t seqprog[] = { 8static const uint8_t seqprog[] = {
9 0xb2, 0x00, 0x00, 0x08, 9 0xb2, 0x00, 0x00, 0x08,
10 0xf7, 0x11, 0x22, 0x08, 10 0xf7, 0x11, 0x22, 0x08,
11 0x00, 0x65, 0xee, 0x59, 11 0x00, 0x65, 0xee, 0x59,
@@ -1081,7 +1081,7 @@ ahc_patch0_func(struct ahc_softc *ahc)
1081 return (0); 1081 return (0);
1082} 1082}
1083 1083
1084static struct patch { 1084static const struct patch {
1085 ahc_patch_func_t *patch_func; 1085 ahc_patch_func_t *patch_func;
1086 uint32_t begin :10, 1086 uint32_t begin :10,
1087 skip_instr :10, 1087 skip_instr :10,
@@ -1291,7 +1291,7 @@ static struct patch {
1291 { ahc_patch4_func, 865, 12, 1 } 1291 { ahc_patch4_func, 865, 12, 1 }
1292}; 1292};
1293 1293
1294static struct cs { 1294static const struct cs {
1295 uint16_t begin; 1295 uint16_t begin;
1296 uint16_t end; 1296 uint16_t end;
1297} critical_sections[] = { 1297} critical_sections[] = {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 924102720b14..e4a778720301 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -362,7 +362,7 @@ output_code()
362" *\n" 362" *\n"
363"%s */\n", versions); 363"%s */\n", versions);
364 364
365 fprintf(ofile, "static uint8_t seqprog[] = {\n"); 365 fprintf(ofile, "static const uint8_t seqprog[] = {\n");
366 for (cur_instr = STAILQ_FIRST(&seq_program); 366 for (cur_instr = STAILQ_FIRST(&seq_program);
367 cur_instr != NULL; 367 cur_instr != NULL;
368 cur_instr = STAILQ_NEXT(cur_instr, links)) { 368 cur_instr = STAILQ_NEXT(cur_instr, links)) {
@@ -415,7 +415,7 @@ output_code()
415 } 415 }
416 416
417 fprintf(ofile, 417 fprintf(ofile,
418"static struct patch {\n" 418"static const struct patch {\n"
419" %spatch_func_t *patch_func;\n" 419" %spatch_func_t *patch_func;\n"
420" uint32_t begin :10,\n" 420" uint32_t begin :10,\n"
421" skip_instr :10,\n" 421" skip_instr :10,\n"
@@ -435,7 +435,7 @@ output_code()
435 fprintf(ofile, "\n};\n\n"); 435 fprintf(ofile, "\n};\n\n");
436 436
437 fprintf(ofile, 437 fprintf(ofile,
438"static struct cs {\n" 438"static const struct cs {\n"
439" uint16_t begin;\n" 439" uint16_t begin;\n"
440" uint16_t end;\n" 440" uint16_t end;\n"
441"} critical_sections[] = {\n"); 441"} critical_sections[] = {\n");
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 702e2dbd11fb..81be6a261cc8 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -101,11 +101,12 @@ static void format_3_instr(int opcode, symbol_ref_t *src,
101 expression_t *immed, symbol_ref_t *address); 101 expression_t *immed, symbol_ref_t *address);
102static void test_readable_symbol(symbol_t *symbol); 102static void test_readable_symbol(symbol_t *symbol);
103static void test_writable_symbol(symbol_t *symbol); 103static void test_writable_symbol(symbol_t *symbol);
104static void type_check(symbol_t *symbol, expression_t *expression, int and_op); 104static void type_check(symbol_ref_t *sym, expression_t *expression, int and_op);
105static void make_expression(expression_t *immed, int value); 105static void make_expression(expression_t *immed, int value);
106static void add_conditional(symbol_t *symbol); 106static void add_conditional(symbol_t *symbol);
107static void add_version(const char *verstring); 107static void add_version(const char *verstring);
108static int is_download_const(expression_t *immed); 108static int is_download_const(expression_t *immed);
109static int is_location_address(symbol_t *symbol);
109void yyerror(const char *string); 110void yyerror(const char *string);
110 111
111#define SRAM_SYMNAME "SRAM_BASE" 112#define SRAM_SYMNAME "SRAM_BASE"
@@ -142,6 +143,8 @@ void yyerror(const char *string);
142 143
143%token <value> T_ADDRESS 144%token <value> T_ADDRESS
144 145
146%token T_COUNT
147
145%token T_ACCESS_MODE 148%token T_ACCESS_MODE
146 149
147%token T_MODES 150%token T_MODES
@@ -192,10 +195,10 @@ void yyerror(const char *string);
192 195
193%token <value> T_OR 196%token <value> T_OR
194 197
195/* 16 bit extensions */ 198/* 16 bit extensions, not implemented
196%token <value> T_OR16 T_AND16 T_XOR16 T_ADD16 199 * %token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
197%token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG 200 * %token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
198 201 */
199%token T_RET 202%token T_RET
200 203
201%token T_NOP 204%token T_NOP
@@ -214,7 +217,7 @@ void yyerror(const char *string);
214 217
215%type <expression> expression immediate immediate_or_a 218%type <expression> expression immediate immediate_or_a
216 219
217%type <value> export ret f1_opcode f2_opcode f4_opcode jmp_jc_jnc_call jz_jnz je_jne 220%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
218 221
219%type <value> mode_value mode_list macro_arglist 222%type <value> mode_value mode_list macro_arglist
220 223
@@ -313,13 +316,13 @@ reg_definition:
313 stop("Register multiply defined", EX_DATAERR); 316 stop("Register multiply defined", EX_DATAERR);
314 /* NOTREACHED */ 317 /* NOTREACHED */
315 } 318 }
316 cur_symbol = $1; 319 cur_symbol = $1;
317 cur_symbol->type = cur_symtype; 320 cur_symbol->type = cur_symtype;
318 initialize_symbol(cur_symbol); 321 initialize_symbol(cur_symbol);
319 } 322 }
320 reg_attribute_list 323 reg_attribute_list
321 '}' 324 '}'
322 { 325 {
323 /* 326 /*
324 * Default to allowing everything in for registers 327 * Default to allowing everything in for registers
325 * with no bit or mask definitions. 328 * with no bit or mask definitions.
@@ -349,9 +352,10 @@ reg_attribute_list:
349| reg_attribute_list reg_attribute 352| reg_attribute_list reg_attribute
350; 353;
351 354
352reg_attribute: 355reg_attribute:
353 reg_address 356 reg_address
354| size 357| size
358| count
355| access_mode 359| access_mode
356| modes 360| modes
357| field_defn 361| field_defn
@@ -392,6 +396,13 @@ size:
392 } 396 }
393; 397;
394 398
399count:
400 T_COUNT T_NUMBER
401 {
402 cur_symbol->count += $2;
403 }
404;
405
395access_mode: 406access_mode:
396 T_ACCESS_MODE T_MODE 407 T_ACCESS_MODE T_MODE
397 { 408 {
@@ -641,14 +652,14 @@ expression:
641 &($1.referenced_syms), 652 &($1.referenced_syms),
642 &($3.referenced_syms)); 653 &($3.referenced_syms));
643 } 654 }
644| expression T_EXPR_LSHIFT expression 655| expression T_EXPR_LSHIFT expression
645 { 656 {
646 $$.value = $1.value << $3.value; 657 $$.value = $1.value << $3.value;
647 symlist_merge(&$$.referenced_syms, 658 symlist_merge(&$$.referenced_syms,
648 &$1.referenced_syms, 659 &$1.referenced_syms,
649 &$3.referenced_syms); 660 &$3.referenced_syms);
650 } 661 }
651| expression T_EXPR_RSHIFT expression 662| expression T_EXPR_RSHIFT expression
652 { 663 {
653 $$.value = $1.value >> $3.value; 664 $$.value = $1.value >> $3.value;
654 symlist_merge(&$$.referenced_syms, 665 symlist_merge(&$$.referenced_syms,
@@ -714,7 +725,7 @@ expression:
714; 725;
715 726
716constant: 727constant:
717 T_CONST T_SYMBOL expression 728 T_CONST T_SYMBOL expression
718 { 729 {
719 if ($2->type != UNINITIALIZED) { 730 if ($2->type != UNINITIALIZED) {
720 stop("Re-definition of symbol as a constant", 731 stop("Re-definition of symbol as a constant",
@@ -800,6 +811,7 @@ scratch_ram:
800 cur_symtype = SRAMLOC; 811 cur_symtype = SRAMLOC;
801 cur_symbol->type = SRAMLOC; 812 cur_symbol->type = SRAMLOC;
802 initialize_symbol(cur_symbol); 813 initialize_symbol(cur_symbol);
814 cur_symbol->count += 1;
803 } 815 }
804 reg_address 816 reg_address
805 { 817 {
@@ -831,6 +843,7 @@ scb:
831 initialize_symbol(cur_symbol); 843 initialize_symbol(cur_symbol);
832 /* 64 bytes of SCB space */ 844 /* 64 bytes of SCB space */
833 cur_symbol->info.rinfo->size = 64; 845 cur_symbol->info.rinfo->size = 64;
846 cur_symbol->count += 1;
834 } 847 }
835 reg_address 848 reg_address
836 { 849 {
@@ -1311,14 +1324,18 @@ f2_opcode:
1311| T_ROR { $$ = AIC_OP_ROR; } 1324| T_ROR { $$ = AIC_OP_ROR; }
1312; 1325;
1313 1326
1314f4_opcode: 1327/*
1315 T_OR16 { $$ = AIC_OP_OR16; } 1328 * 16bit opcodes, not used
1316| T_AND16 { $$ = AIC_OP_AND16; } 1329 *
1317| T_XOR16 { $$ = AIC_OP_XOR16; } 1330 *f4_opcode:
1318| T_ADD16 { $$ = AIC_OP_ADD16; } 1331 * T_OR16 { $$ = AIC_OP_OR16; }
1319| T_ADC16 { $$ = AIC_OP_ADC16; } 1332 *| T_AND16 { $$ = AIC_OP_AND16; }
1320| T_MVI16 { $$ = AIC_OP_MVI16; } 1333 *| T_XOR16 { $$ = AIC_OP_XOR16; }
1321; 1334 *| T_ADD16 { $$ = AIC_OP_ADD16; }
1335 *| T_ADC16 { $$ = AIC_OP_ADC16; }
1336 *| T_MVI16 { $$ = AIC_OP_MVI16; }
1337 *;
1338 */
1322 1339
1323code: 1340code:
1324 f2_opcode destination ',' expression opt_source ret ';' 1341 f2_opcode destination ',' expression opt_source ret ';'
@@ -1357,6 +1374,7 @@ code:
1357code: 1374code:
1358 T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';' 1375 T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';'
1359 { 1376 {
1377 type_check(&$2, &$4, AIC_OP_OR);
1360 format_3_instr($5, &$2, &$4, &$6); 1378 format_3_instr($5, &$2, &$4, &$6);
1361 } 1379 }
1362; 1380;
@@ -1528,7 +1546,7 @@ initialize_symbol(symbol_t *symbol)
1528 sizeof(struct cond_info)); 1546 sizeof(struct cond_info));
1529 break; 1547 break;
1530 case MACRO: 1548 case MACRO:
1531 symbol->info.macroinfo = 1549 symbol->info.macroinfo =
1532 (struct macro_info *)malloc(sizeof(struct macro_info)); 1550 (struct macro_info *)malloc(sizeof(struct macro_info));
1533 if (symbol->info.macroinfo == NULL) { 1551 if (symbol->info.macroinfo == NULL) {
1534 stop("Can't create macro info", EX_SOFTWARE); 1552 stop("Can't create macro info", EX_SOFTWARE);
@@ -1552,7 +1570,6 @@ add_macro_arg(const char *argtext, int argnum)
1552 struct macro_arg *marg; 1570 struct macro_arg *marg;
1553 int i; 1571 int i;
1554 int retval; 1572 int retval;
1555
1556 1573
1557 if (cur_symbol == NULL || cur_symbol->type != MACRO) { 1574 if (cur_symbol == NULL || cur_symbol->type != MACRO) {
1558 stop("Invalid current symbol for adding macro arg", 1575 stop("Invalid current symbol for adding macro arg",
@@ -1633,8 +1650,10 @@ format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed,
1633 test_writable_symbol(dest->symbol); 1650 test_writable_symbol(dest->symbol);
1634 test_readable_symbol(src->symbol); 1651 test_readable_symbol(src->symbol);
1635 1652
1636 /* Ensure that immediate makes sense for this destination */ 1653 if (!is_location_address(dest->symbol)) {
1637 type_check(dest->symbol, immed, opcode); 1654 /* Ensure that immediate makes sense for this destination */
1655 type_check(dest, immed, opcode);
1656 }
1638 1657
1639 /* Allocate sequencer space for the instruction and fill it out */ 1658 /* Allocate sequencer space for the instruction and fill it out */
1640 instr = seq_alloc(); 1659 instr = seq_alloc();
@@ -1766,9 +1785,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
1766 /* Test register permissions */ 1785 /* Test register permissions */
1767 test_readable_symbol(src->symbol); 1786 test_readable_symbol(src->symbol);
1768 1787
1769 /* Ensure that immediate makes sense for this source */
1770 type_check(src->symbol, immed, opcode);
1771
1772 /* Allocate sequencer space for the instruction and fill it out */ 1788 /* Allocate sequencer space for the instruction and fill it out */
1773 instr = seq_alloc(); 1789 instr = seq_alloc();
1774 f3_instr = &instr->format.format3; 1790 f3_instr = &instr->format.format3;
@@ -1797,7 +1813,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
1797static void 1813static void
1798test_readable_symbol(symbol_t *symbol) 1814test_readable_symbol(symbol_t *symbol)
1799{ 1815{
1800
1801 if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) { 1816 if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) {
1802 snprintf(errbuf, sizeof(errbuf), 1817 snprintf(errbuf, sizeof(errbuf),
1803 "Register %s unavailable in source reg mode %d", 1818 "Register %s unavailable in source reg mode %d",
@@ -1815,7 +1830,6 @@ test_readable_symbol(symbol_t *symbol)
1815static void 1830static void
1816test_writable_symbol(symbol_t *symbol) 1831test_writable_symbol(symbol_t *symbol)
1817{ 1832{
1818
1819 if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) { 1833 if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) {
1820 snprintf(errbuf, sizeof(errbuf), 1834 snprintf(errbuf, sizeof(errbuf),
1821 "Register %s unavailable in destination reg mode %d", 1835 "Register %s unavailable in destination reg mode %d",
@@ -1831,25 +1845,34 @@ test_writable_symbol(symbol_t *symbol)
1831} 1845}
1832 1846
1833static void 1847static void
1834type_check(symbol_t *symbol, expression_t *expression, int opcode) 1848type_check(symbol_ref_t *sym, expression_t *expression, int opcode)
1835{ 1849{
1850 symbol_t *symbol = sym->symbol;
1836 symbol_node_t *node; 1851 symbol_node_t *node;
1837 int and_op; 1852 int and_op;
1853 int8_t value, mask;
1838 1854
1839 and_op = FALSE; 1855 and_op = FALSE;
1840 if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ)
1841 and_op = TRUE;
1842
1843 /* 1856 /*
1844 * Make sure that we aren't attempting to write something 1857 * Make sure that we aren't attempting to write something
1845 * that hasn't been defined. If this is an and operation, 1858 * that hasn't been defined. If this is an and operation,
1846 * this is a mask, so "undefined" bits are okay. 1859 * this is a mask, so "undefined" bits are okay.
1847 */ 1860 */
1848 if (and_op == FALSE 1861 if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ ||
1849 && (expression->value & ~symbol->info.rinfo->valid_bitmask) != 0) { 1862 opcode == AIC_OP_JZ || opcode == AIC_OP_JNE ||
1863 opcode == AIC_OP_BMOV)
1864 and_op = TRUE;
1865
1866 /*
1867 * Defaulting to 8 bit logic
1868 */
1869 mask = (int8_t)~symbol->info.rinfo->valid_bitmask;
1870 value = (int8_t)expression->value;
1871
1872 if (and_op == FALSE && (mask & value) != 0 ) {
1850 snprintf(errbuf, sizeof(errbuf), 1873 snprintf(errbuf, sizeof(errbuf),
1851 "Invalid bit(s) 0x%x in immediate written to %s", 1874 "Invalid bit(s) 0x%x in immediate written to %s",
1852 expression->value & ~symbol->info.rinfo->valid_bitmask, 1875 (mask & value),
1853 symbol->name); 1876 symbol->name);
1854 stop(errbuf, EX_DATAERR); 1877 stop(errbuf, EX_DATAERR);
1855 /* NOTREACHED */ 1878 /* NOTREACHED */
@@ -1959,3 +1982,13 @@ is_download_const(expression_t *immed)
1959 1982
1960 return (FALSE); 1983 return (FALSE);
1961} 1984}
1985
1986static int
1987is_location_address(symbol_t *sym)
1988{
1989 if (sym->type == SCBLOC ||
1990 sym->type == SRAMLOC)
1991 return (TRUE);
1992 return (FALSE);
1993}
1994
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 7c3983f868a9..2c7f02daf88d 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -162,6 +162,7 @@ register { return T_REGISTER; }
162const { yylval.value = FALSE; return T_CONST; } 162const { yylval.value = FALSE; return T_CONST; }
163download { return T_DOWNLOAD; } 163download { return T_DOWNLOAD; }
164address { return T_ADDRESS; } 164address { return T_ADDRESS; }
165count { return T_COUNT; }
165access_mode { return T_ACCESS_MODE; } 166access_mode { return T_ACCESS_MODE; }
166modes { return T_MODES; } 167modes { return T_MODES; }
167RW|RO|WO { 168RW|RO|WO {
@@ -228,15 +229,15 @@ ret { return T_RET; }
228nop { return T_NOP; } 229nop { return T_NOP; }
229 230
230 /* ARP2 16bit extensions */ 231 /* ARP2 16bit extensions */
231or16 { return T_OR16; } 232 /* or16 { return T_OR16; } */
232and16 { return T_AND16; } 233 /* and16 { return T_AND16; }*/
233xor16 { return T_XOR16; } 234 /* xor16 { return T_XOR16; }*/
234add16 { return T_ADD16; } 235 /* add16 { return T_ADD16; }*/
235adc16 { return T_ADC16; } 236 /* adc16 { return T_ADC16; }*/
236mvi16 { return T_MVI16; } 237 /* mvi16 { return T_MVI16; }*/
237test16 { return T_TEST16; } 238 /* test16 { return T_TEST16; }*/
238cmp16 { return T_CMP16; } 239 /* cmp16 { return T_CMP16; }*/
239cmpxchg { return T_CMPXCHG; } 240 /* cmpxchg { return T_CMPXCHG; }*/
240 241
241 /* Allowed Symbols */ 242 /* Allowed Symbols */
242\<\< { return T_EXPR_LSHIFT; } 243\<\< { return T_EXPR_LSHIFT; }
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index f1f448dff569..fcd357872b43 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -77,6 +77,7 @@ symbol_create(char *name)
77 if (new_symbol->name == NULL) 77 if (new_symbol->name == NULL)
78 stop("Unable to strdup symbol name", EX_SOFTWARE); 78 stop("Unable to strdup symbol name", EX_SOFTWARE);
79 new_symbol->type = UNINITIALIZED; 79 new_symbol->type = UNINITIALIZED;
80 new_symbol->count = 1;
80 return (new_symbol); 81 return (new_symbol);
81} 82}
82 83
@@ -198,6 +199,12 @@ symtable_get(char *name)
198 } 199 }
199 } 200 }
200 memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); 201 memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
202 stored_ptr->count++;
203 data.data = &stored_ptr;
204 if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) {
205 perror("Symtable put failed");
206 exit(EX_SOFTWARE);
207 }
201 return (stored_ptr); 208 return (stored_ptr);
202} 209}
203 210
@@ -256,7 +263,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
256 && (curnode->symbol->info.finfo->value > 263 && (curnode->symbol->info.finfo->value >
257 newnode->symbol->info.finfo->value)))) 264 newnode->symbol->info.finfo->value))))
258 || (!field && (curnode->symbol->info.rinfo->address > 265 || (!field && (curnode->symbol->info.rinfo->address >
259 newnode->symbol->info.rinfo->address))) { 266 newnode->symbol->info.rinfo->address))) {
260 SLIST_INSERT_HEAD(symlist, newnode, links); 267 SLIST_INSERT_HEAD(symlist, newnode, links);
261 return; 268 return;
262 } 269 }
@@ -271,7 +278,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
271 278
272 cursymbol = SLIST_NEXT(curnode, links)->symbol; 279 cursymbol = SLIST_NEXT(curnode, links)->symbol;
273 if ((field 280 if ((field
274 && (cursymbol->type > symbol->type 281 && (cursymbol->type > symbol->type
275 || (cursymbol->type == symbol->type 282 || (cursymbol->type == symbol->type
276 && (cursymbol->info.finfo->value > 283 && (cursymbol->info.finfo->value >
277 symbol->info.finfo->value)))) 284 symbol->info.finfo->value))))
@@ -351,7 +358,7 @@ aic_print_reg_dump_types(FILE *ofile)
351{ 358{
352 if (ofile == NULL) 359 if (ofile == NULL)
353 return; 360 return;
354 361
355 fprintf(ofile, 362 fprintf(ofile,
356"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n" 363"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n"
357"typedef struct %sreg_parse_entry {\n" 364"typedef struct %sreg_parse_entry {\n"
@@ -370,7 +377,7 @@ aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode)
370 return; 377 return;
371 378
372 fprintf(dfile, 379 fprintf(dfile,
373"static %sreg_parse_entry_t %s_parse_table[] = {\n", 380"static const %sreg_parse_entry_t %s_parse_table[] = {\n",
374 prefix, 381 prefix,
375 regnode->symbol->name); 382 regnode->symbol->name);
376} 383}
@@ -385,7 +392,7 @@ aic_print_reg_dump_end(FILE *ofile, FILE *dfile,
385 lower_name = strdup(regnode->symbol->name); 392 lower_name = strdup(regnode->symbol->name);
386 if (lower_name == NULL) 393 if (lower_name == NULL)
387 stop("Unable to strdup symbol name", EX_SOFTWARE); 394 stop("Unable to strdup symbol name", EX_SOFTWARE);
388 395
389 for (letter = lower_name; *letter != '\0'; letter++) 396 for (letter = lower_name; *letter != '\0'; letter++)
390 *letter = tolower(*letter); 397 *letter = tolower(*letter);
391 398
@@ -472,6 +479,7 @@ symtable_dump(FILE *ofile, FILE *dfile)
472 DBT key; 479 DBT key;
473 DBT data; 480 DBT data;
474 int flag; 481 int flag;
482 int reg_count = 0, reg_used = 0;
475 u_int i; 483 u_int i;
476 484
477 if (symtable == NULL) 485 if (symtable == NULL)
@@ -541,6 +549,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
541 int num_entries; 549 int num_entries;
542 550
543 num_entries = 0; 551 num_entries = 0;
552 reg_count++;
553 if (curnode->symbol->count == 1)
554 break;
544 fields = &curnode->symbol->info.rinfo->fields; 555 fields = &curnode->symbol->info.rinfo->fields;
545 SLIST_FOREACH(fieldnode, fields, links) { 556 SLIST_FOREACH(fieldnode, fields, links) {
546 if (num_entries == 0) 557 if (num_entries == 0)
@@ -553,11 +564,14 @@ symtable_dump(FILE *ofile, FILE *dfile)
553 } 564 }
554 aic_print_reg_dump_end(ofile, dfile, 565 aic_print_reg_dump_end(ofile, dfile,
555 curnode, num_entries); 566 curnode, num_entries);
567 reg_used++;
556 } 568 }
557 default: 569 default:
558 break; 570 break;
559 } 571 }
560 } 572 }
573 fprintf(stderr, "%s: %d of %d register definitions used\n", appname,
574 reg_used, reg_count);
561 575
562 /* Fold in the masks and bits */ 576 /* Fold in the masks and bits */
563 while (SLIST_FIRST(&masks) != NULL) { 577 while (SLIST_FIRST(&masks) != NULL) {
@@ -646,7 +660,6 @@ symtable_dump(FILE *ofile, FILE *dfile)
646 free(curnode); 660 free(curnode);
647 } 661 }
648 662
649
650 fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n"); 663 fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n");
651 664
652 for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) { 665 for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index afc22e8b4903..05190c1a2fb7 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -128,6 +128,7 @@ typedef struct expression_info {
128typedef struct symbol { 128typedef struct symbol {
129 char *name; 129 char *name;
130 symtype type; 130 symtype type;
131 int count;
131 union { 132 union {
132 struct reg_info *rinfo; 133 struct reg_info *rinfo;
133 struct field_info *finfo; 134 struct field_info *finfo;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 8be3d76656fa..a73a6bbb1b2b 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -2286,17 +2286,14 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec,
2286 } 2286 }
2287} 2287}
2288 2288
2289static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost) 2289static irqreturn_t ihdlr(struct Scsi_Host *shost)
2290{ 2290{
2291 struct scsi_cmnd *SCpnt; 2291 struct scsi_cmnd *SCpnt;
2292 unsigned int i, k, c, status, tstatus, reg; 2292 unsigned int i, k, c, status, tstatus, reg;
2293 struct mssp *spp; 2293 struct mssp *spp;
2294 struct mscp *cpp; 2294 struct mscp *cpp;
2295 struct hostdata *ha = (struct hostdata *)shost->hostdata; 2295 struct hostdata *ha = (struct hostdata *)shost->hostdata;
2296 2296 int irq = shost->irq;
2297 if (shost->irq != irq)
2298 panic("%s: ihdlr, irq %d, shost->irq %d.\n", ha->board_name, irq,
2299 shost->irq);
2300 2297
2301 /* Check if this board need to be serviced */ 2298 /* Check if this board need to be serviced */
2302 if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)) 2299 if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED))
@@ -2535,7 +2532,7 @@ static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
2535 return IRQ_NONE; 2532 return IRQ_NONE;
2536} 2533}
2537 2534
2538static irqreturn_t do_interrupt_handler(int irq, void *shap) 2535static irqreturn_t do_interrupt_handler(int dummy, void *shap)
2539{ 2536{
2540 struct Scsi_Host *shost; 2537 struct Scsi_Host *shost;
2541 unsigned int j; 2538 unsigned int j;
@@ -2548,7 +2545,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap)
2548 shost = sh[j]; 2545 shost = sh[j];
2549 2546
2550 spin_lock_irqsave(shost->host_lock, spin_flags); 2547 spin_lock_irqsave(shost->host_lock, spin_flags);
2551 ret = ihdlr(irq, shost); 2548 ret = ihdlr(shost);
2552 spin_unlock_irqrestore(shost->host_lock, spin_flags); 2549 spin_unlock_irqrestore(shost->host_lock, spin_flags);
2553 return ret; 2550 return ret;
2554} 2551}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index bfdee5968892..a0b6d414953d 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -978,7 +978,7 @@ static int esp_check_spur_intr(struct esp *esp)
978 */ 978 */
979 if (!esp->ops->dma_error(esp)) { 979 if (!esp->ops->dma_error(esp)) {
980 printk(KERN_ERR PFX "esp%d: Spurious irq, " 980 printk(KERN_ERR PFX "esp%d: Spurious irq, "
981 "sreg=%x.\n", 981 "sreg=%02x.\n",
982 esp->host->unique_id, esp->sreg); 982 esp->host->unique_id, esp->sreg);
983 return -1; 983 return -1;
984 } 984 }
@@ -1447,6 +1447,9 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1447 if (offset > 15) 1447 if (offset > 15)
1448 goto do_reject; 1448 goto do_reject;
1449 1449
1450 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
1451 offset = 0;
1452
1450 if (offset) { 1453 if (offset) {
1451 int rounded_up, one_clock; 1454 int rounded_up, one_clock;
1452 1455
@@ -1697,7 +1700,12 @@ again:
1697 else 1700 else
1698 ent->flags &= ~ESP_CMD_FLAG_WRITE; 1701 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1699 1702
1700 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); 1703 if (esp->ops->dma_length_limit)
1704 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1705 dma_len);
1706 else
1707 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1708
1701 esp->data_dma_len = dma_len; 1709 esp->data_dma_len = dma_len;
1702 1710
1703 if (!dma_len) { 1711 if (!dma_len) {
@@ -1761,7 +1769,6 @@ again:
1761 esp_advance_dma(esp, ent, cmd, bytes_sent); 1769 esp_advance_dma(esp, ent, cmd, bytes_sent);
1762 esp_event(esp, ESP_EVENT_CHECK_PHASE); 1770 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1763 goto again; 1771 goto again;
1764 break;
1765 } 1772 }
1766 1773
1767 case ESP_EVENT_STATUS: { 1774 case ESP_EVENT_STATUS: {
@@ -2235,7 +2242,7 @@ static void esp_bootup_reset(struct esp *esp)
2235 2242
2236static void esp_set_clock_params(struct esp *esp) 2243static void esp_set_clock_params(struct esp *esp)
2237{ 2244{
2238 int fmhz; 2245 int fhz;
2239 u8 ccf; 2246 u8 ccf;
2240 2247
2241 /* This is getting messy but it has to be done correctly or else 2248 /* This is getting messy but it has to be done correctly or else
@@ -2270,9 +2277,9 @@ static void esp_set_clock_params(struct esp *esp)
2270 * This entails the smallest and largest sync period we could ever 2277 * This entails the smallest and largest sync period we could ever
2271 * handle on this ESP. 2278 * handle on this ESP.
2272 */ 2279 */
2273 fmhz = esp->cfreq; 2280 fhz = esp->cfreq;
2274 2281
2275 ccf = ((fmhz / 1000000) + 4) / 5; 2282 ccf = ((fhz / 1000000) + 4) / 5;
2276 if (ccf == 1) 2283 if (ccf == 1)
2277 ccf = 2; 2284 ccf = 2;
2278 2285
@@ -2281,16 +2288,16 @@ static void esp_set_clock_params(struct esp *esp)
2281 * been unable to find the clock-frequency PROM property. All 2288 * been unable to find the clock-frequency PROM property. All
2282 * other machines provide useful values it seems. 2289 * other machines provide useful values it seems.
2283 */ 2290 */
2284 if (fmhz <= 5000000 || ccf < 1 || ccf > 8) { 2291 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2285 fmhz = 20000000; 2292 fhz = 20000000;
2286 ccf = 4; 2293 ccf = 4;
2287 } 2294 }
2288 2295
2289 esp->cfact = (ccf == 8 ? 0 : ccf); 2296 esp->cfact = (ccf == 8 ? 0 : ccf);
2290 esp->cfreq = fmhz; 2297 esp->cfreq = fhz;
2291 esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); 2298 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2292 esp->ctick = ESP_TICK(ccf, esp->ccycle); 2299 esp->ctick = ESP_TICK(ccf, esp->ccycle);
2293 esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); 2300 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2294 esp->sync_defp = SYNC_DEFP_SLOW; 2301 esp->sync_defp = SYNC_DEFP_SLOW;
2295} 2302}
2296 2303
@@ -2382,6 +2389,12 @@ static int esp_slave_configure(struct scsi_device *dev)
2382 struct esp_target_data *tp = &esp->target[dev->id]; 2389 struct esp_target_data *tp = &esp->target[dev->id];
2383 int goal_tags, queue_depth; 2390 int goal_tags, queue_depth;
2384 2391
2392 if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
2393 /* Bypass async domain validation */
2394 dev->ppr = 0;
2395 dev->sdtr = 0;
2396 }
2397
2385 goal_tags = 0; 2398 goal_tags = 0;
2386 2399
2387 if (dev->tagged_supported) { 2400 if (dev->tagged_supported) {
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index d5576d54ce76..bb43a1388188 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -224,7 +224,7 @@
224#define ESP_TIMEO_CONST 8192 224#define ESP_TIMEO_CONST 8192
225#define ESP_NEG_DEFP(mhz, cfact) \ 225#define ESP_NEG_DEFP(mhz, cfact) \
226 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) 226 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
227#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) 227#define ESP_HZ_TO_CYCLE(hertz) ((1000000000) / ((hertz) / 1000))
228#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) 228#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
229 229
230/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high 230/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
@@ -240,9 +240,9 @@ struct esp_cmd_priv {
240 int num_sg; 240 int num_sg;
241 } u; 241 } u;
242 242
243 unsigned int cur_residue; 243 int cur_residue;
244 struct scatterlist *cur_sg; 244 struct scatterlist *cur_sg;
245 unsigned int tot_residue; 245 int tot_residue;
246}; 246};
247#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) 247#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
248 248
@@ -368,6 +368,12 @@ struct esp_driver_ops {
368 */ 368 */
369 int (*irq_pending)(struct esp *esp); 369 int (*irq_pending)(struct esp *esp);
370 370
371 /* Return the maximum allowable size of a DMA transfer for a
372 * given buffer.
373 */
374 u32 (*dma_length_limit)(struct esp *esp, u32 dma_addr,
375 u32 dma_len);
376
371 /* Reset the DMA engine entirely. On return, ESP interrupts 377 /* Reset the DMA engine entirely. On return, ESP interrupts
372 * should be enabled. Often the interrupt enabling is 378 * should be enabled. Often the interrupt enabling is
373 * controlled in the DMA engine. 379 * controlled in the DMA engine.
@@ -471,6 +477,7 @@ struct esp {
471#define ESP_FLAG_DOING_SLOWCMD 0x00000004 477#define ESP_FLAG_DOING_SLOWCMD 0x00000004
472#define ESP_FLAG_WIDE_CAPABLE 0x00000008 478#define ESP_FLAG_WIDE_CAPABLE 0x00000008
473#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 479#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
480#define ESP_FLAG_DISABLE_SYNC 0x00000020
474 481
475 u8 select_state; 482 u8 select_state;
476#define ESP_SELECT_NONE 0x00 /* Not selecting */ 483#define ESP_SELECT_NONE 0x00 /* Not selecting */
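
The two esp_scsi.h additions above, ESP_FLAG_DISABLE_SYNC and the dma_length_limit() driver op, are consumed by the new mac_esp front end later in this diff. Below is a minimal standalone sketch of such a length-limit hook, mirroring mac_esp_dma_length_limit() further down, which simply clamps a transfer to what the chip's 16-bit transfer counter can express; the function name and the test value in main() are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Clamp one DMA transfer to a 16-bit transfer counter, as the mac_esp
 * dma_length_limit() implementation in this diff does. */
static uint32_t dma_length_limit_16bit(uint32_t dma_addr, uint32_t dma_len)
{
        (void)dma_addr;                         /* no address-based restriction here */
        return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}

int main(void)
{
        printf("%u\n", dma_length_limit_16bit(0, 200000));      /* 65535 */
        return 0;
}
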
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c264a8c5f01e..3690360d7a79 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -199,9 +199,13 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
199 if (!shost->can_queue) { 199 if (!shost->can_queue) {
200 printk(KERN_ERR "%s: can_queue = 0 no longer supported\n", 200 printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
201 sht->name); 201 sht->name);
202 goto out; 202 goto fail;
203 } 203 }
204 204
205 error = scsi_setup_command_freelist(shost);
206 if (error)
207 goto fail;
208
205 if (!shost->shost_gendev.parent) 209 if (!shost->shost_gendev.parent)
206 shost->shost_gendev.parent = dev ? dev : &platform_bus; 210 shost->shost_gendev.parent = dev ? dev : &platform_bus;
207 211
@@ -255,6 +259,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
255 out_del_gendev: 259 out_del_gendev:
256 device_del(&shost->shost_gendev); 260 device_del(&shost->shost_gendev);
257 out: 261 out:
262 scsi_destroy_command_freelist(shost);
263 fail:
258 return error; 264 return error;
259} 265}
260EXPORT_SYMBOL(scsi_add_host); 266EXPORT_SYMBOL(scsi_add_host);
@@ -284,6 +290,11 @@ static void scsi_host_dev_release(struct device *dev)
284 kfree(shost); 290 kfree(shost);
285} 291}
286 292
293struct device_type scsi_host_type = {
294 .name = "scsi_host",
295 .release = scsi_host_dev_release,
296};
297
287/** 298/**
288 * scsi_host_alloc - register a scsi host adapter instance. 299 * scsi_host_alloc - register a scsi host adapter instance.
289 * @sht: pointer to scsi host template 300 * @sht: pointer to scsi host template
@@ -376,33 +387,31 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
376 else 387 else
377 shost->dma_boundary = 0xffffffff; 388 shost->dma_boundary = 0xffffffff;
378 389
379 rval = scsi_setup_command_freelist(shost);
380 if (rval)
381 goto fail_kfree;
382
383 device_initialize(&shost->shost_gendev); 390 device_initialize(&shost->shost_gendev);
384 snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d", 391 snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
385 shost->host_no); 392 shost->host_no);
386 shost->shost_gendev.release = scsi_host_dev_release; 393#ifndef CONFIG_SYSFS_DEPRECATED
394 shost->shost_gendev.bus = &scsi_bus_type;
395#endif
396 shost->shost_gendev.type = &scsi_host_type;
387 397
388 device_initialize(&shost->shost_dev); 398 device_initialize(&shost->shost_dev);
389 shost->shost_dev.parent = &shost->shost_gendev; 399 shost->shost_dev.parent = &shost->shost_gendev;
390 shost->shost_dev.class = &shost_class; 400 shost->shost_dev.class = &shost_class;
391 snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d", 401 snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d",
392 shost->host_no); 402 shost->host_no);
403 shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
393 404
394 shost->ehandler = kthread_run(scsi_error_handler, shost, 405 shost->ehandler = kthread_run(scsi_error_handler, shost,
395 "scsi_eh_%d", shost->host_no); 406 "scsi_eh_%d", shost->host_no);
396 if (IS_ERR(shost->ehandler)) { 407 if (IS_ERR(shost->ehandler)) {
397 rval = PTR_ERR(shost->ehandler); 408 rval = PTR_ERR(shost->ehandler);
398 goto fail_destroy_freelist; 409 goto fail_kfree;
399 } 410 }
400 411
401 scsi_proc_hostdir_add(shost->hostt); 412 scsi_proc_hostdir_add(shost->hostt);
402 return shost; 413 return shost;
403 414
404 fail_destroy_freelist:
405 scsi_destroy_command_freelist(shost);
406 fail_kfree: 415 fail_kfree:
407 kfree(shost); 416 kfree(shost);
408 return NULL; 417 return NULL;
@@ -496,7 +505,7 @@ void scsi_exit_hosts(void)
496 505
497int scsi_is_host_device(const struct device *dev) 506int scsi_is_host_device(const struct device *dev)
498{ 507{
499 return dev->release == scsi_host_dev_release; 508 return dev->type == &scsi_host_type;
500} 509}
501EXPORT_SYMBOL(scsi_is_host_device); 510EXPORT_SYMBOL(scsi_is_host_device);
502 511
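
Besides moving command-freelist setup from scsi_host_alloc() into scsi_add_host(), the hosts.c hunks above introduce scsi_host_type so that scsi_is_host_device() can compare a device's type descriptor instead of its release callback. A standalone sketch of that identification pattern follows, using simplified stand-in structs; the *_example names are not kernel types.

#include <stdbool.h>
#include <stdio.h>

struct device_type_example { const char *name; };
struct device_example      { const struct device_type_example *type; };

static const struct device_type_example scsi_host_type_example = {
        .name = "scsi_host",
};

/* Identify a device by the type descriptor it points at rather than by
 * comparing its release callback -- the idea behind the
 * scsi_is_host_device() change above. */
static bool is_scsi_host(const struct device_example *dev)
{
        return dev->type == &scsi_host_type_example;
}

int main(void)
{
        struct device_example dev = { .type = &scsi_host_type_example };

        printf("is_scsi_host: %d\n", is_scsi_host(&dev));       /* 1 */
        return 0;
}
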
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 93c3fc20aa59..32553639aded 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -258,8 +258,7 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
258 258
259 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 259 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
260 /* force an abort */ 260 /* force an abort */
261 hwif->OUTB(WIN_IDLEIMMEDIATE, 261 hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
262 hwif->io_ports[IDE_COMMAND_OFFSET]);
263 262
264 rq->errors++; 263 rq->errors++;
265 264
@@ -393,7 +392,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
393 printk ("ide-scsi: %s: DMA complete\n", drive->name); 392 printk ("ide-scsi: %s: DMA complete\n", drive->name);
394#endif /* IDESCSI_DEBUG_LOG */ 393#endif /* IDESCSI_DEBUG_LOG */
395 pc->xferred = pc->req_xfer; 394 pc->xferred = pc->req_xfer;
396 (void) HWIF(drive)->ide_dma_end(drive); 395 (void)hwif->dma_ops->dma_end(drive);
397 } 396 }
398 397
399 /* Clear the interrupt */ 398 /* Clear the interrupt */
@@ -410,9 +409,9 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
410 idescsi_end_request (drive, 1, 0); 409 idescsi_end_request (drive, 1, 0);
411 return ide_stopped; 410 return ide_stopped;
412 } 411 }
413 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | 412 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
414 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); 413 hwif->INB(hwif->io_ports.lbam_addr);
415 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 414 ireason = hwif->INB(hwif->io_ports.nsect_addr);
416 415
417 if (ireason & CD) { 416 if (ireason & CD) {
418 printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n"); 417 printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
@@ -485,7 +484,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
485 "initiated yet DRQ isn't asserted\n"); 484 "initiated yet DRQ isn't asserted\n");
486 return startstop; 485 return startstop;
487 } 486 }
488 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); 487 ireason = hwif->INB(hwif->io_ports.nsect_addr);
489 if ((ireason & CD) == 0 || (ireason & IO)) { 488 if ((ireason & CD) == 0 || (ireason & IO)) {
490 printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while " 489 printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while "
491 "issuing a packet command\n"); 490 "issuing a packet command\n");
@@ -498,7 +497,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
498 drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12); 497 drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12);
499 if (pc->flags & PC_FLAG_DMA_OK) { 498 if (pc->flags & PC_FLAG_DMA_OK) {
500 pc->flags |= PC_FLAG_DMA_IN_PROGRESS; 499 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
501 hwif->dma_start(drive); 500 hwif->dma_ops->dma_start(drive);
502 } 501 }
503 return ide_started; 502 return ide_started;
504} 503}
@@ -560,7 +559,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
560 559
561 if (drive->using_dma && !idescsi_map_sg(drive, pc)) { 560 if (drive->using_dma && !idescsi_map_sg(drive, pc)) {
562 hwif->sg_mapped = 1; 561 hwif->sg_mapped = 1;
563 dma = !hwif->dma_setup(drive); 562 dma = !hwif->dma_ops->dma_setup(drive);
564 hwif->sg_mapped = 0; 563 hwif->sg_mapped = 0;
565 } 564 }
566 565
@@ -575,7 +574,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
575 return ide_started; 574 return ide_started;
576 } else { 575 } else {
577 /* Issue the packet command */ 576 /* Issue the packet command */
578 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); 577 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
579 return idescsi_transfer_pc(drive); 578 return idescsi_transfer_pc(drive);
580 } 579 }
581} 580}
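
The ide-scsi.c hunks above track two IDE-core interface changes: the io_ports array addressed with magic offsets became a structure with named members, and the hwif DMA methods now sit behind a dma_ops pointer. A standalone sketch of the named-port layout follows, with illustrative legacy port numbers; the *_example struct is not the kernel's ide_io_ports.

#include <stdio.h>

/* Simplified stand-in for the new hwif->io_ports layout: named members
 * instead of array indices such as IDE_COMMAND_OFFSET. */
struct ide_io_ports_example {
        unsigned long nsect_addr;
        unsigned long lbam_addr;
        unsigned long lbah_addr;
        unsigned long command_addr;
};

int main(void)
{
        /* Legacy primary-channel port numbers, purely for illustration. */
        struct ide_io_ports_example io = {
                .nsect_addr   = 0x1f2,
                .lbam_addr    = 0x1f4,
                .lbah_addr    = 0x1f5,
                .command_addr = 0x1f7,
        };

        printf("command register at %#lx\n", io.command_addr);
        return 0;
}
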
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 5d231015bb20..b2d481dd3750 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -217,11 +217,15 @@ static int __devexit esp_jazz_remove(struct platform_device *dev)
217 return 0; 217 return 0;
218} 218}
219 219
220/* work with hotplug and coldplug */
221MODULE_ALIAS("platform:jazz_esp");
222
220static struct platform_driver esp_jazz_driver = { 223static struct platform_driver esp_jazz_driver = {
221 .probe = esp_jazz_probe, 224 .probe = esp_jazz_probe,
222 .remove = __devexit_p(esp_jazz_remove), 225 .remove = __devexit_p(esp_jazz_remove),
223 .driver = { 226 .driver = {
224 .name = "jazz_esp", 227 .name = "jazz_esp",
228 .owner = THIS_MODULE,
225 }, 229 },
226}; 230};
227 231
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a9fbb3f88659..960baaf11fb1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -182,8 +182,8 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
182 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); 182 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
183} 183}
184static ssize_t 184static ssize_t
185lpfc_state_show(struct device *dev, struct device_attribute *attr, 185lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
186 char *buf) 186 char *buf)
187{ 187{
188 struct Scsi_Host *shost = class_to_shost(dev); 188 struct Scsi_Host *shost = class_to_shost(dev);
189 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 189 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -936,7 +936,7 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
936static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); 936static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
937static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 937static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
938static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 938static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
939static DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL); 939static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
940static DEVICE_ATTR(option_rom_version, S_IRUGO, 940static DEVICE_ATTR(option_rom_version, S_IRUGO,
941 lpfc_option_rom_version_show, NULL); 941 lpfc_option_rom_version_show, NULL);
942static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 942static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1666,7 +1666,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
1666 &dev_attr_fwrev, 1666 &dev_attr_fwrev,
1667 &dev_attr_hdw, 1667 &dev_attr_hdw,
1668 &dev_attr_option_rom_version, 1668 &dev_attr_option_rom_version,
1669 &dev_attr_state, 1669 &dev_attr_link_state,
1670 &dev_attr_num_discovered_ports, 1670 &dev_attr_num_discovered_ports,
1671 &dev_attr_lpfc_drvr_version, 1671 &dev_attr_lpfc_drvr_version,
1672 &dev_attr_lpfc_temp_sensor, 1672 &dev_attr_lpfc_temp_sensor,
@@ -1714,7 +1714,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
1714 1714
1715struct device_attribute *lpfc_vport_attrs[] = { 1715struct device_attribute *lpfc_vport_attrs[] = {
1716 &dev_attr_info, 1716 &dev_attr_info,
1717 &dev_attr_state, 1717 &dev_attr_link_state,
1718 &dev_attr_num_discovered_ports, 1718 &dev_attr_num_discovered_ports,
1719 &dev_attr_lpfc_drvr_version, 1719 &dev_attr_lpfc_drvr_version,
1720 &dev_attr_lpfc_log_verbose, 1720 &dev_attr_lpfc_log_verbose,
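
Renaming the lpfc attribute from state to link_state touches both the DEVICE_ATTR() definition and the lpfc_hba_attrs[]/lpfc_vport_attrs[] tables because DEVICE_ATTR(name, ...) emits a variable called dev_attr_<name>. A simplified standalone sketch of that macro pattern follows; it is not the kernel's actual DEVICE_ATTR definition.

#include <stdio.h>

struct device_attribute_example { const char *name; };

/* Simplified stand-in for DEVICE_ATTR(): the macro stamps out a variable
 * named dev_attr_<name>, which is why the attribute tables change too. */
#define DEVICE_ATTR_EXAMPLE(_name) \
        struct device_attribute_example dev_attr_##_name = { .name = #_name }

static DEVICE_ATTR_EXAMPLE(link_state);

int main(void)
{
        printf("sysfs file name: %s\n", dev_attr_link_state.name);
        return 0;
}
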
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
new file mode 100644
index 000000000000..cd37bd69a115
--- /dev/null
+++ b/drivers/scsi/mac_esp.c
@@ -0,0 +1,657 @@
1/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
2 *
3 * Adapted from jazz_esp.c and the old mac_esp.c.
4 *
5 * The pseudo DMA algorithm is based on the one used in NetBSD.
6 * See sys/arch/mac68k/obio/esp.c for some background information.
7 *
8 * Copyright (C) 2007-2008 Finn Thain
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/platform_device.h>
17#include <linux/dma-mapping.h>
18#include <linux/scatterlist.h>
19#include <linux/delay.h>
20#include <linux/io.h>
21#include <linux/nubus.h>
22
23#include <asm/irq.h>
24#include <asm/dma.h>
25
26#include <asm/macints.h>
27#include <asm/macintosh.h>
28
29#include <scsi/scsi_host.h>
30
31#include "esp_scsi.h"
32
33#define DRV_MODULE_NAME "mac_esp"
34#define PFX DRV_MODULE_NAME ": "
35#define DRV_VERSION "1.000"
36#define DRV_MODULE_RELDATE "Sept 15, 2007"
37
38#define MAC_ESP_IO_BASE 0x50F00000
39#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000)
40#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000)
41#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000)
42#define MAC_ESP_REGS_SPACING 0x402
43#define MAC_ESP_PDMA_REG 0xF9800024
44#define MAC_ESP_PDMA_REG_SPACING 0x4
45#define MAC_ESP_PDMA_IO_OFFSET 0x100
46
47#define esp_read8(REG) mac_esp_read8(esp, REG)
48#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG)
49
50struct mac_esp_priv {
51 struct esp *esp;
52 void __iomem *pdma_regs;
53 void __iomem *pdma_io;
54 int error;
55};
56static struct platform_device *internal_esp, *external_esp;
57
58#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
59 platform_get_drvdata((struct platform_device *) \
60 (esp->dev)))
61
62static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
63{
64 nubus_writeb(val, esp->regs + reg * 16);
65}
66
67static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
68{
69 return nubus_readb(esp->regs + reg * 16);
70}
71
72/* For pseudo DMA and PIO we need the virtual address
73 * so this address mapping is the identity mapping.
74 */
75
76static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
77 size_t sz, int dir)
78{
79 return (dma_addr_t)buf;
80}
81
82static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
83 int num_sg, int dir)
84{
85 int i;
86
87 for (i = 0; i < num_sg; i++)
88 sg[i].dma_address = (u32)sg_virt(&sg[i]);
89 return num_sg;
90}
91
92static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
93 size_t sz, int dir)
94{
95 /* Nothing to do. */
96}
97
98static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
99 int num_sg, int dir)
100{
101 /* Nothing to do. */
102}
103
104static void mac_esp_reset_dma(struct esp *esp)
105{
106 /* Nothing to do. */
107}
108
109static void mac_esp_dma_drain(struct esp *esp)
110{
111 /* Nothing to do. */
112}
113
114static void mac_esp_dma_invalidate(struct esp *esp)
115{
116 /* Nothing to do. */
117}
118
119static int mac_esp_dma_error(struct esp *esp)
120{
121 return MAC_ESP_GET_PRIV(esp)->error;
122}
123
124static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
125{
126 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
127 int i = 500000;
128
129 do {
130 if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
131 return 0;
132
133 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
134 return 1;
135
136 udelay(2);
137 } while (--i);
138
139 printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
140 esp_read8(ESP_STATUS));
141 mep->error = 1;
142 return 1;
143}
144
145static inline int mac_esp_wait_for_dreq(struct esp *esp)
146{
147 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
148 int i = 500000;
149
150 do {
151 if (mep->pdma_regs == NULL) {
152 if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
153 return 0;
154 } else {
155 if (nubus_readl(mep->pdma_regs) & 0x200)
156 return 0;
157 }
158
159 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
160 return 1;
161
162 udelay(2);
163 } while (--i);
164
165 printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
166 esp_read8(ESP_STATUS));
167 mep->error = 1;
168 return 1;
169}
170
171#define MAC_ESP_PDMA_LOOP(operands) \
172 asm volatile ( \
173 " tstw %2 \n" \
174 " jbeq 20f \n" \
175 "1: movew " operands " \n" \
176 "2: movew " operands " \n" \
177 "3: movew " operands " \n" \
178 "4: movew " operands " \n" \
179 "5: movew " operands " \n" \
180 "6: movew " operands " \n" \
181 "7: movew " operands " \n" \
182 "8: movew " operands " \n" \
183 "9: movew " operands " \n" \
184 "10: movew " operands " \n" \
185 "11: movew " operands " \n" \
186 "12: movew " operands " \n" \
187 "13: movew " operands " \n" \
188 "14: movew " operands " \n" \
189 "15: movew " operands " \n" \
190 "16: movew " operands " \n" \
191 " subqw #1,%2 \n" \
192 " jbne 1b \n" \
193 "20: tstw %3 \n" \
194 " jbeq 30f \n" \
195 "21: movew " operands " \n" \
196 " subqw #1,%3 \n" \
197 " jbne 21b \n" \
198 "30: tstw %4 \n" \
199 " jbeq 40f \n" \
200 "31: moveb " operands " \n" \
201 "32: nop \n" \
202 "40: \n" \
203 " \n" \
204 " .section __ex_table,\"a\" \n" \
205 " .align 4 \n" \
206 " .long 1b,40b \n" \
207 " .long 2b,40b \n" \
208 " .long 3b,40b \n" \
209 " .long 4b,40b \n" \
210 " .long 5b,40b \n" \
211 " .long 6b,40b \n" \
212 " .long 7b,40b \n" \
213 " .long 8b,40b \n" \
214 " .long 9b,40b \n" \
215 " .long 10b,40b \n" \
216 " .long 11b,40b \n" \
217 " .long 12b,40b \n" \
218 " .long 13b,40b \n" \
219 " .long 14b,40b \n" \
220 " .long 15b,40b \n" \
221 " .long 16b,40b \n" \
222 " .long 21b,40b \n" \
223 " .long 31b,40b \n" \
224 " .long 32b,40b \n" \
225 " .previous \n" \
226 : "+a" (addr) \
227 : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
228
229static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
230 u32 dma_count, int write, u8 cmd)
231{
232 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
233 unsigned long flags;
234
235 local_irq_save(flags);
236
237 mep->error = 0;
238
239 if (!write)
240 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
241
242 esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
243 esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);
244
245 scsi_esp_cmd(esp, cmd);
246
247 do {
248 unsigned int count32 = esp_count >> 5;
249 unsigned int count2 = (esp_count & 0x1F) >> 1;
250 unsigned int start_addr = addr;
251
252 if (mac_esp_wait_for_dreq(esp))
253 break;
254
255 if (write) {
256 MAC_ESP_PDMA_LOOP("%1@,%0@+");
257
258 esp_count -= addr - start_addr;
259 } else {
260 unsigned int n;
261
262 MAC_ESP_PDMA_LOOP("%0@+,%1@");
263
264 if (mac_esp_wait_for_empty_fifo(esp))
265 break;
266
267 n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
268 addr = start_addr + esp_count - n;
269 esp_count = n;
270 }
271 } while (esp_count);
272
273 local_irq_restore(flags);
274}
275
276/*
277 * Programmed IO routines follow.
278 */
279
280static inline int mac_esp_wait_for_fifo(struct esp *esp)
281{
282 int i = 500000;
283
284 do {
285 if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
286 return 0;
287
288 udelay(2);
289 } while (--i);
290
291 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
292 esp_read8(ESP_STATUS));
293 return 1;
294}
295
296static inline int mac_esp_wait_for_intr(struct esp *esp)
297{
298 int i = 500000;
299
300 do {
301 esp->sreg = esp_read8(ESP_STATUS);
302 if (esp->sreg & ESP_STAT_INTR)
303 return 0;
304
305 udelay(2);
306 } while (--i);
307
308 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
309 return 1;
310}
311
312#define MAC_ESP_PIO_LOOP(operands, reg1) \
313 asm volatile ( \
314 "1: moveb " operands " \n" \
315 " subqw #1,%1 \n" \
316 " jbne 1b \n" \
317 : "+a" (addr), "+r" (reg1) \
318 : "a" (fifo))
319
320#define MAC_ESP_PIO_FILL(operands, reg1) \
321 asm volatile ( \
322 " moveb " operands " \n" \
323 " moveb " operands " \n" \
324 " moveb " operands " \n" \
325 " moveb " operands " \n" \
326 " moveb " operands " \n" \
327 " moveb " operands " \n" \
328 " moveb " operands " \n" \
329 " moveb " operands " \n" \
330 " moveb " operands " \n" \
331 " moveb " operands " \n" \
332 " moveb " operands " \n" \
333 " moveb " operands " \n" \
334 " moveb " operands " \n" \
335 " moveb " operands " \n" \
336 " moveb " operands " \n" \
337 " moveb " operands " \n" \
338 " subqw #8,%1 \n" \
339 " subqw #8,%1 \n" \
340 : "+a" (addr), "+r" (reg1) \
341 : "a" (fifo))
342
343#define MAC_ESP_FIFO_SIZE 16
344
345static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
346 u32 dma_count, int write, u8 cmd)
347{
348 unsigned long flags;
349 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
350 u8 *fifo = esp->regs + ESP_FDATA * 16;
351
352 local_irq_save(flags);
353
354 cmd &= ~ESP_CMD_DMA;
355 mep->error = 0;
356
357 if (write) {
358 scsi_esp_cmd(esp, cmd);
359
360 if (!mac_esp_wait_for_intr(esp)) {
361 if (mac_esp_wait_for_fifo(esp))
362 esp_count = 0;
363 } else {
364 esp_count = 0;
365 }
366 } else {
367 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
368
369 if (esp_count >= MAC_ESP_FIFO_SIZE)
370 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
371 else
372 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
373
374 scsi_esp_cmd(esp, cmd);
375 }
376
377 while (esp_count) {
378 unsigned int n;
379
380 if (mac_esp_wait_for_intr(esp)) {
381 mep->error = 1;
382 break;
383 }
384
385 if (esp->sreg & ESP_STAT_SPAM) {
386 printk(KERN_ERR PFX "gross error\n");
387 mep->error = 1;
388 break;
389 }
390
391 n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
392
393 if (write) {
394 if (n > esp_count)
395 n = esp_count;
396 esp_count -= n;
397
398 MAC_ESP_PIO_LOOP("%2@,%0@+", n);
399
400 if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
401 break;
402
403 if (esp_count) {
404 esp->ireg = esp_read8(ESP_INTRPT);
405 if (esp->ireg & ESP_INTR_DC)
406 break;
407
408 scsi_esp_cmd(esp, ESP_CMD_TI);
409 }
410 } else {
411 esp->ireg = esp_read8(ESP_INTRPT);
412 if (esp->ireg & ESP_INTR_DC)
413 break;
414
415 n = MAC_ESP_FIFO_SIZE - n;
416 if (n > esp_count)
417 n = esp_count;
418
419 if (n == MAC_ESP_FIFO_SIZE) {
420 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
421 } else {
422 esp_count -= n;
423 MAC_ESP_PIO_LOOP("%0@+,%2@", n);
424 }
425
426 scsi_esp_cmd(esp, ESP_CMD_TI);
427 }
428 }
429
430 local_irq_restore(flags);
431}
432
433static int mac_esp_irq_pending(struct esp *esp)
434{
435 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
436 return 1;
437 return 0;
438}
439
440static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
441{
442 return dma_len > 0xFFFF ? 0xFFFF : dma_len;
443}
444
445static struct esp_driver_ops mac_esp_ops = {
446 .esp_write8 = mac_esp_write8,
447 .esp_read8 = mac_esp_read8,
448 .map_single = mac_esp_map_single,
449 .map_sg = mac_esp_map_sg,
450 .unmap_single = mac_esp_unmap_single,
451 .unmap_sg = mac_esp_unmap_sg,
452 .irq_pending = mac_esp_irq_pending,
453 .dma_length_limit = mac_esp_dma_length_limit,
454 .reset_dma = mac_esp_reset_dma,
455 .dma_drain = mac_esp_dma_drain,
456 .dma_invalidate = mac_esp_dma_invalidate,
457 .send_dma_cmd = mac_esp_send_pdma_cmd,
458 .dma_error = mac_esp_dma_error,
459};
460
461static int __devinit esp_mac_probe(struct platform_device *dev)
462{
463 struct scsi_host_template *tpnt = &scsi_esp_template;
464 struct Scsi_Host *host;
465 struct esp *esp;
466 int err;
467 int chips_present;
468 struct mac_esp_priv *mep;
469
470 if (!MACH_IS_MAC)
471 return -ENODEV;
472
473 switch (macintosh_config->scsi_type) {
474 case MAC_SCSI_QUADRA:
475 case MAC_SCSI_QUADRA3:
476 chips_present = 1;
477 break;
478 case MAC_SCSI_QUADRA2:
479 if ((macintosh_config->ident == MAC_MODEL_Q900) ||
480 (macintosh_config->ident == MAC_MODEL_Q950))
481 chips_present = 2;
482 else
483 chips_present = 1;
484 break;
485 default:
486 chips_present = 0;
487 }
488
489 if (dev->id + 1 > chips_present)
490 return -ENODEV;
491
492 host = scsi_host_alloc(tpnt, sizeof(struct esp));
493
494 err = -ENOMEM;
495 if (!host)
496 goto fail;
497
498 host->max_id = 8;
499 host->use_clustering = DISABLE_CLUSTERING;
500 esp = shost_priv(host);
501
502 esp->host = host;
503 esp->dev = dev;
504
505 esp->command_block = kzalloc(16, GFP_KERNEL);
506 if (!esp->command_block)
507 goto fail_unlink;
508 esp->command_block_dma = (dma_addr_t)esp->command_block;
509
510 esp->scsi_id = 7;
511 host->this_id = esp->scsi_id;
512 esp->scsi_id_mask = 1 << esp->scsi_id;
513
514 mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
515 if (!mep)
516 goto fail_free_command_block;
517 mep->esp = esp;
518 platform_set_drvdata(dev, mep);
519
520 switch (macintosh_config->scsi_type) {
521 case MAC_SCSI_QUADRA:
522 esp->cfreq = 16500000;
523 esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
524 mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
525 mep->pdma_regs = NULL;
526 break;
527 case MAC_SCSI_QUADRA2:
528 esp->cfreq = 25000000;
529 esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
530 dev->id * MAC_ESP_REGS_SPACING);
531 mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
532 mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
533 dev->id * MAC_ESP_PDMA_REG_SPACING);
534 nubus_writel(0x1d1, mep->pdma_regs);
535 break;
536 case MAC_SCSI_QUADRA3:
537 /* These quadras have a real DMA controller (the PSC) but we
538 * don't know how to drive it so we must use PIO instead.
539 */
540 esp->cfreq = 25000000;
541 esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
542 mep->pdma_io = NULL;
543 mep->pdma_regs = NULL;
544 break;
545 }
546
547 esp->ops = &mac_esp_ops;
548 if (mep->pdma_io == NULL) {
549 printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
550 esp_write8(0, ESP_TCLOW);
551 esp_write8(0, ESP_TCMED);
552 esp->flags = ESP_FLAG_DISABLE_SYNC;
553 mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
554 } else {
555 printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
556 }
557
558 host->irq = IRQ_MAC_SCSI;
559 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
560 esp);
561 if (err < 0)
562 goto fail_free_priv;
563
564 err = scsi_esp_register(esp, &dev->dev);
565 if (err)
566 goto fail_free_irq;
567
568 return 0;
569
570fail_free_irq:
571 free_irq(host->irq, esp);
572fail_free_priv:
573 kfree(mep);
574fail_free_command_block:
575 kfree(esp->command_block);
576fail_unlink:
577 scsi_host_put(host);
578fail:
579 return err;
580}
581
582static int __devexit esp_mac_remove(struct platform_device *dev)
583{
584 struct mac_esp_priv *mep = platform_get_drvdata(dev);
585 struct esp *esp = mep->esp;
586 unsigned int irq = esp->host->irq;
587
588 scsi_esp_unregister(esp);
589
590 free_irq(irq, esp);
591
592 kfree(mep);
593
594 kfree(esp->command_block);
595
596 scsi_host_put(esp->host);
597
598 return 0;
599}
600
601static struct platform_driver esp_mac_driver = {
602 .probe = esp_mac_probe,
603 .remove = __devexit_p(esp_mac_remove),
604 .driver = {
605 .name = DRV_MODULE_NAME,
606 },
607};
608
609static int __init mac_esp_init(void)
610{
611 int err;
612
613 err = platform_driver_register(&esp_mac_driver);
614 if (err)
615 return err;
616
617 internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
618 if (internal_esp && platform_device_add(internal_esp)) {
619 platform_device_put(internal_esp);
620 internal_esp = NULL;
621 }
622
623 external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
624 if (external_esp && platform_device_add(external_esp)) {
625 platform_device_put(external_esp);
626 external_esp = NULL;
627 }
628
629 if (internal_esp || external_esp) {
630 return 0;
631 } else {
632 platform_driver_unregister(&esp_mac_driver);
633 return -ENOMEM;
634 }
635}
636
637static void __exit mac_esp_exit(void)
638{
639 platform_driver_unregister(&esp_mac_driver);
640
641 if (internal_esp) {
642 platform_device_unregister(internal_esp);
643 internal_esp = NULL;
644 }
645 if (external_esp) {
646 platform_device_unregister(external_esp);
647 external_esp = NULL;
648 }
649}
650
651MODULE_DESCRIPTION("Mac ESP SCSI driver");
652MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
653MODULE_LICENSE("GPL v2");
654MODULE_VERSION(DRV_VERSION);
655
656module_init(mac_esp_init);
657module_exit(mac_esp_exit);
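
mac_esp drives the ESP with pseudo DMA, moving data by CPU through an I/O window, and falls back to PIO on the Quadra models where no usable PDMA window is set up. In the write == 0 branch of mac_esp_send_pdma_cmd() above, the residual after each burst is recovered from the chip's 16-bit transfer counter and the buffer pointer is advanced by the difference. A standalone sketch of that bookkeeping follows, with a hypothetical counter readback.

#include <stdint.h>
#include <stdio.h>

/* Residual bookkeeping from the write == 0 branch of
 * mac_esp_send_pdma_cmd(): after a burst, the transfer counter
 * (TCMED:TCLOW) holds the bytes still outstanding, so everything before
 * that point was transferred.  Takes the pre-burst address and count. */
static void pdma_recompute_residual(uint32_t *addr, uint32_t *esp_count,
                                    uint8_t tcmed, uint8_t tclow)
{
        uint32_t start_addr = *addr;
        uint32_t n = ((uint32_t)tcmed << 8) + tclow;

        *addr = start_addr + *esp_count - n;    /* bytes that made it out */
        *esp_count = n;                         /* bytes still to go */
}

int main(void)
{
        uint32_t addr = 0x1000, count = 512;

        /* Hypothetical: the counter reads back 64 after the burst. */
        pdma_recompute_residual(&addr, &count, 0x00, 0x40);
        printf("addr %#x, %u bytes remaining\n", addr, count);  /* 0x11c0, 64 */
        return 0;
}
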
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d61df036910c..287690853caf 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -609,8 +609,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
609} 609}
610 610
611static ssize_t 611static ssize_t
612qla2x00_state_show(struct device *dev, struct device_attribute *attr, 612qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
613 char *buf) 613 char *buf)
614{ 614{
615 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 615 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
616 int len = 0; 616 int len = 0;
@@ -814,7 +814,7 @@ static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
814static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL); 814static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
815static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL); 815static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
816static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL); 816static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
817static DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL); 817static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
818static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store); 818static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
819static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 819static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
820 qla2x00_zio_timer_store); 820 qla2x00_zio_timer_store);
@@ -838,7 +838,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
838 &dev_attr_model_name, 838 &dev_attr_model_name,
839 &dev_attr_model_desc, 839 &dev_attr_model_desc,
840 &dev_attr_pci_info, 840 &dev_attr_pci_info,
841 &dev_attr_state, 841 &dev_attr_link_state,
842 &dev_attr_zio, 842 &dev_attr_zio,
843 &dev_attr_zio_timer, 843 &dev_attr_zio_timer,
844 &dev_attr_beacon, 844 &dev_attr_beacon,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9d12d9f26209..cbef785765cf 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -38,78 +38,38 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
38} 38}
39 39
40static int 40static int
41qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram, 41qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
42 uint32_t cram_size, uint32_t *ext_mem, void **nxt) 42 uint32_t ram_dwords, void **nxt)
43{ 43{
44 int rval; 44 int rval;
45 uint32_t cnt, stat, timer, risc_address, ext_mem_cnt; 45 uint32_t cnt, stat, timer, dwords, idx;
46 uint16_t mb[4]; 46 uint16_t mb0;
47 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 47 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
48 dma_addr_t dump_dma = ha->gid_list_dma;
49 uint32_t *dump = (uint32_t *)ha->gid_list;
48 50
49 rval = QLA_SUCCESS; 51 rval = QLA_SUCCESS;
50 risc_address = ext_mem_cnt = 0; 52 mb0 = 0;
51 memset(mb, 0, sizeof(mb));
52 53
53 /* Code RAM. */ 54 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
54 risc_address = 0x20000;
55 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
56 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 55 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
57 56
58 for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS; 57 dwords = GID_LIST_SIZE / 4;
59 cnt++, risc_address++) { 58 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
60 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address)); 59 cnt += dwords, addr += dwords) {
61 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address)); 60 if (cnt + dwords > ram_dwords)
62 RD_REG_WORD(&reg->mailbox8); 61 dwords = ram_dwords - cnt;
63 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
64
65 for (timer = 6000000; timer; timer--) {
66 /* Check for pending interrupts. */
67 stat = RD_REG_DWORD(&reg->host_status);
68 if (stat & HSRX_RISC_INT) {
69 stat &= 0xff;
70 62
71 if (stat == 0x1 || stat == 0x2 || 63 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
72 stat == 0x10 || stat == 0x11) { 64 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
73 set_bit(MBX_INTERRUPT,
74 &ha->mbx_cmd_flags);
75 65
76 mb[0] = RD_REG_WORD(&reg->mailbox0); 66 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
77 mb[2] = RD_REG_WORD(&reg->mailbox2); 67 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
78 mb[3] = RD_REG_WORD(&reg->mailbox3); 68 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
69 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
79 70
80 WRT_REG_DWORD(&reg->hccr, 71 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
81 HCCRX_CLR_RISC_INT); 72 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
82 RD_REG_DWORD(&reg->hccr);
83 break;
84 }
85
86 /* Clear this intr; it wasn't a mailbox intr */
87 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
88 RD_REG_DWORD(&reg->hccr);
89 }
90 udelay(5);
91 }
92
93 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
94 rval = mb[0] & MBS_MASK;
95 code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
96 } else {
97 rval = QLA_FUNCTION_FAILED;
98 }
99 }
100
101 if (rval == QLA_SUCCESS) {
102 /* External Memory. */
103 risc_address = 0x100000;
104 ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
105 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
106 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
107 }
108 for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
109 cnt++, risc_address++) {
110 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
111 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
112 RD_REG_WORD(&reg->mailbox8);
113 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); 73 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
114 74
115 for (timer = 6000000; timer; timer--) { 75 for (timer = 6000000; timer; timer--) {
@@ -123,9 +83,7 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
123 set_bit(MBX_INTERRUPT, 83 set_bit(MBX_INTERRUPT,
124 &ha->mbx_cmd_flags); 84 &ha->mbx_cmd_flags);
125 85
126 mb[0] = RD_REG_WORD(&reg->mailbox0); 86 mb0 = RD_REG_WORD(&reg->mailbox0);
127 mb[2] = RD_REG_WORD(&reg->mailbox2);
128 mb[3] = RD_REG_WORD(&reg->mailbox3);
129 87
130 WRT_REG_DWORD(&reg->hccr, 88 WRT_REG_DWORD(&reg->hccr,
131 HCCRX_CLR_RISC_INT); 89 HCCRX_CLR_RISC_INT);
@@ -141,17 +99,34 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
141 } 99 }
142 100
143 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 101 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
144 rval = mb[0] & MBS_MASK; 102 rval = mb0 & MBS_MASK;
145 ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]); 103 for (idx = 0; idx < dwords; idx++)
104 ram[cnt + idx] = swab32(dump[idx]);
146 } else { 105 } else {
147 rval = QLA_FUNCTION_FAILED; 106 rval = QLA_FUNCTION_FAILED;
148 } 107 }
149 } 108 }
150 109
151 *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt]: NULL; 110 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
152 return rval; 111 return rval;
153} 112}
154 113
114static int
115qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
116 uint32_t cram_size, void **nxt)
117{
118 int rval;
119
120 /* Code RAM. */
121 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
122 if (rval != QLA_SUCCESS)
123 return rval;
124
125 /* External Memory. */
126 return qla24xx_dump_ram(ha, 0x100000, *nxt,
127 ha->fw_memory_size - 0x100000 + 1, nxt);
128}
129
155static uint32_t * 130static uint32_t *
156qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, 131qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
157 uint32_t count, uint32_t *buf) 132 uint32_t count, uint32_t *buf)
@@ -239,6 +214,90 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
239 return rval; 214 return rval;
240} 215}
241 216
217static int
218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
219 uint16_t ram_words, void **nxt)
220{
221 int rval;
222 uint32_t cnt, stat, timer, words, idx;
223 uint16_t mb0;
224 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
225 dma_addr_t dump_dma = ha->gid_list_dma;
226 uint16_t *dump = (uint16_t *)ha->gid_list;
227
228 rval = QLA_SUCCESS;
229 mb0 = 0;
230
231 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
232 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
233
234 words = GID_LIST_SIZE / 2;
235 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
236 cnt += words, addr += words) {
237 if (cnt + words > ram_words)
238 words = ram_words - cnt;
239
240 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
241 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
242
243 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
244 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
245 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
246 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
247
248 WRT_MAILBOX_REG(ha, reg, 4, words);
249 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
250
251 for (timer = 6000000; timer; timer--) {
252 /* Check for pending interrupts. */
253 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
254 if (stat & HSR_RISC_INT) {
255 stat &= 0xff;
256
257 if (stat == 0x1 || stat == 0x2) {
258 set_bit(MBX_INTERRUPT,
259 &ha->mbx_cmd_flags);
260
261 mb0 = RD_MAILBOX_REG(ha, reg, 0);
262
263 /* Release mailbox registers. */
264 WRT_REG_WORD(&reg->semaphore, 0);
265 WRT_REG_WORD(&reg->hccr,
266 HCCR_CLR_RISC_INT);
267 RD_REG_WORD(&reg->hccr);
268 break;
269 } else if (stat == 0x10 || stat == 0x11) {
270 set_bit(MBX_INTERRUPT,
271 &ha->mbx_cmd_flags);
272
273 mb0 = RD_MAILBOX_REG(ha, reg, 0);
274
275 WRT_REG_WORD(&reg->hccr,
276 HCCR_CLR_RISC_INT);
277 RD_REG_WORD(&reg->hccr);
278 break;
279 }
280
281 /* clear this intr; it wasn't a mailbox intr */
282 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
283 RD_REG_WORD(&reg->hccr);
284 }
285 udelay(5);
286 }
287
288 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
289 rval = mb0 & MBS_MASK;
290 for (idx = 0; idx < words; idx++)
291 ram[cnt + idx] = swab16(dump[idx]);
292 } else {
293 rval = QLA_FUNCTION_FAILED;
294 }
295 }
296
297 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
298 return rval;
299}
300
242static inline void 301static inline void
243qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, 302qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
244 uint16_t *buf) 303 uint16_t *buf)
@@ -258,19 +317,14 @@ void
258qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 317qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
259{ 318{
260 int rval; 319 int rval;
261 uint32_t cnt, timer; 320 uint32_t cnt;
262 uint32_t risc_address;
263 uint16_t mb0, mb2;
264 321
265 uint32_t stat;
266 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 322 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
267 uint16_t __iomem *dmp_reg; 323 uint16_t __iomem *dmp_reg;
268 unsigned long flags; 324 unsigned long flags;
269 struct qla2300_fw_dump *fw; 325 struct qla2300_fw_dump *fw;
270 uint32_t data_ram_cnt; 326 void *nxt;
271 327
272 risc_address = data_ram_cnt = 0;
273 mb0 = mb2 = 0;
274 flags = 0; 328 flags = 0;
275 329
276 if (!hardware_locked) 330 if (!hardware_locked)
@@ -388,185 +442,23 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
388 } 442 }
389 } 443 }
390 444
391 if (rval == QLA_SUCCESS) { 445 /* Get RISC SRAM. */
392 /* Get RISC SRAM. */ 446 if (rval == QLA_SUCCESS)
393 risc_address = 0x800; 447 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
394 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); 448 sizeof(fw->risc_ram) / 2, &nxt);
395 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
396 }
397 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
398 cnt++, risc_address++) {
399 WRT_MAILBOX_REG(ha, reg, 1, (uint16_t)risc_address);
400 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
401
402 for (timer = 6000000; timer; timer--) {
403 /* Check for pending interrupts. */
404 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
405 if (stat & HSR_RISC_INT) {
406 stat &= 0xff;
407
408 if (stat == 0x1 || stat == 0x2) {
409 set_bit(MBX_INTERRUPT,
410 &ha->mbx_cmd_flags);
411
412 mb0 = RD_MAILBOX_REG(ha, reg, 0);
413 mb2 = RD_MAILBOX_REG(ha, reg, 2);
414
415 /* Release mailbox registers. */
416 WRT_REG_WORD(&reg->semaphore, 0);
417 WRT_REG_WORD(&reg->hccr,
418 HCCR_CLR_RISC_INT);
419 RD_REG_WORD(&reg->hccr);
420 break;
421 } else if (stat == 0x10 || stat == 0x11) {
422 set_bit(MBX_INTERRUPT,
423 &ha->mbx_cmd_flags);
424
425 mb0 = RD_MAILBOX_REG(ha, reg, 0);
426 mb2 = RD_MAILBOX_REG(ha, reg, 2);
427
428 WRT_REG_WORD(&reg->hccr,
429 HCCR_CLR_RISC_INT);
430 RD_REG_WORD(&reg->hccr);
431 break;
432 }
433
434 /* clear this intr; it wasn't a mailbox intr */
435 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
436 RD_REG_WORD(&reg->hccr);
437 }
438 udelay(5);
439 }
440
441 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
442 rval = mb0 & MBS_MASK;
443 fw->risc_ram[cnt] = htons(mb2);
444 } else {
445 rval = QLA_FUNCTION_FAILED;
446 }
447 }
448
449 if (rval == QLA_SUCCESS) {
450 /* Get stack SRAM. */
451 risc_address = 0x10000;
452 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
453 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
454 }
455 for (cnt = 0; cnt < sizeof(fw->stack_ram) / 2 && rval == QLA_SUCCESS;
456 cnt++, risc_address++) {
457 WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
458 WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
459 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
460
461 for (timer = 6000000; timer; timer--) {
462 /* Check for pending interrupts. */
463 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
464 if (stat & HSR_RISC_INT) {
465 stat &= 0xff;
466
467 if (stat == 0x1 || stat == 0x2) {
468 set_bit(MBX_INTERRUPT,
469 &ha->mbx_cmd_flags);
470
471 mb0 = RD_MAILBOX_REG(ha, reg, 0);
472 mb2 = RD_MAILBOX_REG(ha, reg, 2);
473
474 /* Release mailbox registers. */
475 WRT_REG_WORD(&reg->semaphore, 0);
476 WRT_REG_WORD(&reg->hccr,
477 HCCR_CLR_RISC_INT);
478 RD_REG_WORD(&reg->hccr);
479 break;
480 } else if (stat == 0x10 || stat == 0x11) {
481 set_bit(MBX_INTERRUPT,
482 &ha->mbx_cmd_flags);
483
484 mb0 = RD_MAILBOX_REG(ha, reg, 0);
485 mb2 = RD_MAILBOX_REG(ha, reg, 2);
486
487 WRT_REG_WORD(&reg->hccr,
488 HCCR_CLR_RISC_INT);
489 RD_REG_WORD(&reg->hccr);
490 break;
491 }
492
493 /* clear this intr; it wasn't a mailbox intr */
494 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
495 RD_REG_WORD(&reg->hccr);
496 }
497 udelay(5);
498 }
499
500 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
501 rval = mb0 & MBS_MASK;
502 fw->stack_ram[cnt] = htons(mb2);
503 } else {
504 rval = QLA_FUNCTION_FAILED;
505 }
506 }
507
508 if (rval == QLA_SUCCESS) {
509 /* Get data SRAM. */
510 risc_address = 0x11000;
511 data_ram_cnt = ha->fw_memory_size - risc_address + 1;
512 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
513 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
514 }
515 for (cnt = 0; cnt < data_ram_cnt && rval == QLA_SUCCESS;
516 cnt++, risc_address++) {
517 WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
518 WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
519 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
520
521 for (timer = 6000000; timer; timer--) {
522 /* Check for pending interrupts. */
523 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
524 if (stat & HSR_RISC_INT) {
525 stat &= 0xff;
526
527 if (stat == 0x1 || stat == 0x2) {
528 set_bit(MBX_INTERRUPT,
529 &ha->mbx_cmd_flags);
530
531 mb0 = RD_MAILBOX_REG(ha, reg, 0);
532 mb2 = RD_MAILBOX_REG(ha, reg, 2);
533
534 /* Release mailbox registers. */
535 WRT_REG_WORD(&reg->semaphore, 0);
536 WRT_REG_WORD(&reg->hccr,
537 HCCR_CLR_RISC_INT);
538 RD_REG_WORD(&reg->hccr);
539 break;
540 } else if (stat == 0x10 || stat == 0x11) {
541 set_bit(MBX_INTERRUPT,
542 &ha->mbx_cmd_flags);
543
544 mb0 = RD_MAILBOX_REG(ha, reg, 0);
545 mb2 = RD_MAILBOX_REG(ha, reg, 2);
546
547 WRT_REG_WORD(&reg->hccr,
548 HCCR_CLR_RISC_INT);
549 RD_REG_WORD(&reg->hccr);
550 break;
551 }
552 449
553 /* clear this intr; it wasn't a mailbox intr */ 450 /* Get stack SRAM. */
554 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 451 if (rval == QLA_SUCCESS)
555 RD_REG_WORD(&reg->hccr); 452 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
556 } 453 sizeof(fw->stack_ram) / 2, &nxt);
557 udelay(5);
558 }
559 454
560 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 455 /* Get data SRAM. */
561 rval = mb0 & MBS_MASK; 456 if (rval == QLA_SUCCESS)
562 fw->data_ram[cnt] = htons(mb2); 457 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
563 } else { 458 ha->fw_memory_size - 0x11000 + 1, &nxt);
564 rval = QLA_FUNCTION_FAILED;
565 }
566 }
567 459
568 if (rval == QLA_SUCCESS) 460 if (rval == QLA_SUCCESS)
569 qla2xxx_copy_queues(ha, &fw->data_ram[cnt]); 461 qla2xxx_copy_queues(ha, nxt);
570 462
571 if (rval != QLA_SUCCESS) { 463 if (rval != QLA_SUCCESS) {
572 qla_printk(KERN_WARNING, ha, 464 qla_printk(KERN_WARNING, ha,
@@ -1010,7 +902,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1010 goto qla24xx_fw_dump_failed_0; 902 goto qla24xx_fw_dump_failed_0;
1011 903
1012 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 904 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1013 fw->ext_mem, &nxt); 905 &nxt);
1014 if (rval != QLA_SUCCESS) 906 if (rval != QLA_SUCCESS)
1015 goto qla24xx_fw_dump_failed_0; 907 goto qla24xx_fw_dump_failed_0;
1016 908
@@ -1318,7 +1210,7 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1318 goto qla25xx_fw_dump_failed_0; 1210 goto qla25xx_fw_dump_failed_0;
1319 1211
1320 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1212 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1321 fw->ext_mem, &nxt); 1213 &nxt);
1322 if (rval != QLA_SUCCESS) 1214 if (rval != QLA_SUCCESS)
1323 goto qla25xx_fw_dump_failed_0; 1215 goto qla25xx_fw_dump_failed_0;
1324 1216
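
The qla_dbg.c rework above replaces the word-at-a-time READ_RAM mailbox polling with MBC_DUMP_RISC_RAM_EXTENDED, which has the firmware DMA a whole chunk of RISC RAM into the driver's gid_list buffer; the host then byte-swaps each chunk into the dump image. A standalone sketch of the chunking loop's shape follows; the 4096-dword chunk and the sizes in main() are illustrative, the driver's actual chunk is derived from GID_LIST_SIZE.

#include <stdint.h>
#include <stdio.h>

/* Loop shape of the new qla24xx_dump_ram()/qla2xxx_dump_ram() helpers:
 * pull ram_dwords dwords starting at addr, at most buf_dwords per
 * firmware request. */
static void dump_ram_chunks(uint32_t addr, uint32_t ram_dwords,
                            uint32_t buf_dwords)
{
        uint32_t cnt, dwords = buf_dwords;

        for (cnt = 0; cnt < ram_dwords; cnt += dwords, addr += dwords) {
                if (cnt + dwords > ram_dwords)
                        dwords = ram_dwords - cnt;      /* short final chunk */
                printf("read %u dwords at 0x%x -> ram[%u..%u]\n",
                       (unsigned)dwords, (unsigned)addr,
                       (unsigned)cnt, (unsigned)(cnt + dwords - 1));
        }
}

int main(void)
{
        dump_ram_chunks(0x20000, 10000, 4096);  /* 4096 + 4096 + 1808 dwords */
        return 0;
}
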
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 078f2a15f40b..cf194517400d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1036,22 +1036,6 @@ struct mid_db_entry_24xx {
1036 uint8_t reserved_1; 1036 uint8_t reserved_1;
1037}; 1037};
1038 1038
1039 /*
1040 * Virtual Fabric ID type definition.
1041 */
1042typedef struct vf_id {
1043 uint16_t id : 12;
1044 uint16_t priority : 4;
1045} vf_id_t;
1046
1047/*
1048 * Virtual Fabric HopCt type definition.
1049 */
1050typedef struct vf_hopct {
1051 uint16_t reserved : 8;
1052 uint16_t hopct : 8;
1053} vf_hopct_t;
1054
1055/* 1039/*
1056 * Virtual Port Control IOCB 1040 * Virtual Port Control IOCB
1057 */ 1041 */
@@ -1082,10 +1066,10 @@ struct vp_ctrl_entry_24xx {
1082 1066
1083 uint8_t vp_idx_map[16]; 1067 uint8_t vp_idx_map[16];
1084 uint16_t flags; 1068 uint16_t flags;
1085 struct vf_id id; 1069 uint16_t id;
1086 uint16_t reserved_4; 1070 uint16_t reserved_4;
1087 struct vf_hopct hopct; 1071 uint16_t hopct;
1088 uint8_t reserved_5[8]; 1072 uint8_t reserved_5[24];
1089}; 1073};
1090 1074
1091/* 1075/*
@@ -1132,9 +1116,9 @@ struct vp_config_entry_24xx {
1132 uint16_t reserved_vp2; 1116 uint16_t reserved_vp2;
1133 uint8_t port_name_idx2[WWN_SIZE]; 1117 uint8_t port_name_idx2[WWN_SIZE];
1134 uint8_t node_name_idx2[WWN_SIZE]; 1118 uint8_t node_name_idx2[WWN_SIZE];
1135 struct vf_id id; 1119 uint16_t id;
1136 uint16_t reserved_4; 1120 uint16_t reserved_4;
1137 struct vf_hopct hopct; 1121 uint16_t hopct;
1138 uint8_t reserved_5; 1122 uint8_t reserved_5;
1139}; 1123};
1140 1124
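
qla_fw.h drops the vf_id_t/vf_hopct_t bitfield structs in favour of plain uint16_t fields in the virtual-port IOCBs; with a plain integer, sub-fields can be extracted with explicit shifts and masks rather than relying on compiler bitfield layout. A standalone sketch of such a decode follows, assuming the id occupies the low 12 bits and the priority the high 4; that bit assignment is carried over from the removed definition as an assumption, not something the diff states.

#include <stdint.h>
#include <stdio.h>

/* Decode a 12-bit id and 4-bit priority from a plain 16-bit word
 * (assumed layout: id in bits 0-11, priority in bits 12-15). */
static void decode_vf_id(uint16_t id_word)
{
        uint16_t id       = id_word & 0x0fff;
        uint16_t priority = id_word >> 12;

        printf("id %u, priority %u\n", id, priority);
}

int main(void)
{
        decode_vf_id(0x3abc);   /* id 0xabc, priority 3 */
        return 0;
}
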
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 76eb4fecce65..f8827068d30f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -152,10 +152,6 @@ extern int
152qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 152qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
153 153
154extern int 154extern int
155qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t,
156 uint32_t);
157
158extern int
159qla2x00_abort_command(scsi_qla_host_t *, srb_t *); 155qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
160 156
161extern int 157extern int
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 750d7ef83aae..4cb80b476c85 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1583,8 +1583,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1583 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); 1583 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1584 eiter->len = __constant_cpu_to_be16(4 + 4); 1584 eiter->len = __constant_cpu_to_be16(4 + 4);
1585 max_frame_size = IS_FWI2_CAPABLE(ha) ? 1585 max_frame_size = IS_FWI2_CAPABLE(ha) ?
1586 (uint32_t) icb24->frame_payload_size: 1586 le16_to_cpu(icb24->frame_payload_size):
1587 (uint32_t) ha->init_cb->frame_payload_size; 1587 le16_to_cpu(ha->init_cb->frame_payload_size);
1588 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1588 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1589 size += 4 + 4; 1589 size += 4 + 4;
1590 1590
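
The qla_gs.c fix reads frame_payload_size through le16_to_cpu() instead of a bare cast, since the field is stored little-endian in the init control block. A standalone sketch of what that conversion amounts to on the wire follows; the hand-rolled helper and the 2048-byte example are for illustration only.

#include <stdint.h>
#include <stdio.h>

/* frame_payload_size arrives as a little-endian 16-bit value; a plain
 * load on a big-endian host would see the bytes swapped, so an explicit
 * le16-to-cpu conversion is needed (written out by hand here). */
static uint16_t le16_to_cpu_sketch(const uint8_t bytes[2])
{
        return (uint16_t)(bytes[0] | (bytes[1] << 8));
}

int main(void)
{
        const uint8_t wire[2] = { 0x00, 0x08 };         /* 2048, little-endian */

        printf("max_frame_size = %u\n", le16_to_cpu_sketch(wire));
        return 0;
}
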
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 01e26087c1dd..bbbc5a632a1d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3645,7 +3645,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3645 if (le16_to_cpu(nv->login_timeout) < 4) 3645 if (le16_to_cpu(nv->login_timeout) < 4)
3646 nv->login_timeout = __constant_cpu_to_le16(4); 3646 nv->login_timeout = __constant_cpu_to_le16(4);
3647 ha->login_timeout = le16_to_cpu(nv->login_timeout); 3647 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3648 icb->login_timeout = cpu_to_le16(nv->login_timeout); 3648 icb->login_timeout = nv->login_timeout;
3649 3649
3650 /* Set minimum RATOV to 100 tenths of a second. */ 3650 /* Set minimum RATOV to 100 tenths of a second. */
3651 ha->r_a_tov = 100; 3651 ha->r_a_tov = 100;
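
The qla_init.c change stores nv->login_timeout into the ICB directly: the surrounding code reads it with le16_to_cpu(), so the NVRAM value is already little-endian, and wrapping it in cpu_to_le16() again would swap it back on big-endian hosts. A standalone sketch of that double-conversion hazard follows; swap16() stands in for the big-endian behaviour of cpu_to_le16(), and the 0x0400 value is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* On a big-endian host cpu_to_le16() byte-swaps its argument, so
 * applying it to a value that is already little-endian corrupts it.
 * swap16() models that swap. */
static uint16_t swap16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint16_t le_login_timeout = 0x0400;     /* already little-endian */

        printf("stored directly:  0x%04x\n", le_login_timeout);
        printf("converted again:  0x%04x\n", swap16(le_login_timeout));
        return 0;
}
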
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 285479b62d8f..5d9a64a7879b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -409,6 +409,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
409 } 409 }
410 410
411 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 411 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
412 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
412 413
413 ha->flags.management_server_logged_in = 0; 414 ha->flags.management_server_logged_in = 0;
414 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); 415 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
@@ -454,8 +455,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
454 455
455 ha->flags.management_server_logged_in = 0; 456 ha->flags.management_server_logged_in = 0;
456 ha->link_data_rate = PORT_SPEED_UNKNOWN; 457 ha->link_data_rate = PORT_SPEED_UNKNOWN;
457 if (ql2xfdmienable)
458 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
459 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); 458 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
460 break; 459 break;
461 460
@@ -511,6 +510,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
511 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 510 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
512 } 511 }
513 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 512 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
513 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
514 514
515 ha->flags.gpsc_supported = 1; 515 ha->flags.gpsc_supported = 1;
516 ha->flags.management_server_logged_in = 0; 516 ha->flags.management_server_logged_in = 0;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7d0a8a4c7719..210060420809 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -681,7 +681,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
681 * Context: 681 * Context:
682 * Kernel context. 682 * Kernel context.
683 */ 683 */
684int 684static int
685qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer, 685qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
686 dma_addr_t phys_addr, size_t size, uint32_t tov) 686 dma_addr_t phys_addr, size_t size, uint32_t tov)
687{ 687{
@@ -784,7 +784,6 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 		DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
 		    ha->host_no, rval));
 	} else {
-		sp->flags |= SRB_ABORT_PENDING;
 		DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
 		    ha->host_no));
 	}
@@ -1469,7 +1468,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
-	lg->vp_index = cpu_to_le16(ha->vp_idx);
+	lg->vp_index = ha->vp_idx;
 	rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1724,7 +1723,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
-	lg->vp_index = cpu_to_le16(ha->vp_idx);
+	lg->vp_index = ha->vp_idx;
 	rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -2210,7 +2209,6 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
-		sp->flags |= SRB_ABORT_PENDING;
 	}
 
 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2644,12 +2642,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
     struct vp_rpt_id_entry_24xx *rptid_entry)
 {
 	uint8_t vp_idx;
+	uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
 	scsi_qla_host_t *vha;
 
 	if (rptid_entry->entry_status != 0)
 		return;
-	if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
-		return;
 
 	if (rptid_entry->format == 0) {
 		DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
@@ -2659,17 +2656,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]));
 	} else if (rptid_entry->format == 1) {
-		vp_idx = LSB(rptid_entry->vp_idx);
+		vp_idx = LSB(stat);
 		DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
 		    "- status %d - "
 		    "with port id %02x%02x%02x\n",__func__,ha->host_no,
-		    vp_idx, MSB(rptid_entry->vp_idx),
+		    vp_idx, MSB(stat),
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]));
 		if (vp_idx == 0)
 			return;
 
-		if (MSB(rptid_entry->vp_idx) == 1)
+		if (MSB(stat) == 1)
 			return;
 
 		list_for_each_entry(vha, &ha->vp_list, vp_list)
@@ -2982,8 +2979,8 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
 	/* We update the firmware with only one data sequence. */
 	options |= VCO_END_OF_DATA;
 
-	retry = 0;
 	do {
+		retry = 0;
 		memset(mn, 0, sizeof(*mn));
 		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
 		mn->p.req.entry_count = 1;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8b33b163b1d4..3223fd16bcfe 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -67,7 +67,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
 
 static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
 
-int ql2xfdmienable;
+int ql2xfdmienable=1;
 module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xfdmienable,
 		"Enables FDMI registratons "
@@ -2135,7 +2135,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
 	kfree(ha->nvram);
 }
 
-struct qla_work_evt *
+static struct qla_work_evt *
 qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
     int locked)
 {
@@ -2152,7 +2152,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
 	return e;
 }
 
-int
+static int
 qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
 {
 	unsigned long flags;
@@ -2373,7 +2373,7 @@ qla2x00_do_dpc(void *data)
 				} else {
 					fcport->login_retry = 0;
 				}
-				if (fcport->login_retry == 0)
+				if (fcport->login_retry == 0 && status != QLA_SUCCESS)
 					fcport->loop_id = FC_NO_LOOP_ID;
 			}
 		if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
@@ -2599,6 +2599,10 @@ qla2x00_timer(scsi_qla_host_t *ha)
 		start_dpc++;
 	}
 
+	/* Process any deferred work. */
+	if (!list_empty(&ha->work_list))
+		start_dpc++;
+
 	/* Schedule the DPC routine if needed */
 	if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
 	    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f42f17acf2cf..afeae2bfe7eb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION	"8.02.01-k1"
+#define QLA2XXX_VERSION	"8.02.01-k2"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	2
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3f34e9376b0a..b33e72516ef8 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -121,6 +121,7 @@ extern struct scsi_transport_template blank_transport_template;
 extern void __scsi_remove_device(struct scsi_device *);
 
 extern struct bus_type scsi_bus_type;
+extern struct attribute_group *scsi_sysfs_shost_attr_groups[];
 
 /* scsi_netlink.c */
 #ifdef CONFIG_SCSI_NETLINK
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index ed395154a5b1..3a1c99d5c775 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -190,10 +190,14 @@ void scsi_proc_host_rm(struct Scsi_Host *shost)
  */
 static int proc_print_scsidevice(struct device *dev, void *data)
 {
-	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_device *sdev;
 	struct seq_file *s = data;
 	int i;
 
+	if (!scsi_is_sdev_device(dev))
+		goto out;
+
+	sdev = to_scsi_device(dev);
 	seq_printf(s,
 	    "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
 	    sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
@@ -230,6 +234,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
 	else
 		seq_printf(s, "\n");
 
+out:
 	return 0;
 }
 
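
The scsi_proc.c hunk above guards the to_scsi_device() cast with scsi_is_sdev_device() before touching the fields, presumably because the bus iteration that calls this function can now encounter devices of other types on the same bus. The following is a minimal, standalone userspace sketch of that "check the device type before container_of()" pattern; the structures, type tags and names are invented for the example and are not the kernel's real definitions.

/* check-type-before-container_of sketch (mock structures, plain C) */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct device_type { const char *name; };
struct device { const struct device_type *type; };

static const struct device_type sdev_type   = { .name = "scsi_device" };
static const struct device_type target_type = { .name = "scsi_target" };

struct mock_sdev   { int lun; struct device dev; };
struct mock_target { int id;  struct device dev; };

static int is_sdev(const struct device *dev)
{
    return dev->type == &sdev_type;     /* pointer identity, like dev->type checks */
}

static int print_if_sdev(struct device *dev, void *data)
{
    struct mock_sdev *sdev;

    (void)data;
    if (!is_sdev(dev))                  /* skip targets and other siblings */
        return 0;
    sdev = container_of(dev, struct mock_sdev, dev);
    printf("lun %d\n", sdev->lun);
    return 0;
}

int main(void)
{
    struct mock_target t = { .id = 1,  .dev.type = &target_type };
    struct mock_sdev   s = { .lun = 0, .dev.type = &sdev_type };

    print_if_sdev(&t.dev, NULL);        /* silently ignored */
    print_if_sdev(&s.dev, NULL);        /* prints "lun 0" */
    return 0;
}

The pointer comparison costs nothing extra per device, which is why the check can sit in front of every callback invocation.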
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e67c14e31bab..fcd7455ffc39 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -322,6 +322,21 @@ out:
 	return NULL;
 }
 
+static void scsi_target_destroy(struct scsi_target *starget)
+{
+	struct device *dev = &starget->dev;
+	struct Scsi_Host *shost = dev_to_shost(dev->parent);
+	unsigned long flags;
+
+	transport_destroy_device(dev);
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (shost->hostt->target_destroy)
+		shost->hostt->target_destroy(starget);
+	list_del_init(&starget->siblings);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	put_device(dev);
+}
+
 static void scsi_target_dev_release(struct device *dev)
 {
 	struct device *parent = dev->parent;
@@ -331,9 +346,14 @@ static void scsi_target_dev_release(struct device *dev)
 	put_device(parent);
 }
 
+struct device_type scsi_target_type = {
+	.name =		"scsi_target",
+	.release =	scsi_target_dev_release,
+};
+
 int scsi_is_target_device(const struct device *dev)
 {
-	return dev->release == scsi_target_dev_release;
+	return dev->type == &scsi_target_type;
 }
 EXPORT_SYMBOL(scsi_is_target_device);
 
@@ -391,14 +411,17 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 	device_initialize(dev);
 	starget->reap_ref = 1;
 	dev->parent = get_device(parent);
-	dev->release = scsi_target_dev_release;
 	sprintf(dev->bus_id, "target%d:%d:%d",
 		shost->host_no, channel, id);
+#ifndef CONFIG_SYSFS_DEPRECATED
+	dev->bus = &scsi_bus_type;
+#endif
+	dev->type = &scsi_target_type;
 	starget->id = id;
 	starget->channel = channel;
 	INIT_LIST_HEAD(&starget->siblings);
 	INIT_LIST_HEAD(&starget->devices);
-	starget->state = STARGET_RUNNING;
+	starget->state = STARGET_CREATED;
 	starget->scsi_level = SCSI_2;
  retry:
 	spin_lock_irqsave(shost->host_lock, flags);
@@ -411,18 +434,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 	spin_unlock_irqrestore(shost->host_lock, flags);
 	/* allocate and add */
 	transport_setup_device(dev);
-	error = device_add(dev);
-	if (error) {
-		dev_err(dev, "target device_add failed, error %d\n", error);
-		spin_lock_irqsave(shost->host_lock, flags);
-		list_del_init(&starget->siblings);
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		transport_destroy_device(dev);
-		put_device(parent);
-		kfree(starget);
-		return NULL;
-	}
-	transport_add_device(dev);
 	if (shost->hostt->target_alloc) {
 		error = shost->hostt->target_alloc(starget);
 
@@ -430,9 +441,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 			dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
 			/* don't want scsi_target_reap to do the final
 			 * put because it will be under the host lock */
-			get_device(dev);
-			scsi_target_reap(starget);
-			put_device(dev);
+			scsi_target_destroy(starget);
 			return NULL;
 		}
 	}
@@ -459,18 +468,10 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
 {
 	struct scsi_target *starget =
 		container_of(work, struct scsi_target, ew.work);
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	unsigned long flags;
 
 	transport_remove_device(&starget->dev);
 	device_del(&starget->dev);
-	transport_destroy_device(&starget->dev);
-	spin_lock_irqsave(shost->host_lock, flags);
-	if (shost->hostt->target_destroy)
-		shost->hostt->target_destroy(starget);
-	list_del_init(&starget->siblings);
-	spin_unlock_irqrestore(shost->host_lock, flags);
-	put_device(&starget->dev);
+	scsi_target_destroy(starget);
 }
 
 /**
@@ -485,21 +486,25 @@ void scsi_target_reap(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	unsigned long flags;
+	enum scsi_target_state state;
+	int empty;
 
 	spin_lock_irqsave(shost->host_lock, flags);
+	state = starget->state;
+	empty = --starget->reap_ref == 0 &&
+		list_empty(&starget->devices) ? 1 : 0;
+	spin_unlock_irqrestore(shost->host_lock, flags);
 
-	if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
-		BUG_ON(starget->state == STARGET_DEL);
-		starget->state = STARGET_DEL;
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		execute_in_process_context(scsi_target_reap_usercontext,
-					   &starget->ew);
+	if (!empty)
 		return;
 
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-	return;
+	BUG_ON(state == STARGET_DEL);
+	starget->state = STARGET_DEL;
+	if (state == STARGET_CREATED)
+		scsi_target_destroy(starget);
+	else
+		execute_in_process_context(scsi_target_reap_usercontext,
+					   &starget->ew);
 }
 
 /**
@@ -1048,8 +1053,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 				   scsi_inq_str(vend, result, 8, 16),
 				   scsi_inq_str(mod, result, 16, 32));
 			});
+
 		}
 
 		res = SCSI_SCAN_TARGET_PRESENT;
 		goto out_free_result;
 	}
@@ -1489,7 +1495,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
 	if (scsi_host_scan_allowed(shost))
 		scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
 	mutex_unlock(&shost->scan_mutex);
-	transport_configure_device(&starget->dev);
 	scsi_target_reap(starget);
 	put_device(&starget->dev);
 
@@ -1570,7 +1575,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
  out_reap:
 	/* now determine if the target has any children at all
 	 * and if not, nuke it */
-	transport_configure_device(&starget->dev);
 	scsi_target_reap(starget);
 
 	put_device(&starget->dev);
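
The scsi_scan.c changes above give a target a STARGET_CREATED stage: it is allocated but not yet registered, only becomes visible when something actually needs it, and gets a cheaper teardown if it never was registered. Below is a tiny standalone C model of that lifecycle; the enum, fields and function names are hypothetical stand-ins for illustration only, not the kernel code.

/* toy model of the created -> running -> deleted target lifecycle */
#include <stdio.h>

enum tgt_state { TGT_CREATED, TGT_RUNNING, TGT_DEL };

struct tgt {
	enum tgt_state state;
	int reap_ref;
	int ndevices;
};

static void tgt_add(struct tgt *t)		/* roughly: make the target visible */
{
	if (t->state != TGT_CREATED)
		return;				/* already visible */
	printf("device_add + transport_add\n");
	t->state = TGT_RUNNING;
}

static void tgt_reap(struct tgt *t)		/* roughly: drop a scan reference */
{
	if (--t->reap_ref != 0 || t->ndevices != 0)
		return;				/* still in use */

	if (t->state == TGT_CREATED)
		printf("destroy only\n");	/* never made visible: no device_del needed */
	else
		printf("device_del + destroy\n"); /* was visible: full teardown */
	t->state = TGT_DEL;
}

int main(void)
{
	struct tgt empty = { .state = TGT_CREATED, .reap_ref = 1 };
	struct tgt used  = { .state = TGT_CREATED, .reap_ref = 1 };

	tgt_reap(&empty);	/* scan found nothing: "destroy only" */

	tgt_add(&used);		/* first LUN showed up */
	tgt_reap(&used);	/* "device_del + destroy" */
	return 0;
}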
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67bb20ed45d2..049103f1d16f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -21,6 +21,8 @@
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
+static struct device_type scsi_dev_type;
+
 static const struct {
 	enum scsi_device_state	value;
 	char			*name;
@@ -249,18 +251,27 @@ shost_rd_attr(sg_tablesize, "%hu\n");
 shost_rd_attr(unchecked_isa_dma, "%d\n");
 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
 
-static struct device_attribute *scsi_sysfs_shost_attrs[] = {
-	&dev_attr_unique_id,
-	&dev_attr_host_busy,
-	&dev_attr_cmd_per_lun,
-	&dev_attr_can_queue,
-	&dev_attr_sg_tablesize,
-	&dev_attr_unchecked_isa_dma,
-	&dev_attr_proc_name,
-	&dev_attr_scan,
-	&dev_attr_hstate,
-	&dev_attr_supported_mode,
-	&dev_attr_active_mode,
+static struct attribute *scsi_sysfs_shost_attrs[] = {
+	&dev_attr_unique_id.attr,
+	&dev_attr_host_busy.attr,
+	&dev_attr_cmd_per_lun.attr,
+	&dev_attr_can_queue.attr,
+	&dev_attr_sg_tablesize.attr,
+	&dev_attr_unchecked_isa_dma.attr,
+	&dev_attr_proc_name.attr,
+	&dev_attr_scan.attr,
+	&dev_attr_hstate.attr,
+	&dev_attr_supported_mode.attr,
+	&dev_attr_active_mode.attr,
+	NULL
+};
+
+struct attribute_group scsi_shost_attr_group = {
+	.attrs =	scsi_sysfs_shost_attrs,
+};
+
+struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
+	&scsi_shost_attr_group,
 	NULL
 };
 
@@ -335,7 +346,12 @@ static struct class sdev_class = {
 /* all probing is done in the individual ->probe routines */
 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
 {
-	struct scsi_device *sdp = to_scsi_device(dev);
+	struct scsi_device *sdp;
+
+	if (dev->type != &scsi_dev_type)
+		return 0;
+
+	sdp = to_scsi_device(dev);
 	if (sdp->no_uld_attach)
 		return 0;
 	return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
@@ -351,10 +367,16 @@ static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static int scsi_bus_suspend(struct device * dev, pm_message_t state)
 {
-	struct device_driver *drv = dev->driver;
-	struct scsi_device *sdev = to_scsi_device(dev);
+	struct device_driver *drv;
+	struct scsi_device *sdev;
 	int err;
 
+	if (dev->type != &scsi_dev_type)
+		return 0;
+
+	drv = dev->driver;
+	sdev = to_scsi_device(dev);
+
 	err = scsi_device_quiesce(sdev);
 	if (err)
 		return err;
@@ -370,10 +392,16 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
 
 static int scsi_bus_resume(struct device * dev)
 {
-	struct device_driver *drv = dev->driver;
-	struct scsi_device *sdev = to_scsi_device(dev);
+	struct device_driver *drv;
+	struct scsi_device *sdev;
 	int err = 0;
 
+	if (dev->type != &scsi_dev_type)
+		return 0;
+
+	drv = dev->driver;
+	sdev = to_scsi_device(dev);
+
 	if (drv && drv->resume)
 		err = drv->resume(dev);
 
@@ -781,6 +809,27 @@ sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
 	return count;
 }
 
+static int scsi_target_add(struct scsi_target *starget)
+{
+	int error;
+
+	if (starget->state != STARGET_CREATED)
+		return 0;
+
+	error = device_add(&starget->dev);
+	if (error) {
+		dev_err(&starget->dev, "target device_add failed, error %d\n", error);
+		get_device(&starget->dev);
+		scsi_target_reap(starget);
+		put_device(&starget->dev);
+		return error;
+	}
+	transport_add_device(&starget->dev);
+	starget->state = STARGET_RUNNING;
+
+	return 0;
+}
+
 static struct device_attribute sdev_attr_queue_type_rw =
 	__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
 	       sdev_store_queue_type_rw);
@@ -796,10 +845,16 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 {
 	int error, i;
 	struct request_queue *rq = sdev->request_queue;
+	struct scsi_target *starget = sdev->sdev_target;
 
 	if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
 		return error;
 
+	error = scsi_target_add(starget);
+	if (error)
+		return error;
+
+	transport_configure_device(&starget->dev);
 	error = device_add(&sdev->sdev_gendev);
 	if (error) {
 		put_device(sdev->sdev_gendev.parent);
@@ -834,7 +889,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 		goto out;
 	}
 
-	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL);
+	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
 
 	if (error)
 		sdev_printk(KERN_INFO, sdev,
@@ -971,44 +1026,6 @@ int scsi_register_interface(struct class_interface *intf)
 }
 EXPORT_SYMBOL(scsi_register_interface);
 
-
-static struct device_attribute *class_attr_overridden(
-		struct device_attribute **attrs,
-		struct device_attribute *attr)
-{
-	int i;
-
-	if (!attrs)
-		return NULL;
-	for (i = 0; attrs[i]; i++)
-		if (!strcmp(attrs[i]->attr.name, attr->attr.name))
-			return attrs[i];
-	return NULL;
-}
-
-static int class_attr_add(struct device *classdev,
-		struct device_attribute *attr)
-{
-	struct device_attribute *base_attr;
-
-	/*
-	 * Spare the caller from having to copy things it's not interested in.
-	 */
-	base_attr = class_attr_overridden(scsi_sysfs_shost_attrs, attr);
-	if (base_attr) {
-		/* extend permissions */
-		attr->attr.mode |= base_attr->attr.mode;
-
-		/* override null show/store with default */
-		if (!attr->show)
-			attr->show = base_attr->show;
-		if (!attr->store)
-			attr->store = base_attr->store;
-	}
-
-	return device_create_file(classdev, attr);
-}
-
 /**
  * scsi_sysfs_add_host - add scsi host to subsystem
  * @shost: scsi host struct to add to subsystem
@@ -1018,20 +1035,11 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
 {
 	int error, i;
 
+	/* add host specific attributes */
 	if (shost->hostt->shost_attrs) {
 		for (i = 0; shost->hostt->shost_attrs[i]; i++) {
-			error = class_attr_add(&shost->shost_dev,
-					       shost->hostt->shost_attrs[i]);
-			if (error)
-				return error;
-		}
-	}
-
-	for (i = 0; scsi_sysfs_shost_attrs[i]; i++) {
-		if (!class_attr_overridden(shost->hostt->shost_attrs,
-					   scsi_sysfs_shost_attrs[i])) {
 			error = device_create_file(&shost->shost_dev,
-					scsi_sysfs_shost_attrs[i]);
+					shost->hostt->shost_attrs[i]);
 			if (error)
 				return error;
 		}
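
The scsi_sysfs.c hunk above replaces an open-coded list of struct device_attribute pointers with a struct attribute array wrapped in an attribute_group, the generic driver-core way of registering a set of sysfs files at once. A minimal standalone module showing that general pattern is sketched below; the kobject, attribute and module names are invented for the example and this is not the SCSI code itself.

/* minimal attribute_group example module (names are placeholders) */
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	/* report the attribute's own name, just to have something to read */
	return sprintf(buf, "%s\n", attr->attr.name);
}

static struct kobj_attribute demo_attr = __ATTR(demo, S_IRUGO, demo_show, NULL);

/* same shape as scsi_sysfs_shost_attrs[] after the conversion above */
static struct attribute *demo_attrs[] = {
	&demo_attr.attr,
	NULL
};

static struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

static struct kobject *demo_kobj;

static int __init demo_init(void)
{
	demo_kobj = kobject_create_and_add("attr_group_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;
	return sysfs_create_group(demo_kobj, &demo_group);
}

static void __exit demo_exit(void)
{
	sysfs_remove_group(demo_kobj, &demo_group);
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Grouping the files also lets the driver core create them atomically before the device uevent is sent, which is one reason subsystems moved away from creating attributes one device_create_file() call at a time.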
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6b092a6c295d..5fd64e70029d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1961,12 +1961,17 @@ fc_timed_out(struct scsi_cmnd *scmd)
 }
 
 /*
- * Must be called with shost->host_lock held
+ * Called by fc_user_scan to locate an rport on the shost that
+ * matches the channel and target id, and invoke scsi_scan_target()
+ * on the rport.
  */
-static int fc_user_scan(struct Scsi_Host *shost, uint channel,
-		uint id, uint lun)
+static void
+fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
 {
 	struct fc_rport *rport;
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
 
 	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
 		if (rport->scsi_target_id == -1)
@@ -1975,13 +1980,54 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
 		if (rport->port_state != FC_PORTSTATE_ONLINE)
 			continue;
 
-		if ((channel == SCAN_WILD_CARD || channel == rport->channel) &&
-		    (id == SCAN_WILD_CARD || id == rport->scsi_target_id)) {
-			scsi_scan_target(&rport->dev, rport->channel,
-				rport->scsi_target_id, lun, 1);
+		if ((channel == rport->channel) &&
+		    (id == rport->scsi_target_id)) {
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			scsi_scan_target(&rport->dev, channel, id, lun, 1);
+			return;
 		}
 	}
 
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Called via sysfs scan routines. Necessary, as the FC transport
+ * wants to place all target objects below the rport object. So this
+ * routine must invoke the scsi_scan_target() routine with the rport
+ * object as the parent.
+ */
+static int
+fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
+{
+	uint chlo, chhi;
+	uint tgtlo, tgthi;
+
+	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
+	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
+	    ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
+		return -EINVAL;
+
+	if (channel == SCAN_WILD_CARD) {
+		chlo = 0;
+		chhi = shost->max_channel + 1;
+	} else {
+		chlo = channel;
+		chhi = channel + 1;
+	}
+
+	if (id == SCAN_WILD_CARD) {
+		tgtlo = 0;
+		tgthi = shost->max_id;
+	} else {
+		tgtlo = id;
+		tgthi = id + 1;
+	}
+
+	for ( ; chlo < chhi; chlo++)
+		for ( ; tgtlo < tgthi; tgtlo++)
+			fc_user_scan_tgt(shost, chlo, tgtlo, lun);
+
 	return 0;
 }
 
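
The new fc_user_scan() above turns a SCAN_WILD_CARD channel or target id into a half-open [lo, hi) range and then walks every (channel, target) pair. A simplified standalone C illustration of that expansion follows; the wildcard constant, limits and the scan_one() callback are local inventions for the example, not the transport code.

/* wildcard-to-range expansion, standalone sketch */
#include <stdio.h>

#define SCAN_WILD_CARD (~0U)

static void scan_one(unsigned int channel, unsigned int id)
{
	printf("scan %u:%u\n", channel, id);
}

static void user_scan(unsigned int channel, unsigned int id,
		      unsigned int max_channel, unsigned int max_id)
{
	unsigned int chlo, chhi, tgtlo, tgthi, ch, tgt;

	if (channel == SCAN_WILD_CARD) {
		chlo = 0;
		chhi = max_channel + 1;		/* max_channel is the highest index */
	} else {
		chlo = channel;
		chhi = channel + 1;
	}

	if (id == SCAN_WILD_CARD) {
		tgtlo = 0;
		tgthi = max_id;			/* max_id is a count, so no +1 */
	} else {
		tgtlo = id;
		tgthi = id + 1;
	}

	/* walk every (channel, target) pair in the expanded ranges */
	for (ch = chlo; ch < chhi; ch++)
		for (tgt = tgtlo; tgt < tgthi; tgt++)
			scan_one(ch, tgt);
}

int main(void)
{
	user_scan(SCAN_WILD_CARD, 2, 1, 4);	/* both channels, target 2 */
	user_scan(0, SCAN_WILD_CARD, 1, 4);	/* channel 0, targets 0..3 */
	return 0;
}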
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 27ec625ab771..7899e3dda9bf 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -192,6 +192,16 @@ static void sas_non_host_smp_request(struct request_queue *q)
 	sas_smp_request(q, rphy_to_shost(rphy), rphy);
 }
 
+static void sas_host_release(struct device *dev)
+{
+	struct Scsi_Host *shost = dev_to_shost(dev);
+	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+	struct request_queue *q = sas_host->q;
+
+	if (q)
+		blk_cleanup_queue(q);
+}
+
 static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 {
 	struct request_queue *q;
@@ -199,6 +209,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	struct device *dev;
 	char namebuf[BUS_ID_SIZE];
 	const char *name;
+	void (*release)(struct device *);
 
 	if (!to_sas_internal(shost->transportt)->f->smp_handler) {
 		printk("%s can't handle SMP requests\n", shost->hostt->name);
@@ -209,17 +220,19 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 		q = blk_init_queue(sas_non_host_smp_request, NULL);
 		dev = &rphy->dev;
 		name = dev->bus_id;
+		release = NULL;
 	} else {
 		q = blk_init_queue(sas_host_smp_request, NULL);
 		dev = &shost->shost_gendev;
 		snprintf(namebuf, sizeof(namebuf),
 			 "sas_host%d", shost->host_no);
 		name = namebuf;
+		release = sas_host_release;
 	}
 	if (!q)
 		return -ENOMEM;
 
-	error = bsg_register_queue(q, dev, name);
+	error = bsg_register_queue(q, dev, name, release);
 	if (error) {
 		blk_cleanup_queue(q);
 		return -ENOMEM;
@@ -253,7 +266,6 @@ static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
 		return;
 
 	bsg_unregister_queue(q);
-	blk_cleanup_queue(q);
 }
 
 /*
@@ -1301,6 +1313,9 @@ static void sas_expander_release(struct device *dev)
 	struct sas_rphy *rphy = dev_to_rphy(dev);
 	struct sas_expander_device *edev = rphy_to_expander_device(rphy);
 
+	if (rphy->q)
+		blk_cleanup_queue(rphy->q);
+
 	put_device(dev->parent);
 	kfree(edev);
 }
@@ -1310,6 +1325,9 @@ static void sas_end_device_release(struct device *dev)
 	struct sas_rphy *rphy = dev_to_rphy(dev);
 	struct sas_end_device *edev = rphy_to_end_device(rphy);
 
+	if (rphy->q)
+		blk_cleanup_queue(rphy->q);
+
 	put_device(dev->parent);
 	kfree(edev);
 }
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bc12b5d5d676..75a64a6cae8c 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -24,6 +24,7 @@
 #include <linux/workqueue.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
+#include <linux/sysfs.h>
 #include <scsi/scsi.h>
 #include "scsi_priv.h"
 #include <scsi/scsi_device.h>
@@ -1374,11 +1375,11 @@ static int spi_host_configure(struct transport_container *tc,
  * overloads the return by setting 1<<1 if the attribute should
  * be writeable */
 #define TARGET_ATTRIBUTE_HELPER(name) \
-	(si->f->show_##name ? 1 : 0) + \
-	(si->f->set_##name ? 2 : 0)
+	(si->f->show_##name ? S_IRUGO : 0) | \
+	(si->f->set_##name ? S_IWUSR : 0)
 
-static int target_attribute_is_visible(struct kobject *kobj,
+static mode_t target_attribute_is_visible(struct kobject *kobj,
 				       struct attribute *attr, int i)
 {
 	struct device *cdev = container_of(kobj, struct device, kobj);
 	struct scsi_target *starget = transport_class_to_starget(cdev);
@@ -1428,7 +1429,7 @@ static int target_attribute_is_visible(struct kobject *kobj,
 		 spi_support_ius(starget))
 		return TARGET_ATTRIBUTE_HELPER(hold_mcs);
 	else if (attr == &dev_attr_revalidate.attr)
-		return 1;
+		return S_IWUSR;
 
 	return 0;
 }
@@ -1462,25 +1463,9 @@ static int spi_target_configure(struct transport_container *tc,
 			       struct device *cdev)
 {
 	struct kobject *kobj = &cdev->kobj;
-	int i;
-	struct attribute *attr;
-	int rc;
-
-	for (i = 0; (attr = target_attributes[i]) != NULL; i++) {
-		int j = target_attribute_group.is_visible(kobj, attr, i);
-
-		/* FIXME: as well as returning -EEXIST, which we'd like
-		 * to ignore, sysfs also does a WARN_ON and dumps a trace,
-		 * which is bad, so temporarily, skip attributes that are
-		 * already visible (the revalidate one) */
-		if (j && attr != &dev_attr_revalidate.attr)
-			rc = sysfs_add_file_to_group(kobj, attr,
-						target_attribute_group.name);
-		/* and make the attribute writeable if we have a set
-		 * function */
-		if ((j & 1))
-			rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
-	}
+
+	/* force an update based on parameters read from the device */
+	sysfs_update_group(kobj, &target_attribute_group);
 
 	return 0;
 }
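
The SPI transport change above has ->is_visible return real mode bits (S_IRUGO/S_IWUSR, or 0 to hide a file) and then simply calls sysfs_update_group() once the device has been probed, instead of adding and chmod-ing files by hand. The fragment below sketches that general shape with the era's mode_t prototype; the attribute, the speed_is_tunable flag and all names are made up for the example and do not come from the transport code.

/* attribute_group with a per-attribute mode decision (illustrative fragment) */
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/stat.h>

static ssize_t speed_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "fast\n");
}

static ssize_t speed_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	return count;	/* accept and ignore, just for the sketch */
}

static struct kobj_attribute speed_attr =
	__ATTR(speed, S_IRUGO | S_IWUSR, speed_show, speed_store);

static int speed_is_tunable;	/* pretend this is learned while probing */

static mode_t demo_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int i)
{
	if (attr == &speed_attr.attr)
		return speed_is_tunable ? (S_IRUGO | S_IWUSR) : S_IRUGO;
	return 0;		/* 0 hides the attribute entirely */
}

static struct attribute *demo_attrs[] = {
	&speed_attr.attr,
	NULL
};

static struct attribute_group demo_group = {
	.is_visible = demo_attr_is_visible,
	.attrs = demo_attrs,
};

/*
 * Create the group once with sysfs_create_group(kobj, &demo_group); after
 * probing flips speed_is_tunable, a single sysfs_update_group(kobj,
 * &demo_group) re-runs ->is_visible and adjusts the files in place, which
 * is the behaviour spi_target_configure() now relies on.
 */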
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 03e359670506..31fe6051c799 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -313,7 +313,8 @@ static struct platform_driver sgiwd93_driver = {
 	.probe  = sgiwd93_probe,
 	.remove = __devexit_p(sgiwd93_remove),
 	.driver = {
-		.name   = "sgiwd93"
+		.name   = "sgiwd93",
+		.owner	= THIS_MODULE,
 	}
 };
 
@@ -333,3 +334,4 @@ module_exit(sgiwd93_module_exit);
 MODULE_DESCRIPTION("SGI WD33C93 driver");
 MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiwd93");
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 0a6b45b1b003..2bbef4c45a0d 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -53,6 +53,7 @@
 MODULE_AUTHOR("Thomas Bogendörfer");
 MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_53c710");
 
 #define SNIRM710_CLOCK  32
 
@@ -136,6 +137,7 @@ static struct platform_driver snirm710_driver = {
 	.remove	= __devexit_p(snirm710_driver_remove),
 	.driver	= {
 		.name	= "snirm_53c710",
+		.owner	= THIS_MODULE,
 	},
 };
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a860c3a9ae99..e8db66ad0bde 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4322,7 +4322,7 @@ static void do_remove_sysfs_files(void)
 static ssize_t
 st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
 
 	l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
@@ -4334,7 +4334,7 @@ DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
 static ssize_t
 st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
 
 	l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
@@ -4346,7 +4346,7 @@ DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
 static ssize_t
 st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
 	char *fmt;
 
@@ -4361,7 +4361,7 @@ static ssize_t
 st_defcompression_show(struct device *dev, struct device_attribute *attr,
 		       char *buf)
 {
-	struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
 
 	l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
@@ -4373,7 +4373,7 @@ DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
 static ssize_t
 st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+	struct st_modedef *STm = dev_get_drvdata(dev);
 	struct scsi_tape *STp;
 	int i, j, options;
 	ssize_t l = 0;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 06152c7fa689..7514b3a0390e 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -294,6 +294,7 @@ static struct platform_driver esp_sun3x_driver = {
 	.remove		= __devexit_p(esp_sun3x_remove),
 	.driver	= {
 		.name	= "sun3x_esp",
+		.owner	= THIS_MODULE,
 	},
 };
 
@@ -314,3 +315,4 @@ MODULE_VERSION(DRV_VERSION);
 
 module_init(sun3x_esp_init);
 module_exit(sun3x_esp_exit);
+MODULE_ALIAS("platform:sun3x_esp");
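
The sgiwd93, sni_53c710 and sun3x_esp hunks all make the same two additions: .owner = THIS_MODULE in the embedded struct device_driver, and a MODULE_ALIAS("platform:<name>") matching the driver name so userspace module loading can resolve the platform device's MODALIAS uevent. A bare platform_driver skeleton showing where both lines sit is sketched below; the driver name and functions are placeholders, not one of the drivers above.

/* platform_driver skeleton with .owner and the platform: alias (placeholder names) */
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit demo_probe(struct platform_device *pdev)
{
	return 0;
}

static int __devexit demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= __devexit_p(demo_remove),
	.driver	= {
		.name	= "demo_device",
		.owner	= THIS_MODULE,
	},
};

static int __init demo_init(void)
{
	return platform_driver_register(&demo_driver);
}

static void __exit demo_exit(void)
{
	platform_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
/* must match .driver.name for automatic module loading */
MODULE_ALIAS("platform:demo_device");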
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 58d7eee4fe81..640333b1e75c 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1715,13 +1715,12 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in
 
 }
 
-static irqreturn_t ihdlr(int irq, unsigned int j) {
+static irqreturn_t ihdlr(unsigned int j)
+{
    struct scsi_cmnd *SCpnt;
    unsigned int i, k, c, status, tstatus, reg, ret;
    struct mscp *spp, *cpp;
-
-   if (sh[j]->irq != irq)
-       panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+   int irq = sh[j]->irq;
 
    /* Check if this board need to be serviced */
    if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
@@ -1935,7 +1934,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap) {
    if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
 
    spin_lock_irqsave(sh[j]->host_lock, spin_flags);
-   ret = ihdlr(irq, j);
+   ret = ihdlr(j);
    spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
    return ret;
 }
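
The u14-34f change above stops passing the irq number down into the per-board service routine and instead derives it from the board's own state, which is the usual shape of a shared-IRQ handler: everything comes from the dev_id cookie handed to request_irq(), and the handler returns IRQ_NONE when its hardware did not raise the interrupt. A generic hedged sketch of that pattern follows; the board structure, register layout and names are invented for the example and are not the u14-34f code.

/* generic shared-IRQ handler sketch (made-up board structure) */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#define DEMO_REG_STATUS		0x00
#define DEMO_STATUS_PENDING	0x01

struct demo_board {
	spinlock_t lock;
	void __iomem *regs;
	unsigned int irq;
};

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct demo_board *board = dev_id;	/* cookie passed to request_irq() */
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&board->lock, flags);
	if (readl(board->regs + DEMO_REG_STATUS) & DEMO_STATUS_PENDING) {
		/* ... service the board here ... */
		ret = IRQ_HANDLED;	/* claim the interrupt only if it was ours */
	}
	spin_unlock_irqrestore(&board->lock, flags);

	return ret;
}

static int demo_setup_irq(struct demo_board *board)
{
	/* the same board pointer comes back as dev_id in demo_isr() */
	return request_irq(board->irq, demo_isr, IRQF_SHARED,
			   "demo_board", board);
}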