Diffstat (limited to 'drivers')
89 files changed, 2569 insertions, 6483 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 560496b43306..8cb37e3557d4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_NUBUS) += nubus/ | |||
34 | obj-$(CONFIG_ATM) += atm/ | 34 | obj-$(CONFIG_ATM) += atm/ |
35 | obj-y += macintosh/ | 35 | obj-y += macintosh/ |
36 | obj-$(CONFIG_IDE) += ide/ | 36 | obj-$(CONFIG_IDE) += ide/ |
37 | obj-$(CONFIG_FC4) += fc4/ | ||
38 | obj-$(CONFIG_SCSI) += scsi/ | 37 | obj-$(CONFIG_SCSI) += scsi/ |
39 | obj-$(CONFIG_ATA) += ata/ | 38 | obj-$(CONFIG_ATA) += ata/ |
40 | obj-$(CONFIG_FUSION) += message/ | 39 | obj-$(CONFIG_FUSION) += message/ |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 47c806040524..95229e77bffe 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -227,7 +227,7 @@ struct ahci_port_priv { | |||
227 | 227 | ||
228 | static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); | 228 | static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
229 | static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); | 229 | static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
230 | static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 230 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
231 | static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); | 231 | static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); |
232 | static void ahci_irq_clear(struct ata_port *ap); | 232 | static void ahci_irq_clear(struct ata_port *ap); |
233 | static int ahci_port_start(struct ata_port *ap); | 233 | static int ahci_port_start(struct ata_port *ap); |
@@ -729,7 +729,7 @@ static int ahci_stop_engine(struct ata_port *ap) | |||
729 | 729 | ||
730 | /* wait for engine to stop. This could be as long as 500 msec */ | 730 | /* wait for engine to stop. This could be as long as 500 msec */ |
731 | tmp = ata_wait_register(port_mmio + PORT_CMD, | 731 | tmp = ata_wait_register(port_mmio + PORT_CMD, |
732 | PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); | 732 | PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); |
733 | if (tmp & PORT_CMD_LIST_ON) | 733 | if (tmp & PORT_CMD_LIST_ON) |
734 | return -EIO; | 734 | return -EIO; |
735 | 735 | ||
@@ -1564,9 +1564,9 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance) | |||
1564 | if (!irq_stat) | 1564 | if (!irq_stat) |
1565 | return IRQ_NONE; | 1565 | return IRQ_NONE; |
1566 | 1566 | ||
1567 | spin_lock(&host->lock); | 1567 | spin_lock(&host->lock); |
1568 | 1568 | ||
1569 | for (i = 0; i < host->n_ports; i++) { | 1569 | for (i = 0; i < host->n_ports; i++) { |
1570 | struct ata_port *ap; | 1570 | struct ata_port *ap; |
1571 | 1571 | ||
1572 | if (!(irq_stat & (1 << i))) | 1572 | if (!(irq_stat & (1 << i))) |
@@ -1829,9 +1829,9 @@ static int ahci_port_start(struct ata_port *ap) | |||
1829 | pp->cmd_tbl_dma = mem_dma; | 1829 | pp->cmd_tbl_dma = mem_dma; |
1830 | 1830 | ||
1831 | /* | 1831 | /* |
1832 | * Save off initial list of interrupts to be enabled. | 1832 | * Save off initial list of interrupts to be enabled. |
1833 | * This could be changed later | 1833 | * This could be changed later |
1834 | */ | 1834 | */ |
1835 | pp->intr_mask = DEF_PORT_IRQ; | 1835 | pp->intr_mask = DEF_PORT_IRQ; |
1836 | 1836 | ||
1837 | ap->private_data = pp; | 1837 | ap->private_data = pp; |
@@ -1918,12 +1918,12 @@ static void ahci_print_info(struct ata_host *host) | |||
1918 | dev_printk(KERN_INFO, &pdev->dev, | 1918 | dev_printk(KERN_INFO, &pdev->dev, |
1919 | "AHCI %02x%02x.%02x%02x " | 1919 | "AHCI %02x%02x.%02x%02x " |
1920 | "%u slots %u ports %s Gbps 0x%x impl %s mode\n" | 1920 | "%u slots %u ports %s Gbps 0x%x impl %s mode\n" |
1921 | , | 1921 | , |
1922 | 1922 | ||
1923 | (vers >> 24) & 0xff, | 1923 | (vers >> 24) & 0xff, |
1924 | (vers >> 16) & 0xff, | 1924 | (vers >> 16) & 0xff, |
1925 | (vers >> 8) & 0xff, | 1925 | (vers >> 8) & 0xff, |
1926 | vers & 0xff, | 1926 | vers & 0xff, |
1927 | 1927 | ||
1928 | ((cap >> 8) & 0x1f) + 1, | 1928 | ((cap >> 8) & 0x1f) + 1, |
1929 | (cap & 0x1f) + 1, | 1929 | (cap & 0x1f) + 1, |
@@ -1935,7 +1935,7 @@ static void ahci_print_info(struct ata_host *host) | |||
1935 | "flags: " | 1935 | "flags: " |
1936 | "%s%s%s%s%s%s%s" | 1936 | "%s%s%s%s%s%s%s" |
1937 | "%s%s%s%s%s%s%s\n" | 1937 | "%s%s%s%s%s%s%s\n" |
1938 | , | 1938 | , |
1939 | 1939 | ||
1940 | cap & (1 << 31) ? "64bit " : "", | 1940 | cap & (1 << 31) ? "64bit " : "", |
1941 | cap & (1 << 30) ? "ncq " : "", | 1941 | cap & (1 << 30) ? "ncq " : "", |
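The ahci_stop_engine() hunk above waits for PORT_CMD_LIST_ON to drop by re-reading PORT_CMD, retrying roughly every millisecond for up to 500 ms (the 1/500 arguments), then letting the caller test the bit itself. A minimal sketch of that masked-poll pattern, assuming hypothetical read_reg()/sleep_ms() stand-ins for the kernel's readl()/msleep(); this is an illustration of the idea, not the libata ata_wait_register() implementation:

/*
 * Illustrative sketch only.  Poll a register until the masked bits no longer
 * equal `val` or the timeout expires, then return the last value read so the
 * caller can test the bit (as ahci_stop_engine() does with PORT_CMD_LIST_ON).
 * read_reg()/sleep_ms() are hypothetical stand-ins for readl()/msleep().
 */
#include <stdint.h>

extern uint32_t read_reg(void *reg);    /* stand-in for an MMIO read */
extern void sleep_ms(unsigned int ms);  /* stand-in for a millisecond delay */

static uint32_t wait_register(void *reg, uint32_t mask, uint32_t val,
			      unsigned int interval_ms, unsigned int timeout_ms)
{
	unsigned int waited = 0;
	uint32_t tmp = read_reg(reg);

	/* Keep polling while the masked bits still equal `val` and time remains. */
	while ((tmp & mask) == val && waited < timeout_ms) {
		sleep_ms(interval_ms);
		waited += interval_ms;
		tmp = read_reg(reg);
	}
	return tmp;
}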
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3c6f43e381f4..a4b2cb29f46c 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -157,12 +157,12 @@ struct piix_host_priv { | |||
157 | const int *map; | 157 | const int *map; |
158 | }; | 158 | }; |
159 | 159 | ||
160 | static int piix_init_one (struct pci_dev *pdev, | 160 | static int piix_init_one(struct pci_dev *pdev, |
161 | const struct pci_device_id *ent); | 161 | const struct pci_device_id *ent); |
162 | static void piix_pata_error_handler(struct ata_port *ap); | 162 | static void piix_pata_error_handler(struct ata_port *ap); |
163 | static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); | 163 | static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev); |
164 | static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); | 164 | static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev); |
165 | static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev); | 165 | static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev); |
166 | static int ich_pata_cable_detect(struct ata_port *ap); | 166 | static int ich_pata_cable_detect(struct ata_port *ap); |
167 | #ifdef CONFIG_PM | 167 | #ifdef CONFIG_PM |
168 | static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | 168 | static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); |
@@ -650,9 +650,9 @@ static int ich_pata_cable_detect(struct ata_port *ap) | |||
650 | while (lap->device) { | 650 | while (lap->device) { |
651 | if (lap->device == pdev->device && | 651 | if (lap->device == pdev->device && |
652 | lap->subvendor == pdev->subsystem_vendor && | 652 | lap->subvendor == pdev->subsystem_vendor && |
653 | lap->subdevice == pdev->subsystem_device) { | 653 | lap->subdevice == pdev->subsystem_device) |
654 | return ATA_CBL_PATA40_SHORT; | 654 | return ATA_CBL_PATA40_SHORT; |
655 | } | 655 | |
656 | lap++; | 656 | lap++; |
657 | } | 657 | } |
658 | 658 | ||
@@ -699,7 +699,7 @@ static void piix_pata_error_handler(struct ata_port *ap) | |||
699 | * None (inherited from caller). | 699 | * None (inherited from caller). |
700 | */ | 700 | */ |
701 | 701 | ||
702 | static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) | 702 | static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) |
703 | { | 703 | { |
704 | unsigned int pio = adev->pio_mode - XFER_PIO_0; | 704 | unsigned int pio = adev->pio_mode - XFER_PIO_0; |
705 | struct pci_dev *dev = to_pci_dev(ap->host->dev); | 705 | struct pci_dev *dev = to_pci_dev(ap->host->dev); |
@@ -786,7 +786,7 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) | |||
786 | * None (inherited from caller). | 786 | * None (inherited from caller). |
787 | */ | 787 | */ |
788 | 788 | ||
789 | static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich) | 789 | static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich) |
790 | { | 790 | { |
791 | struct pci_dev *dev = to_pci_dev(ap->host->dev); | 791 | struct pci_dev *dev = to_pci_dev(ap->host->dev); |
792 | u8 master_port = ap->port_no ? 0x42 : 0x40; | 792 | u8 master_port = ap->port_no ? 0x42 : 0x40; |
@@ -813,7 +813,7 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i | |||
813 | int u_clock, u_speed; | 813 | int u_clock, u_speed; |
814 | 814 | ||
815 | /* | 815 | /* |
816 | * UDMA is handled by a combination of clock switching and | 816 | * UDMA is handled by a combination of clock switching and |
817 | * selection of dividers | 817 | * selection of dividers |
818 | * | 818 | * |
819 | * Handy rule: Odd modes are UDMATIMx 01, even are 02 | 819 | * Handy rule: Odd modes are UDMATIMx 01, even are 02 |
@@ -905,7 +905,7 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i | |||
905 | * None (inherited from caller). | 905 | * None (inherited from caller). |
906 | */ | 906 | */ |
907 | 907 | ||
908 | static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) | 908 | static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev) |
909 | { | 909 | { |
910 | do_pata_set_dmamode(ap, adev, 0); | 910 | do_pata_set_dmamode(ap, adev, 0); |
911 | } | 911 | } |
@@ -921,7 +921,7 @@ static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) | |||
921 | * None (inherited from caller). | 921 | * None (inherited from caller). |
922 | */ | 922 | */ |
923 | 923 | ||
924 | static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev) | 924 | static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev) |
925 | { | 925 | { |
926 | do_pata_set_dmamode(ap, adev, 1); | 926 | do_pata_set_dmamode(ap, adev, 1); |
927 | } | 927 | } |
@@ -1106,8 +1106,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) | |||
1106 | u16 cfg; | 1106 | u16 cfg; |
1107 | int no_piix_dma = 0; | 1107 | int no_piix_dma = 0; |
1108 | 1108 | ||
1109 | while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) | 1109 | while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) { |
1110 | { | ||
1111 | /* Look for 450NX PXB. Check for problem configurations | 1110 | /* Look for 450NX PXB. Check for problem configurations |
1112 | A PCI quirk checks bit 6 already */ | 1111 | A PCI quirk checks bit 6 already */ |
1113 | pci_read_config_word(pdev, 0x41, &cfg); | 1112 | pci_read_config_word(pdev, 0x41, &cfg); |
@@ -1241,7 +1240,7 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev) | |||
1241 | * Zero on success, or -ERRNO value. | 1240 | * Zero on success, or -ERRNO value. |
1242 | */ | 1241 | */ |
1243 | 1242 | ||
1244 | static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 1243 | static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1245 | { | 1244 | { |
1246 | static int printed_version; | 1245 | static int printed_version; |
1247 | struct device *dev = &pdev->dev; | 1246 | struct device *dev = &pdev->dev; |
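The piix_check_450nx_errata() hunk above only moves the opening brace onto the while line, but the loop it reformats is the standard pci_get_device() walk: each call returns the next matching device, dropping the reference passed in and taking one on the result, until it returns NULL. A rough sketch of that idiom, with the errata logic reduced to a simple count; the function name and the counting are illustrative only:

/*
 * Sketch of the pci_get_device() walk used by piix_check_450nx_errata().
 * The loop needs no manual pci_dev_put() when it runs to completion, because
 * pci_get_device() drops the reference on the device passed in as `from`.
 */
#include <linux/pci.h>

static int count_450nx_bridges(void)
{
	struct pci_dev *pdev = NULL;
	int n = 0;

	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
		u16 cfg;

		/* same config register the errata check reads */
		pci_read_config_word(pdev, 0x41, &cfg);
		n++;
	}
	return n;
}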
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 3f7533589041..08a52dd45fb6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -26,7 +26,7 @@ | |||
26 | #include <acpi/actypes.h> | 26 | #include <acpi/actypes.h> |
27 | 27 | ||
28 | #define NO_PORT_MULT 0xffff | 28 | #define NO_PORT_MULT 0xffff |
29 | #define SATA_ADR(root,pmp) (((root) << 16) | (pmp)) | 29 | #define SATA_ADR(root, pmp) (((root) << 16) | (pmp)) |
30 | 30 | ||
31 | #define REGS_PER_GTF 7 | 31 | #define REGS_PER_GTF 7 |
32 | struct ata_acpi_gtf { | 32 | struct ata_acpi_gtf { |
@@ -96,8 +96,8 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | 98 | ||
99 | static void ata_acpi_handle_hotplug (struct ata_port *ap, struct kobject *kobj, | 99 | static void ata_acpi_handle_hotplug(struct ata_port *ap, struct kobject *kobj, |
100 | u32 event) | 100 | u32 event) |
101 | { | 101 | { |
102 | char event_string[12]; | 102 | char event_string[12]; |
103 | char *envp[] = { event_string, NULL }; | 103 | char *envp[] = { event_string, NULL }; |
@@ -114,7 +114,7 @@ static void ata_acpi_handle_hotplug (struct ata_port *ap, struct kobject *kobj, | |||
114 | } | 114 | } |
115 | 115 | ||
116 | if (kobj) { | 116 | if (kobj) { |
117 | sprintf(event_string, "BAY_EVENT=%d", event); | 117 | sprintf(event_string, "BAY_EVENT=%d", event); |
118 | kobject_uevent_env(kobj, KOBJ_CHANGE, envp); | 118 | kobject_uevent_env(kobj, KOBJ_CHANGE, envp); |
119 | } | 119 | } |
120 | } | 120 | } |
@@ -127,14 +127,14 @@ static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data) | |||
127 | if (dev->sdev) | 127 | if (dev->sdev) |
128 | kobj = &dev->sdev->sdev_gendev.kobj; | 128 | kobj = &dev->sdev->sdev_gendev.kobj; |
129 | 129 | ||
130 | ata_acpi_handle_hotplug (dev->link->ap, kobj, event); | 130 | ata_acpi_handle_hotplug(dev->link->ap, kobj, event); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) | 133 | static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) |
134 | { | 134 | { |
135 | struct ata_port *ap = data; | 135 | struct ata_port *ap = data; |
136 | 136 | ||
137 | ata_acpi_handle_hotplug (ap, &ap->dev->kobj, event); | 137 | ata_acpi_handle_hotplug(ap, &ap->dev->kobj, event); |
138 | } | 138 | } |
139 | 139 | ||
140 | /** | 140 | /** |
@@ -398,11 +398,11 @@ int ata_acpi_cbl_80wire(struct ata_port *ap) | |||
398 | { | 398 | { |
399 | struct ata_acpi_gtm gtm; | 399 | struct ata_acpi_gtm gtm; |
400 | int valid = 0; | 400 | int valid = 0; |
401 | 401 | ||
402 | /* No _GTM data, no information */ | 402 | /* No _GTM data, no information */ |
403 | if (ata_acpi_gtm(ap, &gtm) < 0) | 403 | if (ata_acpi_gtm(ap, &gtm) < 0) |
404 | return 0; | 404 | return 0; |
405 | 405 | ||
406 | /* Split timing, DMA enabled */ | 406 | /* Split timing, DMA enabled */ |
407 | if ((gtm.flags & 0x11) == 0x11 && gtm.drive[0].dma < 55) | 407 | if ((gtm.flags & 0x11) == 0x11 && gtm.drive[0].dma < 55) |
408 | valid |= 1; | 408 | valid |= 1; |
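The ata_acpi_handle_hotplug() hunk earlier in this file builds a "BAY_EVENT=%d" string, places it in a NULL-terminated envp array, and emits it with kobject_uevent_env() so user space sees the bay event on a KOBJ_CHANGE uevent. A hedged sketch of that pattern; the helper name and buffer size are made up, only the envp/kobject_uevent_env() usage mirrors the hunk:

/*
 * Sketch of the uevent pattern from ata_acpi_handle_hotplug(): build a
 * NULL-terminated environment array and pass it to kobject_uevent_env().
 */
#include <linux/kernel.h>
#include <linux/kobject.h>

static void notify_bay_event(struct kobject *kobj, u32 event)
{
	char event_string[20];
	char *envp[] = { event_string, NULL };

	snprintf(event_string, sizeof(event_string), "BAY_EVENT=%u", event);
	kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
}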
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 69092bce1ada..2d147b51c978 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -49,11 +49,11 @@ | |||
49 | #include <linux/workqueue.h> | 49 | #include <linux/workqueue.h> |
50 | #include <linux/jiffies.h> | 50 | #include <linux/jiffies.h> |
51 | #include <linux/scatterlist.h> | 51 | #include <linux/scatterlist.h> |
52 | #include <linux/io.h> | ||
52 | #include <scsi/scsi.h> | 53 | #include <scsi/scsi.h> |
53 | #include <scsi/scsi_cmnd.h> | 54 | #include <scsi/scsi_cmnd.h> |
54 | #include <scsi/scsi_host.h> | 55 | #include <scsi/scsi_host.h> |
55 | #include <linux/libata.h> | 56 | #include <linux/libata.h> |
56 | #include <asm/io.h> | ||
57 | #include <asm/semaphore.h> | 57 | #include <asm/semaphore.h> |
58 | #include <asm/byteorder.h> | 58 | #include <asm/byteorder.h> |
59 | 59 | ||
@@ -93,7 +93,7 @@ int libata_fua = 0; | |||
93 | module_param_named(fua, libata_fua, int, 0444); | 93 | module_param_named(fua, libata_fua, int, 0444); |
94 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); | 94 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); |
95 | 95 | ||
96 | static int ata_ignore_hpa = 0; | 96 | static int ata_ignore_hpa; |
97 | module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); | 97 | module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); |
98 | MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); | 98 | MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); |
99 | 99 | ||
@@ -713,7 +713,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) | |||
713 | } | 713 | } |
714 | 714 | ||
715 | if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { | 715 | if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { |
716 | printk("ata: SEMB device ignored\n"); | 716 | printk(KERN_INFO "ata: SEMB device ignored\n"); |
717 | return ATA_DEV_SEMB_UNSUP; /* not yet */ | 717 | return ATA_DEV_SEMB_UNSUP; /* not yet */ |
718 | } | 718 | } |
719 | 719 | ||
@@ -939,7 +939,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) | |||
939 | *max_sectors = ata_tf_to_lba48(&tf); | 939 | *max_sectors = ata_tf_to_lba48(&tf); |
940 | else | 940 | else |
941 | *max_sectors = ata_tf_to_lba(&tf); | 941 | *max_sectors = ata_tf_to_lba(&tf); |
942 | if (dev->horkage & ATA_HORKAGE_HPA_SIZE) | 942 | if (dev->horkage & ATA_HORKAGE_HPA_SIZE) |
943 | (*max_sectors)--; | 943 | (*max_sectors)--; |
944 | return 0; | 944 | return 0; |
945 | } | 945 | } |
@@ -1151,7 +1151,7 @@ void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown) | |||
1151 | * LOCKING: | 1151 | * LOCKING: |
1152 | * caller. | 1152 | * caller. |
1153 | */ | 1153 | */ |
1154 | void ata_noop_dev_select (struct ata_port *ap, unsigned int device) | 1154 | void ata_noop_dev_select(struct ata_port *ap, unsigned int device) |
1155 | { | 1155 | { |
1156 | } | 1156 | } |
1157 | 1157 | ||
@@ -1171,7 +1171,7 @@ void ata_noop_dev_select (struct ata_port *ap, unsigned int device) | |||
1171 | * caller. | 1171 | * caller. |
1172 | */ | 1172 | */ |
1173 | 1173 | ||
1174 | void ata_std_dev_select (struct ata_port *ap, unsigned int device) | 1174 | void ata_std_dev_select(struct ata_port *ap, unsigned int device) |
1175 | { | 1175 | { |
1176 | u8 tmp; | 1176 | u8 tmp; |
1177 | 1177 | ||
@@ -1292,7 +1292,7 @@ static unsigned int ata_id_xfermask(const u16 *id) | |||
1292 | */ | 1292 | */ |
1293 | u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; | 1293 | u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; |
1294 | if (mode < 5) /* Valid PIO range */ | 1294 | if (mode < 5) /* Valid PIO range */ |
1295 | pio_mask = (2 << mode) - 1; | 1295 | pio_mask = (2 << mode) - 1; |
1296 | else | 1296 | else |
1297 | pio_mask = 1; | 1297 | pio_mask = 1; |
1298 | 1298 | ||
@@ -1693,7 +1693,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) | |||
1693 | * for pre-ATA4 drives. | 1693 | * for pre-ATA4 drives. |
1694 | * | 1694 | * |
1695 | * FIXME: ATA_CMD_ID_ATA is optional for early drives and right | 1695 | * FIXME: ATA_CMD_ID_ATA is optional for early drives and right |
1696 | * now we abort if we hit that case. | 1696 | * now we abort if we hit that case. |
1697 | * | 1697 | * |
1698 | * LOCKING: | 1698 | * LOCKING: |
1699 | * Kernel thread context (may sleep) | 1699 | * Kernel thread context (may sleep) |
@@ -1979,9 +1979,8 @@ int ata_dev_configure(struct ata_device *dev) | |||
1979 | "supports DRM functions and may " | 1979 | "supports DRM functions and may " |
1980 | "not be fully accessable.\n"); | 1980 | "not be fully accessable.\n"); |
1981 | snprintf(revbuf, 7, "CFA"); | 1981 | snprintf(revbuf, 7, "CFA"); |
1982 | } | 1982 | } else |
1983 | else | 1983 | snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); |
1984 | snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); | ||
1985 | 1984 | ||
1986 | dev->n_sectors = ata_id_n_sectors(id); | 1985 | dev->n_sectors = ata_id_n_sectors(id); |
1987 | 1986 | ||
@@ -2110,7 +2109,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2110 | /* Let the user know. We don't want to disallow opens for | 2109 | /* Let the user know. We don't want to disallow opens for |
2111 | rescue purposes, or in case the vendor is just a blithering | 2110 | rescue purposes, or in case the vendor is just a blithering |
2112 | idiot */ | 2111 | idiot */ |
2113 | if (print_info) { | 2112 | if (print_info) { |
2114 | ata_dev_printk(dev, KERN_WARNING, | 2113 | ata_dev_printk(dev, KERN_WARNING, |
2115 | "Drive reports diagnostics failure. This may indicate a drive\n"); | 2114 | "Drive reports diagnostics failure. This may indicate a drive\n"); |
2116 | ata_dev_printk(dev, KERN_WARNING, | 2115 | ata_dev_printk(dev, KERN_WARNING, |
@@ -2667,8 +2666,8 @@ static const struct ata_timing ata_timing[] = { | |||
2667 | { 0xFF } | 2666 | { 0xFF } |
2668 | }; | 2667 | }; |
2669 | 2668 | ||
2670 | #define ENOUGH(v,unit) (((v)-1)/(unit)+1) | 2669 | #define ENOUGH(v, unit) (((v)-1)/(unit)+1) |
2671 | #define EZ(v,unit) ((v)?ENOUGH(v,unit):0) | 2670 | #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) |
2672 | 2671 | ||
2673 | static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) | 2672 | static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) |
2674 | { | 2673 | { |
@@ -2695,7 +2694,7 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, | |||
2695 | if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); | 2694 | if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); |
2696 | } | 2695 | } |
2697 | 2696 | ||
2698 | static const struct ata_timing* ata_timing_find_mode(unsigned short speed) | 2697 | static const struct ata_timing *ata_timing_find_mode(unsigned short speed) |
2699 | { | 2698 | { |
2700 | const struct ata_timing *t; | 2699 | const struct ata_timing *t; |
2701 | 2700 | ||
@@ -2727,10 +2726,10 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, | |||
2727 | 2726 | ||
2728 | if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ | 2727 | if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ |
2729 | memset(&p, 0, sizeof(p)); | 2728 | memset(&p, 0, sizeof(p)); |
2730 | if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { | 2729 | if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { |
2731 | if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; | 2730 | if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; |
2732 | else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; | 2731 | else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; |
2733 | } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { | 2732 | } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { |
2734 | p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; | 2733 | p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; |
2735 | } | 2734 | } |
2736 | ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); | 2735 | ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); |
@@ -2876,14 +2875,17 @@ static int ata_dev_set_mode(struct ata_device *dev) | |||
2876 | dev->flags |= ATA_DFLAG_PIO; | 2875 | dev->flags |= ATA_DFLAG_PIO; |
2877 | 2876 | ||
2878 | err_mask = ata_dev_set_xfermode(dev); | 2877 | err_mask = ata_dev_set_xfermode(dev); |
2878 | |||
2879 | /* Old CFA may refuse this command, which is just fine */ | 2879 | /* Old CFA may refuse this command, which is just fine */ |
2880 | if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) | 2880 | if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) |
2881 | err_mask &= ~AC_ERR_DEV; | 2881 | err_mask &= ~AC_ERR_DEV; |
2882 | |||
2882 | /* Some very old devices and some bad newer ones fail any kind of | 2883 | /* Some very old devices and some bad newer ones fail any kind of |
2883 | SET_XFERMODE request but support PIO0-2 timings and no IORDY */ | 2884 | SET_XFERMODE request but support PIO0-2 timings and no IORDY */ |
2884 | if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && | 2885 | if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && |
2885 | dev->pio_mode <= XFER_PIO_2) | 2886 | dev->pio_mode <= XFER_PIO_2) |
2886 | err_mask &= ~AC_ERR_DEV; | 2887 | err_mask &= ~AC_ERR_DEV; |
2888 | |||
2887 | if (err_mask) { | 2889 | if (err_mask) { |
2888 | ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " | 2890 | ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " |
2889 | "(err_mask=0x%x)\n", err_mask); | 2891 | "(err_mask=0x%x)\n", err_mask); |
@@ -3265,7 +3267,7 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
3265 | * the bus shows 0xFF because the odd clown forgets the D7 | 3267 | * the bus shows 0xFF because the odd clown forgets the D7 |
3266 | * pulldown resistor. | 3268 | * pulldown resistor. |
3267 | */ | 3269 | */ |
3268 | if (ata_check_status(ap) == 0xFF) | 3270 | if (ata_chk_status(ap) == 0xFF) |
3269 | return -ENODEV; | 3271 | return -ENODEV; |
3270 | 3272 | ||
3271 | return ata_bus_post_reset(ap, devmask, deadline); | 3273 | return ata_bus_post_reset(ap, devmask, deadline); |
@@ -3943,7 +3945,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3943 | { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, | 3945 | { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, |
3944 | { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, | 3946 | { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, |
3945 | { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, | 3947 | { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, |
3946 | { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, | 3948 | { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, |
3947 | { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, | 3949 | { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, |
3948 | { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ | 3950 | { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ |
3949 | { "IOMEGA ZIP 250 ATAPI Floppy", | 3951 | { "IOMEGA ZIP 250 ATAPI Floppy", |
@@ -3959,7 +3961,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3959 | 3961 | ||
3960 | /* Devices where NCQ should be avoided */ | 3962 | /* Devices where NCQ should be avoided */ |
3961 | /* NCQ is slow */ | 3963 | /* NCQ is slow */ |
3962 | { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, | 3964 | { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, |
3963 | /* http://thread.gmane.org/gmane.linux.ide/14907 */ | 3965 | /* http://thread.gmane.org/gmane.linux.ide/14907 */ |
3964 | { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, | 3966 | { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, |
3965 | /* NCQ is broken */ | 3967 | /* NCQ is broken */ |
@@ -3979,6 +3981,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3979 | { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, | 3981 | { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, |
3980 | { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, }, | 3982 | { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, }, |
3981 | { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, | 3983 | { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, |
3984 | { "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, }, | ||
3982 | { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, | 3985 | { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, |
3983 | { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, }, | 3986 | { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, }, |
3984 | { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, | 3987 | { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, |
@@ -4106,7 +4109,7 @@ static void ata_dev_xfermask(struct ata_device *dev) | |||
4106 | } | 4109 | } |
4107 | 4110 | ||
4108 | if ((host->flags & ATA_HOST_SIMPLEX) && | 4111 | if ((host->flags & ATA_HOST_SIMPLEX) && |
4109 | host->simplex_claimed && host->simplex_claimed != ap) { | 4112 | host->simplex_claimed && host->simplex_claimed != ap) { |
4110 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | 4113 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
4111 | ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " | 4114 | ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " |
4112 | "other device, disabling DMA\n"); | 4115 | "other device, disabling DMA\n"); |
@@ -4128,11 +4131,11 @@ static void ata_dev_xfermask(struct ata_device *dev) | |||
4128 | */ | 4131 | */ |
4129 | if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) | 4132 | if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) |
4130 | /* UDMA/44 or higher would be available */ | 4133 | /* UDMA/44 or higher would be available */ |
4131 | if((ap->cbl == ATA_CBL_PATA40) || | 4134 | if ((ap->cbl == ATA_CBL_PATA40) || |
4132 | (ata_drive_40wire(dev->id) && | 4135 | (ata_drive_40wire(dev->id) && |
4133 | (ap->cbl == ATA_CBL_PATA_UNK || | 4136 | (ap->cbl == ATA_CBL_PATA_UNK || |
4134 | ap->cbl == ATA_CBL_PATA80))) { | 4137 | ap->cbl == ATA_CBL_PATA80))) { |
4135 | ata_dev_printk(dev, KERN_WARNING, | 4138 | ata_dev_printk(dev, KERN_WARNING, |
4136 | "limited to UDMA/33 due to 40-wire cable\n"); | 4139 | "limited to UDMA/33 due to 40-wire cable\n"); |
4137 | xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); | 4140 | xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); |
4138 | } | 4141 | } |
@@ -4395,7 +4398,7 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) | |||
4395 | u32 addr, offset; | 4398 | u32 addr, offset; |
4396 | u32 sg_len, len, blen; | 4399 | u32 sg_len, len, blen; |
4397 | 4400 | ||
4398 | /* determine if physical DMA addr spans 64K boundary. | 4401 | /* determine if physical DMA addr spans 64K boundary. |
4399 | * Note h/w doesn't support 64-bit, so we unconditionally | 4402 | * Note h/w doesn't support 64-bit, so we unconditionally |
4400 | * truncate dma_addr_t to u32. | 4403 | * truncate dma_addr_t to u32. |
4401 | */ | 4404 | */ |
@@ -4980,7 +4983,7 @@ next_sg: | |||
4980 | "%u bytes trailing data\n", bytes); | 4983 | "%u bytes trailing data\n", bytes); |
4981 | 4984 | ||
4982 | for (i = 0; i < words; i++) | 4985 | for (i = 0; i < words; i++) |
4983 | ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write); | 4986 | ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write); |
4984 | 4987 | ||
4985 | ap->hsm_task_state = HSM_ST_LAST; | 4988 | ap->hsm_task_state = HSM_ST_LAST; |
4986 | return; | 4989 | return; |
@@ -5908,8 +5911,8 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
5908 | * One if interrupt was handled, zero if not (shared irq). | 5911 | * One if interrupt was handled, zero if not (shared irq). |
5909 | */ | 5912 | */ |
5910 | 5913 | ||
5911 | inline unsigned int ata_host_intr (struct ata_port *ap, | 5914 | inline unsigned int ata_host_intr(struct ata_port *ap, |
5912 | struct ata_queued_cmd *qc) | 5915 | struct ata_queued_cmd *qc) |
5913 | { | 5916 | { |
5914 | struct ata_eh_info *ehi = &ap->link.eh_info; | 5917 | struct ata_eh_info *ehi = &ap->link.eh_info; |
5915 | u8 status, host_stat = 0; | 5918 | u8 status, host_stat = 0; |
@@ -6009,7 +6012,7 @@ idle_irq: | |||
6009 | * IRQ_NONE or IRQ_HANDLED. | 6012 | * IRQ_NONE or IRQ_HANDLED. |
6010 | */ | 6013 | */ |
6011 | 6014 | ||
6012 | irqreturn_t ata_interrupt (int irq, void *dev_instance) | 6015 | irqreturn_t ata_interrupt(int irq, void *dev_instance) |
6013 | { | 6016 | { |
6014 | struct ata_host *host = dev_instance; | 6017 | struct ata_host *host = dev_instance; |
6015 | unsigned int i; | 6018 | unsigned int i; |
@@ -6212,7 +6215,7 @@ int ata_flush_cache(struct ata_device *dev) | |||
6212 | 6215 | ||
6213 | /* This is wrong. On a failed flush we get back the LBA of the lost | 6216 | /* This is wrong. On a failed flush we get back the LBA of the lost |
6214 | sector and we should (assuming it wasn't aborted as unknown) issue | 6217 | sector and we should (assuming it wasn't aborted as unknown) issue |
6215 | a further flush command to continue the writeback until it | 6218 | a further flush command to continue the writeback until it |
6216 | does not error */ | 6219 | does not error */ |
6217 | err_mask = ata_do_simple_cmd(dev, cmd); | 6220 | err_mask = ata_do_simple_cmd(dev, cmd); |
6218 | if (err_mask) { | 6221 | if (err_mask) { |
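The ENOUGH()/EZ() macros touched in the libata-core.c hunk above round a nanosecond value up to a whole number of clock periods, with EZ() passing an unspecified value of 0 straight through; ata_timing_quantize() uses them to turn ATA timings into controller clocks. A standalone sketch with made-up numbers; the 30 ns clock period is only an example:

/* Sketch of the round-up quantization done by ENOUGH()/EZ() in libata-core.c. */
#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)	/* ceil(v / unit) for v > 0 */
#define EZ(v, unit)	((v) ? ENOUGH(v, unit) : 0)	/* keep "unspecified" (0) as 0 */

int main(void)
{
	int clk_ns = 30;	/* hypothetical clock period in ns */

	/* 120 ns fits exactly in 4 clocks, 121 ns needs 5, 0 stays 0. */
	printf("%d %d %d\n", EZ(120, clk_ns), EZ(121, clk_ns), EZ(0, clk_ns));
	return 0;
}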
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 2eaa39fc65d0..93e2b545b439 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1197,7 +1197,7 @@ void ata_eh_done(struct ata_link *link, struct ata_device *dev, | |||
1197 | * RETURNS: | 1197 | * RETURNS: |
1198 | * Descriptive string for @err_mask | 1198 | * Descriptive string for @err_mask |
1199 | */ | 1199 | */ |
1200 | static const char * ata_err_string(unsigned int err_mask) | 1200 | static const char *ata_err_string(unsigned int err_mask) |
1201 | { | 1201 | { |
1202 | if (err_mask & AC_ERR_HOST_BUS) | 1202 | if (err_mask & AC_ERR_HOST_BUS) |
1203 | return "host bus error"; | 1203 | return "host bus error"; |
@@ -1934,7 +1934,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
1934 | ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", | 1934 | ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", |
1935 | ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", | 1935 | ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", |
1936 | ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", | 1936 | ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", |
1937 | ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "" ); | 1937 | ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); |
1938 | 1938 | ||
1939 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 1939 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { |
1940 | static const char *dma_str[] = { | 1940 | static const char *dma_str[] = { |
@@ -1969,17 +1969,17 @@ static void ata_eh_link_report(struct ata_link *link) | |||
1969 | qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); | 1969 | qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); |
1970 | 1970 | ||
1971 | if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | | 1971 | if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | |
1972 | ATA_ERR) ) { | 1972 | ATA_ERR)) { |
1973 | if (res->command & ATA_BUSY) | 1973 | if (res->command & ATA_BUSY) |
1974 | ata_dev_printk(qc->dev, KERN_ERR, | 1974 | ata_dev_printk(qc->dev, KERN_ERR, |
1975 | "status: { Busy }\n" ); | 1975 | "status: { Busy }\n"); |
1976 | else | 1976 | else |
1977 | ata_dev_printk(qc->dev, KERN_ERR, | 1977 | ata_dev_printk(qc->dev, KERN_ERR, |
1978 | "status: { %s%s%s%s}\n", | 1978 | "status: { %s%s%s%s}\n", |
1979 | res->command & ATA_DRDY ? "DRDY " : "", | 1979 | res->command & ATA_DRDY ? "DRDY " : "", |
1980 | res->command & ATA_DF ? "DF " : "", | 1980 | res->command & ATA_DF ? "DF " : "", |
1981 | res->command & ATA_DRQ ? "DRQ " : "", | 1981 | res->command & ATA_DRQ ? "DRQ " : "", |
1982 | res->command & ATA_ERR ? "ERR " : "" ); | 1982 | res->command & ATA_ERR ? "ERR " : ""); |
1983 | } | 1983 | } |
1984 | 1984 | ||
1985 | if (cmd->command != ATA_CMD_PACKET && | 1985 | if (cmd->command != ATA_CMD_PACKET && |
@@ -1990,7 +1990,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
1990 | res->feature & ATA_ICRC ? "ICRC " : "", | 1990 | res->feature & ATA_ICRC ? "ICRC " : "", |
1991 | res->feature & ATA_UNC ? "UNC " : "", | 1991 | res->feature & ATA_UNC ? "UNC " : "", |
1992 | res->feature & ATA_IDNF ? "IDNF " : "", | 1992 | res->feature & ATA_IDNF ? "IDNF " : "", |
1993 | res->feature & ATA_ABORTED ? "ABRT " : "" ); | 1993 | res->feature & ATA_ABORTED ? "ABRT " : ""); |
1994 | } | 1994 | } |
1995 | } | 1995 | } |
1996 | 1996 | ||
@@ -2611,7 +2611,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2611 | ehc->i.flags = 0; | 2611 | ehc->i.flags = 0; |
2612 | continue; | 2612 | continue; |
2613 | 2613 | ||
2614 | dev_fail: | 2614 | dev_fail: |
2615 | nr_failed_devs++; | 2615 | nr_failed_devs++; |
2616 | if (ata_eh_handle_dev_fail(dev, rc)) | 2616 | if (ata_eh_handle_dev_fail(dev, rc)) |
2617 | nr_disabled_devs++; | 2617 | nr_disabled_devs++; |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5b758b9ad0b8..f5d5420a1ba2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -45,7 +45,7 @@ | |||
45 | #include <scsi/scsi_transport.h> | 45 | #include <scsi/scsi_transport.h> |
46 | #include <linux/libata.h> | 46 | #include <linux/libata.h> |
47 | #include <linux/hdreg.h> | 47 | #include <linux/hdreg.h> |
48 | #include <asm/uaccess.h> | 48 | #include <linux/uaccess.h> |
49 | 49 | ||
50 | #include "libata.h" | 50 | #include "libata.h" |
51 | 51 | ||
@@ -53,9 +53,9 @@ | |||
53 | 53 | ||
54 | typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); | 54 | typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); |
55 | 55 | ||
56 | static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, | 56 | static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, |
57 | const struct scsi_device *scsidev); | 57 | const struct scsi_device *scsidev); |
58 | static struct ata_device * ata_scsi_find_dev(struct ata_port *ap, | 58 | static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, |
59 | const struct scsi_device *scsidev); | 59 | const struct scsi_device *scsidev); |
60 | static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | 60 | static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, |
61 | unsigned int id, unsigned int lun); | 61 | unsigned int id, unsigned int lun); |
@@ -228,7 +228,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
228 | 228 | ||
229 | scsi_cmd[1] = (4 << 1); /* PIO Data-in */ | 229 | scsi_cmd[1] = (4 << 1); /* PIO Data-in */ |
230 | scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, | 230 | scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, |
231 | block count in sector count field */ | 231 | block count in sector count field */ |
232 | data_dir = DMA_FROM_DEVICE; | 232 | data_dir = DMA_FROM_DEVICE; |
233 | } else { | 233 | } else { |
234 | scsi_cmd[1] = (3 << 1); /* Non-data */ | 234 | scsi_cmd[1] = (3 << 1); /* Non-data */ |
@@ -252,7 +252,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
252 | /* Good values for timeout and retries? Values below | 252 | /* Good values for timeout and retries? Values below |
253 | from scsi_ioctl_send_command() for default case... */ | 253 | from scsi_ioctl_send_command() for default case... */ |
254 | cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, | 254 | cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, |
255 | sensebuf, (10*HZ), 5, 0); | 255 | sensebuf, (10*HZ), 5, 0); |
256 | 256 | ||
257 | if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ | 257 | if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ |
258 | u8 *desc = sensebuf + 8; | 258 | u8 *desc = sensebuf + 8; |
@@ -263,18 +263,18 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
263 | if (cmd_result & SAM_STAT_CHECK_CONDITION) { | 263 | if (cmd_result & SAM_STAT_CHECK_CONDITION) { |
264 | struct scsi_sense_hdr sshdr; | 264 | struct scsi_sense_hdr sshdr; |
265 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 265 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
266 | &sshdr); | 266 | &sshdr); |
267 | if (sshdr.sense_key==0 && | 267 | if (sshdr.sense_key == 0 && |
268 | sshdr.asc==0 && sshdr.ascq==0) | 268 | sshdr.asc == 0 && sshdr.ascq == 0) |
269 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 269 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
270 | } | 270 | } |
271 | 271 | ||
272 | /* Send userspace a few ATA registers (same as drivers/ide) */ | 272 | /* Send userspace a few ATA registers (same as drivers/ide) */ |
273 | if (sensebuf[0] == 0x72 && /* format is "descriptor" */ | 273 | if (sensebuf[0] == 0x72 && /* format is "descriptor" */ |
274 | desc[0] == 0x09 ) { /* code is "ATA Descriptor" */ | 274 | desc[0] == 0x09) { /* code is "ATA Descriptor" */ |
275 | args[0] = desc[13]; /* status */ | 275 | args[0] = desc[13]; /* status */ |
276 | args[1] = desc[3]; /* error */ | 276 | args[1] = desc[3]; /* error */ |
277 | args[2] = desc[5]; /* sector count (0:7) */ | 277 | args[2] = desc[5]; /* sector count (0:7) */ |
278 | if (copy_to_user(arg, args, sizeof(args))) | 278 | if (copy_to_user(arg, args, sizeof(args))) |
279 | rc = -EFAULT; | 279 | rc = -EFAULT; |
280 | } | 280 | } |
@@ -350,8 +350,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
350 | struct scsi_sense_hdr sshdr; | 350 | struct scsi_sense_hdr sshdr; |
351 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 351 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
352 | &sshdr); | 352 | &sshdr); |
353 | if (sshdr.sense_key==0 && | 353 | if (sshdr.sense_key == 0 && |
354 | sshdr.asc==0 && sshdr.ascq==0) | 354 | sshdr.asc == 0 && sshdr.ascq == 0) |
355 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 355 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
356 | } | 356 | } |
357 | 357 | ||
@@ -975,7 +975,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) | |||
975 | if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && | 975 | if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && |
976 | (system_state == SYSTEM_HALT || | 976 | (system_state == SYSTEM_HALT || |
977 | system_state == SYSTEM_POWER_OFF)) { | 977 | system_state == SYSTEM_POWER_OFF)) { |
978 | static unsigned long warned = 0; | 978 | static unsigned long warned; |
979 | 979 | ||
980 | if (!test_and_set_bit(0, &warned)) { | 980 | if (!test_and_set_bit(0, &warned)) { |
981 | ata_dev_printk(qc->dev, KERN_WARNING, | 981 | ata_dev_printk(qc->dev, KERN_WARNING, |
@@ -1364,7 +1364,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) | |||
1364 | struct ata_eh_info *ehi = &qc->dev->link->eh_info; | 1364 | struct ata_eh_info *ehi = &qc->dev->link->eh_info; |
1365 | struct scsi_cmnd *cmd = qc->scsicmd; | 1365 | struct scsi_cmnd *cmd = qc->scsicmd; |
1366 | u8 *cdb = cmd->cmnd; | 1366 | u8 *cdb = cmd->cmnd; |
1367 | int need_sense = (qc->err_mask != 0); | 1367 | int need_sense = (qc->err_mask != 0); |
1368 | 1368 | ||
1369 | /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and | 1369 | /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and |
1370 | * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE | 1370 | * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE |
@@ -1396,7 +1396,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) | |||
1396 | * was no error, SK, ASC and ASCQ will all be zero. | 1396 | * was no error, SK, ASC and ASCQ will all be zero. |
1397 | */ | 1397 | */ |
1398 | if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && | 1398 | if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && |
1399 | ((cdb[2] & 0x20) || need_sense)) { | 1399 | ((cdb[2] & 0x20) || need_sense)) { |
1400 | ata_gen_passthru_sense(qc); | 1400 | ata_gen_passthru_sense(qc); |
1401 | } else { | 1401 | } else { |
1402 | if (!need_sense) { | 1402 | if (!need_sense) { |
@@ -1500,7 +1500,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, | |||
1500 | return 0; | 1500 | return 0; |
1501 | 1501 | ||
1502 | early_finish: | 1502 | early_finish: |
1503 | ata_qc_free(qc); | 1503 | ata_qc_free(qc); |
1504 | qc->scsidone(cmd); | 1504 | qc->scsidone(cmd); |
1505 | DPRINTK("EXIT - early finish (good or error)\n"); | 1505 | DPRINTK("EXIT - early finish (good or error)\n"); |
1506 | return 0; | 1506 | return 0; |
@@ -1590,8 +1590,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) | |||
1590 | */ | 1590 | */ |
1591 | 1591 | ||
1592 | void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | 1592 | void ata_scsi_rbuf_fill(struct ata_scsi_args *args, |
1593 | unsigned int (*actor) (struct ata_scsi_args *args, | 1593 | unsigned int (*actor) (struct ata_scsi_args *args, |
1594 | u8 *rbuf, unsigned int buflen)) | 1594 | u8 *rbuf, unsigned int buflen)) |
1595 | { | 1595 | { |
1596 | u8 *rbuf; | 1596 | u8 *rbuf; |
1597 | unsigned int buflen, rc; | 1597 | unsigned int buflen, rc; |
@@ -2140,7 +2140,7 @@ saving_not_supp: | |||
2140 | * None. | 2140 | * None. |
2141 | */ | 2141 | */ |
2142 | unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, | 2142 | unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, |
2143 | unsigned int buflen) | 2143 | unsigned int buflen) |
2144 | { | 2144 | { |
2145 | u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ | 2145 | u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ |
2146 | 2146 | ||
@@ -2464,7 +2464,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2464 | return 0; | 2464 | return 0; |
2465 | } | 2465 | } |
2466 | 2466 | ||
2467 | static struct ata_device * ata_find_dev(struct ata_port *ap, int devno) | 2467 | static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) |
2468 | { | 2468 | { |
2469 | if (ap->nr_pmp_links == 0) { | 2469 | if (ap->nr_pmp_links == 0) { |
2470 | if (likely(devno < ata_link_max_devices(&ap->link))) | 2470 | if (likely(devno < ata_link_max_devices(&ap->link))) |
@@ -2477,8 +2477,8 @@ static struct ata_device * ata_find_dev(struct ata_port *ap, int devno) | |||
2477 | return NULL; | 2477 | return NULL; |
2478 | } | 2478 | } |
2479 | 2479 | ||
2480 | static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, | 2480 | static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, |
2481 | const struct scsi_device *scsidev) | 2481 | const struct scsi_device *scsidev) |
2482 | { | 2482 | { |
2483 | int devno; | 2483 | int devno; |
2484 | 2484 | ||
@@ -2564,27 +2564,27 @@ static u8 | |||
2564 | ata_scsi_map_proto(u8 byte1) | 2564 | ata_scsi_map_proto(u8 byte1) |
2565 | { | 2565 | { |
2566 | switch((byte1 & 0x1e) >> 1) { | 2566 | switch((byte1 & 0x1e) >> 1) { |
2567 | case 3: /* Non-data */ | 2567 | case 3: /* Non-data */ |
2568 | return ATA_PROT_NODATA; | 2568 | return ATA_PROT_NODATA; |
2569 | 2569 | ||
2570 | case 6: /* DMA */ | 2570 | case 6: /* DMA */ |
2571 | case 10: /* UDMA Data-in */ | 2571 | case 10: /* UDMA Data-in */ |
2572 | case 11: /* UDMA Data-Out */ | 2572 | case 11: /* UDMA Data-Out */ |
2573 | return ATA_PROT_DMA; | 2573 | return ATA_PROT_DMA; |
2574 | 2574 | ||
2575 | case 4: /* PIO Data-in */ | 2575 | case 4: /* PIO Data-in */ |
2576 | case 5: /* PIO Data-out */ | 2576 | case 5: /* PIO Data-out */ |
2577 | return ATA_PROT_PIO; | 2577 | return ATA_PROT_PIO; |
2578 | 2578 | ||
2579 | case 0: /* Hard Reset */ | 2579 | case 0: /* Hard Reset */ |
2580 | case 1: /* SRST */ | 2580 | case 1: /* SRST */ |
2581 | case 8: /* Device Diagnostic */ | 2581 | case 8: /* Device Diagnostic */ |
2582 | case 9: /* Device Reset */ | 2582 | case 9: /* Device Reset */ |
2583 | case 7: /* DMA Queued */ | 2583 | case 7: /* DMA Queued */ |
2584 | case 12: /* FPDMA */ | 2584 | case 12: /* FPDMA */ |
2585 | case 15: /* Return Response Info */ | 2585 | case 15: /* Return Response Info */ |
2586 | default: /* Reserved */ | 2586 | default: /* Reserved */ |
2587 | break; | 2587 | break; |
2588 | } | 2588 | } |
2589 | 2589 | ||
2590 | return ATA_PROT_UNKNOWN; | 2590 | return ATA_PROT_UNKNOWN; |
@@ -2919,94 +2919,94 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
(Both sides of this hunk contain the same statements; the change is purely re-indentation of the command dispatch switch in ata_scsi_simulate(). The resulting code reads:)

	args.done = done;

	switch(scsicmd[0]) {
	/* TODO: worth improving? */
	case FORMAT_UNIT:
		ata_scsi_invalid_field(cmd, done);
		break;

	case INQUIRY:
		if (scsicmd[1] & 2)		/* is CmdDt set?  */
			ata_scsi_invalid_field(cmd, done);
		else if ((scsicmd[1] & 1) == 0)	/* is EVPD clear? */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
		else switch (scsicmd[2]) {
		case 0x00:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
			break;
		case 0x80:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
			break;
		case 0x83:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
			break;
		case 0x89:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
			break;
		default:
			ata_scsi_invalid_field(cmd, done);
			break;
		}
		break;

	case MODE_SENSE:
	case MODE_SENSE_10:
		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
		break;

	case MODE_SELECT:	/* unconditionally return */
	case MODE_SELECT_10:	/* bad-field-in-cdb */
		ata_scsi_invalid_field(cmd, done);
		break;

	case READ_CAPACITY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		break;

	case SERVICE_ACTION_IN:
		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	case REPORT_LUNS:
		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
		break;

	case REQUEST_SENSE:
		ata_scsi_set_sense(cmd, 0, 0, 0);
		cmd->result = (DRIVER_SENSE << 24);
		done(cmd);
		break;

	/* if we reach this, then writeback caching is disabled,
	 * turning this into a no-op.
	 */
	case SYNCHRONIZE_CACHE:
		/* fall through */

	/* no-op's, complete with success */
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
	case TEST_UNIT_READY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		break;

	case SEND_DIAGNOSTIC:
		tmp8 = scsicmd[1] & ~(1 << 3);
		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	/* all other commands */
	default:
		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
		/* "Invalid command operation code" */
		done(cmd);
		break;
	}
}
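The ata_scsi_map_proto() hunk earlier in this file decodes bits 1-4 of byte 1 of an ATA PASS-THROUGH CDB and maps the protocol value onto libata's internal protocol constants. A self-contained sketch of that mapping, using a local enum as a stand-in for the kernel's ATA_PROT_* values; only the bit extraction and the case values mirror the driver:

/* Sketch of the SAT protocol-field decode from ata_scsi_map_proto(). */
#include <stdint.h>

enum sketch_prot { PROT_UNKNOWN, PROT_NODATA, PROT_PIO, PROT_DMA };

static enum sketch_prot map_proto(uint8_t byte1)
{
	switch ((byte1 & 0x1e) >> 1) {
	case 3:			/* Non-data */
		return PROT_NODATA;
	case 6:			/* DMA */
	case 10:		/* UDMA Data-in */
	case 11:		/* UDMA Data-out */
		return PROT_DMA;
	case 4:			/* PIO Data-in */
	case 5:			/* PIO Data-out */
		return PROT_PIO;
	default:		/* resets, queued, FPDMA, response info, reserved */
		return PROT_UNKNOWN;
	}
}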
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 8227c45109ec..48acc09dab96 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -248,7 +248,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc) | |||
248 | * LOCKING: | 248 | * LOCKING: |
249 | * spin_lock_irqsave(host lock) | 249 | * spin_lock_irqsave(host lock) |
250 | */ | 250 | */ |
251 | void ata_bmdma_start (struct ata_queued_cmd *qc) | 251 | void ata_bmdma_start(struct ata_queued_cmd *qc) |
252 | { | 252 | { |
253 | struct ata_port *ap = qc->ap; | 253 | struct ata_port *ap = qc->ap; |
254 | u8 dmactl; | 254 | u8 dmactl; |
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index bb97ef583f9b..b9a17eb100d0 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -17,7 +17,7 @@ | |||
17 | * TODO: | 17 | * TODO: |
18 | * Test PARISC SuperIO | 18 | * Test PARISC SuperIO |
19 | * Get someone to test on SPARC | 19 | * Get someone to test on SPARC |
20 | * Implement lazy pio/dma switching for better performance | 20 | * Implement lazy pio/dma switching for better performance |
21 | * 8bit shared timing. | 21 | * 8bit shared timing. |
22 | * See if we need to kill the FIFO for ATAPI | 22 | * See if we need to kill the FIFO for ATAPI |
23 | */ | 23 | */ |
@@ -60,10 +60,10 @@ static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo | |||
60 | u16 clocking; | 60 | u16 clocking; |
61 | u8 iordy; | 61 | u8 iordy; |
62 | u8 status; | 62 | u8 status; |
63 | 63 | ||
64 | /* Timing register format is 17 - low nybble read timing with | 64 | /* Timing register format is 17 - low nybble read timing with |
65 | the high nybble being 16 - x for recovery time in PCI clocks */ | 65 | the high nybble being 16 - x for recovery time in PCI clocks */ |
66 | 66 | ||
67 | ata_timing_compute(adev, adev->pio_mode, &t, T, 0); | 67 | ata_timing_compute(adev, adev->pio_mode, &t, T, 0); |
68 | 68 | ||
69 | clocking = 17 - FIT(t.active, 2, 17); | 69 | clocking = 17 - FIT(t.active, 2, 17); |
@@ -71,7 +71,7 @@ static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo | |||
71 | /* Use the same timing for read and write bytes */ | 71 | /* Use the same timing for read and write bytes */ |
72 | clocking |= (clocking << 8); | 72 | clocking |= (clocking << 8); |
73 | pci_write_config_word(dev, timing, clocking); | 73 | pci_write_config_word(dev, timing, clocking); |
74 | 74 | ||
75 | /* Set the IORDY enable versus DMA enable on or off properly */ | 75 | /* Set the IORDY enable versus DMA enable on or off properly */ |
76 | pci_read_config_byte(dev, 0x42, &iordy); | 76 | pci_read_config_byte(dev, 0x42, &iordy); |
77 | iordy &= ~(1 << (4 + unit)); | 77 | iordy &= ~(1 << (4 + unit)); |
@@ -185,7 +185,7 @@ static void ns87415_bmdma_irq_clear(struct ata_port *ap) | |||
185 | 185 | ||
186 | if (!mmio) | 186 | if (!mmio) |
187 | return; | 187 | return; |
188 | iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR), | 188 | iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR), |
189 | mmio + ATA_DMA_CMD); | 189 | mmio + ATA_DMA_CMD); |
190 | } | 190 | } |
191 | 191 | ||
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7f1b13e89cf7..b39648f0914b 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -845,7 +845,7 @@ static int __mv_stop_dma(struct ata_port *ap) | |||
845 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 845 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
846 | } else { | 846 | } else { |
847 | WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); | 847 | WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); |
848 | } | 848 | } |
849 | 849 | ||
850 | /* now properly wait for the eDMA to stop */ | 850 | /* now properly wait for the eDMA to stop */ |
851 | for (i = 1000; i > 0; i--) { | 851 | for (i = 1000; i > 0; i--) { |
@@ -883,7 +883,7 @@ static void mv_dump_mem(void __iomem *start, unsigned bytes) | |||
883 | for (b = 0; b < bytes; ) { | 883 | for (b = 0; b < bytes; ) { |
884 | DPRINTK("%p: ", start + b); | 884 | DPRINTK("%p: ", start + b); |
885 | for (w = 0; b < bytes && w < 4; w++) { | 885 | for (w = 0; b < bytes && w < 4; w++) { |
886 | printk("%08x ",readl(start + b)); | 886 | printk("%08x ", readl(start + b)); |
887 | b += sizeof(u32); | 887 | b += sizeof(u32); |
888 | } | 888 | } |
889 | printk("\n"); | 889 | printk("\n"); |
@@ -899,8 +899,8 @@ static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) | |||
899 | for (b = 0; b < bytes; ) { | 899 | for (b = 0; b < bytes; ) { |
900 | DPRINTK("%02x: ", b); | 900 | DPRINTK("%02x: ", b); |
901 | for (w = 0; b < bytes && w < 4; w++) { | 901 | for (w = 0; b < bytes && w < 4; w++) { |
902 | (void) pci_read_config_dword(pdev,b,&dw); | 902 | (void) pci_read_config_dword(pdev, b, &dw); |
903 | printk("%08x ",dw); | 903 | printk("%08x ", dw); |
904 | b += sizeof(u32); | 904 | b += sizeof(u32); |
905 | } | 905 | } |
906 | printk("\n"); | 906 | printk("\n"); |
@@ -944,9 +944,9 @@ static void mv_dump_all_regs(void __iomem *mmio_base, int port, | |||
944 | } | 944 | } |
945 | for (p = start_port; p < start_port + num_ports; p++) { | 945 | for (p = start_port; p < start_port + num_ports; p++) { |
946 | port_base = mv_port_base(mmio_base, p); | 946 | port_base = mv_port_base(mmio_base, p); |
947 | DPRINTK("EDMA regs (port %i):\n",p); | 947 | DPRINTK("EDMA regs (port %i):\n", p); |
948 | mv_dump_mem(port_base, 0x54); | 948 | mv_dump_mem(port_base, 0x54); |
949 | DPRINTK("SATA regs (port %i):\n",p); | 949 | DPRINTK("SATA regs (port %i):\n", p); |
950 | mv_dump_mem(port_base+0x300, 0x60); | 950 | mv_dump_mem(port_base+0x300, 0x60); |
951 | } | 951 | } |
952 | #endif | 952 | #endif |
@@ -1184,7 +1184,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1184 | u16 flags = 0; | 1184 | u16 flags = 0; |
1185 | unsigned in_index; | 1185 | unsigned in_index; |
1186 | 1186 | ||
1187 | if (qc->tf.protocol != ATA_PROT_DMA) | 1187 | if (qc->tf.protocol != ATA_PROT_DMA) |
1188 | return; | 1188 | return; |
1189 | 1189 | ||
1190 | /* Fill in command request block | 1190 | /* Fill in command request block |
@@ -1276,7 +1276,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1276 | unsigned in_index; | 1276 | unsigned in_index; |
1277 | u32 flags = 0; | 1277 | u32 flags = 0; |
1278 | 1278 | ||
1279 | if (qc->tf.protocol != ATA_PROT_DMA) | 1279 | if (qc->tf.protocol != ATA_PROT_DMA) |
1280 | return; | 1280 | return; |
1281 | 1281 | ||
1282 | /* Fill in Gen IIE command request block | 1282 | /* Fill in Gen IIE command request block |
@@ -1606,7 +1606,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1606 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | 1606 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); |
1607 | 1607 | ||
1608 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", | 1608 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", |
1609 | hc,relevant,hc_irq_cause); | 1609 | hc, relevant, hc_irq_cause); |
1610 | 1610 | ||
1611 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { | 1611 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { |
1612 | struct ata_port *ap = host->ports[port]; | 1612 | struct ata_port *ap = host->ports[port]; |
@@ -1983,9 +1983,8 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1983 | for (i = 0; i < 1000; i++) { | 1983 | for (i = 0; i < 1000; i++) { |
1984 | udelay(1); | 1984 | udelay(1); |
1985 | t = readl(reg); | 1985 | t = readl(reg); |
1986 | if (PCI_MASTER_EMPTY & t) { | 1986 | if (PCI_MASTER_EMPTY & t) |
1987 | break; | 1987 | break; |
1988 | } | ||
1989 | } | 1988 | } |
1990 | if (!(PCI_MASTER_EMPTY & t)) { | 1989 | if (!(PCI_MASTER_EMPTY & t)) { |
1991 | printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); | 1990 | printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); |
@@ -2668,7 +2667,7 @@ static void mv_print_info(struct ata_host *host) | |||
2668 | */ | 2667 | */ |
2669 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 2668 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
2670 | { | 2669 | { |
2671 | static int printed_version = 0; | 2670 | static int printed_version; |
2672 | unsigned int board_idx = (unsigned int)ent->driver_data; | 2671 | unsigned int board_idx = (unsigned int)ent->driver_data; |
2673 | const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; | 2672 | const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; |
2674 | struct ata_host *host; | 2673 | struct ata_host *host; |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 240a8920d0bd..2e0279fdd7aa 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -163,7 +163,7 @@ enum { | |||
163 | NV_ADMA_STAT_STOPPED = (1 << 10), | 163 | NV_ADMA_STAT_STOPPED = (1 << 10), |
164 | NV_ADMA_STAT_DONE = (1 << 12), | 164 | NV_ADMA_STAT_DONE = (1 << 12), |
165 | NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR | | 165 | NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR | |
166 | NV_ADMA_STAT_TIMEOUT, | 166 | NV_ADMA_STAT_TIMEOUT, |
167 | 167 | ||
168 | /* port flags */ | 168 | /* port flags */ |
169 | NV_ADMA_PORT_REGISTER_MODE = (1 << 0), | 169 | NV_ADMA_PORT_REGISTER_MODE = (1 << 0), |
@@ -228,7 +228,7 @@ struct nv_adma_cpb { | |||
228 | u8 reserved1; /* 1 */ | 228 | u8 reserved1; /* 1 */ |
229 | u8 ctl_flags; /* 2 */ | 229 | u8 ctl_flags; /* 2 */ |
230 | /* len is length of taskfile in 64 bit words */ | 230 | /* len is length of taskfile in 64 bit words */ |
231 | u8 len; /* 3 */ | 231 | u8 len; /* 3 */ |
232 | u8 tag; /* 4 */ | 232 | u8 tag; /* 4 */ |
233 | u8 next_cpb_idx; /* 5 */ | 233 | u8 next_cpb_idx; /* 5 */ |
234 | __le16 reserved2; /* 6-7 */ | 234 | __le16 reserved2; /* 6-7 */ |
@@ -244,9 +244,9 @@ struct nv_adma_port_priv { | |||
244 | dma_addr_t cpb_dma; | 244 | dma_addr_t cpb_dma; |
245 | struct nv_adma_prd *aprd; | 245 | struct nv_adma_prd *aprd; |
246 | dma_addr_t aprd_dma; | 246 | dma_addr_t aprd_dma; |
247 | void __iomem * ctl_block; | 247 | void __iomem *ctl_block; |
248 | void __iomem * gen_block; | 248 | void __iomem *gen_block; |
249 | void __iomem * notifier_clear_block; | 249 | void __iomem *notifier_clear_block; |
250 | u8 flags; | 250 | u8 flags; |
251 | int last_issue_ncq; | 251 | int last_issue_ncq; |
252 | }; | 252 | }; |
@@ -293,7 +293,7 @@ struct nv_swncq_port_priv { | |||
293 | 293 | ||
294 | #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT))))) | 294 | #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT))))) |
295 | 295 | ||
296 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 296 | static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
297 | #ifdef CONFIG_PM | 297 | #ifdef CONFIG_PM |
298 | static int nv_pci_device_resume(struct pci_dev *pdev); | 298 | static int nv_pci_device_resume(struct pci_dev *pdev); |
299 | #endif | 299 | #endif |
@@ -301,8 +301,8 @@ static void nv_ck804_host_stop(struct ata_host *host); | |||
301 | static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); | 301 | static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); |
302 | static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); | 302 | static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); |
303 | static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); | 303 | static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); |
304 | static int nv_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); | 304 | static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
305 | static int nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 305 | static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
306 | 306 | ||
307 | static void nv_nf2_freeze(struct ata_port *ap); | 307 | static void nv_nf2_freeze(struct ata_port *ap); |
308 | static void nv_nf2_thaw(struct ata_port *ap); | 308 | static void nv_nf2_thaw(struct ata_port *ap); |
@@ -653,12 +653,12 @@ static void nv_adma_register_mode(struct ata_port *ap) | |||
653 | return; | 653 | return; |
654 | 654 | ||
655 | status = readw(mmio + NV_ADMA_STAT); | 655 | status = readw(mmio + NV_ADMA_STAT); |
656 | while(!(status & NV_ADMA_STAT_IDLE) && count < 20) { | 656 | while (!(status & NV_ADMA_STAT_IDLE) && count < 20) { |
657 | ndelay(50); | 657 | ndelay(50); |
658 | status = readw(mmio + NV_ADMA_STAT); | 658 | status = readw(mmio + NV_ADMA_STAT); |
659 | count++; | 659 | count++; |
660 | } | 660 | } |
661 | if(count == 20) | 661 | if (count == 20) |
662 | ata_port_printk(ap, KERN_WARNING, | 662 | ata_port_printk(ap, KERN_WARNING, |
663 | "timeout waiting for ADMA IDLE, stat=0x%hx\n", | 663 | "timeout waiting for ADMA IDLE, stat=0x%hx\n", |
664 | status); | 664 | status); |
@@ -668,12 +668,12 @@ static void nv_adma_register_mode(struct ata_port *ap) | |||
668 | 668 | ||
669 | count = 0; | 669 | count = 0; |
670 | status = readw(mmio + NV_ADMA_STAT); | 670 | status = readw(mmio + NV_ADMA_STAT); |
671 | while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) { | 671 | while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) { |
672 | ndelay(50); | 672 | ndelay(50); |
673 | status = readw(mmio + NV_ADMA_STAT); | 673 | status = readw(mmio + NV_ADMA_STAT); |
674 | count++; | 674 | count++; |
675 | } | 675 | } |
676 | if(count == 20) | 676 | if (count == 20) |
677 | ata_port_printk(ap, KERN_WARNING, | 677 | ata_port_printk(ap, KERN_WARNING, |
678 | "timeout waiting for ADMA LEGACY, stat=0x%hx\n", | 678 | "timeout waiting for ADMA LEGACY, stat=0x%hx\n", |
679 | status); | 679 | status); |
@@ -697,13 +697,13 @@ static void nv_adma_mode(struct ata_port *ap) | |||
697 | writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); | 697 | writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); |
698 | 698 | ||
699 | status = readw(mmio + NV_ADMA_STAT); | 699 | status = readw(mmio + NV_ADMA_STAT); |
700 | while(((status & NV_ADMA_STAT_LEGACY) || | 700 | while (((status & NV_ADMA_STAT_LEGACY) || |
701 | !(status & NV_ADMA_STAT_IDLE)) && count < 20) { | 701 | !(status & NV_ADMA_STAT_IDLE)) && count < 20) { |
702 | ndelay(50); | 702 | ndelay(50); |
703 | status = readw(mmio + NV_ADMA_STAT); | 703 | status = readw(mmio + NV_ADMA_STAT); |
704 | count++; | 704 | count++; |
705 | } | 705 | } |
706 | if(count == 20) | 706 | if (count == 20) |
707 | ata_port_printk(ap, KERN_WARNING, | 707 | ata_port_printk(ap, KERN_WARNING, |
708 | "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n", | 708 | "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n", |
709 | status); | 709 | status); |
@@ -747,8 +747,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev) | |||
747 | on the port. */ | 747 | on the port. */ |
748 | adma_enable = 0; | 748 | adma_enable = 0; |
749 | nv_adma_register_mode(ap); | 749 | nv_adma_register_mode(ap); |
750 | } | 750 | } else { |
751 | else { | ||
752 | bounce_limit = *ap->dev->dma_mask; | 751 | bounce_limit = *ap->dev->dma_mask; |
753 | segment_boundary = NV_ADMA_DMA_BOUNDARY; | 752 | segment_boundary = NV_ADMA_DMA_BOUNDARY; |
754 | sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; | 753 | sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; |
@@ -757,23 +756,22 @@ static int nv_adma_slave_config(struct scsi_device *sdev) | |||
757 | 756 | ||
758 | pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg); | 757 | pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg); |
759 | 758 | ||
760 | if(ap->port_no == 1) | 759 | if (ap->port_no == 1) |
761 | config_mask = NV_MCP_SATA_CFG_20_PORT1_EN | | 760 | config_mask = NV_MCP_SATA_CFG_20_PORT1_EN | |
762 | NV_MCP_SATA_CFG_20_PORT1_PWB_EN; | 761 | NV_MCP_SATA_CFG_20_PORT1_PWB_EN; |
763 | else | 762 | else |
764 | config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | | 763 | config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | |
765 | NV_MCP_SATA_CFG_20_PORT0_PWB_EN; | 764 | NV_MCP_SATA_CFG_20_PORT0_PWB_EN; |
766 | 765 | ||
767 | if(adma_enable) { | 766 | if (adma_enable) { |
768 | new_reg = current_reg | config_mask; | 767 | new_reg = current_reg | config_mask; |
769 | pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; | 768 | pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; |
770 | } | 769 | } else { |
771 | else { | ||
772 | new_reg = current_reg & ~config_mask; | 770 | new_reg = current_reg & ~config_mask; |
773 | pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; | 771 | pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; |
774 | } | 772 | } |
775 | 773 | ||
776 | if(current_reg != new_reg) | 774 | if (current_reg != new_reg) |
777 | pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); | 775 | pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); |
778 | 776 | ||
779 | blk_queue_bounce_limit(sdev->request_queue, bounce_limit); | 777 | blk_queue_bounce_limit(sdev->request_queue, bounce_limit); |
@@ -807,7 +805,7 @@ static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb) | |||
807 | { | 805 | { |
808 | unsigned int idx = 0; | 806 | unsigned int idx = 0; |
809 | 807 | ||
810 | if(tf->flags & ATA_TFLAG_ISADDR) { | 808 | if (tf->flags & ATA_TFLAG_ISADDR) { |
811 | if (tf->flags & ATA_TFLAG_LBA48) { | 809 | if (tf->flags & ATA_TFLAG_LBA48) { |
812 | cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); | 810 | cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); |
813 | cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); | 811 | cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); |
@@ -824,12 +822,12 @@ static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb) | |||
824 | cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); | 822 | cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); |
825 | } | 823 | } |
826 | 824 | ||
827 | if(tf->flags & ATA_TFLAG_DEVICE) | 825 | if (tf->flags & ATA_TFLAG_DEVICE) |
828 | cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); | 826 | cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); |
829 | 827 | ||
830 | cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); | 828 | cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); |
831 | 829 | ||
832 | while(idx < 12) | 830 | while (idx < 12) |
833 | cpb[idx++] = cpu_to_le16(IGN); | 831 | cpb[idx++] = cpu_to_le16(IGN); |
834 | 832 | ||
835 | return idx; | 833 | return idx; |
@@ -850,7 +848,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
850 | int freeze = 0; | 848 | int freeze = 0; |
851 | 849 | ||
852 | ata_ehi_clear_desc(ehi); | 850 | ata_ehi_clear_desc(ehi); |
853 | __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags ); | 851 | __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags); |
854 | if (flags & NV_CPB_RESP_ATA_ERR) { | 852 | if (flags & NV_CPB_RESP_ATA_ERR) { |
855 | ata_ehi_push_desc(ehi, "ATA error"); | 853 | ata_ehi_push_desc(ehi, "ATA error"); |
856 | ehi->err_mask |= AC_ERR_DEV; | 854 | ehi->err_mask |= AC_ERR_DEV; |
@@ -879,7 +877,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
879 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); | 877 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); |
880 | VPRINTK("CPB flags done, flags=0x%x\n", flags); | 878 | VPRINTK("CPB flags done, flags=0x%x\n", flags); |
881 | if (likely(qc)) { | 879 | if (likely(qc)) { |
882 | DPRINTK("Completing qc from tag %d\n",cpb_num); | 880 | DPRINTK("Completing qc from tag %d\n", cpb_num); |
883 | ata_qc_complete(qc); | 881 | ata_qc_complete(qc); |
884 | } else { | 882 | } else { |
885 | struct ata_eh_info *ehi = &ap->link.eh_info; | 883 | struct ata_eh_info *ehi = &ap->link.eh_info; |
@@ -952,7 +950,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
952 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { | 950 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { |
953 | u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) | 951 | u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) |
954 | >> (NV_INT_PORT_SHIFT * i); | 952 | >> (NV_INT_PORT_SHIFT * i); |
955 | if(ata_tag_valid(ap->link.active_tag)) | 953 | if (ata_tag_valid(ap->link.active_tag)) |
956 | /** NV_INT_DEV indication seems unreliable at times | 954 | /** NV_INT_DEV indication seems unreliable at times |
957 | at least in ADMA mode. Force it on always when a | 955 | at least in ADMA mode. Force it on always when a |
958 | command is active, to prevent losing interrupts. */ | 956 | command is active, to prevent losing interrupts. */ |
@@ -966,7 +964,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
966 | 964 | ||
967 | gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); | 965 | gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); |
968 | 966 | ||
969 | if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && | 967 | if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && |
970 | !notifier_error) | 968 | !notifier_error) |
971 | /* Nothing to do */ | 969 | /* Nothing to do */ |
972 | continue; | 970 | continue; |
@@ -990,7 +988,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
990 | struct ata_eh_info *ehi = &ap->link.eh_info; | 988 | struct ata_eh_info *ehi = &ap->link.eh_info; |
991 | 989 | ||
992 | ata_ehi_clear_desc(ehi); | 990 | ata_ehi_clear_desc(ehi); |
993 | __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); | 991 | __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); |
994 | if (status & NV_ADMA_STAT_TIMEOUT) { | 992 | if (status & NV_ADMA_STAT_TIMEOUT) { |
995 | ehi->err_mask |= AC_ERR_SYSTEM; | 993 | ehi->err_mask |= AC_ERR_SYSTEM; |
996 | ata_ehi_push_desc(ehi, "timeout"); | 994 | ata_ehi_push_desc(ehi, "timeout"); |
@@ -1056,14 +1054,14 @@ static void nv_adma_freeze(struct ata_port *ap) | |||
1056 | return; | 1054 | return; |
1057 | 1055 | ||
1058 | /* clear any outstanding CK804 notifications */ | 1056 | /* clear any outstanding CK804 notifications */ |
1059 | writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), | 1057 | writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), |
1060 | ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); | 1058 | ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); |
1061 | 1059 | ||
1062 | /* Disable interrupt */ | 1060 | /* Disable interrupt */ |
1063 | tmp = readw(mmio + NV_ADMA_CTL); | 1061 | tmp = readw(mmio + NV_ADMA_CTL); |
1064 | writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), | 1062 | writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), |
1065 | mmio + NV_ADMA_CTL); | 1063 | mmio + NV_ADMA_CTL); |
1066 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1064 | readw(mmio + NV_ADMA_CTL ); /* flush posted write */ |
1067 | } | 1065 | } |
1068 | 1066 | ||
1069 | static void nv_adma_thaw(struct ata_port *ap) | 1067 | static void nv_adma_thaw(struct ata_port *ap) |
@@ -1079,9 +1077,9 @@ static void nv_adma_thaw(struct ata_port *ap) | |||
1079 | 1077 | ||
1080 | /* Enable interrupt */ | 1078 | /* Enable interrupt */ |
1081 | tmp = readw(mmio + NV_ADMA_CTL); | 1079 | tmp = readw(mmio + NV_ADMA_CTL); |
1082 | writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), | 1080 | writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), |
1083 | mmio + NV_ADMA_CTL); | 1081 | mmio + NV_ADMA_CTL); |
1084 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1082 | readw(mmio + NV_ADMA_CTL ); /* flush posted write */ |
1085 | } | 1083 | } |
1086 | 1084 | ||
1087 | static void nv_adma_irq_clear(struct ata_port *ap) | 1085 | static void nv_adma_irq_clear(struct ata_port *ap) |
@@ -1096,7 +1094,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) | |||
1096 | } | 1094 | } |
1097 | 1095 | ||
1098 | /* clear any outstanding CK804 notifications */ | 1096 | /* clear any outstanding CK804 notifications */ |
1099 | writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), | 1097 | writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), |
1100 | ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); | 1098 | ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); |
1101 | 1099 | ||
1102 | /* clear ADMA status */ | 1100 | /* clear ADMA status */ |
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 9f9f7b30654a..b6026bceccd1 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c | |||
@@ -62,13 +62,13 @@ | |||
62 | submit ATA packet to hardware | 62 | submit ATA packet to hardware |
63 | hardware executes ATA WRITE command, w/ data in DIMM | 63 | hardware executes ATA WRITE command, w/ data in DIMM |
64 | hardware raises interrupt | 64 | hardware raises interrupt |
65 | 65 | ||
66 | and each READ looks like this: | 66 | and each READ looks like this: |
67 | 67 | ||
68 | submit ATA packet to hardware | 68 | submit ATA packet to hardware |
69 | hardware executes ATA READ command, w/ data in DIMM | 69 | hardware executes ATA READ command, w/ data in DIMM |
70 | hardware raises interrupt | 70 | hardware raises interrupt |
71 | 71 | ||
72 | submit HDMA packet to hardware | 72 | submit HDMA packet to hardware |
73 | hardware copies data from DIMM to system memory | 73 | hardware copies data from DIMM to system memory |
74 | hardware raises interrupt | 74 | hardware raises interrupt |
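Each READ therefore completes in two interrupt-driven stages: the ATA transfer into the DIMM, then a host-DMA copy out of it. A stand-alone sketch of that sequencing follows, with purely illustrative names and the hardware steps stubbed out (the real driver's interrupt path is considerably more involved):

/* Illustration only: the two-interrupt READ flow described above. */
#include <stdio.h>

enum sx4_state { ATA_SUBMITTED, HDMA_SUBMITTED, DONE };

struct sx4_cmd { enum sx4_state state; };

static void submit_hdma(struct sx4_cmd *c)  { (void)c; printf("HDMA: DIMM -> system memory\n"); }
static void complete_cmd(struct sx4_cmd *c) { (void)c; printf("READ complete\n"); }

/* Called once per hardware interrupt raised for this command. */
static void sx4_read_irq(struct sx4_cmd *c)
{
	if (c->state == ATA_SUBMITTED) {          /* drive data now sits in the DIMM */
		submit_hdma(c);
		c->state = HDMA_SUBMITTED;
	} else if (c->state == HDMA_SUBMITTED) {  /* data has reached system memory */
		complete_cmd(c);
		c->state = DONE;
	}
}

int main(void)
{
	struct sx4_cmd c = { ATA_SUBMITTED };     /* ATA READ packet already submitted */
	sx4_read_irq(&c);                         /* first interrupt */
	sx4_read_irq(&c);                         /* second interrupt */
	return 0;
}

A WRITE needs only the single ATA-phase interrupt once the data has already been staged in the DIMM, which is why its sequence above is shorter.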
diff --git a/drivers/fc4/Kconfig b/drivers/fc4/Kconfig deleted file mode 100644 index 345dbe6f10df..000000000000 --- a/drivers/fc4/Kconfig +++ /dev/null | |||
@@ -1,81 +0,0 @@ | |||
1 | # | ||
2 | # FC4 device configuration | ||
3 | # | ||
4 | |||
5 | menu "Fibre Channel support" | ||
6 | |||
7 | config FC4 | ||
8 | tristate "Fibre Channel and FC4 SCSI support" | ||
9 | ---help--- | ||
10 | Fibre Channel is a high speed serial protocol mainly used to | ||
11 | connect large storage devices to the computer; it is compatible with | ||
12 | and intended to replace SCSI. | ||
13 | |||
14 | This is experimental support for storage arrays connected to your | ||
15 | computer using optical fibre cables and the "X3.269-199X Fibre | ||
16 | Channel Protocol for SCSI" specification. If you want to use this, | ||
17 | you need to say Y here and to "SCSI support" as well as to the | ||
18 | drivers for the storage array itself and for the interface adapter | ||
19 | such as SOC or SOC+. This subsystem could even serve for IP | ||
20 | networking, with some code extensions. | ||
21 | |||
22 | If unsure, say N. | ||
23 | |||
24 | comment "FC4 drivers" | ||
25 | depends on FC4 | ||
26 | |||
27 | config FC4_SOC | ||
28 | tristate "Sun SOC/Sbus" | ||
29 | depends on FC4!=n && SPARC | ||
30 | help | ||
31 | Serial Optical Channel is an interface card with one or two Fibre | ||
32 | Optic ports, each of which can be connected to a disk array. Note | ||
33 | that if you have older firmware in the card, you'll need the | ||
34 | microcode from the Solaris driver to make it work. | ||
35 | |||
36 | To compile this support as a module, choose M here: the module will | ||
37 | be called soc. | ||
38 | |||
39 | config FC4_SOCAL | ||
40 | tristate "Sun SOC+ (aka SOCAL)" | ||
41 | depends on FC4!=n && SPARC | ||
42 | ---help--- | ||
43 | Serial Optical Channel Plus is an interface card with up to two | ||
44 | Fibre Optic ports. This card supports FC Arbitrated Loop (usually | ||
45 | A5000 or internal FC disks in E[3-6]000 machines through the | ||
46 | Interface Board). You'll probably need the microcode from the | ||
47 | Solaris driver to make it work. | ||
48 | |||
49 | To compile this support as a module, choose M here: the module will | ||
50 | be called socal. | ||
51 | |||
52 | comment "FC4 targets" | ||
53 | depends on FC4 | ||
54 | |||
55 | config SCSI_PLUTO | ||
56 | tristate "SparcSTORAGE Array 100 and 200 series" | ||
57 | depends on FC4!=n && SCSI | ||
58 | help | ||
59 | If you never bought a disk array made by Sun, go with N. | ||
60 | |||
61 | To compile this support as a module, choose M here: the module will | ||
62 | be called pluto. | ||
63 | |||
64 | config SCSI_FCAL | ||
65 | tristate "Sun Enterprise Network Array (A5000 and EX500)" if SPARC | ||
66 | depends on FC4!=n && SCSI | ||
67 | help | ||
68 | This driver drives FC-AL disks connected through a Fibre Channel | ||
69 | card using the drivers/fc4 layer (currently only SOCAL). The most | ||
70 | common is either A5000 array or internal disks in E[3-6]000 | ||
71 | machines. | ||
72 | |||
73 | To compile this support as a module, choose M here: the module will | ||
74 | be called fcal. | ||
75 | |||
76 | config SCSI_FCAL | ||
77 | prompt "Generic FC-AL disk driver" | ||
78 | depends on FC4!=n && SCSI && !SPARC | ||
79 | |||
80 | endmenu | ||
81 | |||
diff --git a/drivers/fc4/Makefile b/drivers/fc4/Makefile deleted file mode 100644 index 0db3fbb553e9..000000000000 --- a/drivers/fc4/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the Linux Fibre Channel device drivers. | ||
3 | # | ||
4 | |||
5 | fc4-objs := fc.o fc_syms.o | ||
6 | |||
7 | obj-$(CONFIG_FC4) += fc4.o | ||
8 | obj-$(CONFIG_FC4_SOC) += soc.o | ||
9 | obj-$(CONFIG_FC4_SOCAL) += socal.o | ||
diff --git a/drivers/fc4/fc-al.h b/drivers/fc4/fc-al.h deleted file mode 100644 index 62d3ca436d72..000000000000 --- a/drivers/fc4/fc-al.h +++ /dev/null | |||
@@ -1,27 +0,0 @@ | |||
1 | /* fc-al.h: Definitions for Fibre Channel Arbitrated Loop topology. | ||
2 | * | ||
3 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
4 | * | ||
5 | * Sources: | ||
6 | * Fibre Channel Arbitrated Loop (FC-AL), ANSI, Rev. 4.5, 1995 | ||
7 | */ | ||
8 | |||
9 | #ifndef __FC_AL_H | ||
10 | #define __FC_AL_H | ||
11 | |||
12 | /* Loop initialization payloads */ | ||
13 | #define FC_AL_LISM 0x11010000 /* Select Master, 12B payload */ | ||
14 | #define FC_AL_LIFA 0x11020000 /* Fabric Assign AL_PA bitmap, 20B payload */ | ||
15 | #define FC_AL_LIPA 0x11030000 /* Previously Acquired AL_PA bitmap, 20B payload */ | ||
16 | #define FC_AL_LIHA 0x11040000 /* Hard Assigned AL_PA bitmap, 20B payload */ | ||
17 | #define FC_AL_LISA 0x11050000 /* Soft Assigned AL_PA bitmap, 20B payload */ | ||
18 | #define FC_AL_LIRP 0x11060000 /* Report AL_PA position map, 132B payload */ | ||
19 | #define FC_AL_LILP 0x11070000 /* Loop AL_PA position map, 132B payload */ | ||
20 | |||
21 | typedef struct { | ||
22 | u32 magic; | ||
23 | u8 len; | ||
24 | u8 alpa[127]; | ||
25 | } fc_al_posmap; | ||
26 | |||
27 | #endif /* !(__FC_H) */ | ||
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c deleted file mode 100644 index 82de9e1adb1e..000000000000 --- a/drivers/fc4/fc.c +++ /dev/null | |||
@@ -1,1146 +0,0 @@ | |||
1 | /* fc.c: Generic Fibre Channel and FC4 SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | * Copyright (C) 1997,1998 Jirka Hanika (geo@ff.cuni.cz) | ||
5 | * | ||
6 | * There are two kinds of Fibre Channel adapters used in Linux. Either | ||
7 | * the adapter is "smart" and does all FC bookkeeping by itself and | ||
8 | * just presents a standard SCSI interface to the operating system | ||
9 | * (that's e.g. the case with Qlogic FC cards), or leaves most of the FC | ||
10 | * bookkeeping to the OS (e.g. soc, socal). Drivers for the former adapters | ||
11 | * will look like normal SCSI drivers (with the exception of max_id will be | ||
12 | * usually 127), the latter on the other side allows SCSI, IP over FC and other | ||
13 | * protocols. This driver tree is for the latter adapters. | ||
14 | * | ||
15 | * This file should support both Point-to-Point and Arbitrated Loop topologies. | ||
16 | * | ||
17 | * Sources: | ||
18 | * Fibre Channel Physical & Signaling Interface (FC-PH), dpANS, 1994 | ||
19 | * dpANS Fibre Channel Protocol for SCSI (X3.269-199X), Rev. 012, 1995 | ||
20 | * Fibre Channel Arbitrated Loop (FC-AL), Rev. 4.5, 1995 | ||
21 | * Fibre Channel Private Loop SCSI Direct Attach (FC-PLDA), Rev. 2.1, 1997 | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/jiffies.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/fcntl.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | #include <linux/ioport.h> | ||
32 | #include <linux/in.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/init.h> | ||
36 | |||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/irq.h> | ||
39 | #include <asm/semaphore.h> | ||
40 | #include "fcp_impl.h" | ||
41 | #include <scsi/scsi_host.h> | ||
42 | |||
43 | /* #define FCDEBUG */ | ||
44 | |||
45 | #define fc_printk printk ("%s: ", fc->name); printk | ||
46 | |||
47 | #ifdef FCDEBUG | ||
48 | #define FCD(x) fc_printk x; | ||
49 | #define FCND(x) printk ("FC: "); printk x; | ||
50 | #else | ||
51 | #define FCD(x) | ||
52 | #define FCND(x) | ||
53 | #endif | ||
54 | |||
55 | #ifdef __sparc__ | ||
56 | #define dma_alloc_consistent(d,s,p) sbus_alloc_consistent(d,s,p) | ||
57 | #define dma_free_consistent(d,s,v,h) sbus_free_consistent(d,s,v,h) | ||
58 | #define dma_map_single(d,v,s,dir) sbus_map_single(d,v,s,dir) | ||
59 | #define dma_unmap_single(d,h,s,dir) sbus_unmap_single(d,h,s,dir) | ||
60 | #define dma_map_sg(d,s,n,dir) sbus_map_sg(d,s,n,dir) | ||
61 | #define dma_unmap_sg(d,s,n,dir) sbus_unmap_sg(d,s,n,dir) | ||
62 | #else | ||
63 | #define dma_alloc_consistent(d,s,p) pci_alloc_consistent(d,s,p) | ||
64 | #define dma_free_consistent(d,s,v,h) pci_free_consistent(d,s,v,h) | ||
65 | #define dma_map_single(d,v,s,dir) pci_map_single(d,v,s,dir) | ||
66 | #define dma_unmap_single(d,h,s,dir) pci_unmap_single(d,h,s,dir) | ||
67 | #define dma_map_sg(d,s,n,dir) pci_map_sg(d,s,n,dir) | ||
68 | #define dma_unmap_sg(d,s,n,dir) pci_unmap_sg(d,s,n,dir) | ||
69 | #endif | ||
70 | |||
71 | #define FCP_CMND(SCpnt) ((fcp_cmnd *)&(SCpnt->SCp)) | ||
72 | #define FC_SCMND(SCpnt) ((fc_channel *)(SCpnt->device->host->hostdata[0])) | ||
73 | #define SC_FCMND(fcmnd) ((struct scsi_cmnd *)((long)fcmnd - (long)&(((struct scsi_cmnd *)0)->SCp))) | ||
74 | |||
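SC_FCMND() above rebuilds a scsi_cmnd pointer from a pointer to its embedded SCp field by subtracting the field's offset; it is the same idiom the kernel later spells container_of(). A self-contained userspace illustration of the arithmetic, using stand-in structure names rather than the real scsi_cmnd layout:

/* Illustration of the SC_FCMND() offset trick; the types here are stand-ins. */
#include <stdio.h>
#include <stddef.h>

struct inner { int token; };
struct outer { int id; struct inner scp; };

#define CONTAINER_OF(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct outer o = { .id = 42 };
	struct inner *ip = &o.scp;

	/* Same arithmetic as SC_FCMND(): subtract the member's offset. */
	struct outer *op = CONTAINER_OF(ip, struct outer, scp);
	printf("recovered id = %d\n", op->id);    /* prints 42 */
	return 0;
}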
75 | static int fcp_scsi_queue_it(fc_channel *, struct scsi_cmnd *, fcp_cmnd *, int); | ||
76 | void fcp_queue_empty(fc_channel *); | ||
77 | |||
78 | static void fcp_scsi_insert_queue (fc_channel *fc, fcp_cmnd *fcmd) | ||
79 | { | ||
80 | if (!fc->scsi_que) { | ||
81 | fc->scsi_que = fcmd; | ||
82 | fcmd->next = fcmd; | ||
83 | fcmd->prev = fcmd; | ||
84 | } else { | ||
85 | fc->scsi_que->prev->next = fcmd; | ||
86 | fcmd->prev = fc->scsi_que->prev; | ||
87 | fc->scsi_que->prev = fcmd; | ||
88 | fcmd->next = fc->scsi_que; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | static void fcp_scsi_remove_queue (fc_channel *fc, fcp_cmnd *fcmd) | ||
93 | { | ||
94 | if (fcmd == fcmd->next) { | ||
95 | fc->scsi_que = NULL; | ||
96 | return; | ||
97 | } | ||
98 | if (fcmd == fc->scsi_que) | ||
99 | fc->scsi_que = fcmd->next; | ||
100 | fcmd->prev->next = fcmd->next; | ||
101 | fcmd->next->prev = fcmd->prev; | ||
102 | } | ||
103 | |||
104 | fc_channel *fc_channels = NULL; | ||
105 | |||
106 | #define LSMAGIC 620829043 | ||
107 | typedef struct { | ||
108 | /* Must be first */ | ||
109 | struct semaphore sem; | ||
110 | int magic; | ||
111 | int count; | ||
112 | logi *logi; | ||
113 | fcp_cmnd *fcmds; | ||
114 | atomic_t todo; | ||
115 | struct timer_list timer; | ||
116 | unsigned char grace[0]; | ||
117 | } ls; | ||
118 | |||
119 | #define LSOMAGIC 654907799 | ||
120 | typedef struct { | ||
121 | /* Must be first */ | ||
122 | struct semaphore sem; | ||
123 | int magic; | ||
124 | int count; | ||
125 | fcp_cmnd *fcmds; | ||
126 | atomic_t todo; | ||
127 | struct timer_list timer; | ||
128 | } lso; | ||
129 | |||
130 | #define LSEMAGIC 84482456 | ||
131 | typedef struct { | ||
132 | /* Must be first */ | ||
133 | struct semaphore sem; | ||
134 | int magic; | ||
135 | int status; | ||
136 | struct timer_list timer; | ||
137 | } lse; | ||
138 | |||
139 | static void fcp_login_timeout(unsigned long data) | ||
140 | { | ||
141 | ls *l = (ls *)data; | ||
142 | FCND(("Login timeout\n")) | ||
143 | up(&l->sem); | ||
144 | } | ||
145 | |||
146 | static void fcp_login_done(fc_channel *fc, int i, int status) | ||
147 | { | ||
148 | fcp_cmnd *fcmd; | ||
149 | logi *plogi; | ||
150 | fc_hdr *fch; | ||
151 | ls *l = (ls *)fc->ls; | ||
152 | |||
153 | FCD(("Login done %d %d\n", i, status)) | ||
154 | if (i < l->count) { | ||
155 | if (fc->state == FC_STATE_FPORT_OK) { | ||
156 | FCD(("Additional FPORT_OK received with status %d\n", status)) | ||
157 | return; | ||
158 | } | ||
159 | switch (status) { | ||
160 | case FC_STATUS_OK: /* Oh, we found a fabric */ | ||
161 | case FC_STATUS_P_RJT: /* Oh, we haven't found any */ | ||
162 | fc->state = FC_STATE_FPORT_OK; | ||
163 | fcmd = l->fcmds + i; | ||
164 | plogi = l->logi + 3 * i; | ||
165 | dma_unmap_single (fc->dev, fcmd->cmd, 3 * sizeof(logi), | ||
166 | DMA_BIDIRECTIONAL); | ||
167 | plogi->code = LS_PLOGI; | ||
168 | memcpy (&plogi->nport_wwn, &fc->wwn_nport, sizeof(fc_wwn)); | ||
169 | memcpy (&plogi->node_wwn, &fc->wwn_node, sizeof(fc_wwn)); | ||
170 | memcpy (&plogi->common, fc->common_svc, sizeof(common_svc_parm)); | ||
171 | memcpy (&plogi->class1, fc->class_svcs, 3*sizeof(svc_parm)); | ||
172 | fch = &fcmd->fch; | ||
173 | fcmd->token += l->count; | ||
174 | FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, fc->did); | ||
175 | FILL_FCHDR_SID(fch, fc->sid); | ||
176 | #ifdef FCDEBUG | ||
177 | { | ||
178 | int i; | ||
179 | unsigned *x = (unsigned *)plogi; | ||
180 | printk ("logi: "); | ||
181 | for (i = 0; i < 21; i++) | ||
182 | printk ("%08x ", x[i]); | ||
183 | printk ("\n"); | ||
184 | } | ||
185 | #endif | ||
186 | fcmd->cmd = dma_map_single (fc->dev, plogi, 3 * sizeof(logi), | ||
187 | DMA_BIDIRECTIONAL); | ||
188 | fcmd->rsp = fcmd->cmd + 2 * sizeof(logi); | ||
189 | if (fc->hw_enque (fc, fcmd)) | ||
190 | printk ("FC: Cannot enque PLOGI packet on %s\n", fc->name); | ||
191 | break; | ||
192 | case FC_STATUS_ERR_OFFLINE: | ||
193 | fc->state = FC_STATE_MAYBEOFFLINE; | ||
194 | FCD (("FC is offline %d\n", l->grace[i])) | ||
195 | break; | ||
196 | default: | ||
197 | printk ("FLOGI failed for %s with status %d\n", fc->name, status); | ||
198 | /* Do some sort of error recovery here */ | ||
199 | break; | ||
200 | } | ||
201 | } else { | ||
202 | i -= l->count; | ||
203 | if (fc->state != FC_STATE_FPORT_OK) { | ||
204 | FCD(("Unexpected N-PORT rsp received")) | ||
205 | return; | ||
206 | } | ||
207 | switch (status) { | ||
208 | case FC_STATUS_OK: | ||
209 | plogi = l->logi + 3 * i; | ||
210 | dma_unmap_single (fc->dev, l->fcmds[i].cmd, 3 * sizeof(logi), | ||
211 | DMA_BIDIRECTIONAL); | ||
212 | if (!fc->wwn_dest.lo && !fc->wwn_dest.hi) { | ||
213 | memcpy (&fc->wwn_dest, &plogi[1].node_wwn, sizeof(fc_wwn)); | ||
214 | FCD(("Dest WWN %08x%08x\n", *(u32 *)&fc->wwn_dest, fc->wwn_dest.lo)) | ||
215 | } else if (fc->wwn_dest.lo != plogi[1].node_wwn.lo || | ||
216 | fc->wwn_dest.hi != plogi[1].node_wwn.hi) { | ||
217 | printk ("%s: mismatch in wwns. Got %08x%08x, expected %08x%08x\n", | ||
218 | fc->name, | ||
219 | *(u32 *)&plogi[1].node_wwn, plogi[1].node_wwn.lo, | ||
220 | *(u32 *)&fc->wwn_dest, fc->wwn_dest.lo); | ||
221 | } | ||
222 | fc->state = FC_STATE_ONLINE; | ||
223 | printk ("%s: ONLINE\n", fc->name); | ||
224 | if (atomic_dec_and_test (&l->todo)) | ||
225 | up(&l->sem); | ||
226 | break; | ||
227 | case FC_STATUS_ERR_OFFLINE: | ||
228 | fc->state = FC_STATE_OFFLINE; | ||
229 | dma_unmap_single (fc->dev, l->fcmds[i].cmd, 3 * sizeof(logi), | ||
230 | DMA_BIDIRECTIONAL); | ||
231 | printk ("%s: FC is offline\n", fc->name); | ||
232 | if (atomic_dec_and_test (&l->todo)) | ||
233 | up(&l->sem); | ||
234 | break; | ||
235 | default: | ||
236 | printk ("PLOGI failed for %s with status %d\n", fc->name, status); | ||
237 | /* Do some sort of error recovery here */ | ||
238 | break; | ||
239 | } | ||
240 | } | ||
241 | } | ||
242 | |||
243 | static void fcp_report_map_done(fc_channel *fc, int i, int status) | ||
244 | { | ||
245 | fcp_cmnd *fcmd; | ||
246 | fc_hdr *fch; | ||
247 | unsigned char j; | ||
248 | ls *l = (ls *)fc->ls; | ||
249 | fc_al_posmap *p; | ||
250 | |||
251 | FCD(("Report map done %d %d\n", i, status)) | ||
252 | switch (status) { | ||
253 | case FC_STATUS_OK: /* Ok, let's have a fun on a loop */ | ||
254 | dma_unmap_single (fc->dev, l->fcmds[i].cmd, 3 * sizeof(logi), | ||
255 | DMA_BIDIRECTIONAL); | ||
256 | p = (fc_al_posmap *)(l->logi + 3 * i); | ||
257 | #ifdef FCDEBUG | ||
258 | { | ||
259 | u32 *u = (u32 *)p; | ||
260 | FCD(("%08x\n", u[0])) | ||
261 | u ++; | ||
262 | FCD(("%08x.%08x.%08x.%08x.%08x.%08x.%08x.%08x\n", u[0],u[1],u[2],u[3],u[4],u[5],u[6],u[7])) | ||
263 | } | ||
264 | #endif | ||
265 | if ((p->magic & 0xffff0000) != FC_AL_LILP || !p->len) { | ||
266 | printk ("FC: Bad magic from REPORT_AL_MAP on %s - %08x\n", fc->name, p->magic); | ||
267 | fc->state = FC_STATE_OFFLINE; | ||
268 | } else { | ||
269 | fc->posmap = kzalloc(sizeof(fcp_posmap)+p->len, GFP_KERNEL); | ||
270 | if (!fc->posmap) { | ||
271 | printk("FC: Not enough memory, offlining channel\n"); | ||
272 | fc->state = FC_STATE_OFFLINE; | ||
273 | } else { | ||
274 | int k; | ||
275 | /* FIXME: This is where SOCAL transfers our AL-PA. | ||
276 | Keep it here till we find out what other cards do... */ | ||
277 | fc->sid = (p->magic & 0xff); | ||
278 | for (i = 0; i < p->len; i++) | ||
279 | if (p->alpa[i] == fc->sid) | ||
280 | break; | ||
281 | k = p->len; | ||
282 | if (i == p->len) | ||
283 | i = 0; | ||
284 | else { | ||
285 | p->len--; | ||
286 | i++; | ||
287 | } | ||
288 | fc->posmap->len = p->len; | ||
289 | for (j = 0; j < p->len; j++) { | ||
290 | if (i == k) i = 0; | ||
291 | fc->posmap->list[j] = p->alpa[i++]; | ||
292 | } | ||
293 | fc->state = FC_STATE_ONLINE; | ||
294 | } | ||
295 | } | ||
296 | printk ("%s: ONLINE\n", fc->name); | ||
297 | if (atomic_dec_and_test (&l->todo)) | ||
298 | up(&l->sem); | ||
299 | break; | ||
300 | case FC_STATUS_POINTTOPOINT: /* We're Point-to-Point, no AL... */ | ||
301 | FCD(("SID %d DID %d\n", fc->sid, fc->did)) | ||
302 | fcmd = l->fcmds + i; | ||
303 | dma_unmap_single(fc->dev, fcmd->cmd, 3 * sizeof(logi), | ||
304 | DMA_BIDIRECTIONAL); | ||
305 | fch = &fcmd->fch; | ||
306 | memset(l->logi + 3 * i, 0, 3 * sizeof(logi)); | ||
307 | FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, FS_FABRIC_F_PORT); | ||
308 | FILL_FCHDR_SID(fch, 0); | ||
309 | FILL_FCHDR_TYPE_FCTL(fch, TYPE_EXTENDED_LS, F_CTL_FIRST_SEQ | F_CTL_SEQ_INITIATIVE); | ||
310 | FILL_FCHDR_SEQ_DF_SEQ(fch, 0, 0, 0); | ||
311 | FILL_FCHDR_OXRX(fch, 0xffff, 0xffff); | ||
312 | fch->param = 0; | ||
313 | l->logi [3 * i].code = LS_FLOGI; | ||
314 | fcmd->cmd = dma_map_single (fc->dev, l->logi + 3 * i, 3 * sizeof(logi), | ||
315 | DMA_BIDIRECTIONAL); | ||
316 | fcmd->rsp = fcmd->cmd + sizeof(logi); | ||
317 | fcmd->cmdlen = sizeof(logi); | ||
318 | fcmd->rsplen = sizeof(logi); | ||
319 | fcmd->data = (dma_addr_t)NULL; | ||
320 | fcmd->class = FC_CLASS_SIMPLE; | ||
321 | fcmd->proto = TYPE_EXTENDED_LS; | ||
322 | if (fc->hw_enque (fc, fcmd)) | ||
323 | printk ("FC: Cannot enque FLOGI packet on %s\n", fc->name); | ||
324 | break; | ||
325 | case FC_STATUS_ERR_OFFLINE: | ||
326 | fc->state = FC_STATE_MAYBEOFFLINE; | ||
327 | FCD (("FC is offline %d\n", l->grace[i])) | ||
328 | break; | ||
329 | default: | ||
330 | printk ("FLOGI failed for %s with status %d\n", fc->name, status); | ||
331 | /* Do some sort of error recovery here */ | ||
332 | break; | ||
333 | } | ||
334 | } | ||
335 | |||
336 | void fcp_register(fc_channel *fc, u8 type, int unregister) | ||
337 | { | ||
338 | int size, i; | ||
339 | int slots = (fc->can_queue * 3) >> 1; | ||
340 | |||
341 | FCND(("Going to %sregister\n", unregister ? "un" : "")) | ||
342 | |||
343 | if (type == TYPE_SCSI_FCP) { | ||
344 | if (!unregister) { | ||
345 | fc->scsi_cmd_pool = (fcp_cmd *) | ||
346 | dma_alloc_consistent (fc->dev, | ||
347 | slots * (sizeof (fcp_cmd) + fc->rsp_size), | ||
348 | &fc->dma_scsi_cmd); | ||
349 | fc->scsi_rsp_pool = (char *)(fc->scsi_cmd_pool + slots); | ||
350 | fc->dma_scsi_rsp = fc->dma_scsi_cmd + slots * sizeof (fcp_cmd); | ||
351 | fc->scsi_bitmap_end = (slots + 63) & ~63; | ||
352 | size = fc->scsi_bitmap_end / 8; | ||
353 | fc->scsi_bitmap = kzalloc (size, GFP_KERNEL); | ||
354 | set_bit (0, fc->scsi_bitmap); | ||
355 | for (i = fc->can_queue; i < fc->scsi_bitmap_end; i++) | ||
356 | set_bit (i, fc->scsi_bitmap); | ||
357 | fc->scsi_free = fc->can_queue; | ||
358 | fc->cmd_slots = kzalloc(slots * sizeof(fcp_cmnd*), GFP_KERNEL); | ||
359 | fc->abort_count = 0; | ||
360 | } else { | ||
361 | fc->scsi_name[0] = 0; | ||
362 | kfree (fc->scsi_bitmap); | ||
363 | kfree (fc->cmd_slots); | ||
364 | FCND(("Unregistering\n")); | ||
365 | #if 0 | ||
366 | if (fc->rst_pkt) { | ||
367 | if (fc->rst_pkt->eh_state == SCSI_STATE_UNUSED) | ||
368 | kfree(fc->rst_pkt); | ||
369 | else { | ||
370 | /* Can't happen. Some memory would be lost. */ | ||
371 | printk("FC: Reset in progress. Now?!"); | ||
372 | } | ||
373 | } | ||
374 | #endif | ||
375 | FCND(("Unregistered\n")); | ||
376 | } | ||
377 | } else | ||
378 | printk ("FC: %segistering unknown type %02x\n", unregister ? "Unr" : "R", type); | ||
379 | } | ||
380 | |||
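The slot bookkeeping set up in fcp_register() above is easy to misread: the command pool holds 1.5x can_queue entries, the bitmap length is rounded up to a multiple of 64 bits, and bit 0 plus every bit from can_queue upward is pre-set, so find_first_zero_bit() never returns an index at or above can_queue. A worked example of that arithmetic, with can_queue = 64 chosen only for illustration:

/* Worked example of the fcp_register() pool sizing; can_queue = 64 is arbitrary. */
#include <stdio.h>

int main(void)
{
	int can_queue = 64;                       /* example value */
	int slots = (can_queue * 3) >> 1;         /* 96 command slots in the pool */
	int bitmap_end = (slots + 63) & ~63;      /* 128: rounded up to whole 64-bit words */
	int bitmap_bytes = bitmap_end / 8;        /* 16 bytes allocated for the bitmap */

	printf("slots=%d bitmap_end=%d bytes=%d\n", slots, bitmap_end, bitmap_bytes);
	/* Bit 0 and bits 64..127 start out set, so only indices 1..63 are ever
	 * handed out as command tokens. */
	return 0;
}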
381 | static void fcp_scsi_done(struct scsi_cmnd *SCpnt); | ||
382 | |||
383 | static inline void fcp_scsi_receive(fc_channel *fc, int token, int status, fc_hdr *fch) | ||
384 | { | ||
385 | fcp_cmnd *fcmd; | ||
386 | fcp_rsp *rsp; | ||
387 | int host_status; | ||
388 | struct scsi_cmnd *SCpnt; | ||
389 | int sense_len; | ||
390 | int rsp_status; | ||
391 | |||
392 | fcmd = fc->cmd_slots[token]; | ||
393 | if (!fcmd) return; | ||
394 | rsp = (fcp_rsp *) (fc->scsi_rsp_pool + fc->rsp_size * token); | ||
395 | SCpnt = SC_FCMND(fcmd); | ||
396 | |||
397 | if (SCpnt->done != fcp_scsi_done) | ||
398 | return; | ||
399 | |||
400 | rsp_status = rsp->fcp_status; | ||
401 | FCD(("rsp_status %08x status %08x\n", rsp_status, status)) | ||
402 | switch (status) { | ||
403 | case FC_STATUS_OK: | ||
404 | host_status=DID_OK; | ||
405 | |||
406 | if (rsp_status & FCP_STATUS_RESID) { | ||
407 | #ifdef FCDEBUG | ||
408 | FCD(("Resid %d\n", rsp->fcp_resid)) | ||
409 | { | ||
410 | fcp_cmd *cmd = fc->scsi_cmd_pool + token; | ||
411 | int i; | ||
412 | |||
413 | printk ("Command "); | ||
414 | for (i = 0; i < sizeof(fcp_cmd); i+=4) | ||
415 | printk ("%08x ", *(u32 *)(((char *)cmd)+i)); | ||
416 | printk ("\nResponse "); | ||
417 | for (i = 0; i < fc->rsp_size; i+=4) | ||
418 | printk ("%08x ", *(u32 *)(((char *)rsp)+i)); | ||
419 | printk ("\n"); | ||
420 | } | ||
421 | #endif | ||
422 | } | ||
423 | |||
424 | if (rsp_status & FCP_STATUS_SENSE_LEN) { | ||
425 | sense_len = rsp->fcp_sense_len; | ||
426 | if (sense_len > sizeof(SCpnt->sense_buffer)) sense_len = sizeof(SCpnt->sense_buffer); | ||
427 | memcpy(SCpnt->sense_buffer, ((char *)(rsp+1)), sense_len); | ||
428 | } | ||
429 | |||
430 | if (fcmd->data) | ||
431 | dma_unmap_sg(fc->dev, scsi_sglist(SCpnt), | ||
432 | scsi_sg_count(SCpnt), | ||
433 | SCpnt->sc_data_direction); | ||
434 | break; | ||
435 | default: | ||
436 | host_status=DID_ERROR; /* FIXME */ | ||
437 | FCD(("Wrong FC status %d for token %d\n", status, token)) | ||
438 | break; | ||
439 | } | ||
440 | |||
441 | if (status_byte(rsp_status) == QUEUE_FULL) { | ||
442 | printk ("%s: (%d,%d) Received rsp_status 0x%x\n", fc->name, SCpnt->device->channel, SCpnt->device->id, rsp_status); | ||
443 | } | ||
444 | |||
445 | SCpnt->result = (host_status << 16) | (rsp_status & 0xff); | ||
446 | #ifdef FCDEBUG | ||
447 | if (host_status || SCpnt->result || rsp_status) printk("FC: host_status %d, packet status %d\n", | ||
448 | host_status, SCpnt->result); | ||
449 | #endif | ||
450 | SCpnt->done = fcmd->done; | ||
451 | fcmd->done=NULL; | ||
452 | clear_bit(token, fc->scsi_bitmap); | ||
453 | fc->scsi_free++; | ||
454 | FCD(("Calling scsi_done with %08x\n", SCpnt->result)) | ||
455 | SCpnt->scsi_done(SCpnt); | ||
456 | } | ||
457 | |||
458 | void fcp_receive_solicited(fc_channel *fc, int proto, int token, int status, fc_hdr *fch) | ||
459 | { | ||
460 | int magic; | ||
461 | FCD(("receive_solicited %d %d %d\n", proto, token, status)) | ||
462 | switch (proto) { | ||
463 | case TYPE_SCSI_FCP: | ||
464 | fcp_scsi_receive(fc, token, status, fch); break; | ||
465 | case TYPE_EXTENDED_LS: | ||
466 | case PROTO_REPORT_AL_MAP: | ||
467 | magic = 0; | ||
468 | if (fc->ls) | ||
469 | magic = ((ls *)(fc->ls))->magic; | ||
470 | if (magic == LSMAGIC) { | ||
471 | ls *l = (ls *)fc->ls; | ||
472 | int i = (token >= l->count) ? token - l->count : token; | ||
473 | |||
474 | /* Let's be sure */ | ||
475 | if ((unsigned)i < l->count && l->fcmds[i].fc == fc) { | ||
476 | if (proto == TYPE_EXTENDED_LS) | ||
477 | fcp_login_done(fc, token, status); | ||
478 | else | ||
479 | fcp_report_map_done(fc, token, status); | ||
480 | break; | ||
481 | } | ||
482 | } | ||
483 | FCD(("fc %p fc->ls %p fc->cmd_slots %p\n", fc, fc->ls, fc->cmd_slots)) | ||
484 | if (proto == TYPE_EXTENDED_LS && !fc->ls && fc->cmd_slots) { | ||
485 | fcp_cmnd *fcmd; | ||
486 | |||
487 | fcmd = fc->cmd_slots[token]; | ||
488 | if (fcmd && fcmd->ls && ((ls *)(fcmd->ls))->magic == LSEMAGIC) { | ||
489 | lse *l = (lse *)fcmd->ls; | ||
490 | |||
491 | l->status = status; | ||
492 | up (&l->sem); | ||
493 | } | ||
494 | } | ||
495 | break; | ||
496 | case PROTO_OFFLINE: | ||
497 | if (fc->ls && ((lso *)(fc->ls))->magic == LSOMAGIC) { | ||
498 | lso *l = (lso *)fc->ls; | ||
499 | |||
500 | if ((unsigned)token < l->count && l->fcmds[token].fc == fc) { | ||
501 | /* Wow, OFFLINE response arrived :) */ | ||
502 | FCD(("OFFLINE Response arrived\n")) | ||
503 | fc->state = FC_STATE_OFFLINE; | ||
504 | if (atomic_dec_and_test (&l->todo)) | ||
505 | up(&l->sem); | ||
506 | } | ||
507 | } | ||
508 | break; | ||
509 | |||
510 | default: | ||
511 | break; | ||
512 | } | ||
513 | } | ||
514 | |||
515 | void fcp_state_change(fc_channel *fc, int state) | ||
516 | { | ||
517 | FCD(("state_change %d %d\n", state, fc->state)) | ||
518 | if (state == FC_STATE_ONLINE && fc->state == FC_STATE_MAYBEOFFLINE) | ||
519 | fc->state = FC_STATE_UNINITED; | ||
520 | else if (state == FC_STATE_ONLINE) | ||
521 | printk (KERN_WARNING "%s: state change to ONLINE\n", fc->name); | ||
522 | else | ||
523 | printk (KERN_ERR "%s: state change to OFFLINE\n", fc->name); | ||
524 | } | ||
525 | |||
526 | int fcp_initialize(fc_channel *fcchain, int count) | ||
527 | { | ||
528 | fc_channel *fc; | ||
529 | fcp_cmnd *fcmd; | ||
530 | int i, retry, ret; | ||
531 | ls *l; | ||
532 | |||
533 | FCND(("fcp_initialize %08lx\n", (long)fcp_init)) | ||
534 | FCND(("fc_channels %08lx\n", (long)fc_channels)) | ||
535 | FCND((" SID %d DID %d\n", fcchain->sid, fcchain->did)) | ||
536 | l = kzalloc(sizeof (ls) + count, GFP_KERNEL); | ||
537 | if (!l) { | ||
538 | printk ("FC: Cannot allocate memory for initialization\n"); | ||
539 | return -ENOMEM; | ||
540 | } | ||
541 | l->magic = LSMAGIC; | ||
542 | l->count = count; | ||
543 | FCND(("FCP Init for %d channels\n", count)) | ||
544 | init_MUTEX_LOCKED(&l->sem); | ||
545 | init_timer(&l->timer); | ||
546 | l->timer.function = fcp_login_timeout; | ||
547 | l->timer.data = (unsigned long)l; | ||
548 | atomic_set (&l->todo, count); | ||
549 | l->logi = kzalloc (count * 3 * sizeof(logi), GFP_KERNEL); | ||
550 | l->fcmds = kzalloc (count * sizeof(fcp_cmnd), GFP_KERNEL); | ||
551 | if (!l->logi || !l->fcmds) { | ||
552 | kfree (l->logi); | ||
553 | kfree (l->fcmds); | ||
554 | kfree (l); | ||
555 | printk ("FC: Cannot allocate DMA memory for initialization\n"); | ||
556 | return -ENOMEM; | ||
557 | } | ||
558 | for (fc = fcchain, i = 0; fc && i < count; fc = fc->next, i++) { | ||
559 | fc->state = FC_STATE_UNINITED; | ||
560 | fc->rst_pkt = NULL; /* kmalloc when first used */ | ||
561 | } | ||
562 | /* First try if we are in a AL topology */ | ||
563 | FCND(("Initializing REPORT_MAP packets\n")) | ||
564 | for (fc = fcchain, i = 0; fc && i < count; fc = fc->next, i++) { | ||
565 | fcmd = l->fcmds + i; | ||
566 | fc->login = fcmd; | ||
567 | fc->ls = (void *)l; | ||
568 | /* Assumes sizeof(fc_al_posmap) < 3 * sizeof(logi), which is true */ | ||
569 | fcmd->cmd = dma_map_single (fc->dev, l->logi + 3 * i, 3 * sizeof(logi), | ||
570 | DMA_BIDIRECTIONAL); | ||
571 | fcmd->proto = PROTO_REPORT_AL_MAP; | ||
572 | fcmd->token = i; | ||
573 | fcmd->fc = fc; | ||
574 | } | ||
575 | for (retry = 0; retry < 8; retry++) { | ||
576 | int nqueued = 0; | ||
577 | FCND(("Sending REPORT_MAP/FLOGI/PLOGI packets\n")) | ||
578 | for (fc = fcchain, i = 0; fc && i < count; fc = fc->next, i++) { | ||
579 | if (fc->state == FC_STATE_ONLINE || fc->state == FC_STATE_OFFLINE) | ||
580 | continue; | ||
581 | disable_irq(fc->irq); | ||
582 | if (fc->state == FC_STATE_MAYBEOFFLINE) { | ||
583 | if (!l->grace[i]) { | ||
584 | l->grace[i]++; | ||
585 | FCD(("Grace\n")) | ||
586 | } else { | ||
587 | fc->state = FC_STATE_OFFLINE; | ||
588 | enable_irq(fc->irq); | ||
589 | dma_unmap_single (fc->dev, l->fcmds[i].cmd, 3 * sizeof(logi), DMA_BIDIRECTIONAL); | ||
590 | if (atomic_dec_and_test (&l->todo)) | ||
591 | goto all_done; | ||
592 | } | ||
593 | } | ||
594 | ret = fc->hw_enque (fc, fc->login); | ||
595 | enable_irq(fc->irq); | ||
596 | if (!ret) { | ||
597 | nqueued++; | ||
598 | continue; | ||
599 | } | ||
600 | if (ret == -ENOSYS && fc->login->proto == PROTO_REPORT_AL_MAP) { | ||
601 | /* Oh yes, this card handles Point-to-Point only, so let's try that. */ | ||
602 | fc_hdr *fch; | ||
603 | |||
604 | FCD(("SID %d DID %d\n", fc->sid, fc->did)) | ||
605 | fcmd = l->fcmds + i; | ||
606 | dma_unmap_single(fc->dev, fcmd->cmd, 3 * sizeof(logi), DMA_BIDIRECTIONAL); | ||
607 | fch = &fcmd->fch; | ||
608 | FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, FS_FABRIC_F_PORT); | ||
609 | FILL_FCHDR_SID(fch, 0); | ||
610 | FILL_FCHDR_TYPE_FCTL(fch, TYPE_EXTENDED_LS, F_CTL_FIRST_SEQ | F_CTL_SEQ_INITIATIVE); | ||
611 | FILL_FCHDR_SEQ_DF_SEQ(fch, 0, 0, 0); | ||
612 | FILL_FCHDR_OXRX(fch, 0xffff, 0xffff); | ||
613 | fch->param = 0; | ||
614 | l->logi [3 * i].code = LS_FLOGI; | ||
615 | fcmd->cmd = dma_map_single (fc->dev, l->logi + 3 * i, 3 * sizeof(logi), DMA_BIDIRECTIONAL); | ||
616 | fcmd->rsp = fcmd->cmd + sizeof(logi); | ||
617 | fcmd->cmdlen = sizeof(logi); | ||
618 | fcmd->rsplen = sizeof(logi); | ||
619 | fcmd->data = (dma_addr_t)NULL; | ||
620 | fcmd->class = FC_CLASS_SIMPLE; | ||
621 | fcmd->proto = TYPE_EXTENDED_LS; | ||
622 | } else | ||
623 | printk ("FC: Cannot enque FLOGI/REPORT_MAP packet on %s\n", fc->name); | ||
624 | } | ||
625 | |||
626 | if (nqueued) { | ||
627 | l->timer.expires = jiffies + 5 * HZ; | ||
628 | add_timer(&l->timer); | ||
629 | |||
630 | down(&l->sem); | ||
631 | if (!atomic_read(&l->todo)) { | ||
632 | FCND(("All channels answered in time\n")) | ||
633 | break; /* All fc channels have answered us */ | ||
634 | } | ||
635 | } | ||
636 | } | ||
637 | all_done: | ||
638 | for (fc = fcchain, i = 0; fc && i < count; fc = fc->next, i++) { | ||
639 | fc->ls = NULL; | ||
640 | switch (fc->state) { | ||
641 | case FC_STATE_ONLINE: break; | ||
642 | case FC_STATE_OFFLINE: break; | ||
643 | default: dma_unmap_single (fc->dev, l->fcmds[i].cmd, 3 * sizeof(logi), DMA_BIDIRECTIONAL); | ||
644 | break; | ||
645 | } | ||
646 | } | ||
647 | del_timer(&l->timer); | ||
648 | kfree (l->logi); | ||
649 | kfree (l->fcmds); | ||
650 | kfree (l); | ||
651 | return 0; | ||
652 | } | ||
653 | |||
654 | int fcp_forceoffline(fc_channel *fcchain, int count) | ||
655 | { | ||
656 | fc_channel *fc; | ||
657 | fcp_cmnd *fcmd; | ||
658 | int i, ret; | ||
659 | lso l; | ||
660 | |||
661 | memset (&l, 0, sizeof(lso)); | ||
662 | l.count = count; | ||
663 | l.magic = LSOMAGIC; | ||
664 | FCND(("FCP Force Offline for %d channels\n", count)) | ||
665 | init_MUTEX_LOCKED(&l.sem); | ||
666 | init_timer(&l.timer); | ||
667 | l.timer.function = fcp_login_timeout; | ||
668 | l.timer.data = (unsigned long)&l; | ||
669 | atomic_set (&l.todo, count); | ||
670 | l.fcmds = kzalloc (count * sizeof(fcp_cmnd), GFP_KERNEL); | ||
671 | if (!l.fcmds) { | ||
672 | printk ("FC: Cannot allocate memory for forcing offline\n"); | ||
673 | return -ENOMEM; | ||
674 | } | ||
675 | FCND(("Initializing OFFLINE packets\n")) | ||
676 | for (fc = fcchain, i = 0; fc && i < count; fc = fc->next, i++) { | ||
677 | fc->state = FC_STATE_UNINITED; | ||
678 | fcmd = l.fcmds + i; | ||
679 | fc->login = fcmd; | ||
680 | fc->ls = (void *)&l; | ||
681 | fcmd->did = fc->did; | ||
682 | fcmd->class = FC_CLASS_OFFLINE; | ||
683 | fcmd->proto = PROTO_OFFLINE; | ||
684 | fcmd->token = i; | ||
685 | fcmd->fc = fc; | ||
686 | disable_irq(fc->irq); | ||
687 | ret = fc->hw_enque (fc, fc->login); | ||
688 | enable_irq(fc->irq); | ||
689 | if (ret) printk ("FC: Cannot enque OFFLINE packet on %s\n", fc->name); | ||
690 | } | ||
691 | |||
692 | l.timer.expires = jiffies + 5 * HZ; | ||
693 | add_timer(&l.timer); | ||
694 | down(&l.sem); | ||
695 | del_timer(&l.timer); | ||
696 | |||
697 | for (fc = fcchain, i = 0; fc && i < count; fc = fc->next, i++) | ||
698 | fc->ls = NULL; | ||
699 | kfree (l.fcmds); | ||
700 | return 0; | ||
701 | } | ||
702 | |||
703 | int fcp_init(fc_channel *fcchain) | ||
704 | { | ||
705 | fc_channel *fc; | ||
706 | int count=0; | ||
707 | int ret; | ||
708 | |||
709 | for (fc = fcchain; fc; fc = fc->next) { | ||
710 | fc->fcp_register = fcp_register; | ||
711 | count++; | ||
712 | } | ||
713 | |||
714 | ret = fcp_initialize (fcchain, count); | ||
715 | if (ret) | ||
716 | return ret; | ||
717 | |||
718 | if (!fc_channels) | ||
719 | fc_channels = fcchain; | ||
720 | else { | ||
721 | for (fc = fc_channels; fc->next; fc = fc->next); | ||
722 | fc->next = fcchain; | ||
723 | } | ||
724 | return ret; | ||
725 | } | ||
726 | |||
727 | void fcp_release(fc_channel *fcchain, int count) /* count must be > 0 */ | ||
728 | { | ||
729 | fc_channel *fc; | ||
730 | fc_channel *fcx; | ||
731 | |||
732 | for (fc = fcchain; --count && fc->next; fc = fc->next); | ||
733 | if (count) { | ||
734 | printk("FC: nothing to release\n"); | ||
735 | return; | ||
736 | } | ||
737 | |||
738 | if (fc_channels == fcchain) | ||
739 | fc_channels = fc->next; | ||
740 | else { | ||
741 | for (fcx = fc_channels; fcx->next != fcchain; fcx = fcx->next); | ||
742 | fcx->next = fc->next; | ||
743 | } | ||
744 | fc->next = NULL; | ||
745 | |||
746 | /* | ||
747 | * We've just grabbed fcchain out of the fc_channel list | ||
748 | * and zero-terminated it, while destroying the count. | ||
749 | * | ||
750 | * Freeing the fc's is the low level driver's responsibility. | ||
751 | */ | ||
752 | } | ||
753 | |||
754 | |||
755 | static void fcp_scsi_done(struct scsi_cmnd *SCpnt) | ||
756 | { | ||
757 | if (FCP_CMND(SCpnt)->done) | ||
758 | FCP_CMND(SCpnt)->done(SCpnt); | ||
759 | } | ||
760 | |||
761 | static int fcp_scsi_queue_it(fc_channel *fc, struct scsi_cmnd *SCpnt, | ||
762 | fcp_cmnd *fcmd, int prepare) | ||
763 | { | ||
764 | long i; | ||
765 | fcp_cmd *cmd; | ||
766 | u32 fcp_cntl; | ||
767 | if (prepare) { | ||
768 | i = find_first_zero_bit (fc->scsi_bitmap, fc->scsi_bitmap_end); | ||
769 | set_bit (i, fc->scsi_bitmap); | ||
770 | fcmd->token = i; | ||
771 | cmd = fc->scsi_cmd_pool + i; | ||
772 | |||
773 | if (fc->encode_addr (SCpnt, cmd->fcp_addr, fc, fcmd)) { | ||
774 | /* Invalid channel/id/lun and couldn't map it into fcp_addr */ | ||
775 | clear_bit (i, fc->scsi_bitmap); | ||
776 | SCpnt->result = (DID_BAD_TARGET << 16); | ||
777 | SCpnt->scsi_done(SCpnt); | ||
778 | return 0; | ||
779 | } | ||
780 | fc->scsi_free--; | ||
781 | fc->cmd_slots[fcmd->token] = fcmd; | ||
782 | |||
783 | if (SCpnt->device->tagged_supported) { | ||
784 | if (jiffies - fc->ages[SCpnt->device->channel * fc->targets + SCpnt->device->id] > (5 * 60 * HZ)) { | ||
785 | fc->ages[SCpnt->device->channel * fc->targets + SCpnt->device->id] = jiffies; | ||
786 | fcp_cntl = FCP_CNTL_QTYPE_ORDERED; | ||
787 | } else | ||
788 | fcp_cntl = FCP_CNTL_QTYPE_SIMPLE; | ||
789 | } else | ||
790 | fcp_cntl = FCP_CNTL_QTYPE_UNTAGGED; | ||
791 | |||
792 | if (!scsi_bufflen(SCpnt)) { | ||
793 | cmd->fcp_cntl = fcp_cntl; | ||
794 | fcmd->data = (dma_addr_t)NULL; | ||
795 | } else { | ||
796 | struct scatterlist *sg; | ||
797 | int nents; | ||
798 | |||
799 | switch (SCpnt->cmnd[0]) { | ||
800 | case WRITE_6: | ||
801 | case WRITE_10: | ||
802 | case WRITE_12: | ||
803 | cmd->fcp_cntl = (FCP_CNTL_WRITE | fcp_cntl); break; | ||
804 | default: | ||
805 | cmd->fcp_cntl = (FCP_CNTL_READ | fcp_cntl); break; | ||
806 | } | ||
807 | |||
808 | sg = scsi_sglist(SCpnt); | ||
809 | nents = dma_map_sg(fc->dev, sg, scsi_sg_count(SCpnt), | ||
810 | SCpnt->sc_data_direction); | ||
811 | fcmd->data = sg_dma_address(sg); | ||
812 | cmd->fcp_data_len = sg_dma_len(sg); | ||
813 | } | ||
814 | memcpy (cmd->fcp_cdb, SCpnt->cmnd, SCpnt->cmd_len); | ||
815 | memset (cmd->fcp_cdb+SCpnt->cmd_len, 0, sizeof(cmd->fcp_cdb)-SCpnt->cmd_len); | ||
816 | FCD(("XXX: %04x.%04x.%04x.%04x - %08x%08x%08x\n", cmd->fcp_addr[0], cmd->fcp_addr[1], cmd->fcp_addr[2], cmd->fcp_addr[3], *(u32 *)SCpnt->cmnd, *(u32 *)(SCpnt->cmnd+4), *(u32 *)(SCpnt->cmnd+8))) | ||
817 | } | ||
818 | FCD(("Trying to enque %p\n", fcmd)) | ||
819 | if (!fc->scsi_que) { | ||
820 | if (!fc->hw_enque (fc, fcmd)) { | ||
821 | FCD(("hw_enque succeeded for %p\n", fcmd)) | ||
822 | return 0; | ||
823 | } | ||
824 | } | ||
825 | FCD(("Putting into que1 %p\n", fcmd)) | ||
826 | fcp_scsi_insert_queue (fc, fcmd); | ||
827 | return 0; | ||
828 | } | ||
829 | |||
830 | int fcp_scsi_queuecommand(struct scsi_cmnd *SCpnt, | ||
831 | void (* done)(struct scsi_cmnd *)) | ||
832 | { | ||
833 | fcp_cmnd *fcmd = FCP_CMND(SCpnt); | ||
834 | fc_channel *fc = FC_SCMND(SCpnt); | ||
835 | |||
836 | FCD(("Entering SCSI queuecommand %p\n", fcmd)) | ||
837 | if (SCpnt->done != fcp_scsi_done) { | ||
838 | fcmd->done = SCpnt->done; | ||
839 | SCpnt->done = fcp_scsi_done; | ||
840 | SCpnt->scsi_done = done; | ||
841 | fcmd->proto = TYPE_SCSI_FCP; | ||
842 | if (!fc->scsi_free) { | ||
843 | FCD(("FC: !scsi_free, putting cmd on ML queue\n")) | ||
844 | #if (FCP_SCSI_USE_NEW_EH_CODE == 0) | ||
845 | printk("fcp_scsi_queue_command: queue full, losing cmd, bad\n"); | ||
846 | #endif | ||
847 | return 1; | ||
848 | } | ||
849 | return fcp_scsi_queue_it(fc, SCpnt, fcmd, 1); | ||
850 | } | ||
851 | return fcp_scsi_queue_it(fc, SCpnt, fcmd, 0); | ||
852 | } | ||
853 | |||
854 | void fcp_queue_empty(fc_channel *fc) | ||
855 | { | ||
856 | fcp_cmnd *fcmd; | ||
857 | |||
858 | FCD(("Queue empty\n")) | ||
859 | while ((fcmd = fc->scsi_que)) { | ||
860 | /* The hw told us we can try to queue some packets again */ | ||
861 | if (fc->hw_enque (fc, fcmd)) | ||
862 | break; | ||
863 | fcp_scsi_remove_queue (fc, fcmd); | ||
864 | } | ||
865 | } | ||
866 | |||
867 | int fcp_scsi_abort(struct scsi_cmnd *SCpnt) | ||
868 | { | ||
869 | /* Internal bookkeeping only. Lose 1 cmd_slots slot. */ | ||
870 | fcp_cmnd *fcmd = FCP_CMND(SCpnt); | ||
871 | fc_channel *fc = FC_SCMND(SCpnt); | ||
872 | |||
873 | /* | ||
874 | * We react to abort requests by simply forgetting | ||
875 | * about the command and pretending everything's sweet. | ||
876 | * This may or may not be silly. We can't, however, | ||
877 | * immediately reuse the command's cmd_slots slot, | ||
878 | * as its result may arrive later and we would have | ||
879 | * no way to tell whether it belongs to the aborted command. | ||
880 | * | ||
881 | * Therefore, after the first few aborts are done, | ||
882 | * we tell the scsi error handler to do something clever. | ||
883 | * It will eventually call host reset, refreshing | ||
884 | * cmd_slots for us. | ||
885 | * | ||
886 | * There is a theoretical chance that we sometimes allow | ||
887 | * more than can_queue packets to the jungle this way, | ||
888 | * but the worst outcome possible is a series of | ||
889 | * more aborts and eventually the dev_reset catharsis. | ||
890 | */ | ||
891 | |||
892 | if (++fc->abort_count < (fc->can_queue >> 1)) { | ||
893 | SCpnt->result = DID_ABORT; | ||
894 | fcmd->done(SCpnt); | ||
895 | printk("FC: soft abort\n"); | ||
896 | return SUCCESS; | ||
897 | } else { | ||
898 | printk("FC: hard abort refused\n"); | ||
899 | return FAILED; | ||
900 | } | ||
901 | } | ||
902 | |||
903 | #if 0 | ||
904 | void fcp_scsi_reset_done(struct scsi_cmnd *SCpnt) | ||
905 | { | ||
906 | fc_channel *fc = FC_SCMND(SCpnt); | ||
907 | |||
908 | fc->rst_pkt->eh_state = SCSI_STATE_FINISHED; | ||
909 | up(fc->rst_pkt->device->host->eh_action); | ||
910 | } | ||
911 | #endif | ||
912 | |||
913 | #define FCP_RESET_TIMEOUT (2*HZ) | ||
914 | |||
915 | int fcp_scsi_dev_reset(struct scsi_cmnd *SCpnt) | ||
916 | { | ||
917 | #if 0 /* broken junk, but if davem wants to compile this driver, let him.. */ | ||
918 | unsigned long flags; | ||
919 | fcp_cmd *cmd; | ||
920 | fcp_cmnd *fcmd; | ||
921 | fc_channel *fc = FC_SCMND(SCpnt); | ||
922 | DECLARE_MUTEX_LOCKED(sem); | ||
923 | |||
924 | if (!fc->rst_pkt) { | ||
925 | fc->rst_pkt = kmalloc(sizeof(SCpnt), GFP_KERNEL); | ||
926 | if (!fc->rst_pkt) return FAILED; | ||
927 | |||
928 | fcmd = FCP_CMND(fc->rst_pkt); | ||
929 | |||
930 | |||
931 | fcmd->token = 0; | ||
932 | cmd = fc->scsi_cmd_pool + 0; | ||
933 | FCD(("Preparing rst packet\n")) | ||
934 | fc->encode_addr (SCpnt, cmd->fcp_addr, fc, fcmd); | ||
935 | fc->rst_pkt->device = SCpnt->device; | ||
936 | fc->rst_pkt->cmd_len = 0; | ||
937 | |||
938 | fc->cmd_slots[0] = fcmd; | ||
939 | |||
940 | cmd->fcp_cntl = FCP_CNTL_QTYPE_ORDERED | FCP_CNTL_RESET; | ||
941 | fcmd->data = (dma_addr_t)NULL; | ||
942 | fcmd->proto = TYPE_SCSI_FCP; | ||
943 | |||
944 | memcpy (cmd->fcp_cdb, SCpnt->cmnd, SCpnt->cmd_len); | ||
945 | memset (cmd->fcp_cdb+SCpnt->cmd_len, 0, sizeof(cmd->fcp_cdb)-SCpnt->cmd_len); | ||
946 | FCD(("XXX: %04x.%04x.%04x.%04x - %08x%08x%08x\n", cmd->fcp_addr[0], cmd->fcp_addr[1], cmd->fcp_addr[2], cmd->fcp_addr[3], *(u32 *)SCpnt->cmnd, *(u32 *)(SCpnt->cmnd+4), *(u32 *)(SCpnt->cmnd+8))) | ||
947 | } else { | ||
948 | fcmd = FCP_CMND(fc->rst_pkt); | ||
949 | if (fc->rst_pkt->eh_state == SCSI_STATE_QUEUED) | ||
950 | return FAILED; /* or SUCCESS; only these two are valid */ | ||
951 | } | ||
952 | fc->rst_pkt->done = NULL; | ||
953 | |||
954 | |||
955 | fc->rst_pkt->eh_state = SCSI_STATE_QUEUED; | ||
956 | init_timer(&fc->rst_pkt->eh_timeout); | ||
957 | fc->rst_pkt->eh_timeout.data = (unsigned long) fc->rst_pkt; | ||
958 | fc->rst_pkt->eh_timeout.expires = jiffies + FCP_RESET_TIMEOUT; | ||
959 | fc->rst_pkt->eh_timeout.function = (void (*)(unsigned long))fcp_scsi_reset_done; | ||
960 | |||
961 | add_timer(&fc->rst_pkt->eh_timeout); | ||
962 | |||
963 | /* | ||
964 | * Set up the semaphore so we wait for the command to complete. | ||
965 | */ | ||
966 | |||
967 | fc->rst_pkt->device->host->eh_action = &sem; | ||
968 | |||
969 | fc->rst_pkt->done = fcp_scsi_reset_done; | ||
970 | |||
971 | spin_lock_irqsave(SCpnt->device->host->host_lock, flags); | ||
972 | fcp_scsi_queue_it(fc, fc->rst_pkt, fcmd, 0); | ||
973 | spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); | ||
974 | |||
975 | down(&sem); | ||
976 | |||
977 | fc->rst_pkt->device->host->eh_action = NULL; | ||
978 | del_timer(&fc->rst_pkt->eh_timeout); | ||
979 | |||
980 | /* | ||
981 | * See if timeout. If so, tell the host to forget about it. | ||
982 | * In other words, we don't want a callback any more. | ||
983 | */ | ||
984 | if (fc->rst_pkt->eh_state == SCSI_STATE_TIMEOUT ) { | ||
985 | fc->rst_pkt->eh_state = SCSI_STATE_UNUSED; | ||
986 | return FAILED; | ||
987 | } | ||
988 | fc->rst_pkt->eh_state = SCSI_STATE_UNUSED; | ||
989 | #endif | ||
990 | return SUCCESS; | ||
991 | } | ||
992 | |||
993 | static int __fcp_scsi_host_reset(struct scsi_cmnd *SCpnt) | ||
994 | { | ||
995 | fc_channel *fc = FC_SCMND(SCpnt); | ||
996 | fcp_cmnd *fcmd = FCP_CMND(SCpnt); | ||
997 | int i; | ||
998 | |||
999 | printk ("FC: host reset\n"); | ||
1000 | |||
1001 | for (i=0; i < fc->can_queue; i++) { | ||
1002 | if (fc->cmd_slots[i] && SCpnt->result != DID_ABORT) { | ||
1003 | SCpnt->result = DID_RESET; | ||
1004 | fcmd->done(SCpnt); | ||
1005 | fc->cmd_slots[i] = NULL; | ||
1006 | } | ||
1007 | } | ||
1008 | fc->reset(fc); | ||
1009 | fc->abort_count = 0; | ||
1010 | if (fcp_initialize(fc, 1)) return SUCCESS; | ||
1011 | else return FAILED; | ||
1012 | } | ||
1013 | |||
1014 | int fcp_scsi_host_reset(struct scsi_cmnd *SCpnt) | ||
1015 | { | ||
1016 | unsigned long flags; | ||
1017 | int rc; | ||
1018 | |||
1019 | spin_lock_irqsave(SCpnt->device->host->host_lock, flags); | ||
1020 | rc = __fcp_scsi_host_reset(SCpnt); | ||
1021 | spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); | ||
1022 | |||
1023 | return rc; | ||
1024 | } | ||
1025 | |||
1026 | static int fcp_els_queue_it(fc_channel *fc, fcp_cmnd *fcmd) | ||
1027 | { | ||
1028 | long i; | ||
1029 | |||
1030 | i = find_first_zero_bit (fc->scsi_bitmap, fc->scsi_bitmap_end); | ||
1031 | set_bit (i, fc->scsi_bitmap); | ||
1032 | fcmd->token = i; | ||
1033 | fc->scsi_free--; | ||
1034 | fc->cmd_slots[fcmd->token] = fcmd; | ||
1035 | return fcp_scsi_queue_it(fc, NULL, fcmd, 0); | ||
1036 | } | ||
1037 | |||
1038 | static int fc_do_els(fc_channel *fc, unsigned int alpa, void *data, int len) | ||
1039 | { | ||
1040 | fcp_cmnd _fcmd, *fcmd; | ||
1041 | fc_hdr *fch; | ||
1042 | lse l; | ||
1043 | int i; | ||
1044 | |||
1045 | fcmd = &_fcmd; | ||
1046 | memset(fcmd, 0, sizeof(fcp_cmnd)); | ||
1047 | FCD(("PLOGI SID %d DID %d\n", fc->sid, alpa)) | ||
1048 | fch = &fcmd->fch; | ||
1049 | FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, alpa); | ||
1050 | FILL_FCHDR_SID(fch, fc->sid); | ||
1051 | FILL_FCHDR_TYPE_FCTL(fch, TYPE_EXTENDED_LS, F_CTL_FIRST_SEQ | F_CTL_SEQ_INITIATIVE); | ||
1052 | FILL_FCHDR_SEQ_DF_SEQ(fch, 0, 0, 0); | ||
1053 | FILL_FCHDR_OXRX(fch, 0xffff, 0xffff); | ||
1054 | fch->param = 0; | ||
1055 | fcmd->cmd = dma_map_single (fc->dev, data, 2 * len, DMA_BIDIRECTIONAL); | ||
1056 | fcmd->rsp = fcmd->cmd + len; | ||
1057 | fcmd->cmdlen = len; | ||
1058 | fcmd->rsplen = len; | ||
1059 | fcmd->data = (dma_addr_t)NULL; | ||
1060 | fcmd->fc = fc; | ||
1061 | fcmd->class = FC_CLASS_SIMPLE; | ||
1062 | fcmd->proto = TYPE_EXTENDED_LS; | ||
1063 | |||
1064 | memset (&l, 0, sizeof(lse)); | ||
1065 | l.magic = LSEMAGIC; | ||
1066 | init_MUTEX_LOCKED(&l.sem); | ||
1067 | l.timer.function = fcp_login_timeout; | ||
1068 | l.timer.data = (unsigned long)&l; | ||
1069 | l.status = FC_STATUS_TIMED_OUT; | ||
1070 | fcmd->ls = (void *)&l; | ||
1071 | |||
1072 | disable_irq(fc->irq); | ||
1073 | fcp_els_queue_it(fc, fcmd); | ||
1074 | enable_irq(fc->irq); | ||
1075 | |||
1076 | for (i = 0;;) { | ||
1077 | l.timer.expires = jiffies + 5 * HZ; | ||
1078 | add_timer(&l.timer); | ||
1079 | down(&l.sem); | ||
1080 | del_timer(&l.timer); | ||
1081 | if (l.status != FC_STATUS_TIMED_OUT) break; | ||
1082 | if (++i == 3) break; | ||
1083 | disable_irq(fc->irq); | ||
1084 | fcp_scsi_queue_it(fc, NULL, fcmd, 0); | ||
1085 | enable_irq(fc->irq); | ||
1086 | } | ||
1087 | |||
1088 | clear_bit(fcmd->token, fc->scsi_bitmap); | ||
1089 | fc->scsi_free++; | ||
1090 | dma_unmap_single (fc->dev, fcmd->cmd, 2 * len, DMA_BIDIRECTIONAL); | ||
1091 | return l.status; | ||
1092 | } | ||
1093 | |||
1094 | int fc_do_plogi(fc_channel *fc, unsigned char alpa, fc_wwn *node, fc_wwn *nport) | ||
1095 | { | ||
1096 | logi *l; | ||
1097 | int status; | ||
1098 | |||
1099 | l = kzalloc(2 * sizeof(logi), GFP_KERNEL); | ||
1100 | if (!l) return -ENOMEM; | ||
1101 | l->code = LS_PLOGI; | ||
1102 | memcpy (&l->nport_wwn, &fc->wwn_nport, sizeof(fc_wwn)); | ||
1103 | memcpy (&l->node_wwn, &fc->wwn_node, sizeof(fc_wwn)); | ||
1104 | memcpy (&l->common, fc->common_svc, sizeof(common_svc_parm)); | ||
1105 | memcpy (&l->class1, fc->class_svcs, 3*sizeof(svc_parm)); | ||
1106 | status = fc_do_els(fc, alpa, l, sizeof(logi)); | ||
1107 | if (status == FC_STATUS_OK) { | ||
1108 | if (l[1].code == LS_ACC) { | ||
1109 | #ifdef FCDEBUG | ||
1110 | u32 *u = (u32 *)&l[1].nport_wwn; | ||
1111 | FCD(("AL-PA %02x: Port WWN %08x%08x Node WWN %08x%08x\n", alpa, | ||
1112 | u[0], u[1], u[2], u[3])) | ||
1113 | #endif | ||
1114 | memcpy(nport, &l[1].nport_wwn, sizeof(fc_wwn)); | ||
1115 | memcpy(node, &l[1].node_wwn, sizeof(fc_wwn)); | ||
1116 | } else | ||
1117 | status = FC_STATUS_BAD_RSP; | ||
1118 | } | ||
1119 | kfree(l); | ||
1120 | return status; | ||
1121 | } | ||
1122 | |||
1123 | typedef struct { | ||
1124 | unsigned int code; | ||
1125 | unsigned params[4]; | ||
1126 | } prli; | ||
1127 | |||
1128 | int fc_do_prli(fc_channel *fc, unsigned char alpa) | ||
1129 | { | ||
1130 | prli *p; | ||
1131 | int status; | ||
1132 | |||
1133 | p = kzalloc(2 * sizeof(prli), GFP_KERNEL); | ||
1134 | if (!p) return -ENOMEM; | ||
1135 | p->code = LS_PRLI; | ||
1136 | p->params[0] = 0x08002000; | ||
1137 | p->params[3] = 0x00000022; | ||
1138 | status = fc_do_els(fc, alpa, p, sizeof(prli)); | ||
1139 | if (status == FC_STATUS_OK && p[1].code != LS_PRLI_ACC && p[1].code != LS_ACC) | ||
1140 | status = FC_STATUS_BAD_RSP; | ||
1141 | kfree(p); | ||
1142 | return status; | ||
1143 | } | ||
1144 | |||
1145 | MODULE_LICENSE("GPL"); | ||
1146 | |||
diff --git a/drivers/fc4/fc.h b/drivers/fc4/fc.h deleted file mode 100644 index 13f89d4c8cb9..000000000000 --- a/drivers/fc4/fc.h +++ /dev/null | |||
@@ -1,230 +0,0 @@ | |||
1 | /* fc.h: Definitions for Fibre Channel Physical and Signaling Interface. | ||
2 | * | ||
3 | * Copyright (C) 1996-1997,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | * | ||
5 | * Sources: | ||
6 | * Fibre Channel Physical & Signaling Interface (FC-PH), dpANS, 1994 | ||
7 | * dpANS Fibre Channel Protocol for SCSI (X3.269-199X), Rev. 012, 1995 | ||
8 | */ | ||
9 | |||
10 | #ifndef __FC_H | ||
11 | #define __FC_H | ||
12 | |||
13 | /* World Wide Name */ | ||
14 | #define NAAID_IEEE 1 | ||
15 | #define NAAID_IEEE_EXT 2 | ||
16 | #define NAAID_LOCAL 3 | ||
17 | #define NAAID_IP 4 | ||
18 | #define NAAID_IEEE_REG 5 | ||
19 | #define NAAID_IEEE_REG_EXT 6 | ||
20 | #define NAAID_CCITT 12 | ||
21 | #define NAAID_CCITT_GRP 14 | ||
22 | |||
23 | /* This is NAAID_IEEE_EXT scheme */ | ||
24 | typedef struct { | ||
25 | u32 naaid:4; | ||
26 | u32 nportid:12; | ||
27 | u32 hi:16; | ||
28 | u32 lo; | ||
29 | } fc_wwn; | ||
30 | |||
31 | /* Frame header for FC-PH frames */ | ||
32 | |||
33 | /* r_ctl field */ | ||
34 | #define R_CTL_DEVICE_DATA 0x00 /* FC4 Device_Data frame */ | ||
35 | #define R_CTL_EXTENDED_SVC 0x20 /* Extended Link_Data frame */ | ||
36 | #define R_CTL_FC4_SVC 0x30 /* FC4 Link_Data frame */ | ||
37 | #define R_CTL_VIDEO 0x40 /* Video_Data frame */ | ||
38 | #define R_CTL_BASIC_SVC 0x80 /* Basic Link_Data frame */ | ||
39 | #define R_CTL_LINK_CTL 0xc0 /* Link_Control frame */ | ||
40 | /* FC4 Device_Data frames */ | ||
41 | #define R_CTL_UNCATEGORIZED 0x00 | ||
42 | #define R_CTL_SOLICITED_DATA 0x01 | ||
43 | #define R_CTL_UNSOL_CONTROL 0x02 | ||
44 | #define R_CTL_SOLICITED_CONTROL 0x03 | ||
45 | #define R_CTL_UNSOL_DATA 0x04 | ||
46 | #define R_CTL_XFER_RDY 0x05 | ||
47 | #define R_CTL_COMMAND 0x06 | ||
48 | #define R_CTL_STATUS 0x07 | ||
49 | /* Basic Link_Data frames */ | ||
50 | #define R_CTL_LS_NOP 0x80 | ||
51 | #define R_CTL_LS_ABTS 0x81 | ||
52 | #define R_CTL_LS_RMC 0x82 | ||
53 | #define R_CTL_LS_BA_ACC 0x84 | ||
54 | #define R_CTL_LS_BA_RJT 0x85 | ||
55 | /* Extended Link_Data frames */ | ||
56 | #define R_CTL_ELS_REQ 0x22 | ||
57 | #define R_CTL_ELS_RSP 0x23 | ||
58 | /* Link_Control frames */ | ||
59 | #define R_CTL_ACK_1 0xc0 | ||
60 | #define R_CTL_ACK_N 0xc1 | ||
61 | #define R_CTL_P_RJT 0xc2 | ||
62 | #define R_CTL_F_RJT 0xc3 | ||
63 | #define R_CTL_P_BSY 0xc4 | ||
64 | #define R_CTL_F_BSY_DF 0xc5 | ||
65 | #define R_CTL_F_BSY_LC 0xc6 | ||
66 | #define R_CTL_LCR 0xc7 | ||
67 | |||
68 | /* type field */ | ||
69 | #define TYPE_BASIC_LS 0x00 | ||
70 | #define TYPE_EXTENDED_LS 0x01 | ||
71 | #define TYPE_IS8802 0x04 | ||
72 | #define TYPE_IS8802_SNAP 0x05 | ||
73 | #define TYPE_SCSI_FCP 0x08 | ||
74 | #define TYPE_SCSI_GPP 0x09 | ||
75 | #define TYPE_HIPP_FP 0x0a | ||
76 | #define TYPE_IPI3_MASTER 0x11 | ||
77 | #define TYPE_IPI3_SLAVE 0x12 | ||
78 | #define TYPE_IPI3_PEER 0x13 | ||
79 | |||
80 | /* f_ctl field */ | ||
81 | #define F_CTL_FILL_BYTES 0x000003 | ||
82 | #define F_CTL_XCHG_REASSEMBLE 0x000004 | ||
83 | #define F_CTL_RO_PRESENT 0x000008 | ||
84 | #define F_CTL_ABORT_SEQ 0x000030 | ||
85 | #define F_CTL_CONTINUE_SEQ 0x0000c0 | ||
86 | #define F_CTL_INVALIDATE_XID 0x004000 | ||
87 | #define F_CTL_XID_REASSIGNED 0x008000 | ||
88 | #define F_CTL_SEQ_INITIATIVE 0x010000 | ||
89 | #define F_CTL_CHAINED_SEQ 0x020000 | ||
90 | #define F_CTL_END_CONNECT 0x040000 | ||
91 | #define F_CTL_END_SEQ 0x080000 | ||
92 | #define F_CTL_LAST_SEQ 0x100000 | ||
93 | #define F_CTL_FIRST_SEQ 0x200000 | ||
94 | #define F_CTL_SEQ_CONTEXT 0x400000 | ||
95 | #define F_CTL_XCHG_CONTEXT 0x800000 | ||
96 | |||
97 | typedef struct { | ||
98 | u32 r_ctl:8, did:24; | ||
99 | u32 xxx1:8, sid:24; | ||
100 | u32 type:8, f_ctl:24; | ||
101 | u32 seq_id:8, df_ctl:8, seq_cnt:16; | ||
102 | u16 ox_id, rx_id; | ||
103 | u32 param; | ||
104 | } fc_hdr; | ||
105 | /* The following are ugly macros to make setup of this structure faster */ | ||
106 | #define FILL_FCHDR_RCTL_DID(fch, r_ctl, did) *(u32 *)(fch) = ((r_ctl) << 24) | (did); | ||
107 | #define FILL_FCHDR_SID(fch, sid) *((u32 *)(fch)+1) = (sid); | ||
108 | #define FILL_FCHDR_TYPE_FCTL(fch, type, f_ctl) *((u32 *)(fch)+2) = ((type) << 24) | (f_ctl); | ||
109 | #define FILL_FCHDR_SEQ_DF_SEQ(fch, seq_id, df_ctl, seq_cnt) *((u32 *)(fch)+3) = ((seq_id) << 24) | ((df_ctl) << 16) | (seq_cnt); | ||
110 | #define FILL_FCHDR_OXRX(fch, ox_id, rx_id) *((u32 *)(fch)+4) = ((ox_id) << 16) | (rx_id); | ||
111 | |||
112 | /* Well known addresses */ | ||
113 | #define FS_GENERAL_MULTICAST 0xfffff7 | ||
114 | #define FS_WELL_KNOWN_MULTICAST 0xfffff8 | ||
115 | #define FS_HUNT_GROUP 0xfffff9 | ||
116 | #define FS_MANAGEMENT_SERVER 0xfffffa | ||
117 | #define FS_TIME_SERVER 0xfffffb | ||
118 | #define FS_NAME_SERVER 0xfffffc | ||
119 | #define FS_FABRIC_CONTROLLER 0xfffffd | ||
120 | #define FS_FABRIC_F_PORT 0xfffffe | ||
121 | #define FS_BROADCAST 0xffffff | ||
122 | |||
123 | /* Reject frames */ | ||
124 | /* The param field should be cast to this structure */ | ||
125 | typedef struct { | ||
126 | u8 action; | ||
127 | u8 reason; | ||
128 | u8 xxx; | ||
129 | u8 vendor_unique; | ||
130 | } rjt_param; | ||
131 | |||
132 | /* Reject action codes */ | ||
133 | #define RJT_RETRY 0x01 | ||
134 | #define RJT_NONRETRY 0x02 | ||
135 | |||
136 | /* Reject reason codes */ | ||
137 | #define RJT_INVALID_DID 0x01 | ||
138 | #define RJT_INVALID_SID 0x02 | ||
139 | #define RJT_NPORT_NOT_AVAIL_TEMP 0x03 | ||
140 | #define RJT_NPORT_NOT_AVAIL_PERM 0x04 | ||
141 | #define RJT_CLASS_NOT_SUPPORTED 0x05 | ||
142 | #define RJT_DELIMITER_ERROR 0x06 | ||
143 | #define RJT_TYPE_NOT_SUPPORTED 0x07 | ||
144 | #define RJT_INVALID_LINK_CONTROL 0x08 | ||
145 | #define RJT_INVALID_R_CTL 0x09 | ||
146 | #define RJT_INVALID_F_CTL 0x0a | ||
147 | #define RJT_INVALID_OX_ID 0x0b | ||
148 | #define RJT_INVALID_RX_ID 0x0c | ||
149 | #define RJT_INVALID_SEQ_ID 0x0d | ||
150 | #define RJT_INVALID_DF_CTL 0x0e | ||
151 | #define RJT_INVALID_SEQ_CNT 0x0f | ||
152 | #define RJT_INVALID_PARAMETER 0x10 | ||
153 | #define RJT_EXCHANGE_ERROR 0x11 | ||
154 | #define RJT_PROTOCOL_ERROR 0x12 | ||
155 | #define RJT_INCORRECT_LENGTH 0x13 | ||
156 | #define RJT_UNEXPECTED_ACK 0x14 | ||
157 | #define RJT_UNEXPECTED_LINK_RESP 0x15 | ||
158 | #define RJT_LOGIN_REQUIRED 0x16 | ||
159 | #define RJT_EXCESSIVE_SEQUENCES 0x17 | ||
160 | #define RJT_CANT_ESTABLISH_EXCHANGE 0x18 | ||
161 | #define RJT_SECURITY_NOT_SUPPORTED 0x19 | ||
162 | #define RJT_FABRIC_NA 0x1a | ||
163 | #define RJT_VENDOR_UNIQUE 0xff | ||
164 | |||
165 | |||
166 | #define SP_F_PORT_LOGIN 0x10 | ||
167 | |||
168 | /* Extended SVC commands */ | ||
169 | #define LS_RJT 0x01000000 | ||
170 | #define LS_ACC 0x02000000 | ||
171 | #define LS_PRLI_ACC 0x02100014 | ||
172 | #define LS_PLOGI 0x03000000 | ||
173 | #define LS_FLOGI 0x04000000 | ||
174 | #define LS_LOGO 0x05000000 | ||
175 | #define LS_ABTX 0x06000000 | ||
176 | #define LS_RCS 0x07000000 | ||
177 | #define LS_RES 0x08000000 | ||
178 | #define LS_RSS 0x09000000 | ||
179 | #define LS_RSI 0x0a000000 | ||
180 | #define LS_ESTS 0x0b000000 | ||
181 | #define LS_ESTC 0x0c000000 | ||
182 | #define LS_ADVC 0x0d000000 | ||
183 | #define LS_RTV 0x0e000000 | ||
184 | #define LS_RLS 0x0f000000 | ||
185 | #define LS_ECHO 0x10000000 | ||
186 | #define LS_TEST 0x11000000 | ||
187 | #define LS_RRQ 0x12000000 | ||
188 | #define LS_IDENT 0x20000000 | ||
189 | #define LS_PRLI 0x20100014 | ||
190 | #define LS_DISPLAY 0x21000000 | ||
191 | #define LS_PRLO 0x21100014 | ||
192 | #define LS_PDISC 0x50000000 | ||
193 | #define LS_ADISC 0x52000000 | ||
194 | |||
195 | typedef struct { | ||
196 | u8 fcph_hi, fcph_lo; | ||
197 | u16 buf2buf_credit; | ||
198 | u8 common_features; | ||
199 | u8 xxx1; | ||
200 | u16 buf2buf_size; | ||
201 | u8 xxx2; | ||
202 | u8 total_concurrent; | ||
203 | u16 off_by_info; | ||
204 | u32 e_d_tov; | ||
205 | } common_svc_parm; | ||
206 | |||
207 | typedef struct { | ||
208 | u16 serv_opts; | ||
209 | u16 initiator_ctl; | ||
210 | u16 rcpt_ctl; | ||
211 | u16 recv_size; | ||
212 | u8 xxx1; | ||
213 | u8 concurrent_seqs; | ||
214 | u16 end2end_credit; | ||
215 | u16 open_seqs_per_xchg; | ||
216 | u16 xxx2; | ||
217 | } svc_parm; | ||
218 | |||
219 | /* Login */ | ||
220 | typedef struct { | ||
221 | u32 code; | ||
222 | common_svc_parm common; | ||
223 | fc_wwn nport_wwn; | ||
224 | fc_wwn node_wwn; | ||
225 | svc_parm class1; | ||
226 | svc_parm class2; | ||
227 | svc_parm class3; | ||
228 | } logi; | ||
229 | |||
230 | #endif /* !(__FC_H) */ | ||
diff --git a/drivers/fc4/fc_syms.c b/drivers/fc4/fc_syms.c deleted file mode 100644 index bd3918ddf7ac..000000000000 --- a/drivers/fc4/fc_syms.c +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | /* | ||
2 | * We should not even be trying to compile this if we are not doing | ||
3 | * a module. | ||
4 | */ | ||
5 | #include <linux/module.h> | ||
6 | |||
7 | #ifdef CONFIG_MODULES | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/kernel.h> | ||
12 | |||
13 | #include "fcp_impl.h" | ||
14 | |||
15 | EXPORT_SYMBOL(fcp_init); | ||
16 | EXPORT_SYMBOL(fcp_release); | ||
17 | EXPORT_SYMBOL(fcp_queue_empty); | ||
18 | EXPORT_SYMBOL(fcp_receive_solicited); | ||
19 | EXPORT_SYMBOL(fc_channels); | ||
20 | EXPORT_SYMBOL(fcp_state_change); | ||
21 | EXPORT_SYMBOL(fc_do_plogi); | ||
22 | EXPORT_SYMBOL(fc_do_prli); | ||
23 | |||
24 | /* SCSI stuff */ | ||
25 | EXPORT_SYMBOL(fcp_scsi_queuecommand); | ||
26 | EXPORT_SYMBOL(fcp_scsi_abort); | ||
27 | EXPORT_SYMBOL(fcp_scsi_dev_reset); | ||
28 | EXPORT_SYMBOL(fcp_scsi_host_reset); | ||
29 | |||
30 | #endif /* CONFIG_MODULES */ | ||
diff --git a/drivers/fc4/fcp.h b/drivers/fc4/fcp.h deleted file mode 100644 index 6aa34a7a4c11..000000000000 --- a/drivers/fc4/fcp.h +++ /dev/null | |||
@@ -1,94 +0,0 @@ | |||
1 | /* fcp.h: Definitions for Fibre Channel Protocol. | ||
2 | * | ||
3 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
4 | * | ||
5 | */ | ||
6 | |||
7 | #ifndef __FCP_H | ||
8 | #define __FCP_H | ||
9 | |||
10 | /* FCP addressing is hierarchical with up to 4 layers, MS first. | ||
11 | Exact meaning of the addresses is up to the vendor */ | ||
12 | |||
13 | /* fcp_cntl field */ | ||
14 | #define FCP_CNTL_WRITE 0x00000001 /* Initiator write */ | ||
15 | #define FCP_CNTL_READ 0x00000002 /* Initiator read */ | ||
16 | #define FCP_CNTL_ABORT_TSK 0x00000200 /* Abort task set */ | ||
17 | #define FCP_CNTL_CLR_TASK 0x00000400 /* Clear task set */ | ||
18 | #define FCP_CNTL_RESET 0x00002000 /* Reset */ | ||
19 | #define FCP_CNTL_CLR_ACA 0x00004000 /* Clear ACA */ | ||
20 | #define FCP_CNTL_KILL_TASK 0x00008000 /* Terminate task */ | ||
21 | #define FCP_CNTL_QTYPE_MASK 0x00070000 /* Tagged queueing type */ | ||
22 | #define FCP_CNTL_QTYPE_SIMPLE 0x00000000 | ||
23 | #define FCP_CNTL_QTYPE_HEAD_OF_Q 0x00010000 | ||
24 | #define FCP_CNTL_QTYPE_ORDERED 0x00020000 | ||
25 | #define FCP_CNTL_QTYPE_ACA_Q_TAG 0x00040000 | ||
26 | #define FCP_CNTL_QTYPE_UNTAGGED 0x00050000 | ||
27 | |||
28 | typedef struct { | ||
29 | u16 fcp_addr[4]; | ||
30 | u32 fcp_cntl; | ||
31 | u8 fcp_cdb[16]; | ||
32 | u32 fcp_data_len; | ||
33 | } fcp_cmd; | ||
34 | |||
35 | /* fcp_status field */ | ||
36 | #define FCP_STATUS_MASK 0x000000ff /* scsi status of command */ | ||
37 | #define FCP_STATUS_RSP_LEN 0x00000100 /* response_len != 0 */ | ||
38 | #define FCP_STATUS_SENSE_LEN 0x00000200 /* sense_len != 0 */ | ||
39 | #define FCP_STATUS_RESID 0x00000400 /* resid != 0 */ | ||
40 | |||
41 | typedef struct { | ||
42 | u32 xxx[2]; | ||
43 | u32 fcp_status; | ||
44 | u32 fcp_resid; | ||
45 | u32 fcp_sense_len; | ||
46 | u32 fcp_response_len; | ||
47 | /* u8 fcp_sense[fcp_sense_len]; */ | ||
48 | /* u8 fcp_response[fcp_response_len]; */ | ||
49 | } fcp_rsp; | ||
50 | |||
51 | /* fcp errors */ | ||
52 | |||
53 | /* rsp_info_type field */ | ||
54 | #define FCP_RSP_SCSI_BUS_ERR 0x01 | ||
55 | #define FCP_RSP_SCSI_PORT_ERR 0x02 | ||
56 | #define FCP_RSP_CARD_ERR 0x03 | ||
57 | |||
58 | /* isp_status field */ | ||
59 | #define FCP_RSP_CMD_COMPLETE 0x0000 | ||
60 | #define FCP_RSP_CMD_INCOMPLETE 0x0001 | ||
61 | #define FCP_RSP_CMD_DMA_ERR 0x0002 | ||
62 | #define FCP_RSP_CMD_TRAN_ERR 0x0003 | ||
63 | #define FCP_RSP_CMD_RESET 0x0004 | ||
64 | #define FCP_RSP_CMD_ABORTED 0x0005 | ||
65 | #define FCP_RSP_CMD_TIMEOUT 0x0006 | ||
66 | #define FCP_RSP_CMD_OVERRUN 0x0007 | ||
67 | |||
68 | /* isp_state_flags field */ | ||
69 | #define FCP_RSP_ST_GOT_BUS 0x0100 | ||
70 | #define FCP_RSP_ST_GOT_TARGET 0x0200 | ||
71 | #define FCP_RSP_ST_SENT_CMD 0x0400 | ||
72 | #define FCP_RSP_ST_XFRD_DATA 0x0800 | ||
73 | #define FCP_RSP_ST_GOT_STATUS 0x1000 | ||
74 | #define FCP_RSP_ST_GOT_SENSE 0x2000 | ||
75 | |||
76 | /* isp_stat_flags field */ | ||
77 | #define FCP_RSP_STAT_DISC 0x0001 | ||
78 | #define FCP_RSP_STAT_SYNC 0x0002 | ||
79 | #define FCP_RSP_STAT_PERR 0x0004 | ||
80 | #define FCP_RSP_STAT_BUS_RESET 0x0008 | ||
81 | #define FCP_RSP_STAT_DEV_RESET 0x0010 | ||
82 | #define FCP_RSP_STAT_ABORTED 0x0020 | ||
83 | #define FCP_RSP_STAT_TIMEOUT 0x0040 | ||
84 | #define FCP_RSP_STAT_NEGOTIATE 0x0080 | ||
85 | |||
86 | typedef struct { | ||
87 | u8 rsp_info_type; | ||
88 | u8 xxx; | ||
89 | u16 isp_status; | ||
90 | u16 isp_state_flags; | ||
91 | u16 isp_stat_flags; | ||
92 | } fcp_scsi_err; | ||
93 | |||
94 | #endif /* !(__FCP_H) */ | ||
diff --git a/drivers/fc4/fcp_impl.h b/drivers/fc4/fcp_impl.h deleted file mode 100644 index 506338a461ba..000000000000 --- a/drivers/fc4/fcp_impl.h +++ /dev/null | |||
@@ -1,164 +0,0 @@ | |||
1 | /* fcp_impl.h: Generic SCSI on top of FC4 - our interface defines. | ||
2 | * | ||
3 | * Copyright (C) 1997-1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | * Copyright (C) 1998 Jirka Hanika (geo@ff.cuni.cz) | ||
5 | */ | ||
6 | |||
7 | #ifndef _FCP_SCSI_H | ||
8 | #define _FCP_SCSI_H | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | #include "../scsi/scsi.h" | ||
12 | |||
13 | #include "fc.h" | ||
14 | #include "fcp.h" | ||
15 | #include "fc-al.h" | ||
16 | |||
17 | #include <asm/io.h> | ||
18 | #ifdef __sparc__ | ||
19 | #include <asm/sbus.h> | ||
20 | #endif | ||
21 | |||
22 | /* 0 or 1 */ | ||
23 | #define FCP_SCSI_USE_NEW_EH_CODE 0 | ||
24 | |||
25 | #define FC_CLASS_OUTBOUND 0x01 | ||
26 | #define FC_CLASS_INBOUND 0x02 | ||
27 | #define FC_CLASS_SIMPLE 0x03 | ||
28 | #define FC_CLASS_IO_WRITE 0x04 | ||
29 | #define FC_CLASS_IO_READ 0x05 | ||
30 | #define FC_CLASS_UNSOLICITED 0x06 | ||
31 | #define FC_CLASS_OFFLINE 0x08 | ||
32 | |||
33 | #define PROTO_OFFLINE 0x02 | ||
34 | #define PROTO_REPORT_AL_MAP 0x03 | ||
35 | #define PROTO_FORCE_LIP 0x06 | ||
36 | |||
37 | struct _fc_channel; | ||
38 | |||
39 | typedef struct fcp_cmnd { | ||
40 | struct fcp_cmnd *next; | ||
41 | struct fcp_cmnd *prev; | ||
42 | void (*done)(struct scsi_cmnd *); | ||
43 | unsigned short proto; | ||
44 | unsigned short token; | ||
45 | unsigned int did; | ||
46 | /* FCP SCSI stuff */ | ||
47 | dma_addr_t data; | ||
48 | /* From now on this cannot be touched for proto == TYPE_SCSI_FCP */ | ||
49 | fc_hdr fch; | ||
50 | dma_addr_t cmd; | ||
51 | dma_addr_t rsp; | ||
52 | int cmdlen; | ||
53 | int rsplen; | ||
54 | int class; | ||
55 | int datalen; | ||
56 | /* This is just used as a verification during login */ | ||
57 | struct _fc_channel *fc; | ||
58 | void *ls; | ||
59 | } fcp_cmnd; | ||
60 | |||
61 | typedef struct { | ||
62 | unsigned int len; | ||
63 | unsigned char list[0]; | ||
64 | } fcp_posmap; | ||
65 | |||
66 | typedef struct _fc_channel { | ||
67 | struct _fc_channel *next; | ||
68 | int irq; | ||
69 | int state; | ||
70 | int sid; | ||
71 | int did; | ||
72 | char name[16]; | ||
73 | void (*fcp_register)(struct _fc_channel *, u8, int); | ||
74 | void (*reset)(struct _fc_channel *); | ||
75 | int (*hw_enque)(struct _fc_channel *, fcp_cmnd *); | ||
76 | fc_wwn wwn_node; | ||
77 | fc_wwn wwn_nport; | ||
78 | fc_wwn wwn_dest; | ||
79 | common_svc_parm *common_svc; | ||
80 | svc_parm *class_svcs; | ||
81 | #ifdef __sparc__ | ||
82 | struct sbus_dev *dev; | ||
83 | #else | ||
84 | struct pci_dev *dev; | ||
85 | #endif | ||
86 | struct module *module; | ||
87 | /* FCP SCSI stuff */ | ||
88 | short can_queue; | ||
89 | short abort_count; | ||
90 | int rsp_size; | ||
91 | fcp_cmd *scsi_cmd_pool; | ||
92 | char *scsi_rsp_pool; | ||
93 | dma_addr_t dma_scsi_cmd, dma_scsi_rsp; | ||
94 | unsigned long *scsi_bitmap; | ||
95 | long scsi_bitmap_end; | ||
96 | int scsi_free; | ||
97 | int (*encode_addr)(struct scsi_cmnd *, u16 *, struct _fc_channel *, fcp_cmnd *); | ||
98 | fcp_cmnd *scsi_que; | ||
99 | char scsi_name[4]; | ||
100 | fcp_cmnd **cmd_slots; | ||
101 | int channels; | ||
102 | int targets; | ||
103 | long *ages; | ||
104 | struct scsi_cmnd *rst_pkt; | ||
105 | fcp_posmap *posmap; | ||
106 | /* LOGIN stuff */ | ||
107 | fcp_cmnd *login; | ||
108 | void *ls; | ||
109 | } fc_channel; | ||
110 | |||
111 | extern fc_channel *fc_channels; | ||
112 | |||
113 | #define FC_STATE_UNINITED 0 | ||
114 | #define FC_STATE_ONLINE 1 | ||
115 | #define FC_STATE_OFFLINE 2 | ||
116 | #define FC_STATE_RESETING 3 | ||
117 | #define FC_STATE_FPORT_OK 4 | ||
118 | #define FC_STATE_MAYBEOFFLINE 5 | ||
119 | |||
120 | #define FC_STATUS_OK 0 | ||
121 | #define FC_STATUS_P_RJT 2 | ||
122 | #define FC_STATUS_F_RJT 3 | ||
123 | #define FC_STATUS_P_BSY 4 | ||
124 | #define FC_STATUS_F_BSY 5 | ||
125 | #define FC_STATUS_ERR_OFFLINE 0x11 | ||
126 | #define FC_STATUS_TIMEOUT 0x12 | ||
127 | #define FC_STATUS_ERR_OVERRUN 0x13 | ||
128 | #define FC_STATUS_POINTTOPOINT 0x15 | ||
129 | #define FC_STATUS_AL 0x16 | ||
130 | #define FC_STATUS_UNKNOWN_CQ_TYPE 0x20 | ||
131 | #define FC_STATUS_BAD_SEG_CNT 0x21 | ||
132 | #define FC_STATUS_MAX_XCHG_EXCEEDED 0x22 | ||
133 | #define FC_STATUS_BAD_XID 0x23 | ||
134 | #define FC_STATUS_XCHG_BUSY 0x24 | ||
135 | #define FC_STATUS_BAD_POOL_ID 0x25 | ||
136 | #define FC_STATUS_INSUFFICIENT_CQES 0x26 | ||
137 | #define FC_STATUS_ALLOC_FAIL 0x27 | ||
138 | #define FC_STATUS_BAD_SID 0x28 | ||
139 | #define FC_STATUS_NO_SEQ_INIT 0x29 | ||
140 | #define FC_STATUS_TIMED_OUT -1 | ||
141 | #define FC_STATUS_BAD_RSP -2 | ||
142 | |||
143 | void fcp_queue_empty(fc_channel *); | ||
144 | int fcp_init(fc_channel *); | ||
145 | void fcp_release(fc_channel *fc_chain, int count); | ||
146 | void fcp_receive_solicited(fc_channel *, int, int, int, fc_hdr *); | ||
147 | void fcp_state_change(fc_channel *, int); | ||
148 | int fc_do_plogi(fc_channel *, unsigned char, fc_wwn *, fc_wwn *); | ||
149 | int fc_do_prli(fc_channel *, unsigned char); | ||
150 | |||
151 | #define for_each_fc_channel(fc) \ | ||
152 | for (fc = fc_channels; fc; fc = fc->next) | ||
153 | |||
154 | #define for_each_online_fc_channel(fc) \ | ||
155 | for_each_fc_channel(fc) \ | ||
156 | if (fc->state == FC_STATE_ONLINE) | ||
157 | |||
158 | int fcp_scsi_queuecommand(struct scsi_cmnd *, | ||
159 | void (* done) (struct scsi_cmnd *)); | ||
160 | int fcp_scsi_abort(struct scsi_cmnd *); | ||
161 | int fcp_scsi_dev_reset(struct scsi_cmnd *); | ||
162 | int fcp_scsi_host_reset(struct scsi_cmnd *); | ||
163 | |||
164 | #endif /* !(_FCP_SCSI_H) */ | ||
diff --git a/drivers/fc4/soc.c b/drivers/fc4/soc.c deleted file mode 100644 index d517734462e6..000000000000 --- a/drivers/fc4/soc.c +++ /dev/null | |||
@@ -1,764 +0,0 @@ | |||
1 | /* soc.c: Sparc SUNW,soc (Serial Optical Channel) Fibre Channel Sbus adapter support. | ||
2 | * | ||
3 | * Copyright (C) 1996,1997,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | * Copyright (C) 1997,1998 Jirka Hanika (geo@ff.cuni.cz) | ||
5 | * | ||
6 | * Sources: | ||
7 | * Fibre Channel Physical & Signaling Interface (FC-PH), dpANS, 1994 | ||
8 | * dpANS Fibre Channel Protocol for SCSI (X3.269-199X), Rev. 012, 1995 | ||
9 | * | ||
10 | * Supported hardware: | ||
11 | * Tested on SOC sbus card bought with SS1000 in Linux running on SS5 and Ultra1. | ||
12 | * For SOC sbus cards, you have to make sure your FCode is 1.52 or later. | ||
13 | * If you have older FCode, you should try to upgrade or get SOC microcode from Sun | ||
14 | * (the microcode is present in Solaris soc driver as well). In that case you need | ||
15 | * to #define HAVE_SOC_UCODE and format the microcode into soc_asm.c. For the exact | ||
16 | * format mail me and I will tell you. I cannot offer you the actual microcode though, | ||
17 | * unless Sun confirms they don't mind. | ||
18 | */ | ||
19 | |||
20 | static char *version = | ||
21 | "soc.c:v1.3 9/Feb/99 Jakub Jelinek (jj@ultra.linux.cz), Jirka Hanika (geo@ff.cuni.cz)\n"; | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/fcntl.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/ioport.h> | ||
30 | #include <linux/in.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/bitops.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/dma.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <asm/byteorder.h> | ||
39 | |||
40 | #include <asm/openprom.h> | ||
41 | #include <asm/oplib.h> | ||
42 | #include <asm/pgtable.h> | ||
43 | #include <asm/irq.h> | ||
44 | |||
45 | /* #define SOCDEBUG */ | ||
46 | /* #define HAVE_SOC_UCODE */ | ||
47 | |||
48 | #include "fcp_impl.h" | ||
49 | #include "soc.h" | ||
50 | #ifdef HAVE_SOC_UCODE | ||
51 | #include "soc_asm.h" | ||
52 | #endif | ||
53 | |||
54 | #define soc_printk printk ("soc%d: ", s->soc_no); printk | ||
55 | |||
56 | #ifdef SOCDEBUG | ||
57 | #define SOD(x) soc_printk x; | ||
58 | #else | ||
59 | #define SOD(x) | ||
60 | #endif | ||
61 | |||
62 | #define for_each_soc(s) for (s = socs; s; s = s->next) | ||
63 | struct soc *socs = NULL; | ||
64 | |||
65 | static inline void soc_disable(struct soc *s) | ||
66 | { | ||
67 | sbus_writel(0, s->regs + IMASK); | ||
68 | sbus_writel(SOC_CMD_SOFT_RESET, s->regs + CMD); | ||
69 | } | ||
70 | |||
71 | static inline void soc_enable(struct soc *s) | ||
72 | { | ||
73 | SOD(("enable %08x\n", s->cfg)) | ||
74 | sbus_writel(0, s->regs + SAE); | ||
75 | sbus_writel(s->cfg, s->regs + CFG); | ||
76 | sbus_writel(SOC_CMD_RSP_QALL, s->regs + CMD); | ||
77 | SOC_SETIMASK(s, SOC_IMASK_RSP_QALL | SOC_IMASK_SAE); | ||
78 | SOD(("imask %08lx %08lx\n", s->imask, sbus_readl(s->regs + IMASK))); | ||
79 | } | ||
80 | |||
81 | static void soc_reset(fc_channel *fc) | ||
82 | { | ||
83 | soc_port *port = (soc_port *)fc; | ||
84 | struct soc *s = port->s; | ||
85 | |||
86 | /* FIXME */ | ||
87 | soc_disable(s); | ||
88 | s->req[0].seqno = 1; | ||
89 | s->req[1].seqno = 1; | ||
90 | s->rsp[0].seqno = 1; | ||
91 | s->rsp[1].seqno = 1; | ||
92 | s->req[0].in = 0; | ||
93 | s->req[1].in = 0; | ||
94 | s->rsp[0].in = 0; | ||
95 | s->rsp[1].in = 0; | ||
96 | s->req[0].out = 0; | ||
97 | s->req[1].out = 0; | ||
98 | s->rsp[0].out = 0; | ||
99 | s->rsp[1].out = 0; | ||
100 | |||
101 | /* FIXME */ | ||
102 | soc_enable(s); | ||
103 | } | ||
104 | |||
105 | static inline void soc_solicited (struct soc *s) | ||
106 | { | ||
107 | fc_hdr fchdr; | ||
108 | soc_rsp __iomem *hwrsp; | ||
109 | soc_cq_rsp *sw_cq; | ||
110 | int token; | ||
111 | int status; | ||
112 | fc_channel *fc; | ||
113 | |||
114 | sw_cq = &s->rsp[SOC_SOLICITED_RSP_Q]; | ||
115 | |||
116 | if (sw_cq->pool == NULL) | ||
117 | sw_cq->pool = (soc_req __iomem *) | ||
118 | (s->xram + xram_get_32low ((xram_p)&sw_cq->hw_cq->address)); | ||
119 | sw_cq->in = xram_get_8 ((xram_p)&sw_cq->hw_cq->in); | ||
120 | SOD (("soc_solicited, %d pkts arrived\n", (sw_cq->in-sw_cq->out) & sw_cq->last)) | ||
121 | for (;;) { | ||
122 | hwrsp = (soc_rsp __iomem *)sw_cq->pool + sw_cq->out; | ||
123 | token = xram_get_32low ((xram_p)&hwrsp->shdr.token); | ||
124 | status = xram_get_32low ((xram_p)&hwrsp->status); | ||
125 | fc = (fc_channel *)(&s->port[(token >> 11) & 1]); | ||
126 | |||
127 | if (status == SOC_OK) { | ||
128 | fcp_receive_solicited(fc, token >> 12, | ||
129 | token & ((1 << 11) - 1), | ||
130 | FC_STATUS_OK, NULL); | ||
131 | } else { | ||
132 | xram_copy_from(&fchdr, (xram_p)&hwrsp->fchdr, sizeof(fchdr)); | ||
133 | /* We have intentionally defined FC_STATUS_* constants | ||
134 | * to match SOC_* constants, otherwise we'd have to | ||
135 | * translate status. | ||
136 | */ | ||
137 | fcp_receive_solicited(fc, token >> 12, | ||
138 | token & ((1 << 11) - 1), | ||
139 | status, &fchdr); | ||
140 | } | ||
141 | |||
142 | if (++sw_cq->out > sw_cq->last) { | ||
143 | sw_cq->seqno++; | ||
144 | sw_cq->out = 0; | ||
145 | } | ||
146 | |||
147 | if (sw_cq->out == sw_cq->in) { | ||
148 | sw_cq->in = xram_get_8 ((xram_p)&sw_cq->hw_cq->in); | ||
149 | if (sw_cq->out == sw_cq->in) { | ||
150 | /* Tell the hardware about it */ | ||
151 | sbus_writel((sw_cq->out << 24) | | ||
152 | (SOC_CMD_RSP_QALL & | ||
153 | ~(SOC_CMD_RSP_Q0 << SOC_SOLICITED_RSP_Q)), | ||
154 | s->regs + CMD); | ||
155 | |||
156 | /* Read it, so that we're sure it has been updated */ | ||
157 | sbus_readl(s->regs + CMD); | ||
158 | sw_cq->in = xram_get_8 ((xram_p)&sw_cq->hw_cq->in); | ||
159 | if (sw_cq->out == sw_cq->in) | ||
160 | break; | ||
161 | } | ||
162 | } | ||
163 | } | ||
164 | } | ||
165 | |||
166 | static inline void soc_request (struct soc *s, u32 cmd) | ||
167 | { | ||
168 | SOC_SETIMASK(s, s->imask & ~(cmd & SOC_CMD_REQ_QALL)); | ||
169 | SOD(("imask %08lx %08lx\n", s->imask, sbus_readl(s->regs + IMASK))); | ||
170 | |||
171 | SOD(("Queues available %08x OUT %X %X\n", cmd, | ||
172 | xram_get_8((xram_p)&s->req[0].hw_cq->out), | ||
173 | xram_get_8((xram_p)&s->req[0].hw_cq->out))) | ||
174 | if (s->port[s->curr_port].fc.state != FC_STATE_OFFLINE) { | ||
175 | fcp_queue_empty ((fc_channel *)&(s->port[s->curr_port])); | ||
176 | if (((s->req[1].in + 1) & s->req[1].last) != (s->req[1].out)) | ||
177 | fcp_queue_empty ((fc_channel *)&(s->port[1 - s->curr_port])); | ||
178 | } else { | ||
179 | fcp_queue_empty ((fc_channel *)&(s->port[1 - s->curr_port])); | ||
180 | } | ||
181 | if (s->port[1 - s->curr_port].fc.state != FC_STATE_OFFLINE) | ||
182 | s->curr_port ^= 1; | ||
183 | } | ||
184 | |||
185 | static inline void soc_unsolicited (struct soc *s) | ||
186 | { | ||
187 | soc_rsp __iomem *hwrsp, *hwrspc; | ||
188 | soc_cq_rsp *sw_cq; | ||
189 | int count; | ||
190 | int status; | ||
191 | int flags; | ||
192 | fc_channel *fc; | ||
193 | |||
194 | sw_cq = &s->rsp[SOC_UNSOLICITED_RSP_Q]; | ||
195 | if (sw_cq->pool == NULL) | ||
196 | sw_cq->pool = (soc_req __iomem *) | ||
197 | (s->xram + (xram_get_32low ((xram_p)&sw_cq->hw_cq->address))); | ||
198 | |||
199 | sw_cq->in = xram_get_8 ((xram_p)&sw_cq->hw_cq->in); | ||
200 | SOD (("soc_unsolicited, %d packets arrived\n", (sw_cq->in - sw_cq->out) & sw_cq->last)) | ||
201 | while (sw_cq->in != sw_cq->out) { | ||
202 | /* ...real work per entry here... */ | ||
203 | hwrsp = (soc_rsp __iomem *)sw_cq->pool + sw_cq->out; | ||
204 | |||
205 | hwrspc = NULL; | ||
206 | flags = xram_get_16 ((xram_p)&hwrsp->shdr.flags); | ||
207 | count = xram_get_8 ((xram_p)&hwrsp->count); | ||
208 | fc = (fc_channel *)&s->port[flags & SOC_PORT_B]; | ||
209 | SOD(("FC %08lx fcp_state_change %08lx\n", | ||
210 | (long)fc, (long)fc->fcp_state_change)) | ||
211 | |||
212 | if (count != 1) { | ||
213 | /* Ugh, continuation entries */ | ||
214 | u8 in; | ||
215 | |||
216 | if (count != 2) { | ||
217 | printk("%s: Too many continuation entries %d\n", | ||
218 | fc->name, count); | ||
219 | goto update_out; | ||
220 | } | ||
221 | |||
222 | in = sw_cq->in; | ||
223 | if (in < sw_cq->out) in += sw_cq->last + 1; | ||
224 | if (in < sw_cq->out + 2) { | ||
225 | /* Ask the hardware in case they haven't arrived yet. */ | ||
226 | sbus_writel((sw_cq->out << 24) | | ||
227 | (SOC_CMD_RSP_QALL & | ||
228 | ~(SOC_CMD_RSP_Q0 << SOC_UNSOLICITED_RSP_Q)), | ||
229 | s->regs + CMD); | ||
230 | |||
231 | /* Read it, so that we're sure it has been updated */ | ||
232 | sbus_readl(s->regs + CMD); | ||
233 | sw_cq->in = xram_get_8 ((xram_p)&sw_cq->hw_cq->in); | ||
234 | in = sw_cq->in; | ||
235 | if (in < sw_cq->out) | ||
236 | in += sw_cq->last + 1; | ||
237 | if (in < sw_cq->out + 2) /* Nothing came, let us wait */ | ||
238 | return; | ||
239 | } | ||
240 | if (sw_cq->out == sw_cq->last) | ||
241 | hwrspc = (soc_rsp __iomem *)sw_cq->pool; | ||
242 | else | ||
243 | hwrspc = hwrsp + 1; | ||
244 | } | ||
245 | |||
246 | switch (flags & ~SOC_PORT_B) { | ||
247 | case SOC_STATUS: | ||
248 | status = xram_get_32low ((xram_p)&hwrsp->status); | ||
249 | switch (status) { | ||
250 | case SOC_ONLINE: | ||
251 | SOD(("State change to ONLINE\n")); | ||
252 | fcp_state_change(fc, FC_STATE_ONLINE); | ||
253 | break; | ||
254 | case SOC_OFFLINE: | ||
255 | SOD(("State change to OFFLINE\n")); | ||
256 | fcp_state_change(fc, FC_STATE_OFFLINE); | ||
257 | break; | ||
258 | default: | ||
259 | printk ("%s: Unknown STATUS no %d\n", | ||
260 | fc->name, status); | ||
261 | break; | ||
262 | } | ||
263 | break; | ||
264 | case (SOC_UNSOLICITED|SOC_FC_HDR): | ||
265 | { | ||
266 | int r_ctl = xram_get_8 ((xram_p)&hwrsp->fchdr); | ||
267 | unsigned len; | ||
268 | char buf[64]; | ||
269 | |||
270 | if ((r_ctl & 0xf0) == R_CTL_EXTENDED_SVC) { | ||
271 | len = xram_get_32 ((xram_p)&hwrsp->shdr.bytecnt); | ||
272 | if (len < 4 || !hwrspc) { | ||
273 | printk ("%s: Invalid R_CTL %02x " | ||
274 | "continuation entries\n", | ||
275 | fc->name, r_ctl); | ||
276 | } else { | ||
277 | if (len > 60) | ||
278 | len = 60; | ||
279 | xram_copy_from (buf, (xram_p)hwrspc, | ||
280 | (len + 3) & ~3); | ||
281 | if (*(u32 *)buf == LS_DISPLAY) { | ||
282 | int i; | ||
283 | |||
284 | for (i = 4; i < len; i++) | ||
285 | if (buf[i] == '\n') | ||
286 | buf[i] = ' '; | ||
287 | buf[len] = 0; | ||
288 | printk ("%s message: %s\n", | ||
289 | fc->name, buf + 4); | ||
290 | } else { | ||
291 | printk ("%s: Unknown LS_CMD " | ||
292 | "%02x\n", fc->name, | ||
293 | buf[0]); | ||
294 | } | ||
295 | } | ||
296 | } else { | ||
297 | printk ("%s: Unsolicited R_CTL %02x " | ||
298 | "not handled\n", fc->name, r_ctl); | ||
299 | } | ||
300 | } | ||
301 | break; | ||
302 | default: | ||
303 | printk ("%s: Unexpected flags %08x\n", fc->name, flags); | ||
304 | break; | ||
305 | }; | ||
306 | update_out: | ||
307 | if (++sw_cq->out > sw_cq->last) { | ||
308 | sw_cq->seqno++; | ||
309 | sw_cq->out = 0; | ||
310 | } | ||
311 | |||
312 | if (hwrspc) { | ||
313 | if (++sw_cq->out > sw_cq->last) { | ||
314 | sw_cq->seqno++; | ||
315 | sw_cq->out = 0; | ||
316 | } | ||
317 | } | ||
318 | |||
319 | if (sw_cq->out == sw_cq->in) { | ||
320 | sw_cq->in = xram_get_8 ((xram_p)&sw_cq->hw_cq->in); | ||
321 | if (sw_cq->out == sw_cq->in) { | ||
322 | /* Tell the hardware about it */ | ||
323 | sbus_writel((sw_cq->out << 24) | | ||
324 | (SOC_CMD_RSP_QALL & | ||
325 | ~(SOC_CMD_RSP_Q0 << SOC_UNSOLICITED_RSP_Q)), | ||
326 | s->regs + CMD); | ||
327 | |||
328 | /* Read it, so that we're sure it has been updated */ | ||
329 | sbus_readl(s->regs + CMD); | ||
330 | sw_cq->in = xram_get_8 ((xram_p)&sw_cq->hw_cq->in); | ||
331 | } | ||
332 | } | ||
333 | } | ||
334 | } | ||
335 | |||
336 | static irqreturn_t soc_intr(int irq, void *dev_id) | ||
337 | { | ||
338 | u32 cmd; | ||
339 | unsigned long flags; | ||
340 | register struct soc *s = (struct soc *)dev_id; | ||
341 | |||
342 | spin_lock_irqsave(&s->lock, flags); | ||
343 | cmd = sbus_readl(s->regs + CMD); | ||
344 | for (; (cmd = SOC_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) { | ||
345 | if (cmd & SOC_CMD_RSP_Q1) soc_unsolicited (s); | ||
346 | if (cmd & SOC_CMD_RSP_Q0) soc_solicited (s); | ||
347 | if (cmd & SOC_CMD_REQ_QALL) soc_request (s, cmd); | ||
348 | } | ||
349 | spin_unlock_irqrestore(&s->lock, flags); | ||
350 | |||
351 | return IRQ_HANDLED; | ||
352 | } | ||
353 | |||
354 | #define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port)) | ||
355 | |||
356 | static int soc_hw_enque (fc_channel *fc, fcp_cmnd *fcmd) | ||
357 | { | ||
358 | soc_port *port = (soc_port *)fc; | ||
359 | struct soc *s = port->s; | ||
360 | int qno; | ||
361 | soc_cq_req *sw_cq; | ||
362 | int cq_next_in; | ||
363 | soc_req *request; | ||
364 | fc_hdr *fch; | ||
365 | int i; | ||
366 | |||
367 | if (fcmd->proto == TYPE_SCSI_FCP) | ||
368 | qno = 1; | ||
369 | else | ||
370 | qno = 0; | ||
371 | SOD(("Putting a FCP packet type %d into hw queue %d\n", fcmd->proto, qno)) | ||
372 | if (s->imask & (SOC_IMASK_REQ_Q0 << qno)) { | ||
373 | SOD(("EIO %08x\n", s->imask)) | ||
374 | return -EIO; | ||
375 | } | ||
376 | sw_cq = s->req + qno; | ||
377 | cq_next_in = (sw_cq->in + 1) & sw_cq->last; | ||
378 | |||
379 | if (cq_next_in == sw_cq->out && | ||
380 | cq_next_in == (sw_cq->out = xram_get_8((xram_p)&sw_cq->hw_cq->out))) { | ||
381 | SOD(("%d IN %d OUT %d LAST %d\n", qno, sw_cq->in, sw_cq->out, sw_cq->last)) | ||
382 | SOC_SETIMASK(s, s->imask | (SOC_IMASK_REQ_Q0 << qno)); | ||
383 | SOD(("imask %08lx %08lx\n", s->imask, sbus_readl(s->regs + IMASK))); | ||
384 | /* If queue is full, just say NO */ | ||
385 | return -EBUSY; | ||
386 | } | ||
387 | |||
388 | request = sw_cq->pool + sw_cq->in; | ||
389 | fch = &request->fchdr; | ||
390 | |||
391 | switch (fcmd->proto) { | ||
392 | case TYPE_SCSI_FCP: | ||
393 | request->shdr.token = TOKEN(TYPE_SCSI_FCP, port->mask, fcmd->token); | ||
394 | request->data[0].base = fc->dma_scsi_cmd + fcmd->token * sizeof(fcp_cmd); | ||
395 | request->data[0].count = sizeof(fcp_cmd); | ||
396 | request->data[1].base = fc->dma_scsi_rsp + fcmd->token * fc->rsp_size; | ||
397 | request->data[1].count = fc->rsp_size; | ||
398 | if (fcmd->data) { | ||
399 | request->shdr.segcnt = 3; | ||
400 | i = fc->scsi_cmd_pool[fcmd->token].fcp_data_len; | ||
401 | request->shdr.bytecnt = i; | ||
402 | request->data[2].base = fcmd->data; | ||
403 | request->data[2].count = i; | ||
404 | request->type = | ||
405 | (fc->scsi_cmd_pool[fcmd->token].fcp_cntl & FCP_CNTL_WRITE) ? | ||
406 | SOC_CQTYPE_IO_WRITE : SOC_CQTYPE_IO_READ; | ||
407 | } else { | ||
408 | request->shdr.segcnt = 2; | ||
409 | request->shdr.bytecnt = 0; | ||
410 | request->data[2].base = 0; | ||
411 | request->data[2].count = 0; | ||
412 | request->type = SOC_CQTYPE_SIMPLE; | ||
413 | } | ||
414 | FILL_FCHDR_RCTL_DID(fch, R_CTL_COMMAND, fc->did); | ||
415 | FILL_FCHDR_SID(fch, fc->sid); | ||
416 | FILL_FCHDR_TYPE_FCTL(fch, TYPE_SCSI_FCP, | ||
417 | F_CTL_FIRST_SEQ | F_CTL_SEQ_INITIATIVE); | ||
418 | FILL_FCHDR_SEQ_DF_SEQ(fch, 0, 0, 0); | ||
419 | FILL_FCHDR_OXRX(fch, 0xffff, 0xffff); | ||
420 | fch->param = 0; | ||
421 | request->shdr.flags = port->flags; | ||
422 | request->shdr.class = 2; | ||
423 | break; | ||
424 | |||
425 | case PROTO_OFFLINE: | ||
426 | memset (request, 0, sizeof(*request)); | ||
427 | request->shdr.token = TOKEN(PROTO_OFFLINE, port->mask, fcmd->token); | ||
428 | request->type = SOC_CQTYPE_OFFLINE; | ||
429 | FILL_FCHDR_RCTL_DID(fch, R_CTL_COMMAND, fc->did); | ||
430 | FILL_FCHDR_SID(fch, fc->sid); | ||
431 | FILL_FCHDR_TYPE_FCTL(fch, TYPE_SCSI_FCP, | ||
432 | F_CTL_FIRST_SEQ | F_CTL_SEQ_INITIATIVE); | ||
433 | FILL_FCHDR_SEQ_DF_SEQ(fch, 0, 0, 0); | ||
434 | FILL_FCHDR_OXRX(fch, 0xffff, 0xffff); | ||
435 | request->shdr.flags = port->flags; | ||
436 | break; | ||
437 | |||
438 | case PROTO_REPORT_AL_MAP: | ||
439 | /* SOC only supports Point-to-Point topology, no FC-AL, sorry... */ | ||
440 | return -ENOSYS; | ||
441 | |||
442 | default: | ||
443 | request->shdr.token = TOKEN(fcmd->proto, port->mask, fcmd->token); | ||
444 | request->shdr.class = 2; | ||
445 | request->shdr.flags = port->flags; | ||
446 | memcpy (fch, &fcmd->fch, sizeof(fc_hdr)); | ||
447 | request->data[0].count = fcmd->cmdlen; | ||
448 | request->data[1].count = fcmd->rsplen; | ||
449 | request->type = fcmd->class; | ||
450 | switch (fcmd->class) { | ||
451 | case FC_CLASS_OUTBOUND: | ||
452 | request->data[0].base = fcmd->cmd; | ||
453 | request->data[0].count = fcmd->cmdlen; | ||
454 | request->type = SOC_CQTYPE_OUTBOUND; | ||
455 | request->shdr.bytecnt = fcmd->cmdlen; | ||
456 | request->shdr.segcnt = 1; | ||
457 | break; | ||
458 | case FC_CLASS_INBOUND: | ||
459 | request->data[0].base = fcmd->rsp; | ||
460 | request->data[0].count = fcmd->rsplen; | ||
461 | request->type = SOC_CQTYPE_INBOUND; | ||
462 | request->shdr.bytecnt = 0; | ||
463 | request->shdr.segcnt = 1; | ||
464 | break; | ||
465 | case FC_CLASS_SIMPLE: | ||
466 | request->data[0].base = fcmd->cmd; | ||
467 | request->data[1].base = fcmd->rsp; | ||
468 | request->data[0].count = fcmd->cmdlen; | ||
469 | request->data[1].count = fcmd->rsplen; | ||
470 | request->type = SOC_CQTYPE_SIMPLE; | ||
471 | request->shdr.bytecnt = fcmd->cmdlen; | ||
472 | request->shdr.segcnt = 2; | ||
473 | break; | ||
474 | case FC_CLASS_IO_READ: | ||
475 | case FC_CLASS_IO_WRITE: | ||
476 | request->data[0].base = fcmd->cmd; | ||
477 | request->data[1].base = fcmd->rsp; | ||
478 | request->data[0].count = fcmd->cmdlen; | ||
479 | request->data[1].count = fcmd->rsplen; | ||
480 | request->type = | ||
481 | (fcmd->class == FC_CLASS_IO_READ) ? | ||
482 | SOC_CQTYPE_IO_READ : SOC_CQTYPE_IO_WRITE; | ||
483 | if (fcmd->data) { | ||
484 | request->data[2].base = fcmd->data; | ||
485 | request->data[2].count = fcmd->datalen; | ||
486 | request->shdr.bytecnt = fcmd->datalen; | ||
487 | request->shdr.segcnt = 3; | ||
488 | } else { | ||
489 | request->shdr.bytecnt = 0; | ||
490 | request->shdr.segcnt = 2; | ||
491 | } | ||
492 | break; | ||
493 | }; | ||
494 | break; | ||
495 | }; | ||
496 | |||
497 | request->count = 1; | ||
498 | request->flags = 0; | ||
499 | request->seqno = sw_cq->seqno; | ||
500 | |||
501 | /* And now tell the SOC about it */ | ||
502 | |||
503 | if (++sw_cq->in > sw_cq->last) { | ||
504 | sw_cq->in = 0; | ||
505 | sw_cq->seqno++; | ||
506 | } | ||
507 | |||
508 | SOD(("Putting %08x into cmd\n", | ||
509 | SOC_CMD_RSP_QALL | (sw_cq->in << 24) | (SOC_CMD_REQ_Q0 << qno))) | ||
510 | |||
511 | sbus_writel(SOC_CMD_RSP_QALL | (sw_cq->in << 24) | (SOC_CMD_REQ_Q0 << qno), | ||
512 | s->regs + CMD); | ||
513 | |||
514 | /* Read so that command is completed. */ | ||
515 | sbus_readl(s->regs + CMD); | ||
516 | |||
517 | return 0; | ||
518 | } | ||
519 | |||
520 | static inline void soc_download_fw(struct soc *s) | ||
521 | { | ||
522 | #ifdef HAVE_SOC_UCODE | ||
523 | xram_copy_to (s->xram, soc_ucode, sizeof(soc_ucode)); | ||
524 | xram_bzero (s->xram + sizeof(soc_ucode), 32768 - sizeof(soc_ucode)); | ||
525 | #endif | ||
526 | } | ||
527 | |||
528 | /* Check what the best SBUS burst size we can use | ||
529 | * on this machine happens to be. | ||
530 | */ | ||
531 | static inline void soc_init_bursts(struct soc *s, struct sbus_dev *sdev) | ||
532 | { | ||
533 | int bsizes, bsizes_more; | ||
534 | |||
535 | bsizes = (prom_getintdefault(sdev->prom_node,"burst-sizes",0xff) & 0xff); | ||
536 | bsizes_more = (prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff) & 0xff); | ||
537 | bsizes &= bsizes_more; | ||
538 | if ((bsizes & 0x7f) == 0x7f) | ||
539 | s->cfg = SOC_CFG_BURST_64; | ||
540 | else if ((bsizes & 0x3f) == 0x3f) | ||
541 | s->cfg = SOC_CFG_BURST_32; | ||
542 | else if ((bsizes & 0x1f) == 0x1f) | ||
543 | s->cfg = SOC_CFG_BURST_16; | ||
544 | else | ||
545 | s->cfg = SOC_CFG_BURST_4; | ||
546 | } | ||
547 | |||
548 | static inline void soc_init(struct sbus_dev *sdev, int no) | ||
549 | { | ||
550 | unsigned char tmp[60]; | ||
551 | int propl; | ||
552 | struct soc *s; | ||
553 | static int version_printed = 0; | ||
554 | soc_hw_cq cq[8]; | ||
555 | int size, i; | ||
556 | int irq; | ||
557 | |||
558 | s = kzalloc (sizeof (struct soc), GFP_KERNEL); | ||
559 | if (s == NULL) | ||
560 | return; | ||
561 | spin_lock_init(&s->lock); | ||
562 | s->soc_no = no; | ||
563 | |||
564 | SOD(("socs %08lx soc_intr %08lx soc_hw_enque %08x\n", | ||
565 | (long)socs, (long)soc_intr, (long)soc_hw_enque)) | ||
566 | if (version_printed++ == 0) | ||
567 | printk (version); | ||
568 | |||
569 | s->port[0].fc.module = THIS_MODULE; | ||
570 | s->port[1].fc.module = THIS_MODULE; | ||
571 | |||
572 | s->next = socs; | ||
573 | socs = s; | ||
574 | s->port[0].fc.dev = sdev; | ||
575 | s->port[1].fc.dev = sdev; | ||
576 | s->port[0].s = s; | ||
577 | s->port[1].s = s; | ||
578 | |||
579 | s->port[0].fc.next = &s->port[1].fc; | ||
580 | |||
581 | /* World Wide Name of SOC */ | ||
582 | propl = prom_getproperty (sdev->prom_node, "soc-wwn", tmp, sizeof(tmp)); | ||
583 | if (propl != sizeof (fc_wwn)) { | ||
584 | s->wwn.naaid = NAAID_IEEE; | ||
585 | s->wwn.lo = 0x12345678; | ||
586 | } else | ||
587 | memcpy (&s->wwn, tmp, sizeof (fc_wwn)); | ||
588 | |||
589 | propl = prom_getproperty (sdev->prom_node, "port-wwns", tmp, sizeof(tmp)); | ||
590 | if (propl != 2 * sizeof (fc_wwn)) { | ||
591 | s->port[0].fc.wwn_nport.naaid = NAAID_IEEE_EXT; | ||
592 | s->port[0].fc.wwn_nport.hi = s->wwn.hi; | ||
593 | s->port[0].fc.wwn_nport.lo = s->wwn.lo; | ||
594 | s->port[1].fc.wwn_nport.naaid = NAAID_IEEE_EXT; | ||
595 | s->port[1].fc.wwn_nport.nportid = 1; | ||
596 | s->port[1].fc.wwn_nport.hi = s->wwn.hi; | ||
597 | s->port[1].fc.wwn_nport.lo = s->wwn.lo; | ||
598 | } else { | ||
599 | memcpy (&s->port[0].fc.wwn_nport, tmp, sizeof (fc_wwn)); | ||
600 | memcpy (&s->port[1].fc.wwn_nport, tmp + sizeof (fc_wwn), sizeof (fc_wwn)); | ||
601 | } | ||
602 | memcpy (&s->port[0].fc.wwn_node, &s->wwn, sizeof (fc_wwn)); | ||
603 | memcpy (&s->port[1].fc.wwn_node, &s->wwn, sizeof (fc_wwn)); | ||
604 | SOD(("Got wwns %08x%08x ports %08x%08x and %08x%08x\n", | ||
605 | *(u32 *)&s->port[0].fc.wwn_nport, s->port[0].fc.wwn_nport.lo, | ||
606 | *(u32 *)&s->port[0].fc.wwn_nport, s->port[0].fc.wwn_nport.lo, | ||
607 | *(u32 *)&s->port[1].fc.wwn_nport, s->port[1].fc.wwn_nport.lo)) | ||
608 | |||
609 | s->port[0].fc.sid = 1; | ||
610 | s->port[1].fc.sid = 17; | ||
611 | s->port[0].fc.did = 2; | ||
612 | s->port[1].fc.did = 18; | ||
613 | |||
614 | s->port[0].fc.reset = soc_reset; | ||
615 | s->port[1].fc.reset = soc_reset; | ||
616 | |||
617 | if (sdev->num_registers == 1) { | ||
618 | /* Probably SunFire onboard SOC */ | ||
619 | s->xram = sbus_ioremap(&sdev->resource[0], 0, | ||
620 | 0x10000UL, "soc xram"); | ||
621 | s->regs = sbus_ioremap(&sdev->resource[0], 0x10000UL, | ||
622 | 0x10UL, "soc regs"); | ||
623 | } else { | ||
624 | /* Probably SOC sbus card */ | ||
625 | s->xram = sbus_ioremap(&sdev->resource[1], 0, | ||
626 | sdev->reg_addrs[1].reg_size, "soc xram"); | ||
627 | s->regs = sbus_ioremap(&sdev->resource[2], 0, | ||
628 | sdev->reg_addrs[2].reg_size, "soc regs"); | ||
629 | } | ||
630 | |||
631 | soc_init_bursts(s, sdev); | ||
632 | |||
633 | SOD(("Disabling SOC\n")) | ||
634 | |||
635 | soc_disable (s); | ||
636 | |||
637 | irq = sdev->irqs[0]; | ||
638 | |||
639 | if (request_irq (irq, soc_intr, IRQF_SHARED, "SOC", (void *)s)) { | ||
640 | soc_printk ("Cannot order irq %d to go\n", irq); | ||
641 | socs = s->next; | ||
642 | return; | ||
643 | } | ||
644 | |||
645 | SOD(("SOC uses IRQ %d\n", irq)) | ||
646 | |||
647 | s->port[0].fc.irq = irq; | ||
648 | s->port[1].fc.irq = irq; | ||
649 | |||
650 | sprintf (s->port[0].fc.name, "soc%d port A", no); | ||
651 | sprintf (s->port[1].fc.name, "soc%d port B", no); | ||
652 | s->port[0].flags = SOC_FC_HDR | SOC_PORT_A; | ||
653 | s->port[1].flags = SOC_FC_HDR | SOC_PORT_B; | ||
654 | s->port[1].mask = (1 << 11); | ||
655 | |||
656 | s->port[0].fc.hw_enque = soc_hw_enque; | ||
657 | s->port[1].fc.hw_enque = soc_hw_enque; | ||
658 | |||
659 | soc_download_fw (s); | ||
660 | |||
661 | SOD(("Downloaded firmware\n")) | ||
662 | |||
663 | /* Now setup xram circular queues */ | ||
664 | memset (cq, 0, sizeof(cq)); | ||
665 | |||
666 | size = (SOC_CQ_REQ0_SIZE + SOC_CQ_REQ1_SIZE) * sizeof(soc_req); | ||
667 | s->req_cpu = sbus_alloc_consistent(sdev, size, &s->req_dvma); | ||
668 | s->req[0].pool = s->req_cpu; | ||
669 | cq[0].address = s->req_dvma; | ||
670 | s->req[1].pool = s->req[0].pool + SOC_CQ_REQ0_SIZE; | ||
671 | |||
672 | s->req[0].hw_cq = (soc_hw_cq __iomem *)(s->xram + SOC_CQ_REQ_OFFSET); | ||
673 | s->req[1].hw_cq = (soc_hw_cq __iomem *)(s->xram + SOC_CQ_REQ_OFFSET + sizeof(soc_hw_cq)); | ||
674 | s->rsp[0].hw_cq = (soc_hw_cq __iomem *)(s->xram + SOC_CQ_RSP_OFFSET); | ||
675 | s->rsp[1].hw_cq = (soc_hw_cq __iomem *)(s->xram + SOC_CQ_RSP_OFFSET + sizeof(soc_hw_cq)); | ||
676 | |||
677 | cq[1].address = cq[0].address + (SOC_CQ_REQ0_SIZE * sizeof(soc_req)); | ||
678 | cq[4].address = 1; | ||
679 | cq[5].address = 1; | ||
680 | cq[0].last = SOC_CQ_REQ0_SIZE - 1; | ||
681 | cq[1].last = SOC_CQ_REQ1_SIZE - 1; | ||
682 | cq[4].last = SOC_CQ_RSP0_SIZE - 1; | ||
683 | cq[5].last = SOC_CQ_RSP1_SIZE - 1; | ||
684 | for (i = 0; i < 8; i++) | ||
685 | cq[i].seqno = 1; | ||
686 | |||
687 | s->req[0].last = SOC_CQ_REQ0_SIZE - 1; | ||
688 | s->req[1].last = SOC_CQ_REQ1_SIZE - 1; | ||
689 | s->rsp[0].last = SOC_CQ_RSP0_SIZE - 1; | ||
690 | s->rsp[1].last = SOC_CQ_RSP1_SIZE - 1; | ||
691 | |||
692 | s->req[0].seqno = 1; | ||
693 | s->req[1].seqno = 1; | ||
694 | s->rsp[0].seqno = 1; | ||
695 | s->rsp[1].seqno = 1; | ||
696 | |||
697 | xram_copy_to (s->xram + SOC_CQ_REQ_OFFSET, cq, sizeof(cq)); | ||
698 | |||
699 | /* Make our sw copy of SOC service parameters */ | ||
700 | xram_copy_from (s->serv_params, s->xram + 0x140, sizeof (s->serv_params)); | ||
701 | |||
702 | s->port[0].fc.common_svc = (common_svc_parm *)s->serv_params; | ||
703 | s->port[0].fc.class_svcs = (svc_parm *)(s->serv_params + 0x20); | ||
704 | s->port[1].fc.common_svc = (common_svc_parm *)&s->serv_params; | ||
705 | s->port[1].fc.class_svcs = (svc_parm *)(s->serv_params + 0x20); | ||
706 | |||
707 | soc_enable (s); | ||
708 | |||
709 | SOD(("Enabled SOC\n")) | ||
710 | } | ||
711 | |||
712 | static int __init soc_probe(void) | ||
713 | { | ||
714 | struct sbus_bus *sbus; | ||
715 | struct sbus_dev *sdev = NULL; | ||
716 | struct soc *s; | ||
717 | int cards = 0; | ||
718 | |||
719 | for_each_sbus(sbus) { | ||
720 | for_each_sbusdev(sdev, sbus) { | ||
721 | if(!strcmp(sdev->prom_name, "SUNW,soc")) { | ||
722 | soc_init(sdev, cards); | ||
723 | cards++; | ||
724 | } | ||
725 | } | ||
726 | } | ||
727 | if (!cards) return -EIO; | ||
728 | |||
729 | for_each_soc(s) | ||
730 | if (s->next) | ||
731 | s->port[1].fc.next = &s->next->port[0].fc; | ||
732 | fcp_init (&socs->port[0].fc); | ||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | static void __exit soc_cleanup(void) | ||
737 | { | ||
738 | struct soc *s; | ||
739 | int irq; | ||
740 | struct sbus_dev *sdev; | ||
741 | |||
742 | for_each_soc(s) { | ||
743 | irq = s->port[0].fc.irq; | ||
744 | free_irq (irq, s); | ||
745 | |||
746 | fcp_release(&(s->port[0].fc), 2); | ||
747 | |||
748 | sdev = s->port[0].fc.dev; | ||
749 | if (sdev->num_registers == 1) { | ||
750 | sbus_iounmap(s->xram, 0x10000UL); | ||
751 | sbus_iounmap(s->regs, 0x10UL); | ||
752 | } else { | ||
753 | sbus_iounmap(s->xram, sdev->reg_addrs[1].reg_size); | ||
754 | sbus_iounmap(s->regs, sdev->reg_addrs[2].reg_size); | ||
755 | } | ||
756 | sbus_free_consistent(sdev, | ||
757 | (SOC_CQ_REQ0_SIZE+SOC_CQ_REQ1_SIZE)*sizeof(soc_req), | ||
758 | s->req_cpu, s->req_dvma); | ||
759 | } | ||
760 | } | ||
761 | |||
762 | module_init(soc_probe); | ||
763 | module_exit(soc_cleanup); | ||
764 | MODULE_LICENSE("GPL"); | ||
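Note on the circular-queue bookkeeping seeded in soc_init() above: each request/response queue stores last = size - 1 (all sizes are powers of two) and a seqno that ticks on every wrap, and the drivers advance the in/out indices essentially as in this standalone sketch (the sketch_* names are invented here for illustration; this is not part of the deleted code):

struct sketch_cq { unsigned char in, out, last, seqno; };

/* Producer-side "queue full" test, as in the hw_enque paths: the mask
 * works because last = size - 1 and the size is a power of two. */
static int sketch_cq_full(const struct sketch_cq *cq)
{
	return ((cq->in + 1) & cq->last) == cq->out;
}

/* Consumer-side advance, as in the solicited/unsolicited handlers:
 * bump seqno each time the index wraps past last. */
static void sketch_cq_consume(struct sketch_cq *cq)
{
	if (++cq->out > cq->last) {
		cq->seqno++;
		cq->out = 0;
	}
}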
diff --git a/drivers/fc4/soc.h b/drivers/fc4/soc.h deleted file mode 100644 index d38cf5b28eed..000000000000 --- a/drivers/fc4/soc.h +++ /dev/null | |||
@@ -1,301 +0,0 @@ | |||
1 | /* soc.h: Definitions for Sparc SUNW,soc Fibre Channel Sbus driver. | ||
2 | * | ||
3 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
4 | */ | ||
5 | |||
6 | #ifndef __SOC_H | ||
7 | #define __SOC_H | ||
8 | |||
9 | #include "fc.h" | ||
10 | #include "fcp.h" | ||
11 | #include "fcp_impl.h" | ||
12 | |||
13 | /* Hardware register offsets and constants first {{{ */ | ||
14 | #define CFG 0x00UL /* Config Register */ | ||
15 | #define SAE 0x04UL /* Slave Access Error Register */ | ||
16 | #define CMD 0x08UL /* Command and Status Register */ | ||
17 | #define IMASK 0x0cUL /* Interrupt Mask Register */ | ||
18 | |||
19 | /* Config Register */ | ||
20 | #define SOC_CFG_EXT_RAM_BANK_MASK 0x07000000 | ||
21 | #define SOC_CFG_EEPROM_BANK_MASK 0x00030000 | ||
22 | #define SOC_CFG_BURST64_MASK 0x00000700 | ||
23 | #define SOC_CFG_SBUS_PARITY_TEST 0x00000020 | ||
24 | #define SOC_CFG_SBUS_PARITY_CHECK 0x00000010 | ||
25 | #define SOC_CFG_SBUS_ENHANCED 0x00000008 | ||
26 | #define SOC_CFG_BURST_MASK 0x00000007 | ||
27 | /* Bursts */ | ||
28 | #define SOC_CFG_BURST_4 0x00000000 | ||
29 | #define SOC_CFG_BURST_16 0x00000004 | ||
30 | #define SOC_CFG_BURST_32 0x00000005 | ||
31 | #define SOC_CFG_BURST_64 0x00000006 | ||
32 | |||
33 | /* Slave Access Error Register */ | ||
34 | #define SOC_SAE_ALIGNMENT 0x00000004 | ||
35 | #define SOC_SAE_UNSUPPORTED 0x00000002 | ||
36 | #define SOC_SAE_PARITY 0x00000001 | ||
37 | |||
38 | /* Command & Status Register */ | ||
39 | #define SOC_CMD_RSP_QALL 0x000f0000 | ||
40 | #define SOC_CMD_RSP_Q0 0x00010000 | ||
41 | #define SOC_CMD_RSP_Q1 0x00020000 | ||
42 | #define SOC_CMD_RSP_Q2 0x00040000 | ||
43 | #define SOC_CMD_RSP_Q3 0x00080000 | ||
44 | #define SOC_CMD_REQ_QALL 0x00000f00 | ||
45 | #define SOC_CMD_REQ_Q0 0x00000100 | ||
46 | #define SOC_CMD_REQ_Q1 0x00000200 | ||
47 | #define SOC_CMD_REQ_Q2 0x00000400 | ||
48 | #define SOC_CMD_REQ_Q3 0x00000800 | ||
49 | #define SOC_CMD_SAE 0x00000080 | ||
50 | #define SOC_CMD_INTR_PENDING 0x00000008 | ||
51 | #define SOC_CMD_NON_QUEUED 0x00000004 | ||
52 | #define SOC_CMD_IDLE 0x00000002 | ||
53 | #define SOC_CMD_SOFT_RESET 0x00000001 | ||
54 | |||
55 | /* Interrupt Mask Register */ | ||
56 | #define SOC_IMASK_RSP_QALL 0x000f0000 | ||
57 | #define SOC_IMASK_RSP_Q0 0x00010000 | ||
58 | #define SOC_IMASK_RSP_Q1 0x00020000 | ||
59 | #define SOC_IMASK_RSP_Q2 0x00040000 | ||
60 | #define SOC_IMASK_RSP_Q3 0x00080000 | ||
61 | #define SOC_IMASK_REQ_QALL 0x00000f00 | ||
62 | #define SOC_IMASK_REQ_Q0 0x00000100 | ||
63 | #define SOC_IMASK_REQ_Q1 0x00000200 | ||
64 | #define SOC_IMASK_REQ_Q2 0x00000400 | ||
65 | #define SOC_IMASK_REQ_Q3 0x00000800 | ||
66 | #define SOC_IMASK_SAE 0x00000080 | ||
67 | #define SOC_IMASK_NON_QUEUED 0x00000004 | ||
68 | |||
69 | #define SOC_INTR(s, cmd) \ | ||
70 | (((cmd & SOC_CMD_RSP_QALL) | ((~cmd) & SOC_CMD_REQ_QALL)) \ | ||
71 | & s->imask) | ||
72 | |||
73 | #define SOC_SETIMASK(s, i) \ | ||
74 | do { (s)->imask = (i); \ | ||
75 | sbus_writel((i), (s)->regs + IMASK); \ | ||
76 | } while(0) | ||
77 | |||
78 | /* XRAM | ||
79 | * | ||
80 | * This is a 64KB register area. It accepts only halfword access. | ||
81 | * That's why the following inline functions are provided... | ||
82 | */ | ||
83 | |||
84 | typedef void __iomem *xram_p; | ||
85 | |||
86 | /* Get 32bit number from XRAM */ | ||
87 | static inline u32 xram_get_32(xram_p x) | ||
88 | { | ||
89 | return ((sbus_readw(x + 0x00UL) << 16) | | ||
90 | (sbus_readw(x + 0x02UL))); | ||
91 | } | ||
92 | |||
93 | /* Like the above, but when we don't care about the high 16 bits */ | ||
94 | static inline u32 xram_get_32low(xram_p x) | ||
95 | { | ||
96 | return (u32) sbus_readw(x + 0x02UL); | ||
97 | } | ||
98 | |||
99 | static inline u16 xram_get_16(xram_p x) | ||
100 | { | ||
101 | return sbus_readw(x); | ||
102 | } | ||
103 | |||
104 | static inline u8 xram_get_8(xram_p x) | ||
105 | { | ||
106 | if ((unsigned long)x & 0x1UL) { | ||
107 | x = x - 1; | ||
108 | return (u8) sbus_readw(x); | ||
109 | } else { | ||
110 | return (u8) (sbus_readw(x) >> 8); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | static inline void xram_copy_from(void *p, xram_p x, int len) | ||
115 | { | ||
116 | for (len >>= 2; len > 0; len--, x += sizeof(u32)) { | ||
117 | u32 val, *p32 = p; | ||
118 | |||
119 | val = ((sbus_readw(x + 0x00UL) << 16) | | ||
120 | (sbus_readw(x + 0x02UL))); | ||
121 | *p32++ = val; | ||
122 | p = p32; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | static inline void xram_copy_to(xram_p x, void *p, int len) | ||
127 | { | ||
128 | for (len >>= 2; len > 0; len--, x += sizeof(u32)) { | ||
129 | u32 tmp, *p32 = p; | ||
130 | |||
131 | tmp = *p32++; | ||
132 | p = p32; | ||
133 | sbus_writew(tmp >> 16, x + 0x00UL); | ||
134 | sbus_writew(tmp, x + 0x02UL); | ||
135 | } | ||
136 | } | ||
137 | |||
138 | static inline void xram_bzero(xram_p x, int len) | ||
139 | { | ||
140 | for (len >>= 1; len > 0; len--, x += sizeof(u16)) | ||
141 | sbus_writew(0, x); | ||
142 | } | ||
143 | |||
144 | /* Circular Queue */ | ||
145 | |||
146 | #define SOC_CQ_REQ_OFFSET (0x100 * sizeof(u16)) | ||
147 | #define SOC_CQ_RSP_OFFSET (0x110 * sizeof(u16)) | ||
148 | |||
149 | typedef struct { | ||
150 | u32 address; | ||
151 | u8 in; | ||
152 | u8 out; | ||
153 | u8 last; | ||
154 | u8 seqno; | ||
155 | } soc_hw_cq; | ||
156 | |||
157 | #define SOC_PORT_A 0x0000 /* From/To Port A */ | ||
158 | #define SOC_PORT_B 0x0001 /* From/To Port B */ | ||
159 | #define SOC_FC_HDR 0x0002 /* Contains FC Header */ | ||
160 | #define SOC_NORSP 0x0004 /* Don't generate response nor interrupt */ | ||
161 | #define SOC_NOINT 0x0008 /* Generate response but not interrupt */ | ||
162 | #define SOC_XFERRDY 0x0010 /* Generate XFERRDY */ | ||
163 | #define SOC_IGNOREPARAM 0x0020 /* Ignore PARAM field in the FC header */ | ||
164 | #define SOC_COMPLETE 0x0040 /* Command completed */ | ||
165 | #define SOC_UNSOLICITED 0x0080 /* For request this is the packet to establish unsolicited pools, */ | ||
166 | /* for rsp this is unsolicited packet */ | ||
167 | #define SOC_STATUS 0x0100 /* State change (on/off line) */ | ||
168 | |||
169 | typedef struct { | ||
170 | u32 token; | ||
171 | u16 flags; | ||
172 | u8 class; | ||
173 | u8 segcnt; | ||
174 | u32 bytecnt; | ||
175 | } soc_hdr; | ||
176 | |||
177 | typedef struct { | ||
178 | u32 base; | ||
179 | u32 count; | ||
180 | } soc_data; | ||
181 | |||
182 | #define SOC_CQTYPE_OUTBOUND 0x01 | ||
183 | #define SOC_CQTYPE_INBOUND 0x02 | ||
184 | #define SOC_CQTYPE_SIMPLE 0x03 | ||
185 | #define SOC_CQTYPE_IO_WRITE 0x04 | ||
186 | #define SOC_CQTYPE_IO_READ 0x05 | ||
187 | #define SOC_CQTYPE_UNSOLICITED 0x06 | ||
188 | #define SOC_CQTYPE_DIAG 0x07 | ||
189 | #define SOC_CQTYPE_OFFLINE 0x08 | ||
190 | #define SOC_CQTYPE_RESPONSE 0x10 | ||
191 | #define SOC_CQTYPE_INLINE 0x20 | ||
192 | |||
193 | #define SOC_CQFLAGS_CONT 0x01 | ||
194 | #define SOC_CQFLAGS_FULL 0x02 | ||
195 | #define SOC_CQFLAGS_BADHDR 0x04 | ||
196 | #define SOC_CQFLAGS_BADPKT 0x08 | ||
197 | |||
198 | typedef struct { | ||
199 | soc_hdr shdr; | ||
200 | soc_data data[3]; | ||
201 | fc_hdr fchdr; | ||
202 | u8 count; | ||
203 | u8 type; | ||
204 | u8 flags; | ||
205 | u8 seqno; | ||
206 | } soc_req; | ||
207 | |||
208 | #define SOC_OK 0 | ||
209 | #define SOC_P_RJT 2 | ||
210 | #define SOC_F_RJT 3 | ||
211 | #define SOC_P_BSY 4 | ||
212 | #define SOC_F_BSY 5 | ||
213 | #define SOC_ONLINE 0x10 | ||
214 | #define SOC_OFFLINE 0x11 | ||
215 | #define SOC_TIMEOUT 0x12 | ||
216 | #define SOC_OVERRUN 0x13 | ||
217 | #define SOC_UNKOWN_CQ_TYPE 0x20 | ||
218 | #define SOC_BAD_SEG_CNT 0x21 | ||
219 | #define SOC_MAX_XCHG_EXCEEDED 0x22 | ||
220 | #define SOC_BAD_XID 0x23 | ||
221 | #define SOC_XCHG_BUSY 0x24 | ||
222 | #define SOC_BAD_POOL_ID 0x25 | ||
223 | #define SOC_INSUFFICIENT_CQES 0x26 | ||
224 | #define SOC_ALLOC_FAIL 0x27 | ||
225 | #define SOC_BAD_SID 0x28 | ||
226 | #define SOC_NO_SEG_INIT 0x29 | ||
227 | |||
228 | typedef struct { | ||
229 | soc_hdr shdr; | ||
230 | u32 status; | ||
231 | soc_data data; | ||
232 | u8 xxx1[12]; | ||
233 | fc_hdr fchdr; | ||
234 | u8 count; | ||
235 | u8 type; | ||
236 | u8 flags; | ||
237 | u8 seqno; | ||
238 | } soc_rsp; | ||
239 | |||
240 | /* }}} */ | ||
241 | |||
242 | /* Now our software structures and constants we use to drive the beast {{{ */ | ||
243 | |||
244 | #define SOC_CQ_REQ0_SIZE 4 | ||
245 | #define SOC_CQ_REQ1_SIZE 64 | ||
246 | #define SOC_CQ_RSP0_SIZE 8 | ||
247 | #define SOC_CQ_RSP1_SIZE 4 | ||
248 | |||
249 | #define SOC_SOLICITED_RSP_Q 0 | ||
250 | #define SOC_UNSOLICITED_RSP_Q 1 | ||
251 | |||
252 | struct soc; | ||
253 | |||
254 | typedef struct { | ||
255 | /* This must come first */ | ||
256 | fc_channel fc; | ||
257 | struct soc *s; | ||
258 | u16 flags; | ||
259 | u16 mask; | ||
260 | } soc_port; | ||
261 | |||
262 | typedef struct { | ||
263 | soc_hw_cq __iomem *hw_cq; /* Related XRAM cq */ | ||
264 | soc_req __iomem *pool; | ||
265 | u8 in; | ||
266 | u8 out; | ||
267 | u8 last; | ||
268 | u8 seqno; | ||
269 | } soc_cq_rsp; | ||
270 | |||
271 | typedef struct { | ||
272 | soc_hw_cq __iomem *hw_cq; /* Related XRAM cq */ | ||
273 | soc_req *pool; | ||
274 | u8 in; | ||
275 | u8 out; | ||
276 | u8 last; | ||
277 | u8 seqno; | ||
278 | } soc_cq_req; | ||
279 | |||
280 | struct soc { | ||
281 | spinlock_t lock; | ||
282 | soc_port port[2]; /* Every SOC has one or two FC ports */ | ||
283 | soc_cq_req req[2]; /* Request CQs */ | ||
284 | soc_cq_rsp rsp[2]; /* Response CQs */ | ||
285 | int soc_no; | ||
286 | void __iomem *regs; | ||
287 | xram_p xram; | ||
288 | fc_wwn wwn; | ||
289 | u32 imask; /* Our copy of regs->imask */ | ||
290 | u32 cfg; /* Our copy of regs->cfg */ | ||
291 | char serv_params[80]; | ||
292 | struct soc *next; | ||
293 | int curr_port; /* Which port will have priority to fcp_queue_empty */ | ||
294 | |||
295 | soc_req *req_cpu; | ||
296 | u32 req_dvma; | ||
297 | }; | ||
298 | |||
299 | /* }}} */ | ||
300 | |||
301 | #endif /* !(__SOC_H) */ | ||
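Because the SOC XRAM only accepts halfword accesses, the inline helpers above split every 32-bit datum into a high halfword at offset 0x0 and a low halfword at offset 0x2. A plain-C illustration of the same split, without the SBus accessors (helper names are invented here; a sketch, not driver code):

#include <stdint.h>

static void sketch_put_32(uint16_t *x, uint32_t v)
{
	x[0] = (uint16_t)(v >> 16);	/* high half at offset 0x0 */
	x[1] = (uint16_t)v;		/* low half at offset 0x2 */
}

static uint32_t sketch_get_32(const uint16_t *x)
{
	return ((uint32_t)x[0] << 16) | x[1];
}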
diff --git a/drivers/fc4/socal.c b/drivers/fc4/socal.c deleted file mode 100644 index c903ebfab526..000000000000 --- a/drivers/fc4/socal.c +++ /dev/null | |||
@@ -1,904 +0,0 @@ | |||
1 | /* socal.c: Sparc SUNW,socal (SOC+) Fibre Channel Sbus adapter support. | ||
2 | * | ||
3 | * Copyright (C) 1998,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | * | ||
5 | * Sources: | ||
6 | * Fibre Channel Physical & Signaling Interface (FC-PH), dpANS, 1994 | ||
7 | * dpANS Fibre Channel Protocol for SCSI (X3.269-199X), Rev. 012, 1995 | ||
8 | * SOC+ Programming Guide 0.1 | ||
9 | * Fibre Channel Arbitrated Loop (FC-AL), dpANS rev. 4.5, 1995 | ||
10 | * | ||
11 | * Supported hardware: | ||
12 | * On-board SOC+ adapters of Ultra Enterprise servers and sun4d. | ||
13 | */ | ||
14 | |||
15 | static char *version = | ||
16 | "socal.c: SOC+ driver v1.1 9/Feb/99 Jakub Jelinek (jj@ultra.linux.cz)\n"; | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/fcntl.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/bitops.h> | ||
30 | #include <asm/system.h> | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/dma.h> | ||
33 | #include <linux/errno.h> | ||
34 | #include <asm/byteorder.h> | ||
35 | |||
36 | #include <asm/openprom.h> | ||
37 | #include <asm/oplib.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/irq.h> | ||
40 | |||
41 | /* #define SOCALDEBUG */ | ||
42 | /* #define HAVE_SOCAL_UCODE */ | ||
43 | /* #define USE_64BIT_MODE */ | ||
44 | |||
45 | #include "fcp_impl.h" | ||
46 | #include "socal.h" | ||
47 | #ifdef HAVE_SOCAL_UCODE | ||
48 | #include "socal_asm.h" | ||
49 | #endif | ||
50 | |||
51 | #define socal_printk printk ("socal%d: ", s->socal_no); printk | ||
52 | |||
53 | #ifdef SOCALDEBUG | ||
54 | #define SOD(x) socal_printk x; | ||
55 | #else | ||
56 | #define SOD(x) | ||
57 | #endif | ||
58 | |||
59 | #define for_each_socal(s) for (s = socals; s; s = s->next) | ||
60 | struct socal *socals = NULL; | ||
61 | |||
62 | static void socal_copy_from_xram(void *d, void __iomem *xram, long size) | ||
63 | { | ||
64 | u32 *dp = (u32 *) d; | ||
65 | while (size) { | ||
66 | *dp++ = sbus_readl(xram); | ||
67 | xram += sizeof(u32); | ||
68 | size -= sizeof(u32); | ||
69 | } | ||
70 | } | ||
71 | |||
72 | static void socal_copy_to_xram(void __iomem *xram, void *s, long size) | ||
73 | { | ||
74 | u32 *sp = (u32 *) s; | ||
75 | while (size) { | ||
76 | u32 val = *sp++; | ||
77 | sbus_writel(val, xram); | ||
78 | xram += sizeof(u32); | ||
79 | size -= sizeof(u32); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | #ifdef HAVE_SOCAL_UCODE | ||
84 | static void socal_bzero(unsigned long xram, int size) | ||
85 | { | ||
86 | while (size) { | ||
87 | sbus_writel(0, xram); | ||
88 | xram += sizeof(u32); | ||
89 | size -= sizeof(u32); | ||
90 | } | ||
91 | } | ||
92 | #endif | ||
93 | |||
94 | static inline void socal_disable(struct socal *s) | ||
95 | { | ||
96 | sbus_writel(0, s->regs + IMASK); | ||
97 | sbus_writel(SOCAL_CMD_SOFT_RESET, s->regs + CMD); | ||
98 | } | ||
99 | |||
100 | static inline void socal_enable(struct socal *s) | ||
101 | { | ||
102 | SOD(("enable %08x\n", s->cfg)) | ||
103 | sbus_writel(0, s->regs + SAE); | ||
104 | sbus_writel(s->cfg, s->regs + CFG); | ||
105 | sbus_writel(SOCAL_CMD_RSP_QALL, s->regs + CMD); | ||
106 | SOCAL_SETIMASK(s, SOCAL_IMASK_RSP_QALL | SOCAL_IMASK_SAE); | ||
107 | SOD(("imask %08x %08x\n", s->imask, sbus_readl(s->regs + IMASK))); | ||
108 | } | ||
109 | |||
110 | static void socal_reset(fc_channel *fc) | ||
111 | { | ||
112 | socal_port *port = (socal_port *)fc; | ||
113 | struct socal *s = port->s; | ||
114 | |||
115 | /* FIXME */ | ||
116 | socal_disable(s); | ||
117 | s->req[0].seqno = 1; | ||
118 | s->req[1].seqno = 1; | ||
119 | s->rsp[0].seqno = 1; | ||
120 | s->rsp[1].seqno = 1; | ||
121 | s->req[0].in = 0; | ||
122 | s->req[1].in = 0; | ||
123 | s->rsp[0].in = 0; | ||
124 | s->rsp[1].in = 0; | ||
125 | s->req[0].out = 0; | ||
126 | s->req[1].out = 0; | ||
127 | s->rsp[0].out = 0; | ||
128 | s->rsp[1].out = 0; | ||
129 | |||
130 | /* FIXME */ | ||
131 | socal_enable(s); | ||
132 | } | ||
133 | |||
134 | static inline void socal_solicited(struct socal *s, unsigned long qno) | ||
135 | { | ||
136 | socal_rsp *hwrsp; | ||
137 | socal_cq *sw_cq; | ||
138 | int token; | ||
139 | int status; | ||
140 | fc_channel *fc; | ||
141 | |||
142 | sw_cq = &s->rsp[qno]; | ||
143 | |||
144 | /* Finally an improvement against old SOC :) */ | ||
145 | sw_cq->in = sbus_readb(s->regs + RESP + qno); | ||
146 | SOD (("socal_solicited, %d packets arrived\n", | ||
147 | (sw_cq->in - sw_cq->out) & sw_cq->last)) | ||
148 | for (;;) { | ||
149 | hwrsp = (socal_rsp *)sw_cq->pool + sw_cq->out; | ||
150 | SOD(("hwrsp %p out %d\n", hwrsp, sw_cq->out)) | ||
151 | |||
152 | #if defined(SOCALDEBUG) && 0 | ||
153 | { | ||
154 | u32 *u = (u32 *)hwrsp; | ||
155 | SOD(("%08x.%08x.%08x.%08x.%08x.%08x.%08x.%08x\n", | ||
156 | u[0],u[1],u[2],u[3],u[4],u[5],u[6],u[7])) | ||
157 | u += 8; | ||
158 | SOD(("%08x.%08x.%08x.%08x.%08x.%08x.%08x.%08x\n", | ||
159 | u[0],u[1],u[2],u[3],u[4],u[5],u[6],u[7])) | ||
160 | u = (u32 *)s->xram; | ||
161 | while (u < ((u32 *)s->regs)) { | ||
162 | if (sbus_readl(&u[0]) == 0x00003000 || | ||
163 | sbus_readl(&u[0]) == 0x00003801) { | ||
164 | SOD(("Found at %04lx\n", | ||
165 | (unsigned long)u - (unsigned long)s->xram)) | ||
166 | SOD((" %08x.%08x.%08x.%08x.%08x.%08x.%08x.%08x\n", | ||
167 | sbus_readl(&u[0]), sbus_readl(&u[1]), | ||
168 | sbus_readl(&u[2]), sbus_readl(&u[3]), | ||
169 | sbus_readl(&u[4]), sbus_readl(&u[5]), | ||
170 | sbus_readl(&u[6]), sbus_readl(&u[7]))) | ||
171 | u += 8; | ||
172 | SOD((" %08x.%08x.%08x.%08x.%08x.%08x.%08x.%08x\n", | ||
173 | sbus_readl(&u[0]), sbus_readl(&u[1]), | ||
174 | sbus_readl(&u[2]), sbus_readl(&u[3]), | ||
175 | sbus_readl(&u[4]), sbus_readl(&u[5]), | ||
176 | sbus_readl(&u[6]), sbus_readl(&u[7]))) | ||
177 | u -= 8; | ||
178 | } | ||
179 | u++; | ||
180 | } | ||
181 | } | ||
182 | #endif | ||
183 | |||
184 | token = hwrsp->shdr.token; | ||
185 | status = hwrsp->status; | ||
186 | fc = (fc_channel *)(&s->port[(token >> 11) & 1]); | ||
187 | |||
188 | SOD(("Solicited token %08x status %08x\n", token, status)) | ||
189 | if (status == SOCAL_OK) { | ||
190 | fcp_receive_solicited(fc, token >> 12, | ||
191 | token & ((1 << 11) - 1), | ||
192 | FC_STATUS_OK, NULL); | ||
193 | } else { | ||
194 | /* We have intentionally defined FC_STATUS_* constants | ||
195 | * to match SOCAL_* constants, otherwise we'd have to | ||
196 | * translate status. | ||
197 | */ | ||
198 | fcp_receive_solicited(fc, token >> 12, | ||
199 | token & ((1 << 11) - 1), status, &hwrsp->fchdr); | ||
200 | } | ||
201 | |||
202 | if (++sw_cq->out > sw_cq->last) { | ||
203 | sw_cq->seqno++; | ||
204 | sw_cq->out = 0; | ||
205 | } | ||
206 | |||
207 | if (sw_cq->out == sw_cq->in) { | ||
208 | sw_cq->in = sbus_readb(s->regs + RESP + qno); | ||
209 | if (sw_cq->out == sw_cq->in) { | ||
210 | /* Tell the hardware about it */ | ||
211 | sbus_writel((sw_cq->out << 24) | | ||
212 | (SOCAL_CMD_RSP_QALL & | ||
213 | ~(SOCAL_CMD_RSP_Q0 << qno)), | ||
214 | s->regs + CMD); | ||
215 | |||
216 | /* Read it, so that we're sure it has been updated */ | ||
217 | sbus_readl(s->regs + CMD); | ||
218 | sw_cq->in = sbus_readb(s->regs + RESP + qno); | ||
219 | if (sw_cq->out == sw_cq->in) | ||
220 | break; | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | } | ||
225 | |||
226 | static inline void socal_request (struct socal *s, u32 cmd) | ||
227 | { | ||
228 | SOCAL_SETIMASK(s, s->imask & ~(cmd & SOCAL_CMD_REQ_QALL)); | ||
229 | SOD(("imask %08x %08x\n", s->imask, sbus_readl(s->regs + IMASK))); | ||
230 | |||
231 | SOD(("Queues available %08x OUT %X\n", cmd, s->regs->reqpr[0])) | ||
232 | if (s->port[s->curr_port].fc.state != FC_STATE_OFFLINE) { | ||
233 | fcp_queue_empty ((fc_channel *)&(s->port[s->curr_port])); | ||
234 | if (((s->req[1].in + 1) & s->req[1].last) != (s->req[1].out)) | ||
235 | fcp_queue_empty ((fc_channel *)&(s->port[1 - s->curr_port])); | ||
236 | } else { | ||
237 | fcp_queue_empty ((fc_channel *)&(s->port[1 - s->curr_port])); | ||
238 | } | ||
239 | if (s->port[1 - s->curr_port].fc.state != FC_STATE_OFFLINE) | ||
240 | s->curr_port ^= 1; | ||
241 | } | ||
242 | |||
243 | static inline void socal_unsolicited (struct socal *s, unsigned long qno) | ||
244 | { | ||
245 | socal_rsp *hwrsp, *hwrspc; | ||
246 | socal_cq *sw_cq; | ||
247 | int count; | ||
248 | int status; | ||
249 | int flags; | ||
250 | fc_channel *fc; | ||
251 | |||
252 | sw_cq = &s->rsp[qno]; | ||
253 | |||
254 | sw_cq->in = sbus_readb(s->regs + RESP + qno); | ||
255 | SOD (("socal_unsolicited, %d packets arrived, in %d\n", | ||
256 | (sw_cq->in - sw_cq->out) & sw_cq->last, sw_cq->in)) | ||
257 | while (sw_cq->in != sw_cq->out) { | ||
258 | /* ...real work per entry here... */ | ||
259 | hwrsp = (socal_rsp *)sw_cq->pool + sw_cq->out; | ||
260 | SOD(("hwrsp %p out %d\n", hwrsp, sw_cq->out)) | ||
261 | |||
262 | #if defined(SOCALDEBUG) && 0 | ||
263 | { | ||
264 | u32 *u = (u32 *)hwrsp; | ||
265 | SOD(("%08x.%08x.%08x.%08x.%08x.%08x.%08x.%08x\n", | ||
266 | u[0],u[1],u[2],u[3],u[4],u[5],u[6],u[7])) | ||
267 | u += 8; | ||
268 | SOD(("%08x.%08x.%08x.%08x.%08x.%08x.%08x.%08x\n", | ||
269 | u[0],u[1],u[2],u[3],u[4],u[5],u[6],u[7])) | ||
270 | } | ||
271 | #endif | ||
272 | |||
273 | hwrspc = NULL; | ||
274 | flags = hwrsp->shdr.flags; | ||
275 | count = hwrsp->count; | ||
276 | fc = (fc_channel *)&s->port[flags & SOCAL_PORT_B]; | ||
277 | SOD(("FC %08lx\n", (long)fc)) | ||
278 | |||
279 | if (count != 1) { | ||
280 | /* Ugh, continuation entries */ | ||
281 | u8 in; | ||
282 | |||
283 | if (count != 2) { | ||
284 | printk("%s: Too many continuations entries %d\n", | ||
285 | fc->name, count); | ||
286 | goto update_out; | ||
287 | } | ||
288 | |||
289 | in = sw_cq->in; | ||
290 | if (in < sw_cq->out) | ||
291 | in += sw_cq->last + 1; | ||
292 | if (in < sw_cq->out + 2) { | ||
293 | /* Ask the hardware if they haven't arrived yet. */ | ||
294 | sbus_writel((sw_cq->out << 24) | | ||
295 | (SOCAL_CMD_RSP_QALL & | ||
296 | ~(SOCAL_CMD_RSP_Q0 << qno)), | ||
297 | s->regs + CMD); | ||
298 | |||
299 | /* Read it, so that we're sure it has been updated */ | ||
300 | sbus_readl(s->regs + CMD); | ||
301 | sw_cq->in = sbus_readb(s->regs + RESP + qno); | ||
302 | in = sw_cq->in; | ||
303 | if (in < sw_cq->out) | ||
304 | in += sw_cq->last + 1; | ||
305 | if (in < sw_cq->out + 2) /* Nothing came, let us wait */ | ||
306 | return; | ||
307 | } | ||
308 | if (sw_cq->out == sw_cq->last) | ||
309 | hwrspc = (socal_rsp *)sw_cq->pool; | ||
310 | else | ||
311 | hwrspc = hwrsp + 1; | ||
312 | } | ||
313 | |||
314 | switch (flags & ~SOCAL_PORT_B) { | ||
315 | case SOCAL_STATUS: | ||
316 | status = hwrsp->status; | ||
317 | switch (status) { | ||
318 | case SOCAL_ONLINE: | ||
319 | SOD(("State change to ONLINE\n")); | ||
320 | fcp_state_change(fc, FC_STATE_ONLINE); | ||
321 | break; | ||
322 | case SOCAL_ONLINE_LOOP: | ||
323 | SOD(("State change to ONLINE_LOOP\n")); | ||
324 | fcp_state_change(fc, FC_STATE_ONLINE); | ||
325 | break; | ||
326 | case SOCAL_OFFLINE: | ||
327 | SOD(("State change to OFFLINE\n")); | ||
328 | fcp_state_change(fc, FC_STATE_OFFLINE); | ||
329 | break; | ||
330 | default: | ||
331 | printk ("%s: Unknown STATUS no %d\n", | ||
332 | fc->name, status); | ||
333 | break; | ||
334 | }; | ||
335 | |||
336 | break; | ||
337 | case (SOCAL_UNSOLICITED|SOCAL_FC_HDR): | ||
338 | { | ||
339 | int r_ctl = *((u8 *)&hwrsp->fchdr); | ||
340 | unsigned len; | ||
341 | |||
342 | if ((r_ctl & 0xf0) == R_CTL_EXTENDED_SVC) { | ||
343 | len = hwrsp->shdr.bytecnt; | ||
344 | if (len < 4 || !hwrspc) { | ||
345 | printk ("%s: Invalid R_CTL %02x " | ||
346 | "continuation entries\n", | ||
347 | fc->name, r_ctl); | ||
348 | } else { | ||
349 | if (len > 60) | ||
350 | len = 60; | ||
351 | if (*(u32 *)hwrspc == LS_DISPLAY) { | ||
352 | int i; | ||
353 | |||
354 | for (i = 4; i < len; i++) | ||
355 | if (((u8 *)hwrspc)[i] == '\n') | ||
356 | ((u8 *)hwrspc)[i] = ' '; | ||
357 | ((u8 *)hwrspc)[len] = 0; | ||
358 | printk ("%s message: %s\n", | ||
359 | fc->name, ((u8 *)hwrspc) + 4); | ||
360 | } else { | ||
361 | printk ("%s: Unknown LS_CMD " | ||
362 | "%08x\n", fc->name, | ||
363 | *(u32 *)hwrspc); | ||
364 | } | ||
365 | } | ||
366 | } else { | ||
367 | printk ("%s: Unsolicited R_CTL %02x " | ||
368 | "not handled\n", fc->name, r_ctl); | ||
369 | } | ||
370 | } | ||
371 | break; | ||
372 | default: | ||
373 | printk ("%s: Unexpected flags %08x\n", fc->name, flags); | ||
374 | break; | ||
375 | }; | ||
376 | update_out: | ||
377 | if (++sw_cq->out > sw_cq->last) { | ||
378 | sw_cq->seqno++; | ||
379 | sw_cq->out = 0; | ||
380 | } | ||
381 | |||
382 | if (hwrspc) { | ||
383 | if (++sw_cq->out > sw_cq->last) { | ||
384 | sw_cq->seqno++; | ||
385 | sw_cq->out = 0; | ||
386 | } | ||
387 | } | ||
388 | |||
389 | if (sw_cq->out == sw_cq->in) { | ||
390 | sw_cq->in = sbus_readb(s->regs + RESP + qno); | ||
391 | if (sw_cq->out == sw_cq->in) { | ||
392 | /* Tell the hardware about it */ | ||
393 | sbus_writel((sw_cq->out << 24) | | ||
394 | (SOCAL_CMD_RSP_QALL & | ||
395 | ~(SOCAL_CMD_RSP_Q0 << qno)), | ||
396 | s->regs + CMD); | ||
397 | |||
398 | /* Read it, so that we're sure it has been updated */ | ||
399 | sbus_readl(s->regs + CMD); | ||
400 | sw_cq->in = sbus_readb(s->regs + RESP + qno); | ||
401 | } | ||
402 | } | ||
403 | } | ||
404 | } | ||
405 | |||
406 | static irqreturn_t socal_intr(int irq, void *dev_id) | ||
407 | { | ||
408 | u32 cmd; | ||
409 | unsigned long flags; | ||
410 | register struct socal *s = (struct socal *)dev_id; | ||
411 | |||
412 | spin_lock_irqsave(&s->lock, flags); | ||
413 | cmd = sbus_readl(s->regs + CMD); | ||
414 | for (; (cmd = SOCAL_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) { | ||
415 | #ifdef SOCALDEBUG | ||
416 | static int cnt = 0; | ||
417 | if (cnt++ < 50) | ||
418 | printk("soc_intr %08x\n", cmd); | ||
419 | #endif | ||
420 | if (cmd & SOCAL_CMD_RSP_Q2) | ||
421 | socal_unsolicited (s, SOCAL_UNSOLICITED_RSP_Q); | ||
422 | if (cmd & SOCAL_CMD_RSP_Q1) | ||
423 | socal_unsolicited (s, SOCAL_SOLICITED_BAD_RSP_Q); | ||
424 | if (cmd & SOCAL_CMD_RSP_Q0) | ||
425 | socal_solicited (s, SOCAL_SOLICITED_RSP_Q); | ||
426 | if (cmd & SOCAL_CMD_REQ_QALL) | ||
427 | socal_request (s, cmd); | ||
428 | } | ||
429 | spin_unlock_irqrestore(&s->lock, flags); | ||
430 | |||
431 | return IRQ_HANDLED; | ||
432 | } | ||
433 | |||
434 | #define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port)) | ||
435 | |||
436 | static int socal_hw_enque (fc_channel *fc, fcp_cmnd *fcmd) | ||
437 | { | ||
438 | socal_port *port = (socal_port *)fc; | ||
439 | struct socal *s = port->s; | ||
440 | unsigned long qno; | ||
441 | socal_cq *sw_cq; | ||
442 | int cq_next_in; | ||
443 | socal_req *request; | ||
444 | fc_hdr *fch; | ||
445 | int i; | ||
446 | |||
447 | if (fcmd->proto == TYPE_SCSI_FCP) | ||
448 | qno = 1; | ||
449 | else | ||
450 | qno = 0; | ||
451 | SOD(("Putting a FCP packet type %d into hw queue %d\n", fcmd->proto, qno)) | ||
452 | if (s->imask & (SOCAL_IMASK_REQ_Q0 << qno)) { | ||
453 | SOD(("EIO %08x\n", s->imask)) | ||
454 | return -EIO; | ||
455 | } | ||
456 | sw_cq = s->req + qno; | ||
457 | cq_next_in = (sw_cq->in + 1) & sw_cq->last; | ||
458 | |||
459 | if (cq_next_in == sw_cq->out && | ||
460 | cq_next_in == (sw_cq->out = sbus_readb(s->regs + REQP + qno))) { | ||
461 | SOD(("%d IN %d OUT %d LAST %d\n", | ||
462 | qno, sw_cq->in, | ||
463 | sw_cq->out, sw_cq->last)) | ||
464 | SOCAL_SETIMASK(s, s->imask | (SOCAL_IMASK_REQ_Q0 << qno)); | ||
465 | SOD(("imask %08x %08x\n", s->imask, sbus_readl(s->regs + IMASK))); | ||
466 | |||
467 | /* If queue is full, just say NO. */ | ||
468 | return -EBUSY; | ||
469 | } | ||
470 | |||
471 | request = sw_cq->pool + sw_cq->in; | ||
472 | fch = &request->fchdr; | ||
473 | |||
474 | switch (fcmd->proto) { | ||
475 | case TYPE_SCSI_FCP: | ||
476 | request->shdr.token = TOKEN(TYPE_SCSI_FCP, port->mask, fcmd->token); | ||
477 | request->data[0].base = fc->dma_scsi_cmd + fcmd->token * sizeof(fcp_cmd); | ||
478 | request->data[0].count = sizeof(fcp_cmd); | ||
479 | request->data[1].base = fc->dma_scsi_rsp + fcmd->token * fc->rsp_size; | ||
480 | request->data[1].count = fc->rsp_size; | ||
481 | if (fcmd->data) { | ||
482 | request->shdr.segcnt = 3; | ||
483 | i = fc->scsi_cmd_pool[fcmd->token].fcp_data_len; | ||
484 | request->shdr.bytecnt = i; | ||
485 | request->data[2].base = fcmd->data; | ||
486 | request->data[2].count = i; | ||
487 | request->type = (fc->scsi_cmd_pool[fcmd->token].fcp_cntl & FCP_CNTL_WRITE) ? | ||
488 | SOCAL_CQTYPE_IO_WRITE : SOCAL_CQTYPE_IO_READ; | ||
489 | } else { | ||
490 | request->shdr.segcnt = 2; | ||
491 | request->shdr.bytecnt = 0; | ||
492 | request->data[2].base = 0; | ||
493 | request->data[2].count = 0; | ||
494 | request->type = SOCAL_CQTYPE_SIMPLE; | ||
495 | } | ||
496 | FILL_FCHDR_RCTL_DID(fch, R_CTL_COMMAND, fcmd->did); | ||
497 | FILL_FCHDR_SID(fch, fc->sid); | ||
498 | FILL_FCHDR_TYPE_FCTL(fch, TYPE_SCSI_FCP, F_CTL_FIRST_SEQ | F_CTL_SEQ_INITIATIVE); | ||
499 | FILL_FCHDR_SEQ_DF_SEQ(fch, 0, 0, 0); | ||
500 | FILL_FCHDR_OXRX(fch, 0xffff, 0xffff); | ||
501 | fch->param = 0; | ||
502 | request->shdr.flags = port->flags; | ||
503 | request->shdr.class = fc->posmap ? 3 : 2; | ||
504 | break; | ||
505 | |||
506 | case PROTO_OFFLINE: | ||
507 | memset (request, 0, sizeof(*request)); | ||
508 | request->shdr.token = TOKEN(PROTO_OFFLINE, port->mask, fcmd->token); | ||
509 | request->type = SOCAL_CQTYPE_OFFLINE; | ||
510 | FILL_FCHDR_RCTL_DID(fch, R_CTL_COMMAND, fcmd->did); | ||
511 | FILL_FCHDR_SID(fch, fc->sid); | ||
512 | FILL_FCHDR_TYPE_FCTL(fch, TYPE_SCSI_FCP, F_CTL_FIRST_SEQ | F_CTL_SEQ_INITIATIVE); | ||
513 | FILL_FCHDR_SEQ_DF_SEQ(fch, 0, 0, 0); | ||
514 | FILL_FCHDR_OXRX(fch, 0xffff, 0xffff); | ||
515 | request->shdr.flags = port->flags; | ||
516 | break; | ||
517 | |||
518 | case PROTO_REPORT_AL_MAP: | ||
519 | memset (request, 0, sizeof(*request)); | ||
520 | request->shdr.token = TOKEN(PROTO_REPORT_AL_MAP, port->mask, fcmd->token); | ||
521 | request->type = SOCAL_CQTYPE_REPORT_MAP; | ||
522 | request->shdr.flags = port->flags; | ||
523 | request->shdr.segcnt = 1; | ||
524 | request->shdr.bytecnt = sizeof(fc_al_posmap); | ||
525 | request->data[0].base = fcmd->cmd; | ||
526 | request->data[0].count = sizeof(fc_al_posmap); | ||
527 | break; | ||
528 | |||
529 | default: | ||
530 | request->shdr.token = TOKEN(fcmd->proto, port->mask, fcmd->token); | ||
531 | request->shdr.class = fc->posmap ? 3 : 2; | ||
532 | request->shdr.flags = port->flags; | ||
533 | memcpy (fch, &fcmd->fch, sizeof(fc_hdr)); | ||
534 | request->data[0].count = fcmd->cmdlen; | ||
535 | request->data[1].count = fcmd->rsplen; | ||
536 | request->type = fcmd->class; | ||
537 | switch (fcmd->class) { | ||
538 | case FC_CLASS_OUTBOUND: | ||
539 | request->data[0].base = fcmd->cmd; | ||
540 | request->data[0].count = fcmd->cmdlen; | ||
541 | request->type = SOCAL_CQTYPE_OUTBOUND; | ||
542 | request->shdr.bytecnt = fcmd->cmdlen; | ||
543 | request->shdr.segcnt = 1; | ||
544 | break; | ||
545 | case FC_CLASS_INBOUND: | ||
546 | request->data[0].base = fcmd->rsp; | ||
547 | request->data[0].count = fcmd->rsplen; | ||
548 | request->type = SOCAL_CQTYPE_INBOUND; | ||
549 | request->shdr.bytecnt = 0; | ||
550 | request->shdr.segcnt = 1; | ||
551 | break; | ||
552 | case FC_CLASS_SIMPLE: | ||
553 | request->data[0].base = fcmd->cmd; | ||
554 | request->data[1].base = fcmd->rsp; | ||
555 | request->data[0].count = fcmd->cmdlen; | ||
556 | request->data[1].count = fcmd->rsplen; | ||
557 | request->type = SOCAL_CQTYPE_SIMPLE; | ||
558 | request->shdr.bytecnt = fcmd->cmdlen; | ||
559 | request->shdr.segcnt = 2; | ||
560 | break; | ||
561 | case FC_CLASS_IO_READ: | ||
562 | case FC_CLASS_IO_WRITE: | ||
563 | request->data[0].base = fcmd->cmd; | ||
564 | request->data[1].base = fcmd->rsp; | ||
565 | request->data[0].count = fcmd->cmdlen; | ||
566 | request->data[1].count = fcmd->rsplen; | ||
567 | request->type = (fcmd->class == FC_CLASS_IO_READ) ? SOCAL_CQTYPE_IO_READ : SOCAL_CQTYPE_IO_WRITE; | ||
568 | if (fcmd->data) { | ||
569 | request->data[2].base = fcmd->data; | ||
570 | request->data[2].count = fcmd->datalen; | ||
571 | request->shdr.bytecnt = fcmd->datalen; | ||
572 | request->shdr.segcnt = 3; | ||
573 | } else { | ||
574 | request->shdr.bytecnt = 0; | ||
575 | request->shdr.segcnt = 2; | ||
576 | } | ||
577 | break; | ||
578 | } | ||
579 | break; | ||
580 | } | ||
581 | |||
582 | request->count = 1; | ||
583 | request->flags = 0; | ||
584 | request->seqno = sw_cq->seqno; | ||
585 | |||
586 | SOD(("queueing token %08x\n", request->shdr.token)) | ||
587 | |||
588 | /* And now tell the SOCAL about it */ | ||
589 | |||
590 | if (++sw_cq->in > sw_cq->last) { | ||
591 | sw_cq->in = 0; | ||
592 | sw_cq->seqno++; | ||
593 | } | ||
594 | |||
595 | SOD(("Putting %08x into cmd\n", SOCAL_CMD_RSP_QALL | (sw_cq->in << 24) | (SOCAL_CMD_REQ_Q0 << qno))) | ||
596 | |||
597 | sbus_writel(SOCAL_CMD_RSP_QALL | (sw_cq->in << 24) | (SOCAL_CMD_REQ_Q0 << qno), | ||
598 | s->regs + CMD); | ||
599 | |||
600 | /* Read so that command is completed */ | ||
601 | sbus_readl(s->regs + CMD); | ||
602 | |||
603 | return 0; | ||
604 | } | ||
605 | |||
606 | static inline void socal_download_fw(struct socal *s) | ||
607 | { | ||
608 | #ifdef HAVE_SOCAL_UCODE | ||
609 | SOD(("Loading %ld bytes from %p to %p\n", sizeof(socal_ucode), socal_ucode, s->xram)) | ||
610 | socal_copy_to_xram(s->xram, socal_ucode, sizeof(socal_ucode)); | ||
611 | SOD(("Clearing the rest of memory\n")) | ||
612 | socal_bzero (s->xram + sizeof(socal_ucode), 65536 - sizeof(socal_ucode)); | ||
613 | SOD(("Done\n")) | ||
614 | #endif | ||
615 | } | ||
616 | |||
617 | /* Check what the best SBUS burst size we can use happens | ||
618 | * to be on this machine. | ||
619 | */ | ||
620 | static inline void socal_init_bursts(struct socal *s, struct sbus_dev *sdev) | ||
621 | { | ||
622 | int bsizes, bsizes_more; | ||
623 | u32 cfg; | ||
624 | |||
625 | bsizes = (prom_getintdefault(sdev->prom_node,"burst-sizes",0xff) & 0xff); | ||
626 | bsizes_more = (prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff) & 0xff); | ||
627 | bsizes &= bsizes_more; | ||
628 | #ifdef USE_64BIT_MODE | ||
629 | #ifdef __sparc_v9__ | ||
630 | mmu_set_sbus64(sdev, bsizes >> 16); | ||
631 | #endif | ||
632 | #endif | ||
633 | if ((bsizes & 0x7f) == 0x7f) | ||
634 | cfg = SOCAL_CFG_BURST_64; | ||
635 | else if ((bsizes & 0x3f) == 0x3f) | ||
636 | cfg = SOCAL_CFG_BURST_32; | ||
637 | else if ((bsizes & 0x1f) == 0x1f) | ||
638 | cfg = SOCAL_CFG_BURST_16; | ||
639 | else | ||
640 | cfg = SOCAL_CFG_BURST_4; | ||
641 | #ifdef USE_64BIT_MODE | ||
642 | #ifdef __sparc_v9__ | ||
643 | /* What is BURST_128? -jj */ | ||
644 | if ((bsizes & 0x780000) == 0x780000) | ||
645 | cfg |= (SOCAL_CFG_BURST_64 << 8) | SOCAL_CFG_SBUS_ENHANCED; | ||
646 | else if ((bsizes & 0x380000) == 0x380000) | ||
647 | cfg |= (SOCAL_CFG_BURST_32 << 8) | SOCAL_CFG_SBUS_ENHANCED; | ||
648 | else if ((bsizes & 0x180000) == 0x180000) | ||
649 | cfg |= (SOCAL_CFG_BURST_16 << 8) | SOCAL_CFG_SBUS_ENHANCED; | ||
650 | else | ||
651 | cfg |= (SOCAL_CFG_BURST_8 << 8) | SOCAL_CFG_SBUS_ENHANCED; | ||
652 | #endif | ||
653 | #endif | ||
654 | s->cfg = cfg; | ||
655 | } | ||
656 | |||
657 | static inline void socal_init(struct sbus_dev *sdev, int no) | ||
658 | { | ||
659 | unsigned char tmp[60]; | ||
660 | int propl; | ||
661 | struct socal *s; | ||
662 | static unsigned version_printed = 0; | ||
663 | socal_hw_cq cq[8]; | ||
664 | int size, i; | ||
665 | int irq, node; | ||
666 | |||
667 | s = kzalloc (sizeof (struct socal), GFP_KERNEL); | ||
668 | if (!s) return; | ||
669 | spin_lock_init(&s->lock); | ||
670 | s->socal_no = no; | ||
671 | |||
672 | SOD(("socals %08lx socal_intr %08lx socal_hw_enque %08lx\n", | ||
673 | (long)socals, (long)socal_intr, (long)socal_hw_enque)) | ||
674 | if (version_printed++ == 0) | ||
675 | printk (version); | ||
676 | |||
677 | s->port[0].fc.module = THIS_MODULE; | ||
678 | s->port[1].fc.module = THIS_MODULE; | ||
679 | |||
680 | s->next = socals; | ||
681 | socals = s; | ||
682 | s->port[0].fc.dev = sdev; | ||
683 | s->port[1].fc.dev = sdev; | ||
684 | s->port[0].s = s; | ||
685 | s->port[1].s = s; | ||
686 | |||
687 | s->port[0].fc.next = &s->port[1].fc; | ||
688 | |||
689 | /* World Wide Name of SOCAL */ | ||
690 | propl = prom_getproperty (sdev->prom_node, "wwn", tmp, sizeof(tmp)); | ||
691 | if (propl != sizeof (fc_wwn)) { | ||
692 | s->wwn.naaid = NAAID_IEEE_REG; | ||
693 | s->wwn.nportid = 0x123; | ||
694 | s->wwn.hi = 0x1234; | ||
695 | s->wwn.lo = 0x12345678; | ||
696 | } else | ||
697 | memcpy (&s->wwn, tmp, sizeof (fc_wwn)); | ||
698 | |||
699 | memcpy (&s->port[0].fc.wwn_nport, &s->wwn, sizeof (fc_wwn)); | ||
700 | s->port[0].fc.wwn_nport.lo++; | ||
701 | memcpy (&s->port[1].fc.wwn_nport, &s->wwn, sizeof (fc_wwn)); | ||
702 | s->port[1].fc.wwn_nport.lo+=2; | ||
703 | |||
704 | node = prom_getchild (sdev->prom_node); | ||
705 | while (node && (node = prom_searchsiblings (node, "sf"))) { | ||
706 | int port; | ||
707 | |||
708 | port = prom_getintdefault(node, "port#", -1); | ||
709 | switch (port) { | ||
710 | case 0: | ||
711 | case 1: | ||
712 | if (prom_getproplen(node, "port-wwn") == sizeof (fc_wwn)) | ||
713 | prom_getproperty (node, "port-wwn", | ||
714 | (char *)&s->port[port].fc.wwn_nport, | ||
715 | sizeof (fc_wwn)); | ||
716 | break; | ||
717 | default: | ||
718 | break; | ||
719 | }; | ||
720 | |||
721 | node = prom_getsibling(node); | ||
722 | } | ||
723 | |||
724 | memcpy (&s->port[0].fc.wwn_node, &s->wwn, sizeof (fc_wwn)); | ||
725 | memcpy (&s->port[1].fc.wwn_node, &s->wwn, sizeof (fc_wwn)); | ||
726 | SOD(("Got wwns %08x%08x ports %08x%08x and %08x%08x\n", | ||
727 | *(u32 *)&s->port[0].fc.wwn_node, s->port[0].fc.wwn_node.lo, | ||
728 | *(u32 *)&s->port[0].fc.wwn_nport, s->port[0].fc.wwn_nport.lo, | ||
729 | *(u32 *)&s->port[1].fc.wwn_nport, s->port[1].fc.wwn_nport.lo)) | ||
730 | |||
731 | s->port[0].fc.sid = 1; | ||
732 | s->port[1].fc.sid = 17; | ||
733 | s->port[0].fc.did = 2; | ||
734 | s->port[1].fc.did = 18; | ||
735 | |||
736 | s->port[0].fc.reset = socal_reset; | ||
737 | s->port[1].fc.reset = socal_reset; | ||
738 | |||
739 | if (sdev->num_registers == 1) { | ||
740 | s->eeprom = sbus_ioremap(&sdev->resource[0], 0, | ||
741 | sdev->reg_addrs[0].reg_size, "socal xram"); | ||
742 | if (sdev->reg_addrs[0].reg_size > 0x20000) | ||
743 | s->xram = s->eeprom + 0x10000UL; | ||
744 | else | ||
745 | s->xram = s->eeprom; | ||
746 | s->regs = (s->xram + 0x10000UL); | ||
747 | } else { | ||
748 | /* E.g. starfire presents 3 registers for SOCAL */ | ||
749 | s->xram = sbus_ioremap(&sdev->resource[1], 0, | ||
750 | sdev->reg_addrs[1].reg_size, "socal xram"); | ||
751 | s->regs = sbus_ioremap(&sdev->resource[2], 0, | ||
752 | sdev->reg_addrs[2].reg_size, "socal regs"); | ||
753 | } | ||
754 | |||
755 | socal_init_bursts(s, sdev); | ||
756 | |||
757 | SOD(("Disabling SOCAL\n")) | ||
758 | |||
759 | socal_disable (s); | ||
760 | |||
761 | irq = sdev->irqs[0]; | ||
762 | |||
763 | if (request_irq (irq, socal_intr, IRQF_SHARED, "SOCAL", (void *)s)) { | ||
764 | socal_printk ("Cannot order irq %d to go\n", irq); | ||
765 | socals = s->next; | ||
766 | return; | ||
767 | } | ||
768 | |||
769 | SOD(("SOCAL uses IRQ %d\n", irq)) | ||
770 | |||
771 | s->port[0].fc.irq = irq; | ||
772 | s->port[1].fc.irq = irq; | ||
773 | |||
774 | sprintf (s->port[0].fc.name, "socal%d port A", no); | ||
775 | sprintf (s->port[1].fc.name, "socal%d port B", no); | ||
776 | s->port[0].flags = SOCAL_FC_HDR | SOCAL_PORT_A; | ||
777 | s->port[1].flags = SOCAL_FC_HDR | SOCAL_PORT_B; | ||
778 | s->port[1].mask = (1 << 11); | ||
779 | |||
780 | s->port[0].fc.hw_enque = socal_hw_enque; | ||
781 | s->port[1].fc.hw_enque = socal_hw_enque; | ||
782 | |||
783 | socal_download_fw (s); | ||
784 | |||
785 | SOD(("Downloaded firmware\n")) | ||
786 | |||
787 | /* Now setup xram circular queues */ | ||
788 | memset (cq, 0, sizeof(cq)); | ||
789 | |||
790 | size = (SOCAL_CQ_REQ0_SIZE + SOCAL_CQ_REQ1_SIZE + | ||
791 | SOCAL_CQ_RSP0_SIZE + SOCAL_CQ_RSP1_SIZE + | ||
792 | SOCAL_CQ_RSP2_SIZE) * sizeof(socal_req); | ||
793 | s->req_cpu = sbus_alloc_consistent(sdev, size, &s->req_dvma); | ||
794 | s->req[0].pool = s->req_cpu; | ||
795 | cq[0].address = s->req_dvma; | ||
796 | s->req[1].pool = s->req[0].pool + SOCAL_CQ_REQ0_SIZE; | ||
797 | s->rsp[0].pool = s->req[1].pool + SOCAL_CQ_REQ1_SIZE; | ||
798 | s->rsp[1].pool = s->rsp[0].pool + SOCAL_CQ_RSP0_SIZE; | ||
799 | s->rsp[2].pool = s->rsp[1].pool + SOCAL_CQ_RSP1_SIZE; | ||
800 | |||
801 | s->req[0].hw_cq = (socal_hw_cq __iomem *)(s->xram + SOCAL_CQ_REQ_OFFSET); | ||
802 | s->req[1].hw_cq = (socal_hw_cq __iomem *)(s->xram + SOCAL_CQ_REQ_OFFSET + sizeof(socal_hw_cq)); | ||
803 | s->rsp[0].hw_cq = (socal_hw_cq __iomem *)(s->xram + SOCAL_CQ_RSP_OFFSET); | ||
804 | s->rsp[1].hw_cq = (socal_hw_cq __iomem *)(s->xram + SOCAL_CQ_RSP_OFFSET + sizeof(socal_hw_cq)); | ||
805 | s->rsp[2].hw_cq = (socal_hw_cq __iomem *)(s->xram + SOCAL_CQ_RSP_OFFSET + 2 * sizeof(socal_hw_cq)); | ||
806 | |||
807 | cq[1].address = cq[0].address + (SOCAL_CQ_REQ0_SIZE * sizeof(socal_req)); | ||
808 | cq[4].address = cq[1].address + (SOCAL_CQ_REQ1_SIZE * sizeof(socal_req)); | ||
809 | cq[5].address = cq[4].address + (SOCAL_CQ_RSP0_SIZE * sizeof(socal_req)); | ||
810 | cq[6].address = cq[5].address + (SOCAL_CQ_RSP1_SIZE * sizeof(socal_req)); | ||
811 | |||
812 | cq[0].last = SOCAL_CQ_REQ0_SIZE - 1; | ||
813 | cq[1].last = SOCAL_CQ_REQ1_SIZE - 1; | ||
814 | cq[4].last = SOCAL_CQ_RSP0_SIZE - 1; | ||
815 | cq[5].last = SOCAL_CQ_RSP1_SIZE - 1; | ||
816 | cq[6].last = SOCAL_CQ_RSP2_SIZE - 1; | ||
817 | for (i = 0; i < 8; i++) | ||
818 | cq[i].seqno = 1; | ||
819 | |||
820 | s->req[0].last = SOCAL_CQ_REQ0_SIZE - 1; | ||
821 | s->req[1].last = SOCAL_CQ_REQ1_SIZE - 1; | ||
822 | s->rsp[0].last = SOCAL_CQ_RSP0_SIZE - 1; | ||
823 | s->rsp[1].last = SOCAL_CQ_RSP1_SIZE - 1; | ||
824 | s->rsp[2].last = SOCAL_CQ_RSP2_SIZE - 1; | ||
825 | |||
826 | s->req[0].seqno = 1; | ||
827 | s->req[1].seqno = 1; | ||
828 | s->rsp[0].seqno = 1; | ||
829 | s->rsp[1].seqno = 1; | ||
830 | s->rsp[2].seqno = 1; | ||
831 | |||
832 | socal_copy_to_xram(s->xram + SOCAL_CQ_REQ_OFFSET, cq, sizeof(cq)); | ||
833 | |||
834 | SOD(("Setting up params\n")) | ||
835 | |||
836 | /* Make our sw copy of SOCAL service parameters */ | ||
837 | socal_copy_from_xram(s->serv_params, s->xram + 0x280, sizeof (s->serv_params)); | ||
838 | |||
839 | s->port[0].fc.common_svc = (common_svc_parm *)s->serv_params; | ||
840 | s->port[0].fc.class_svcs = (svc_parm *)(s->serv_params + 0x20); | ||
841 | s->port[1].fc.common_svc = (common_svc_parm *)&s->serv_params; | ||
842 | s->port[1].fc.class_svcs = (svc_parm *)(s->serv_params + 0x20); | ||
843 | |||
844 | socal_enable (s); | ||
845 | |||
846 | SOD(("Enabled SOCAL\n")) | ||
847 | } | ||
848 | |||
849 | static int __init socal_probe(void) | ||
850 | { | ||
851 | struct sbus_bus *sbus; | ||
852 | struct sbus_dev *sdev = NULL; | ||
853 | struct socal *s; | ||
854 | int cards = 0; | ||
855 | |||
856 | for_each_sbus(sbus) { | ||
857 | for_each_sbusdev(sdev, sbus) { | ||
858 | if(!strcmp(sdev->prom_name, "SUNW,socal")) { | ||
859 | socal_init(sdev, cards); | ||
860 | cards++; | ||
861 | } | ||
862 | } | ||
863 | } | ||
864 | if (!cards) | ||
865 | return -EIO; | ||
866 | |||
867 | for_each_socal(s) | ||
868 | if (s->next) | ||
869 | s->port[1].fc.next = &s->next->port[0].fc; | ||
870 | |||
871 | fcp_init (&socals->port[0].fc); | ||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | static void __exit socal_cleanup(void) | ||
876 | { | ||
877 | struct socal *s; | ||
878 | int irq; | ||
879 | struct sbus_dev *sdev; | ||
880 | |||
881 | for_each_socal(s) { | ||
882 | irq = s->port[0].fc.irq; | ||
883 | free_irq (irq, s); | ||
884 | |||
885 | fcp_release(&(s->port[0].fc), 2); | ||
886 | |||
887 | sdev = s->port[0].fc.dev; | ||
888 | if (sdev->num_registers == 1) { | ||
889 | sbus_iounmap(s->eeprom, sdev->reg_addrs[0].reg_size); | ||
890 | } else { | ||
891 | sbus_iounmap(s->xram, sdev->reg_addrs[1].reg_size); | ||
892 | sbus_iounmap(s->regs, sdev->reg_addrs[2].reg_size); | ||
893 | } | ||
894 | sbus_free_consistent(sdev, | ||
895 | (SOCAL_CQ_REQ0_SIZE + SOCAL_CQ_REQ1_SIZE + | ||
896 | SOCAL_CQ_RSP0_SIZE + SOCAL_CQ_RSP1_SIZE + | ||
897 | SOCAL_CQ_RSP2_SIZE) * sizeof(socal_req), | ||
898 | s->req_cpu, s->req_dvma); | ||
899 | } | ||
900 | } | ||
901 | |||
902 | module_init(socal_probe); | ||
903 | module_exit(socal_cleanup); | ||
904 | MODULE_LICENSE("GPL"); | ||
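The exchange tokens that socal_hw_enque() builds with TOKEN() and that socal_solicited() decodes pack three fields into one word: the protocol in bits 12 and up, the port in bit 11 (which is what port->mask = 1 << 11 encodes for port B), and the per-command tag in bits 0-10. A sketch of the same packing with invented helper names:

static inline unsigned int sketch_token_pack(unsigned int proto,
					     unsigned int port_mask,
					     unsigned int tag)
{
	return (proto << 12) | port_mask | tag;	/* mirrors TOKEN() */
}

static inline unsigned int sketch_token_proto(unsigned int tok)
{
	return tok >> 12;
}

static inline unsigned int sketch_token_port(unsigned int tok)
{
	return (tok >> 11) & 1;
}

static inline unsigned int sketch_token_tag(unsigned int tok)
{
	return tok & ((1 << 11) - 1);
}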
diff --git a/drivers/fc4/socal.h b/drivers/fc4/socal.h deleted file mode 100644 index 774edf68e4d2..000000000000 --- a/drivers/fc4/socal.h +++ /dev/null | |||
@@ -1,314 +0,0 @@ | |||
1 | /* socal.h: Definitions for Sparc SUNW,socal (SOC+) Fibre Channel Sbus driver. | ||
2 | * | ||
3 | * Copyright (C) 1998,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | */ | ||
5 | |||
6 | #ifndef __SOCAL_H | ||
7 | #define __SOCAL_H | ||
8 | |||
9 | #include "fc.h" | ||
10 | #include "fcp.h" | ||
11 | #include "fcp_impl.h" | ||
12 | |||
13 | /* Hardware register offsets and constants first {{{ */ | ||
14 | #define CFG 0x00UL | ||
15 | #define SAE 0x04UL | ||
16 | #define CMD 0x08UL | ||
17 | #define IMASK 0x0cUL | ||
18 | #define REQP 0x10UL | ||
19 | #define RESP 0x14UL | ||
20 | |||
21 | /* Config Register */ | ||
22 | #define SOCAL_CFG_EXT_RAM_BANK_MASK 0x07000000 | ||
23 | #define SOCAL_CFG_EEPROM_BANK_MASK 0x00030000 | ||
24 | #define SOCAL_CFG_BURST64_MASK 0x00000700 | ||
25 | #define SOCAL_CFG_SBUS_PARITY_TEST 0x00000020 | ||
26 | #define SOCAL_CFG_SBUS_PARITY_CHECK 0x00000010 | ||
27 | #define SOCAL_CFG_SBUS_ENHANCED 0x00000008 | ||
28 | #define SOCAL_CFG_BURST_MASK 0x00000007 | ||
29 | /* Bursts */ | ||
30 | #define SOCAL_CFG_BURST_4 0x00000000 | ||
31 | #define SOCAL_CFG_BURST_8 0x00000003 | ||
32 | #define SOCAL_CFG_BURST_16 0x00000004 | ||
33 | #define SOCAL_CFG_BURST_32 0x00000005 | ||
34 | #define SOCAL_CFG_BURST_64 0x00000006 | ||
35 | #define SOCAL_CFG_BURST_128 0x00000007 | ||
36 | |||
37 | /* Slave Access Error Register */ | ||
38 | #define SOCAL_SAE_ALIGNMENT 0x00000004 | ||
39 | #define SOCAL_SAE_UNSUPPORTED 0x00000002 | ||
40 | #define SOCAL_SAE_PARITY 0x00000001 | ||
41 | |||
42 | /* Command & Status Register */ | ||
43 | #define SOCAL_CMD_RSP_QALL 0x000f0000 | ||
44 | #define SOCAL_CMD_RSP_Q0 0x00010000 | ||
45 | #define SOCAL_CMD_RSP_Q1 0x00020000 | ||
46 | #define SOCAL_CMD_RSP_Q2 0x00040000 | ||
47 | #define SOCAL_CMD_RSP_Q3 0x00080000 | ||
48 | #define SOCAL_CMD_REQ_QALL 0x00000f00 | ||
49 | #define SOCAL_CMD_REQ_Q0 0x00000100 | ||
50 | #define SOCAL_CMD_REQ_Q1 0x00000200 | ||
51 | #define SOCAL_CMD_REQ_Q2 0x00000400 | ||
52 | #define SOCAL_CMD_REQ_Q3 0x00000800 | ||
53 | #define SOCAL_CMD_SAE 0x00000080 | ||
54 | #define SOCAL_CMD_INTR_PENDING 0x00000008 | ||
55 | #define SOCAL_CMD_NON_QUEUED 0x00000004 | ||
56 | #define SOCAL_CMD_IDLE 0x00000002 | ||
57 | #define SOCAL_CMD_SOFT_RESET 0x00000001 | ||
58 | |||
59 | /* Interrupt Mask Register */ | ||
60 | #define SOCAL_IMASK_RSP_QALL 0x000f0000 | ||
61 | #define SOCAL_IMASK_RSP_Q0 0x00010000 | ||
62 | #define SOCAL_IMASK_RSP_Q1 0x00020000 | ||
63 | #define SOCAL_IMASK_RSP_Q2 0x00040000 | ||
64 | #define SOCAL_IMASK_RSP_Q3 0x00080000 | ||
65 | #define SOCAL_IMASK_REQ_QALL 0x00000f00 | ||
66 | #define SOCAL_IMASK_REQ_Q0 0x00000100 | ||
67 | #define SOCAL_IMASK_REQ_Q1 0x00000200 | ||
68 | #define SOCAL_IMASK_REQ_Q2 0x00000400 | ||
69 | #define SOCAL_IMASK_REQ_Q3 0x00000800 | ||
70 | #define SOCAL_IMASK_SAE 0x00000080 | ||
71 | #define SOCAL_IMASK_NON_QUEUED 0x00000004 | ||
72 | |||
73 | #define SOCAL_INTR(s, cmd) \ | ||
74 | (((cmd & SOCAL_CMD_RSP_QALL) | ((~cmd) & SOCAL_CMD_REQ_QALL)) \ | ||
75 | & s->imask) | ||
76 | |||
77 | #define SOCAL_SETIMASK(s, i) \ | ||
78 | do { (s)->imask = (i); \ | ||
79 | sbus_writel((i), (s)->regs + IMASK); \ | ||
80 | } while (0) | ||
81 | |||
82 | #define SOCAL_MAX_EXCHANGES 1024 | ||
83 | |||
84 | /* XRAM | ||
85 | * | ||
86 | * This is a 64KB register area. | ||
87 | * From the documentation, it seems like it is finally able to cope | ||
88 | * at least with 1,2,4 byte accesses for read and 2,4 byte accesses for write. | ||
89 | */ | ||
90 | |||
91 | /* Circular Queue */ | ||
92 | |||
93 | #define SOCAL_CQ_REQ_OFFSET 0x200 | ||
94 | #define SOCAL_CQ_RSP_OFFSET 0x220 | ||
95 | |||
96 | typedef struct { | ||
97 | u32 address; | ||
98 | u8 in; | ||
99 | u8 out; | ||
100 | u8 last; | ||
101 | u8 seqno; | ||
102 | } socal_hw_cq; | ||
103 | |||
104 | #define SOCAL_PORT_A 0x0000 /* From/To Port A */ | ||
105 | #define SOCAL_PORT_B 0x0001 /* From/To Port B */ | ||
106 | #define SOCAL_FC_HDR 0x0002 /* Contains FC Header */ | ||
107 | #define SOCAL_NORSP 0x0004 /* Don't generate response nor interrupt */ | ||
108 | #define SOCAL_NOINT 0x0008 /* Generate response but not interrupt */ | ||
109 | #define SOCAL_XFERRDY 0x0010 /* Generate XFERRDY */ | ||
110 | #define SOCAL_IGNOREPARAM 0x0020 /* Ignore PARAM field in the FC header */ | ||
111 | #define SOCAL_COMPLETE 0x0040 /* Command completed */ | ||
112 | #define SOCAL_UNSOLICITED 0x0080 /* For request this is the packet to establish unsolicited pools, */ | ||
113 | /* for rsp this is unsolicited packet */ | ||
114 | #define SOCAL_STATUS 0x0100 /* State change (on/off line) */ | ||
115 | #define SOCAL_RSP_HDR 0x0200 /* Return frame header in any case */ | ||
116 | |||
117 | typedef struct { | ||
118 | u32 token; | ||
119 | u16 flags; | ||
120 | u8 class; | ||
121 | u8 segcnt; | ||
122 | u32 bytecnt; | ||
123 | } socal_hdr; | ||
124 | |||
125 | typedef struct { | ||
126 | u32 base; | ||
127 | u32 count; | ||
128 | } socal_data; | ||
129 | |||
130 | #define SOCAL_CQTYPE_NOP 0x00 | ||
131 | #define SOCAL_CQTYPE_OUTBOUND 0x01 | ||
132 | #define SOCAL_CQTYPE_INBOUND 0x02 | ||
133 | #define SOCAL_CQTYPE_SIMPLE 0x03 | ||
134 | #define SOCAL_CQTYPE_IO_WRITE 0x04 | ||
135 | #define SOCAL_CQTYPE_IO_READ 0x05 | ||
136 | #define SOCAL_CQTYPE_UNSOLICITED 0x06 | ||
137 | #define SOCAL_CQTYPE_BYPASS_DEV 0x06 | ||
138 | #define SOCAL_CQTYPE_DIAG 0x07 | ||
139 | #define SOCAL_CQTYPE_OFFLINE 0x08 | ||
140 | #define SOCAL_CQTYPE_ADD_POOL 0x09 | ||
141 | #define SOCAL_CQTYPE_DELETE_POOL 0x0a | ||
142 | #define SOCAL_CQTYPE_ADD_BUFFER 0x0b | ||
143 | #define SOCAL_CQTYPE_ADD_POOL_BUFFER 0x0c | ||
144 | #define SOCAL_CQTYPE_REQUEST_ABORT 0x0d | ||
145 | #define SOCAL_CQTYPE_REQUEST_LIP 0x0e | ||
146 | #define SOCAL_CQTYPE_REPORT_MAP 0x0f | ||
147 | #define SOCAL_CQTYPE_RESPONSE 0x10 | ||
148 | #define SOCAL_CQTYPE_INLINE 0x20 | ||
149 | |||
150 | #define SOCAL_CQFLAGS_CONT 0x01 | ||
151 | #define SOCAL_CQFLAGS_FULL 0x02 | ||
152 | #define SOCAL_CQFLAGS_BADHDR 0x04 | ||
153 | #define SOCAL_CQFLAGS_BADPKT 0x08 | ||
154 | |||
155 | typedef struct { | ||
156 | socal_hdr shdr; | ||
157 | socal_data data[3]; | ||
158 | fc_hdr fchdr; | ||
159 | u8 count; | ||
160 | u8 type; | ||
161 | u8 flags; | ||
162 | u8 seqno; | ||
163 | } socal_req; | ||
164 | |||
165 | #define SOCAL_OK 0 | ||
166 | #define SOCAL_P_RJT 2 | ||
167 | #define SOCAL_F_RJT 3 | ||
168 | #define SOCAL_P_BSY 4 | ||
169 | #define SOCAL_F_BSY 5 | ||
170 | #define SOCAL_ONLINE 0x10 | ||
171 | #define SOCAL_OFFLINE 0x11 | ||
172 | #define SOCAL_TIMEOUT 0x12 | ||
173 | #define SOCAL_OVERRUN 0x13 | ||
174 | #define SOCAL_ONLINE_LOOP 0x14 | ||
175 | #define SOCAL_OLD_PORT 0x15 | ||
176 | #define SOCAL_AL_PORT 0x16 | ||
177 | #define SOCAL_UNKOWN_CQ_TYPE 0x20 | ||
178 | #define SOCAL_BAD_SEG_CNT 0x21 | ||
179 | #define SOCAL_MAX_XCHG_EXCEEDED 0x22 | ||
180 | #define SOCAL_BAD_XID 0x23 | ||
181 | #define SOCAL_XCHG_BUSY 0x24 | ||
182 | #define SOCAL_BAD_POOL_ID 0x25 | ||
183 | #define SOCAL_INSUFFICIENT_CQES 0x26 | ||
184 | #define SOCAL_ALLOC_FAIL 0x27 | ||
185 | #define SOCAL_BAD_SID 0x28 | ||
186 | #define SOCAL_NO_SEG_INIT 0x29 | ||
187 | #define SOCAL_BAD_DID 0x2a | ||
188 | #define SOCAL_ABORTED 0x30 | ||
189 | #define SOCAL_ABORT_FAILED 0x31 | ||
190 | |||
191 | typedef struct { | ||
192 | socal_hdr shdr; | ||
193 | u32 status; | ||
194 | socal_data data; | ||
195 | u8 xxx1[10]; | ||
196 | u16 ncmds; | ||
197 | fc_hdr fchdr; | ||
198 | u8 count; | ||
199 | u8 type; | ||
200 | u8 flags; | ||
201 | u8 seqno; | ||
202 | } socal_rsp; | ||
203 | |||
204 | typedef struct { | ||
205 | socal_hdr shdr; | ||
206 | u8 xxx1[48]; | ||
207 | u8 count; | ||
208 | u8 type; | ||
209 | u8 flags; | ||
210 | u8 seqno; | ||
211 | } socal_cmdonly; | ||
212 | |||
213 | #define SOCAL_DIAG_NOP 0x00 | ||
214 | #define SOCAL_DIAG_INT_LOOP 0x01 | ||
215 | #define SOCAL_DIAG_EXT_LOOP 0x02 | ||
216 | #define SOCAL_DIAG_REM_LOOP 0x03 | ||
217 | #define SOCAL_DIAG_XRAM_TEST 0x04 | ||
218 | #define SOCAL_DIAG_SOC_TEST 0x05 | ||
219 | #define SOCAL_DIAG_HCB_TEST 0x06 | ||
220 | #define SOCAL_DIAG_SOCLB_TEST 0x07 | ||
221 | #define SOCAL_DIAG_SRDSLB_TEST 0x08 | ||
222 | #define SOCAL_DIAG_EXTOE_TEST 0x09 | ||
223 | |||
224 | typedef struct { | ||
225 | socal_hdr shdr; | ||
226 | u32 cmd; | ||
227 | u8 xxx1[44]; | ||
228 | u8 count; | ||
229 | u8 type; | ||
230 | u8 flags; | ||
231 | u8 seqno; | ||
232 | } socal_diag_req; | ||
233 | |||
234 | #define SOCAL_POOL_MASK_RCTL 0x800000 | ||
235 | #define SOCAL_POOL_MASK_DID 0x700000 | ||
236 | #define SOCAL_POOL_MASK_SID 0x070000 | ||
237 | #define SOCAL_POOL_MASK_TYPE 0x008000 | ||
238 | #define SOCAL_POOL_MASK_F_CTL 0x007000 | ||
239 | #define SOCAL_POOL_MASK_SEQ_ID 0x000800 | ||
240 | #define SOCAL_POOL_MASK_D_CTL 0x000400 | ||
241 | #define SOCAL_POOL_MASK_SEQ_CNT 0x000300 | ||
242 | #define SOCAL_POOL_MASK_OX_ID 0x0000f0 | ||
243 | #define SOCAL_POOL_MASK_PARAM 0x00000f | ||
244 | |||
245 | typedef struct { | ||
246 | socal_hdr shdr; | ||
247 | u32 pool_id; | ||
248 | u32 header_mask; | ||
249 | u32 buf_size; | ||
250 | u32 entries; | ||
251 | u8 xxx1[8]; | ||
252 | fc_hdr fchdr; | ||
253 | u8 count; | ||
254 | u8 type; | ||
255 | u8 flags; | ||
256 | u8 seqno; | ||
257 | } socal_pool_req; | ||
258 | |||
259 | /* }}} */ | ||
260 | |||
261 | /* Now our software structures and constants we use to drive the beast {{{ */ | ||
262 | |||
263 | #define SOCAL_CQ_REQ0_SIZE 4 | ||
264 | #define SOCAL_CQ_REQ1_SIZE 256 | ||
265 | #define SOCAL_CQ_RSP0_SIZE 8 | ||
266 | #define SOCAL_CQ_RSP1_SIZE 4 | ||
267 | #define SOCAL_CQ_RSP2_SIZE 4 | ||
268 | |||
269 | #define SOCAL_SOLICITED_RSP_Q 0 | ||
270 | #define SOCAL_SOLICITED_BAD_RSP_Q 1 | ||
271 | #define SOCAL_UNSOLICITED_RSP_Q 2 | ||
272 | |||
273 | struct socal; | ||
274 | |||
275 | typedef struct { | ||
276 | /* This must come first */ | ||
277 | fc_channel fc; | ||
278 | struct socal *s; | ||
279 | u16 flags; | ||
280 | u16 mask; | ||
281 | } socal_port; | ||
282 | |||
283 | typedef struct { | ||
284 | socal_hw_cq __iomem *hw_cq; /* Related XRAM cq */ | ||
285 | socal_req *pool; | ||
286 | u8 in; | ||
287 | u8 out; | ||
288 | u8 last; | ||
289 | u8 seqno; | ||
290 | } socal_cq; | ||
291 | |||
292 | struct socal { | ||
293 | spinlock_t lock; | ||
294 | socal_port port[2]; /* Every SOCAL has one or two FC ports */ | ||
295 | socal_cq req[4]; /* Request CQs */ | ||
296 | socal_cq rsp[4]; /* Response CQs */ | ||
297 | int socal_no; | ||
298 | void __iomem *regs; | ||
299 | void __iomem *xram; | ||
300 | void __iomem *eeprom; | ||
301 | fc_wwn wwn; | ||
302 | u32 imask; /* Our copy of regs->imask */ | ||
303 | u32 cfg; /* Our copy of regs->cfg */ | ||
304 | char serv_params[80]; | ||
305 | struct socal *next; | ||
306 | int curr_port; /* Which port will have priority to fcp_queue_empty */ | ||
307 | |||
308 | socal_req * req_cpu; | ||
309 | u32 req_dvma; | ||
310 | }; | ||
311 | |||
312 | /* }}} */ | ||
313 | |||
314 | #endif /* !(__SOCAL_H) */ | ||
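The socal_cq bookkeeping structure above drives a circular XRAM queue through its in, out and last indices. As a purely illustrative sketch (the wrap-at-last convention and the helper names cq_next_index/cq_full are assumptions made for this note, not taken from the driver), producer-side index handling for such a ring could look like:

    /* Illustrative only: advance a circular CQ index, wrapping after 'last'. */
    static u8 cq_next_index(const socal_cq *cq, u8 idx)
    {
            return (idx == cq->last) ? 0 : idx + 1;
    }

    /* The ring is treated as full when advancing 'in' would land on 'out'. */
    static int cq_full(const socal_cq *cq)
    {
            return cq_next_index(cq, cq->in) == cq->out;
    }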
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2538816817aa..86b8641b4664 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2371,13 +2371,16 @@ config UGETH_TX_ON_DEMAND | |||
2371 | depends on UCC_GETH | 2371 | depends on UCC_GETH |
2372 | 2372 | ||
2373 | config MV643XX_ETH | 2373 | config MV643XX_ETH |
2374 | tristate "MV-643XX Ethernet support" | 2374 | tristate "Marvell Discovery (643XX) and Orion ethernet support" |
2375 | depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) | 2375 | depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || ARCH_ORION |
2376 | select MII | 2376 | select MII |
2377 | help | 2377 | help |
2378 | This driver supports the gigabit Ethernet on the Marvell MV643XX | 2378 | This driver supports the gigabit ethernet MACs in the |
2379 | chipset which is used in the Momenco Ocelot C and Jaguar ATX and | 2379 | Marvell Discovery PPC/MIPS chipset family (MV643XX) and |
2380 | Pegasos II, amongst other PPC and MIPS boards. | 2380 | in the Marvell Orion ARM SoC family. |
2381 | |||
2382 | Some boards that use the Discovery chipset are the Momenco | ||
2383 | Ocelot C and Jaguar ATX and Pegasos II. | ||
2381 | 2384 | ||
2382 | config QLA3XXX | 2385 | config QLA3XXX |
2383 | tristate "QLogic QLA3XXX Network Driver Support" | 2386 | tristate "QLogic QLA3XXX Network Driver Support" |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 084f0292ea6e..cb3c6faa7888 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2076,8 +2076,10 @@ void bond_3ad_unbind_slave(struct slave *slave) | |||
2076 | * times out, and it selects an aggregator for the ports that are yet not | 2076 | * times out, and it selects an aggregator for the ports that are yet not |
2077 | * related to any aggregator, and selects the active aggregator for a bond. | 2077 | * related to any aggregator, and selects the active aggregator for a bond. |
2078 | */ | 2078 | */ |
2079 | void bond_3ad_state_machine_handler(struct bonding *bond) | 2079 | void bond_3ad_state_machine_handler(struct work_struct *work) |
2080 | { | 2080 | { |
2081 | struct bonding *bond = container_of(work, struct bonding, | ||
2082 | ad_work.work); | ||
2081 | struct port *port; | 2083 | struct port *port; |
2082 | struct aggregator *aggregator; | 2084 | struct aggregator *aggregator; |
2083 | 2085 | ||
@@ -2128,7 +2130,7 @@ void bond_3ad_state_machine_handler(struct bonding *bond) | |||
2128 | } | 2130 | } |
2129 | 2131 | ||
2130 | re_arm: | 2132 | re_arm: |
2131 | mod_timer(&(BOND_AD_INFO(bond).ad_timer), jiffies + ad_delta_in_ticks); | 2133 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); |
2132 | out: | 2134 | out: |
2133 | read_unlock(&bond->lock); | 2135 | read_unlock(&bond->lock); |
2134 | } | 2136 | } |
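This bond_3ad.c hunk is one instance of the timer-to-workqueue conversion that runs through the whole series: the periodic handler now takes a struct work_struct *, recovers its owning structure with container_of(), and re-arms itself with queue_delayed_work() instead of mod_timer(). A minimal sketch of that pattern, with made-up names (my_ctx, my_monitor, my_start) rather than bonding symbols:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_ctx {
            struct workqueue_struct *wq;
            struct delayed_work mon_work;
    };

    static void my_monitor(struct work_struct *work)
    {
            struct my_ctx *ctx = container_of(work, struct my_ctx,
                                              mon_work.work);

            /* ... periodic processing on ctx ... */

            /* re-arm: the handler queues its own next run */
            queue_delayed_work(ctx->wq, &ctx->mon_work, msecs_to_jiffies(100));
    }

    /* setup, typically done at open time */
    static void my_start(struct my_ctx *ctx)
    {
            INIT_DELAYED_WORK(&ctx->mon_work, my_monitor);
            queue_delayed_work(ctx->wq, &ctx->mon_work, 0);
    }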
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index f16557264944..b5ee45f6d55a 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -276,7 +276,7 @@ struct ad_slave_info { | |||
276 | void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast); | 276 | void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast); |
277 | int bond_3ad_bind_slave(struct slave *slave); | 277 | int bond_3ad_bind_slave(struct slave *slave); |
278 | void bond_3ad_unbind_slave(struct slave *slave); | 278 | void bond_3ad_unbind_slave(struct slave *slave); |
279 | void bond_3ad_state_machine_handler(struct bonding *bond); | 279 | void bond_3ad_state_machine_handler(struct work_struct *); |
280 | void bond_3ad_adapter_speed_changed(struct slave *slave); | 280 | void bond_3ad_adapter_speed_changed(struct slave *slave); |
281 | void bond_3ad_adapter_duplex_changed(struct slave *slave); | 281 | void bond_3ad_adapter_duplex_changed(struct slave *slave); |
282 | void bond_3ad_handle_link_change(struct slave *slave, char link); | 282 | void bond_3ad_handle_link_change(struct slave *slave, char link); |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index aea2217c56eb..25b8dbf6cfd7 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -128,12 +128,12 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size) | |||
128 | 128 | ||
129 | static inline void _lock_tx_hashtbl(struct bonding *bond) | 129 | static inline void _lock_tx_hashtbl(struct bonding *bond) |
130 | { | 130 | { |
131 | spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); | 131 | spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); |
132 | } | 132 | } |
133 | 133 | ||
134 | static inline void _unlock_tx_hashtbl(struct bonding *bond) | 134 | static inline void _unlock_tx_hashtbl(struct bonding *bond) |
135 | { | 135 | { |
136 | spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); | 136 | spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); |
137 | } | 137 | } |
138 | 138 | ||
139 | /* Caller must hold tx_hashtbl lock */ | 139 | /* Caller must hold tx_hashtbl lock */ |
@@ -305,12 +305,12 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u3 | |||
305 | /*********************** rlb specific functions ***************************/ | 305 | /*********************** rlb specific functions ***************************/ |
306 | static inline void _lock_rx_hashtbl(struct bonding *bond) | 306 | static inline void _lock_rx_hashtbl(struct bonding *bond) |
307 | { | 307 | { |
308 | spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); | 308 | spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); |
309 | } | 309 | } |
310 | 310 | ||
311 | static inline void _unlock_rx_hashtbl(struct bonding *bond) | 311 | static inline void _unlock_rx_hashtbl(struct bonding *bond) |
312 | { | 312 | { |
313 | spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); | 313 | spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); |
314 | } | 314 | } |
315 | 315 | ||
316 | /* when an ARP REPLY is received from a client update its info | 316 | /* when an ARP REPLY is received from a client update its info |
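These two hunks switch the tlb/rlb hash-table locks to the _bh variants. The monitor work now runs in process context, but the transmit path can still take the same spinlocks from softirq context, so bottom halves have to be disabled around the critical section; otherwise a softirq arriving on the same CPU could deadlock on a lock the work handler already holds. The generic shape (names here are illustrative, not the driver's):

    #include <linux/spinlock.h>

    /* protects a table shared with the TX softirq path */
    static DEFINE_SPINLOCK(tbl_lock);

    static void update_table_from_work(void)
    {
            spin_lock_bh(&tbl_lock);        /* take the lock and block local softirqs */
            /* ... modify the shared table ... */
            spin_unlock_bh(&tbl_lock);
    }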
@@ -472,13 +472,13 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) | |||
472 | 472 | ||
473 | _unlock_rx_hashtbl(bond); | 473 | _unlock_rx_hashtbl(bond); |
474 | 474 | ||
475 | write_lock(&bond->curr_slave_lock); | 475 | write_lock_bh(&bond->curr_slave_lock); |
476 | 476 | ||
477 | if (slave != bond->curr_active_slave) { | 477 | if (slave != bond->curr_active_slave) { |
478 | rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr); | 478 | rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr); |
479 | } | 479 | } |
480 | 480 | ||
481 | write_unlock(&bond->curr_slave_lock); | 481 | write_unlock_bh(&bond->curr_slave_lock); |
482 | } | 482 | } |
483 | 483 | ||
484 | static void rlb_update_client(struct rlb_client_info *client_info) | 484 | static void rlb_update_client(struct rlb_client_info *client_info) |
@@ -959,19 +959,34 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw) | |||
959 | return 0; | 959 | return 0; |
960 | } | 960 | } |
961 | 961 | ||
962 | /* Caller must hold bond lock for write or curr_slave_lock for write*/ | 962 | /* |
963 | * Swap MAC addresses between two slaves. | ||
964 | * | ||
965 | * Called with RTNL held, and no other locks. | ||
966 | * | ||
967 | */ | ||
968 | |||
963 | static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2) | 969 | static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2) |
964 | { | 970 | { |
965 | struct slave *disabled_slave = NULL; | ||
966 | u8 tmp_mac_addr[ETH_ALEN]; | 971 | u8 tmp_mac_addr[ETH_ALEN]; |
967 | int slaves_state_differ; | ||
968 | |||
969 | slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2)); | ||
970 | 972 | ||
971 | memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN); | 973 | memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN); |
972 | alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled); | 974 | alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled); |
973 | alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled); | 975 | alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled); |
974 | 976 | ||
977 | } | ||
978 | |||
979 | /* | ||
980 | * Send learning packets after MAC address swap. | ||
981 | * | ||
982 | * Called with RTNL and bond->lock held for read. | ||
983 | */ | ||
984 | static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, | ||
985 | struct slave *slave2) | ||
986 | { | ||
987 | int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2)); | ||
988 | struct slave *disabled_slave = NULL; | ||
989 | |||
975 | /* fasten the change in the switch */ | 990 | /* fasten the change in the switch */ |
976 | if (SLAVE_IS_OK(slave1)) { | 991 | if (SLAVE_IS_OK(slave1)) { |
977 | alb_send_learning_packets(slave1, slave1->dev->dev_addr); | 992 | alb_send_learning_packets(slave1, slave1->dev->dev_addr); |
@@ -1044,7 +1059,9 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla | |||
1044 | } | 1059 | } |
1045 | 1060 | ||
1046 | if (found) { | 1061 | if (found) { |
1062 | /* locking: needs RTNL and nothing else */ | ||
1047 | alb_swap_mac_addr(bond, slave, tmp_slave); | 1063 | alb_swap_mac_addr(bond, slave, tmp_slave); |
1064 | alb_fasten_mac_swap(bond, slave, tmp_slave); | ||
1048 | } | 1065 | } |
1049 | } | 1066 | } |
1050 | } | 1067 | } |
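Splitting the old function into alb_swap_mac_addr() plus alb_fasten_mac_swap() separates the part that programs slave MAC addresses (which may sleep and therefore wants RTNL and no spinlocks) from the part that sends learning packets to update the switch. Going by the locking comments added above, a call site would be expected to look roughly like this (a hypothetical sequence for illustration, not a hunk from the patch):

    ASSERT_RTNL();                          /* MAC programming may sleep */
    alb_swap_mac_addr(bond, slave1, slave2);

    read_lock(&bond->lock);                 /* learning packets run under bond->lock */
    alb_fasten_mac_swap(bond, slave1, slave2);
    read_unlock(&bond->lock);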
@@ -1375,8 +1392,10 @@ out: | |||
1375 | return 0; | 1392 | return 0; |
1376 | } | 1393 | } |
1377 | 1394 | ||
1378 | void bond_alb_monitor(struct bonding *bond) | 1395 | void bond_alb_monitor(struct work_struct *work) |
1379 | { | 1396 | { |
1397 | struct bonding *bond = container_of(work, struct bonding, | ||
1398 | alb_work.work); | ||
1380 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | 1399 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); |
1381 | struct slave *slave; | 1400 | struct slave *slave; |
1382 | int i; | 1401 | int i; |
@@ -1436,16 +1455,16 @@ void bond_alb_monitor(struct bonding *bond) | |||
1436 | 1455 | ||
1437 | /* handle rlb stuff */ | 1456 | /* handle rlb stuff */ |
1438 | if (bond_info->rlb_enabled) { | 1457 | if (bond_info->rlb_enabled) { |
1439 | /* the following code changes the promiscuity of the | ||
1440 | * the curr_active_slave. It needs to be locked with a | ||
1441 | * write lock to protect from other code that also | ||
1442 | * sets the promiscuity. | ||
1443 | */ | ||
1444 | write_lock_bh(&bond->curr_slave_lock); | ||
1445 | |||
1446 | if (bond_info->primary_is_promisc && | 1458 | if (bond_info->primary_is_promisc && |
1447 | (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) { | 1459 | (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) { |
1448 | 1460 | ||
1461 | /* | ||
1462 | * dev_set_promiscuity requires rtnl and | ||
1463 | * nothing else. | ||
1464 | */ | ||
1465 | read_unlock(&bond->lock); | ||
1466 | rtnl_lock(); | ||
1467 | |||
1449 | bond_info->rlb_promisc_timeout_counter = 0; | 1468 | bond_info->rlb_promisc_timeout_counter = 0; |
1450 | 1469 | ||
1451 | /* If the primary was set to promiscuous mode | 1470 | /* If the primary was set to promiscuous mode |
@@ -1454,9 +1473,10 @@ void bond_alb_monitor(struct bonding *bond) | |||
1454 | */ | 1473 | */ |
1455 | dev_set_promiscuity(bond->curr_active_slave->dev, -1); | 1474 | dev_set_promiscuity(bond->curr_active_slave->dev, -1); |
1456 | bond_info->primary_is_promisc = 0; | 1475 | bond_info->primary_is_promisc = 0; |
1457 | } | ||
1458 | 1476 | ||
1459 | write_unlock_bh(&bond->curr_slave_lock); | 1477 | rtnl_unlock(); |
1478 | read_lock(&bond->lock); | ||
1479 | } | ||
1460 | 1480 | ||
1461 | if (bond_info->rlb_rebalance) { | 1481 | if (bond_info->rlb_rebalance) { |
1462 | bond_info->rlb_rebalance = 0; | 1482 | bond_info->rlb_rebalance = 0; |
@@ -1479,7 +1499,7 @@ void bond_alb_monitor(struct bonding *bond) | |||
1479 | } | 1499 | } |
1480 | 1500 | ||
1481 | re_arm: | 1501 | re_arm: |
1482 | mod_timer(&(bond_info->alb_timer), jiffies + alb_delta_in_ticks); | 1502 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); |
1483 | out: | 1503 | out: |
1484 | read_unlock(&bond->lock); | 1504 | read_unlock(&bond->lock); |
1485 | } | 1505 | } |
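In bond_alb_monitor() the promiscuity change now needs RTNL rather than curr_slave_lock, so the handler temporarily gives up bond->lock, takes rtnl_lock() for the sleeping call, and then re-acquires bond->lock. The caveat with any unlock/relock of this kind is that bond state can change in the window, so anything sampled before the drop must be treated as stale. The shape, compressed (not the literal driver code):

    read_unlock(&bond->lock);
    rtnl_lock();
    /* sleeping operation that requires RTNL, e.g. dev_set_promiscuity() */
    rtnl_unlock();
    read_lock(&bond->lock);
    /* re-validate any pointers or flags read before the lock was dropped */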
@@ -1500,11 +1520,11 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) | |||
1500 | /* caller must hold the bond lock for write since the mac addresses | 1520 | /* caller must hold the bond lock for write since the mac addresses |
1501 | * are compared and may be swapped. | 1521 | * are compared and may be swapped. |
1502 | */ | 1522 | */ |
1503 | write_lock_bh(&bond->lock); | 1523 | read_lock(&bond->lock); |
1504 | 1524 | ||
1505 | res = alb_handle_addr_collision_on_attach(bond, slave); | 1525 | res = alb_handle_addr_collision_on_attach(bond, slave); |
1506 | 1526 | ||
1507 | write_unlock_bh(&bond->lock); | 1527 | read_unlock(&bond->lock); |
1508 | 1528 | ||
1509 | if (res) { | 1529 | if (res) { |
1510 | return res; | 1530 | return res; |
@@ -1569,13 +1589,21 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char | |||
1569 | * Set the bond->curr_active_slave to @new_slave and handle | 1589 | * Set the bond->curr_active_slave to @new_slave and handle |
1570 | * mac address swapping and promiscuity changes as needed. | 1590 | * mac address swapping and promiscuity changes as needed. |
1571 | * | 1591 | * |
1572 | * Caller must hold bond curr_slave_lock for write (or bond lock for write) | 1592 | * If new_slave is NULL, caller must hold curr_slave_lock or |
1593 | * bond->lock for write. | ||
1594 | * | ||
1595 | * If new_slave is not NULL, caller must hold RTNL, bond->lock for | ||
1596 | * read and curr_slave_lock for write. Processing here may sleep, so | ||
1597 | * no other locks may be held. | ||
1573 | */ | 1598 | */ |
1574 | void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave) | 1599 | void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave) |
1575 | { | 1600 | { |
1576 | struct slave *swap_slave; | 1601 | struct slave *swap_slave; |
1577 | int i; | 1602 | int i; |
1578 | 1603 | ||
1604 | if (new_slave) | ||
1605 | ASSERT_RTNL(); | ||
1606 | |||
1579 | if (bond->curr_active_slave == new_slave) { | 1607 | if (bond->curr_active_slave == new_slave) { |
1580 | return; | 1608 | return; |
1581 | } | 1609 | } |
@@ -1608,6 +1636,19 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave | |||
1608 | } | 1636 | } |
1609 | } | 1637 | } |
1610 | 1638 | ||
1639 | /* | ||
1640 | * Arrange for swap_slave and new_slave to temporarily be | ||
1641 | * ignored so we can mess with their MAC addresses without | ||
1642 | * fear of interference from transmit activity. | ||
1643 | */ | ||
1644 | if (swap_slave) { | ||
1645 | tlb_clear_slave(bond, swap_slave, 1); | ||
1646 | } | ||
1647 | tlb_clear_slave(bond, new_slave, 1); | ||
1648 | |||
1649 | write_unlock_bh(&bond->curr_slave_lock); | ||
1650 | read_unlock(&bond->lock); | ||
1651 | |||
1611 | /* curr_active_slave must be set before calling alb_swap_mac_addr */ | 1652 | /* curr_active_slave must be set before calling alb_swap_mac_addr */ |
1612 | if (swap_slave) { | 1653 | if (swap_slave) { |
1613 | /* swap mac address */ | 1654 | /* swap mac address */ |
@@ -1616,11 +1657,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave | |||
1616 | /* set the new_slave to the bond mac address */ | 1657 | /* set the new_slave to the bond mac address */ |
1617 | alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr, | 1658 | alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr, |
1618 | bond->alb_info.rlb_enabled); | 1659 | bond->alb_info.rlb_enabled); |
1660 | } | ||
1661 | |||
1662 | read_lock(&bond->lock); | ||
1663 | |||
1664 | if (swap_slave) { | ||
1665 | alb_fasten_mac_swap(bond, swap_slave, new_slave); | ||
1666 | } else { | ||
1619 | /* fasten bond mac on new current slave */ | 1667 | /* fasten bond mac on new current slave */ |
1620 | alb_send_learning_packets(new_slave, bond->dev->dev_addr); | 1668 | alb_send_learning_packets(new_slave, bond->dev->dev_addr); |
1621 | } | 1669 | } |
1670 | |||
1671 | write_lock_bh(&bond->curr_slave_lock); | ||
1622 | } | 1672 | } |
1623 | 1673 | ||
1674 | /* | ||
1675 | * Called with RTNL | ||
1676 | */ | ||
1624 | int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) | 1677 | int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) |
1625 | { | 1678 | { |
1626 | struct bonding *bond = bond_dev->priv; | 1679 | struct bonding *bond = bond_dev->priv; |
@@ -1657,8 +1710,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) | |||
1657 | } | 1710 | } |
1658 | } | 1711 | } |
1659 | 1712 | ||
1713 | write_unlock_bh(&bond->curr_slave_lock); | ||
1714 | read_unlock(&bond->lock); | ||
1715 | |||
1660 | if (swap_slave) { | 1716 | if (swap_slave) { |
1661 | alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); | 1717 | alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); |
1718 | alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); | ||
1662 | } else { | 1719 | } else { |
1663 | alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, | 1720 | alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, |
1664 | bond->alb_info.rlb_enabled); | 1721 | bond->alb_info.rlb_enabled); |
@@ -1670,6 +1727,9 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) | |||
1670 | } | 1727 | } |
1671 | } | 1728 | } |
1672 | 1729 | ||
1730 | read_lock(&bond->lock); | ||
1731 | write_lock_bh(&bond->curr_slave_lock); | ||
1732 | |||
1673 | return 0; | 1733 | return 0; |
1674 | } | 1734 | } |
1675 | 1735 | ||
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index fd8726429890..50968f8196cf 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h | |||
@@ -125,7 +125,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave); | |||
125 | void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link); | 125 | void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link); |
126 | void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave); | 126 | void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave); |
127 | int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev); | 127 | int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev); |
128 | void bond_alb_monitor(struct bonding *bond); | 128 | void bond_alb_monitor(struct work_struct *); |
129 | int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr); | 129 | int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr); |
130 | void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id); | 130 | void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id); |
131 | #endif /* __BOND_ALB_H__ */ | 131 | #endif /* __BOND_ALB_H__ */ |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6f85cc31f8a2..6909becb10f6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1590,15 +1590,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1590 | case BOND_MODE_TLB: | 1590 | case BOND_MODE_TLB: |
1591 | case BOND_MODE_ALB: | 1591 | case BOND_MODE_ALB: |
1592 | new_slave->state = BOND_STATE_ACTIVE; | 1592 | new_slave->state = BOND_STATE_ACTIVE; |
1593 | if ((!bond->curr_active_slave) && | 1593 | bond_set_slave_inactive_flags(new_slave); |
1594 | (new_slave->link != BOND_LINK_DOWN)) { | ||
1595 | /* first slave or no active slave yet, and this link | ||
1596 | * is OK, so make this interface the active one | ||
1597 | */ | ||
1598 | bond_change_active_slave(bond, new_slave); | ||
1599 | } else { | ||
1600 | bond_set_slave_inactive_flags(new_slave); | ||
1601 | } | ||
1602 | break; | 1594 | break; |
1603 | default: | 1595 | default: |
1604 | dprintk("This slave is always active in trunk mode\n"); | 1596 | dprintk("This slave is always active in trunk mode\n"); |
@@ -1754,9 +1746,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1754 | bond_alb_deinit_slave(bond, slave); | 1746 | bond_alb_deinit_slave(bond, slave); |
1755 | } | 1747 | } |
1756 | 1748 | ||
1757 | if (oldcurrent == slave) | 1749 | if (oldcurrent == slave) { |
1750 | /* | ||
1751 | * Note that we hold RTNL over this sequence, so there | ||
1752 | * is no concern that another slave add/remove event | ||
1753 | * will interfere. | ||
1754 | */ | ||
1755 | write_unlock_bh(&bond->lock); | ||
1756 | read_lock(&bond->lock); | ||
1757 | write_lock_bh(&bond->curr_slave_lock); | ||
1758 | |||
1758 | bond_select_active_slave(bond); | 1759 | bond_select_active_slave(bond); |
1759 | 1760 | ||
1761 | write_unlock_bh(&bond->curr_slave_lock); | ||
1762 | read_unlock(&bond->lock); | ||
1763 | write_lock_bh(&bond->lock); | ||
1764 | } | ||
1765 | |||
1760 | if (bond->slave_cnt == 0) { | 1766 | if (bond->slave_cnt == 0) { |
1761 | bond_set_carrier(bond); | 1767 | bond_set_carrier(bond); |
1762 | 1768 | ||
@@ -1840,9 +1846,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1840 | */ | 1846 | */ |
1841 | void bond_destroy(struct bonding *bond) | 1847 | void bond_destroy(struct bonding *bond) |
1842 | { | 1848 | { |
1849 | unregister_netdevice(bond->dev); | ||
1843 | bond_deinit(bond->dev); | 1850 | bond_deinit(bond->dev); |
1844 | bond_destroy_sysfs_entry(bond); | 1851 | bond_destroy_sysfs_entry(bond); |
1845 | unregister_netdevice(bond->dev); | ||
1846 | } | 1852 | } |
1847 | 1853 | ||
1848 | /* | 1854 | /* |
@@ -2012,16 +2018,19 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi | |||
2012 | return -EINVAL; | 2018 | return -EINVAL; |
2013 | } | 2019 | } |
2014 | 2020 | ||
2015 | write_lock_bh(&bond->lock); | 2021 | read_lock(&bond->lock); |
2016 | 2022 | ||
2023 | read_lock(&bond->curr_slave_lock); | ||
2017 | old_active = bond->curr_active_slave; | 2024 | old_active = bond->curr_active_slave; |
2025 | read_unlock(&bond->curr_slave_lock); | ||
2026 | |||
2018 | new_active = bond_get_slave_by_dev(bond, slave_dev); | 2027 | new_active = bond_get_slave_by_dev(bond, slave_dev); |
2019 | 2028 | ||
2020 | /* | 2029 | /* |
2021 | * Changing to the current active: do nothing; return success. | 2030 | * Changing to the current active: do nothing; return success. |
2022 | */ | 2031 | */ |
2023 | if (new_active && (new_active == old_active)) { | 2032 | if (new_active && (new_active == old_active)) { |
2024 | write_unlock_bh(&bond->lock); | 2033 | read_unlock(&bond->lock); |
2025 | return 0; | 2034 | return 0; |
2026 | } | 2035 | } |
2027 | 2036 | ||
@@ -2029,12 +2038,14 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi | |||
2029 | (old_active) && | 2038 | (old_active) && |
2030 | (new_active->link == BOND_LINK_UP) && | 2039 | (new_active->link == BOND_LINK_UP) && |
2031 | IS_UP(new_active->dev)) { | 2040 | IS_UP(new_active->dev)) { |
2041 | write_lock_bh(&bond->curr_slave_lock); | ||
2032 | bond_change_active_slave(bond, new_active); | 2042 | bond_change_active_slave(bond, new_active); |
2043 | write_unlock_bh(&bond->curr_slave_lock); | ||
2033 | } else { | 2044 | } else { |
2034 | res = -EINVAL; | 2045 | res = -EINVAL; |
2035 | } | 2046 | } |
2036 | 2047 | ||
2037 | write_unlock_bh(&bond->lock); | 2048 | read_unlock(&bond->lock); |
2038 | 2049 | ||
2039 | return res; | 2050 | return res; |
2040 | } | 2051 | } |
@@ -2046,9 +2057,9 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) | |||
2046 | info->bond_mode = bond->params.mode; | 2057 | info->bond_mode = bond->params.mode; |
2047 | info->miimon = bond->params.miimon; | 2058 | info->miimon = bond->params.miimon; |
2048 | 2059 | ||
2049 | read_lock_bh(&bond->lock); | 2060 | read_lock(&bond->lock); |
2050 | info->num_slaves = bond->slave_cnt; | 2061 | info->num_slaves = bond->slave_cnt; |
2051 | read_unlock_bh(&bond->lock); | 2062 | read_unlock(&bond->lock); |
2052 | 2063 | ||
2053 | return 0; | 2064 | return 0; |
2054 | } | 2065 | } |
@@ -2063,7 +2074,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in | |||
2063 | return -ENODEV; | 2074 | return -ENODEV; |
2064 | } | 2075 | } |
2065 | 2076 | ||
2066 | read_lock_bh(&bond->lock); | 2077 | read_lock(&bond->lock); |
2067 | 2078 | ||
2068 | bond_for_each_slave(bond, slave, i) { | 2079 | bond_for_each_slave(bond, slave, i) { |
2069 | if (i == (int)info->slave_id) { | 2080 | if (i == (int)info->slave_id) { |
@@ -2072,7 +2083,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in | |||
2072 | } | 2083 | } |
2073 | } | 2084 | } |
2074 | 2085 | ||
2075 | read_unlock_bh(&bond->lock); | 2086 | read_unlock(&bond->lock); |
2076 | 2087 | ||
2077 | if (found) { | 2088 | if (found) { |
2078 | strcpy(info->slave_name, slave->dev->name); | 2089 | strcpy(info->slave_name, slave->dev->name); |
@@ -2088,26 +2099,25 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in | |||
2088 | 2099 | ||
2089 | /*-------------------------------- Monitoring -------------------------------*/ | 2100 | /*-------------------------------- Monitoring -------------------------------*/ |
2090 | 2101 | ||
2091 | /* this function is called regularly to monitor each slave's link. */ | 2102 | /* |
2092 | void bond_mii_monitor(struct net_device *bond_dev) | 2103 | * if !have_locks, return nonzero if a failover is necessary. if |
2104 | * have_locks, do whatever failover activities are needed. | ||
2105 | * | ||
2106 | * This is to separate the inspection and failover steps for locking | ||
2107 | * purposes; failover requires rtnl, but acquiring it for every | ||
2108 | * inspection is undesirable, so a wrapper first does inspection, and | ||
2109 | * the acquires the necessary locks and calls again to perform | ||
2110 | * failover if needed. Since all locks are dropped, a complete | ||
2111 | * restart is needed between calls. | ||
2112 | */ | ||
2113 | static int __bond_mii_monitor(struct bonding *bond, int have_locks) | ||
2093 | { | 2114 | { |
2094 | struct bonding *bond = bond_dev->priv; | ||
2095 | struct slave *slave, *oldcurrent; | 2115 | struct slave *slave, *oldcurrent; |
2096 | int do_failover = 0; | 2116 | int do_failover = 0; |
2097 | int delta_in_ticks; | ||
2098 | int i; | 2117 | int i; |
2099 | 2118 | ||
2100 | read_lock(&bond->lock); | 2119 | if (bond->slave_cnt == 0) |
2101 | |||
2102 | delta_in_ticks = (bond->params.miimon * HZ) / 1000; | ||
2103 | |||
2104 | if (bond->kill_timers) { | ||
2105 | goto out; | 2120 | goto out; |
2106 | } | ||
2107 | |||
2108 | if (bond->slave_cnt == 0) { | ||
2109 | goto re_arm; | ||
2110 | } | ||
2111 | 2121 | ||
2112 | /* we will try to read the link status of each of our slaves, and | 2122 | /* we will try to read the link status of each of our slaves, and |
2113 | * set their IFF_RUNNING flag appropriately. For each slave not | 2123 | * set their IFF_RUNNING flag appropriately. For each slave not |
@@ -2141,7 +2151,11 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2141 | switch (slave->link) { | 2151 | switch (slave->link) { |
2142 | case BOND_LINK_UP: /* the link was up */ | 2152 | case BOND_LINK_UP: /* the link was up */ |
2143 | if (link_state == BMSR_LSTATUS) { | 2153 | if (link_state == BMSR_LSTATUS) { |
2144 | /* link stays up, nothing more to do */ | 2154 | if (!oldcurrent) { |
2155 | if (!have_locks) | ||
2156 | return 1; | ||
2157 | do_failover = 1; | ||
2158 | } | ||
2145 | break; | 2159 | break; |
2146 | } else { /* link going down */ | 2160 | } else { /* link going down */ |
2147 | slave->link = BOND_LINK_FAIL; | 2161 | slave->link = BOND_LINK_FAIL; |
@@ -2156,7 +2170,7 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2156 | ": %s: link status down for %s " | 2170 | ": %s: link status down for %s " |
2157 | "interface %s, disabling it in " | 2171 | "interface %s, disabling it in " |
2158 | "%d ms.\n", | 2172 | "%d ms.\n", |
2159 | bond_dev->name, | 2173 | bond->dev->name, |
2160 | IS_UP(slave_dev) | 2174 | IS_UP(slave_dev) |
2161 | ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) | 2175 | ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) |
2162 | ? ((slave == oldcurrent) | 2176 | ? ((slave == oldcurrent) |
@@ -2174,6 +2188,9 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2174 | if (link_state != BMSR_LSTATUS) { | 2188 | if (link_state != BMSR_LSTATUS) { |
2175 | /* link stays down */ | 2189 | /* link stays down */ |
2176 | if (slave->delay <= 0) { | 2190 | if (slave->delay <= 0) { |
2191 | if (!have_locks) | ||
2192 | return 1; | ||
2193 | |||
2177 | /* link down for too long time */ | 2194 | /* link down for too long time */ |
2178 | slave->link = BOND_LINK_DOWN; | 2195 | slave->link = BOND_LINK_DOWN; |
2179 | 2196 | ||
@@ -2189,7 +2206,7 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2189 | ": %s: link status definitely " | 2206 | ": %s: link status definitely " |
2190 | "down for interface %s, " | 2207 | "down for interface %s, " |
2191 | "disabling it\n", | 2208 | "disabling it\n", |
2192 | bond_dev->name, | 2209 | bond->dev->name, |
2193 | slave_dev->name); | 2210 | slave_dev->name); |
2194 | 2211 | ||
2195 | /* notify ad that the link status has changed */ | 2212 | /* notify ad that the link status has changed */ |
@@ -2215,7 +2232,7 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2215 | printk(KERN_INFO DRV_NAME | 2232 | printk(KERN_INFO DRV_NAME |
2216 | ": %s: link status up again after %d " | 2233 | ": %s: link status up again after %d " |
2217 | "ms for interface %s.\n", | 2234 | "ms for interface %s.\n", |
2218 | bond_dev->name, | 2235 | bond->dev->name, |
2219 | (bond->params.downdelay - slave->delay) * bond->params.miimon, | 2236 | (bond->params.downdelay - slave->delay) * bond->params.miimon, |
2220 | slave_dev->name); | 2237 | slave_dev->name); |
2221 | } | 2238 | } |
@@ -2235,7 +2252,7 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2235 | ": %s: link status up for " | 2252 | ": %s: link status up for " |
2236 | "interface %s, enabling it " | 2253 | "interface %s, enabling it " |
2237 | "in %d ms.\n", | 2254 | "in %d ms.\n", |
2238 | bond_dev->name, | 2255 | bond->dev->name, |
2239 | slave_dev->name, | 2256 | slave_dev->name, |
2240 | bond->params.updelay * bond->params.miimon); | 2257 | bond->params.updelay * bond->params.miimon); |
2241 | } | 2258 | } |
@@ -2251,12 +2268,15 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2251 | printk(KERN_INFO DRV_NAME | 2268 | printk(KERN_INFO DRV_NAME |
2252 | ": %s: link status down again after %d " | 2269 | ": %s: link status down again after %d " |
2253 | "ms for interface %s.\n", | 2270 | "ms for interface %s.\n", |
2254 | bond_dev->name, | 2271 | bond->dev->name, |
2255 | (bond->params.updelay - slave->delay) * bond->params.miimon, | 2272 | (bond->params.updelay - slave->delay) * bond->params.miimon, |
2256 | slave_dev->name); | 2273 | slave_dev->name); |
2257 | } else { | 2274 | } else { |
2258 | /* link stays up */ | 2275 | /* link stays up */ |
2259 | if (slave->delay == 0) { | 2276 | if (slave->delay == 0) { |
2277 | if (!have_locks) | ||
2278 | return 1; | ||
2279 | |||
2260 | /* now the link has been up for long time enough */ | 2280 | /* now the link has been up for long time enough */ |
2261 | slave->link = BOND_LINK_UP; | 2281 | slave->link = BOND_LINK_UP; |
2262 | slave->jiffies = jiffies; | 2282 | slave->jiffies = jiffies; |
@@ -2275,7 +2295,7 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2275 | printk(KERN_INFO DRV_NAME | 2295 | printk(KERN_INFO DRV_NAME |
2276 | ": %s: link status definitely " | 2296 | ": %s: link status definitely " |
2277 | "up for interface %s.\n", | 2297 | "up for interface %s.\n", |
2278 | bond_dev->name, | 2298 | bond->dev->name, |
2279 | slave_dev->name); | 2299 | slave_dev->name); |
2280 | 2300 | ||
2281 | /* notify ad that the link status has changed */ | 2301 | /* notify ad that the link status has changed */ |
@@ -2301,7 +2321,7 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2301 | /* Should not happen */ | 2321 | /* Should not happen */ |
2302 | printk(KERN_ERR DRV_NAME | 2322 | printk(KERN_ERR DRV_NAME |
2303 | ": %s: Error: %s Illegal value (link=%d)\n", | 2323 | ": %s: Error: %s Illegal value (link=%d)\n", |
2304 | bond_dev->name, | 2324 | bond->dev->name, |
2305 | slave->dev->name, | 2325 | slave->dev->name, |
2306 | slave->link); | 2326 | slave->link); |
2307 | goto out; | 2327 | goto out; |
@@ -2322,22 +2342,52 @@ void bond_mii_monitor(struct net_device *bond_dev) | |||
2322 | } /* end of for */ | 2342 | } /* end of for */ |
2323 | 2343 | ||
2324 | if (do_failover) { | 2344 | if (do_failover) { |
2325 | write_lock(&bond->curr_slave_lock); | 2345 | ASSERT_RTNL(); |
2346 | |||
2347 | write_lock_bh(&bond->curr_slave_lock); | ||
2326 | 2348 | ||
2327 | bond_select_active_slave(bond); | 2349 | bond_select_active_slave(bond); |
2328 | 2350 | ||
2329 | write_unlock(&bond->curr_slave_lock); | 2351 | write_unlock_bh(&bond->curr_slave_lock); |
2352 | |||
2330 | } else | 2353 | } else |
2331 | bond_set_carrier(bond); | 2354 | bond_set_carrier(bond); |
2332 | 2355 | ||
2333 | re_arm: | ||
2334 | if (bond->params.miimon) { | ||
2335 | mod_timer(&bond->mii_timer, jiffies + delta_in_ticks); | ||
2336 | } | ||
2337 | out: | 2356 | out: |
2338 | read_unlock(&bond->lock); | 2357 | return 0; |
2339 | } | 2358 | } |
2340 | 2359 | ||
2360 | /* | ||
2361 | * bond_mii_monitor | ||
2362 | * | ||
2363 | * Really a wrapper that splits the mii monitor into two phases: an | ||
2364 | * inspection, then (if inspection indicates something needs to be | ||
2365 | * done) an acquisition of appropriate locks followed by another pass | ||
2366 | * to implement whatever link state changes are indicated. | ||
2367 | */ | ||
2368 | void bond_mii_monitor(struct work_struct *work) | ||
2369 | { | ||
2370 | struct bonding *bond = container_of(work, struct bonding, | ||
2371 | mii_work.work); | ||
2372 | unsigned long delay; | ||
2373 | |||
2374 | read_lock(&bond->lock); | ||
2375 | if (bond->kill_timers) { | ||
2376 | read_unlock(&bond->lock); | ||
2377 | return; | ||
2378 | } | ||
2379 | if (__bond_mii_monitor(bond, 0)) { | ||
2380 | read_unlock(&bond->lock); | ||
2381 | rtnl_lock(); | ||
2382 | read_lock(&bond->lock); | ||
2383 | __bond_mii_monitor(bond, 1); | ||
2384 | rtnl_unlock(); | ||
2385 | } | ||
2386 | |||
2387 | delay = ((bond->params.miimon * HZ) / 1000) ? : 1; | ||
2388 | read_unlock(&bond->lock); | ||
2389 | queue_delayed_work(bond->wq, &bond->mii_work, delay); | ||
2390 | } | ||
2341 | 2391 | ||
2342 | static __be32 bond_glean_dev_ip(struct net_device *dev) | 2392 | static __be32 bond_glean_dev_ip(struct net_device *dev) |
2343 | { | 2393 | { |
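The mii monitor is now split into an inspection pass and a commit pass so that RTNL is only taken when a link change actually has to be acted on. Stripped to its skeleton, and using placeholder names (struct my_bond, my_inspect) instead of the bonding symbols, the wrapper pattern is:

    #include <linux/workqueue.h>
    #include <linux/rtnetlink.h>
    #include <linux/spinlock.h>
    #include <linux/jiffies.h>

    struct my_bond {
            rwlock_t lock;
            struct workqueue_struct *wq;
            struct delayed_work mii_work;
            int interval_ms;
    };

    /* returns nonzero if a change needs to be committed */
    static int my_inspect(struct my_bond *mb, int commit);

    static void my_mii_work(struct work_struct *work)
    {
            struct my_bond *mb = container_of(work, struct my_bond,
                                              mii_work.work);

            read_lock(&mb->lock);
            if (my_inspect(mb, 0)) {        /* pass 1: look, touch nothing */
                    read_unlock(&mb->lock);
                    rtnl_lock();            /* pass 2: redo under RTNL ... */
                    read_lock(&mb->lock);
                    my_inspect(mb, 1);      /* ... and commit the changes */
                    rtnl_unlock();
            }
            read_unlock(&mb->lock);
            queue_delayed_work(mb->wq, &mb->mii_work,
                               msecs_to_jiffies(mb->interval_ms));
    }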
@@ -2636,9 +2686,10 @@ out: | |||
2636 | * arp is transmitted to generate traffic. see activebackup_arp_monitor for | 2686 | * arp is transmitted to generate traffic. see activebackup_arp_monitor for |
2637 | * arp monitoring in active backup mode. | 2687 | * arp monitoring in active backup mode. |
2638 | */ | 2688 | */ |
2639 | void bond_loadbalance_arp_mon(struct net_device *bond_dev) | 2689 | void bond_loadbalance_arp_mon(struct work_struct *work) |
2640 | { | 2690 | { |
2641 | struct bonding *bond = bond_dev->priv; | 2691 | struct bonding *bond = container_of(work, struct bonding, |
2692 | arp_work.work); | ||
2642 | struct slave *slave, *oldcurrent; | 2693 | struct slave *slave, *oldcurrent; |
2643 | int do_failover = 0; | 2694 | int do_failover = 0; |
2644 | int delta_in_ticks; | 2695 | int delta_in_ticks; |
@@ -2685,13 +2736,13 @@ void bond_loadbalance_arp_mon(struct net_device *bond_dev) | |||
2685 | printk(KERN_INFO DRV_NAME | 2736 | printk(KERN_INFO DRV_NAME |
2686 | ": %s: link status definitely " | 2737 | ": %s: link status definitely " |
2687 | "up for interface %s, ", | 2738 | "up for interface %s, ", |
2688 | bond_dev->name, | 2739 | bond->dev->name, |
2689 | slave->dev->name); | 2740 | slave->dev->name); |
2690 | do_failover = 1; | 2741 | do_failover = 1; |
2691 | } else { | 2742 | } else { |
2692 | printk(KERN_INFO DRV_NAME | 2743 | printk(KERN_INFO DRV_NAME |
2693 | ": %s: interface %s is now up\n", | 2744 | ": %s: interface %s is now up\n", |
2694 | bond_dev->name, | 2745 | bond->dev->name, |
2695 | slave->dev->name); | 2746 | slave->dev->name); |
2696 | } | 2747 | } |
2697 | } | 2748 | } |
@@ -2715,7 +2766,7 @@ void bond_loadbalance_arp_mon(struct net_device *bond_dev) | |||
2715 | 2766 | ||
2716 | printk(KERN_INFO DRV_NAME | 2767 | printk(KERN_INFO DRV_NAME |
2717 | ": %s: interface %s is now down.\n", | 2768 | ": %s: interface %s is now down.\n", |
2718 | bond_dev->name, | 2769 | bond->dev->name, |
2719 | slave->dev->name); | 2770 | slave->dev->name); |
2720 | 2771 | ||
2721 | if (slave == oldcurrent) { | 2772 | if (slave == oldcurrent) { |
@@ -2737,17 +2788,19 @@ void bond_loadbalance_arp_mon(struct net_device *bond_dev) | |||
2737 | } | 2788 | } |
2738 | 2789 | ||
2739 | if (do_failover) { | 2790 | if (do_failover) { |
2740 | write_lock(&bond->curr_slave_lock); | 2791 | rtnl_lock(); |
2792 | write_lock_bh(&bond->curr_slave_lock); | ||
2741 | 2793 | ||
2742 | bond_select_active_slave(bond); | 2794 | bond_select_active_slave(bond); |
2743 | 2795 | ||
2744 | write_unlock(&bond->curr_slave_lock); | 2796 | write_unlock_bh(&bond->curr_slave_lock); |
2797 | rtnl_unlock(); | ||
2798 | |||
2745 | } | 2799 | } |
2746 | 2800 | ||
2747 | re_arm: | 2801 | re_arm: |
2748 | if (bond->params.arp_interval) { | 2802 | if (bond->params.arp_interval) |
2749 | mod_timer(&bond->arp_timer, jiffies + delta_in_ticks); | 2803 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2750 | } | ||
2751 | out: | 2804 | out: |
2752 | read_unlock(&bond->lock); | 2805 | read_unlock(&bond->lock); |
2753 | } | 2806 | } |
@@ -2767,9 +2820,10 @@ out: | |||
2767 | * may have received. | 2820 | * may have received. |
2768 | * see loadbalance_arp_monitor for arp monitoring in load balancing mode | 2821 | * see loadbalance_arp_monitor for arp monitoring in load balancing mode |
2769 | */ | 2822 | */ |
2770 | void bond_activebackup_arp_mon(struct net_device *bond_dev) | 2823 | void bond_activebackup_arp_mon(struct work_struct *work) |
2771 | { | 2824 | { |
2772 | struct bonding *bond = bond_dev->priv; | 2825 | struct bonding *bond = container_of(work, struct bonding, |
2826 | arp_work.work); | ||
2773 | struct slave *slave; | 2827 | struct slave *slave; |
2774 | int delta_in_ticks; | 2828 | int delta_in_ticks; |
2775 | int i; | 2829 | int i; |
@@ -2798,7 +2852,9 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev) | |||
2798 | 2852 | ||
2799 | slave->link = BOND_LINK_UP; | 2853 | slave->link = BOND_LINK_UP; |
2800 | 2854 | ||
2801 | write_lock(&bond->curr_slave_lock); | 2855 | rtnl_lock(); |
2856 | |||
2857 | write_lock_bh(&bond->curr_slave_lock); | ||
2802 | 2858 | ||
2803 | if ((!bond->curr_active_slave) && | 2859 | if ((!bond->curr_active_slave) && |
2804 | ((jiffies - slave->dev->trans_start) <= delta_in_ticks)) { | 2860 | ((jiffies - slave->dev->trans_start) <= delta_in_ticks)) { |
@@ -2821,18 +2877,19 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev) | |||
2821 | printk(KERN_INFO DRV_NAME | 2877 | printk(KERN_INFO DRV_NAME |
2822 | ": %s: %s is up and now the " | 2878 | ": %s: %s is up and now the " |
2823 | "active interface\n", | 2879 | "active interface\n", |
2824 | bond_dev->name, | 2880 | bond->dev->name, |
2825 | slave->dev->name); | 2881 | slave->dev->name); |
2826 | netif_carrier_on(bond->dev); | 2882 | netif_carrier_on(bond->dev); |
2827 | } else { | 2883 | } else { |
2828 | printk(KERN_INFO DRV_NAME | 2884 | printk(KERN_INFO DRV_NAME |
2829 | ": %s: backup interface %s is " | 2885 | ": %s: backup interface %s is " |
2830 | "now up\n", | 2886 | "now up\n", |
2831 | bond_dev->name, | 2887 | bond->dev->name, |
2832 | slave->dev->name); | 2888 | slave->dev->name); |
2833 | } | 2889 | } |
2834 | 2890 | ||
2835 | write_unlock(&bond->curr_slave_lock); | 2891 | write_unlock_bh(&bond->curr_slave_lock); |
2892 | rtnl_unlock(); | ||
2836 | } | 2893 | } |
2837 | } else { | 2894 | } else { |
2838 | read_lock(&bond->curr_slave_lock); | 2895 | read_lock(&bond->curr_slave_lock); |
@@ -2864,7 +2921,7 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev) | |||
2864 | 2921 | ||
2865 | printk(KERN_INFO DRV_NAME | 2922 | printk(KERN_INFO DRV_NAME |
2866 | ": %s: backup interface %s is now down\n", | 2923 | ": %s: backup interface %s is now down\n", |
2867 | bond_dev->name, | 2924 | bond->dev->name, |
2868 | slave->dev->name); | 2925 | slave->dev->name); |
2869 | } else { | 2926 | } else { |
2870 | read_unlock(&bond->curr_slave_lock); | 2927 | read_unlock(&bond->curr_slave_lock); |
@@ -2899,15 +2956,18 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev) | |||
2899 | printk(KERN_INFO DRV_NAME | 2956 | printk(KERN_INFO DRV_NAME |
2900 | ": %s: link status down for active interface " | 2957 | ": %s: link status down for active interface " |
2901 | "%s, disabling it\n", | 2958 | "%s, disabling it\n", |
2902 | bond_dev->name, | 2959 | bond->dev->name, |
2903 | slave->dev->name); | 2960 | slave->dev->name); |
2904 | 2961 | ||
2905 | write_lock(&bond->curr_slave_lock); | 2962 | rtnl_lock(); |
2963 | write_lock_bh(&bond->curr_slave_lock); | ||
2906 | 2964 | ||
2907 | bond_select_active_slave(bond); | 2965 | bond_select_active_slave(bond); |
2908 | slave = bond->curr_active_slave; | 2966 | slave = bond->curr_active_slave; |
2909 | 2967 | ||
2910 | write_unlock(&bond->curr_slave_lock); | 2968 | write_unlock_bh(&bond->curr_slave_lock); |
2969 | |||
2970 | rtnl_unlock(); | ||
2911 | 2971 | ||
2912 | bond->current_arp_slave = slave; | 2972 | bond->current_arp_slave = slave; |
2913 | 2973 | ||
@@ -2921,14 +2981,17 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev) | |||
2921 | printk(KERN_INFO DRV_NAME | 2981 | printk(KERN_INFO DRV_NAME |
2922 | ": %s: changing from interface %s to primary " | 2982 | ": %s: changing from interface %s to primary " |
2923 | "interface %s\n", | 2983 | "interface %s\n", |
2924 | bond_dev->name, | 2984 | bond->dev->name, |
2925 | slave->dev->name, | 2985 | slave->dev->name, |
2926 | bond->primary_slave->dev->name); | 2986 | bond->primary_slave->dev->name); |
2927 | 2987 | ||
2928 | /* primary is up so switch to it */ | 2988 | /* primary is up so switch to it */ |
2929 | write_lock(&bond->curr_slave_lock); | 2989 | rtnl_lock(); |
2990 | write_lock_bh(&bond->curr_slave_lock); | ||
2930 | bond_change_active_slave(bond, bond->primary_slave); | 2991 | bond_change_active_slave(bond, bond->primary_slave); |
2931 | write_unlock(&bond->curr_slave_lock); | 2992 | write_unlock_bh(&bond->curr_slave_lock); |
2993 | |||
2994 | rtnl_unlock(); | ||
2932 | 2995 | ||
2933 | slave = bond->primary_slave; | 2996 | slave = bond->primary_slave; |
2934 | slave->jiffies = jiffies; | 2997 | slave->jiffies = jiffies; |
@@ -2985,7 +3048,7 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev) | |||
2985 | printk(KERN_INFO DRV_NAME | 3048 | printk(KERN_INFO DRV_NAME |
2986 | ": %s: backup interface %s is " | 3049 | ": %s: backup interface %s is " |
2987 | "now down.\n", | 3050 | "now down.\n", |
2988 | bond_dev->name, | 3051 | bond->dev->name, |
2989 | slave->dev->name); | 3052 | slave->dev->name); |
2990 | } | 3053 | } |
2991 | } | 3054 | } |
@@ -2994,7 +3057,7 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev) | |||
2994 | 3057 | ||
2995 | re_arm: | 3058 | re_arm: |
2996 | if (bond->params.arp_interval) { | 3059 | if (bond->params.arp_interval) { |
2997 | mod_timer(&bond->arp_timer, jiffies + delta_in_ticks); | 3060 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2998 | } | 3061 | } |
2999 | out: | 3062 | out: |
3000 | read_unlock(&bond->lock); | 3063 | read_unlock(&bond->lock); |
@@ -3015,7 +3078,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) | |||
3015 | 3078 | ||
3016 | /* make sure the bond won't be taken away */ | 3079 | /* make sure the bond won't be taken away */ |
3017 | read_lock(&dev_base_lock); | 3080 | read_lock(&dev_base_lock); |
3018 | read_lock_bh(&bond->lock); | 3081 | read_lock(&bond->lock); |
3019 | 3082 | ||
3020 | if (*pos == 0) { | 3083 | if (*pos == 0) { |
3021 | return SEQ_START_TOKEN; | 3084 | return SEQ_START_TOKEN; |
@@ -3049,7 +3112,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v) | |||
3049 | { | 3112 | { |
3050 | struct bonding *bond = seq->private; | 3113 | struct bonding *bond = seq->private; |
3051 | 3114 | ||
3052 | read_unlock_bh(&bond->lock); | 3115 | read_unlock(&bond->lock); |
3053 | read_unlock(&dev_base_lock); | 3116 | read_unlock(&dev_base_lock); |
3054 | } | 3117 | } |
3055 | 3118 | ||
@@ -3582,15 +3645,11 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, | |||
3582 | static int bond_open(struct net_device *bond_dev) | 3645 | static int bond_open(struct net_device *bond_dev) |
3583 | { | 3646 | { |
3584 | struct bonding *bond = bond_dev->priv; | 3647 | struct bonding *bond = bond_dev->priv; |
3585 | struct timer_list *mii_timer = &bond->mii_timer; | ||
3586 | struct timer_list *arp_timer = &bond->arp_timer; | ||
3587 | 3648 | ||
3588 | bond->kill_timers = 0; | 3649 | bond->kill_timers = 0; |
3589 | 3650 | ||
3590 | if ((bond->params.mode == BOND_MODE_TLB) || | 3651 | if ((bond->params.mode == BOND_MODE_TLB) || |
3591 | (bond->params.mode == BOND_MODE_ALB)) { | 3652 | (bond->params.mode == BOND_MODE_ALB)) { |
3592 | struct timer_list *alb_timer = &(BOND_ALB_INFO(bond).alb_timer); | ||
3593 | |||
3594 | /* bond_alb_initialize must be called before the timer | 3653 | /* bond_alb_initialize must be called before the timer |
3595 | * is started. | 3654 | * is started. |
3596 | */ | 3655 | */ |
@@ -3599,44 +3658,31 @@ static int bond_open(struct net_device *bond_dev) | |||
3599 | return -1; | 3658 | return -1; |
3600 | } | 3659 | } |
3601 | 3660 | ||
3602 | init_timer(alb_timer); | 3661 | INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); |
3603 | alb_timer->expires = jiffies + 1; | 3662 | queue_delayed_work(bond->wq, &bond->alb_work, 0); |
3604 | alb_timer->data = (unsigned long)bond; | ||
3605 | alb_timer->function = (void *)&bond_alb_monitor; | ||
3606 | add_timer(alb_timer); | ||
3607 | } | 3663 | } |
3608 | 3664 | ||
3609 | if (bond->params.miimon) { /* link check interval, in milliseconds. */ | 3665 | if (bond->params.miimon) { /* link check interval, in milliseconds. */ |
3610 | init_timer(mii_timer); | 3666 | INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); |
3611 | mii_timer->expires = jiffies + 1; | 3667 | queue_delayed_work(bond->wq, &bond->mii_work, 0); |
3612 | mii_timer->data = (unsigned long)bond_dev; | ||
3613 | mii_timer->function = (void *)&bond_mii_monitor; | ||
3614 | add_timer(mii_timer); | ||
3615 | } | 3668 | } |
3616 | 3669 | ||
3617 | if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ | 3670 | if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ |
3618 | init_timer(arp_timer); | 3671 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) |
3619 | arp_timer->expires = jiffies + 1; | 3672 | INIT_DELAYED_WORK(&bond->arp_work, |
3620 | arp_timer->data = (unsigned long)bond_dev; | 3673 | bond_activebackup_arp_mon); |
3621 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { | 3674 | else |
3622 | arp_timer->function = (void *)&bond_activebackup_arp_mon; | 3675 | INIT_DELAYED_WORK(&bond->arp_work, |
3623 | } else { | 3676 | bond_loadbalance_arp_mon); |
3624 | arp_timer->function = (void *)&bond_loadbalance_arp_mon; | 3677 | |
3625 | } | 3678 | queue_delayed_work(bond->wq, &bond->arp_work, 0); |
3626 | if (bond->params.arp_validate) | 3679 | if (bond->params.arp_validate) |
3627 | bond_register_arp(bond); | 3680 | bond_register_arp(bond); |
3628 | |||
3629 | add_timer(arp_timer); | ||
3630 | } | 3681 | } |
3631 | 3682 | ||
3632 | if (bond->params.mode == BOND_MODE_8023AD) { | 3683 | if (bond->params.mode == BOND_MODE_8023AD) { |
3633 | struct timer_list *ad_timer = &(BOND_AD_INFO(bond).ad_timer); | 3684 | INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3634 | init_timer(ad_timer); | 3685 | queue_delayed_work(bond->wq, &bond->ad_work, 0); |
3635 | ad_timer->expires = jiffies + 1; | ||
3636 | ad_timer->data = (unsigned long)bond; | ||
3637 | ad_timer->function = (void *)&bond_3ad_state_machine_handler; | ||
3638 | add_timer(ad_timer); | ||
3639 | |||
3640 | /* register to receive LACPDUs */ | 3686 | /* register to receive LACPDUs */ |
3641 | bond_register_lacpdu(bond); | 3687 | bond_register_lacpdu(bond); |
3642 | } | 3688 | } |
@@ -3664,25 +3710,21 @@ static int bond_close(struct net_device *bond_dev) | |||
3664 | 3710 | ||
3665 | write_unlock_bh(&bond->lock); | 3711 | write_unlock_bh(&bond->lock); |
3666 | 3712 | ||
3667 | /* del_timer_sync must run without holding the bond->lock | ||
3668 | * because a running timer might be trying to hold it too | ||
3669 | */ | ||
3670 | |||
3671 | if (bond->params.miimon) { /* link check interval, in milliseconds. */ | 3713 | if (bond->params.miimon) { /* link check interval, in milliseconds. */ |
3672 | del_timer_sync(&bond->mii_timer); | 3714 | cancel_delayed_work(&bond->mii_work); |
3673 | } | 3715 | } |
3674 | 3716 | ||
3675 | if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ | 3717 | if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ |
3676 | del_timer_sync(&bond->arp_timer); | 3718 | cancel_delayed_work(&bond->arp_work); |
3677 | } | 3719 | } |
3678 | 3720 | ||
3679 | switch (bond->params.mode) { | 3721 | switch (bond->params.mode) { |
3680 | case BOND_MODE_8023AD: | 3722 | case BOND_MODE_8023AD: |
3681 | del_timer_sync(&(BOND_AD_INFO(bond).ad_timer)); | 3723 | cancel_delayed_work(&bond->ad_work); |
3682 | break; | 3724 | break; |
3683 | case BOND_MODE_TLB: | 3725 | case BOND_MODE_TLB: |
3684 | case BOND_MODE_ALB: | 3726 | case BOND_MODE_ALB: |
3685 | del_timer_sync(&(BOND_ALB_INFO(bond).alb_timer)); | 3727 | cancel_delayed_work(&bond->alb_work); |
3686 | break; | 3728 | break; |
3687 | default: | 3729 | default: |
3688 | break; | 3730 | break; |
@@ -3779,13 +3821,13 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd | |||
3779 | if (mii->reg_num == 1) { | 3821 | if (mii->reg_num == 1) { |
3780 | struct bonding *bond = bond_dev->priv; | 3822 | struct bonding *bond = bond_dev->priv; |
3781 | mii->val_out = 0; | 3823 | mii->val_out = 0; |
3782 | read_lock_bh(&bond->lock); | 3824 | read_lock(&bond->lock); |
3783 | read_lock(&bond->curr_slave_lock); | 3825 | read_lock(&bond->curr_slave_lock); |
3784 | if (netif_carrier_ok(bond->dev)) { | 3826 | if (netif_carrier_ok(bond->dev)) { |
3785 | mii->val_out = BMSR_LSTATUS; | 3827 | mii->val_out = BMSR_LSTATUS; |
3786 | } | 3828 | } |
3787 | read_unlock(&bond->curr_slave_lock); | 3829 | read_unlock(&bond->curr_slave_lock); |
3788 | read_unlock_bh(&bond->lock); | 3830 | read_unlock(&bond->lock); |
3789 | } | 3831 | } |
3790 | 3832 | ||
3791 | return 0; | 3833 | return 0; |
@@ -4077,8 +4119,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
4077 | { | 4119 | { |
4078 | struct bonding *bond = bond_dev->priv; | 4120 | struct bonding *bond = bond_dev->priv; |
4079 | struct slave *slave, *start_at; | 4121 | struct slave *slave, *start_at; |
4080 | int i; | 4122 | int i, slave_no, res = 1; |
4081 | int res = 1; | ||
4082 | 4123 | ||
4083 | read_lock(&bond->lock); | 4124 | read_lock(&bond->lock); |
4084 | 4125 | ||
@@ -4086,29 +4127,29 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
4086 | goto out; | 4127 | goto out; |
4087 | } | 4128 | } |
4088 | 4129 | ||
4089 | read_lock(&bond->curr_slave_lock); | 4130 | /* |
4090 | slave = start_at = bond->curr_active_slave; | 4131 | * Concurrent TX may collide on rr_tx_counter; we accept that |
4091 | read_unlock(&bond->curr_slave_lock); | 4132 | * as being rare enough not to justify using an atomic op here |
4133 | */ | ||
4134 | slave_no = bond->rr_tx_counter++ % bond->slave_cnt; | ||
4092 | 4135 | ||
4093 | if (!slave) { | 4136 | bond_for_each_slave(bond, slave, i) { |
4094 | goto out; | 4137 | slave_no--; |
4138 | if (slave_no < 0) { | ||
4139 | break; | ||
4140 | } | ||
4095 | } | 4141 | } |
4096 | 4142 | ||
4143 | start_at = slave; | ||
4097 | bond_for_each_slave_from(bond, slave, i, start_at) { | 4144 | bond_for_each_slave_from(bond, slave, i, start_at) { |
4098 | if (IS_UP(slave->dev) && | 4145 | if (IS_UP(slave->dev) && |
4099 | (slave->link == BOND_LINK_UP) && | 4146 | (slave->link == BOND_LINK_UP) && |
4100 | (slave->state == BOND_STATE_ACTIVE)) { | 4147 | (slave->state == BOND_STATE_ACTIVE)) { |
4101 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 4148 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
4102 | |||
4103 | write_lock(&bond->curr_slave_lock); | ||
4104 | bond->curr_active_slave = slave->next; | ||
4105 | write_unlock(&bond->curr_slave_lock); | ||
4106 | |||
4107 | break; | 4149 | break; |
4108 | } | 4150 | } |
4109 | } | 4151 | } |
4110 | 4152 | ||
4111 | |||
4112 | out: | 4153 | out: |
4113 | if (res) { | 4154 | if (res) { |
4114 | /* no suitable interface, frame not sent */ | 4155 | /* no suitable interface, frame not sent */ |
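bond_xmit_roundrobin() no longer rotates curr_active_slave under a write lock for every packet; a free-running rr_tx_counter taken modulo the slave count chooses the starting slave, and a lost increment under concurrent transmit is deliberately tolerated since it only skews the distribution slightly. The selection arithmetic in isolation (a generic sketch, not the driver function):

    /* Spread successive transmissions across n slaves; the unsynchronized
     * increment can occasionally collide, which is accepted as harmless. */
    static unsigned int rr_pick(unsigned int *counter, unsigned int n)
    {
            return (*counter)++ % n;
    }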
@@ -4340,6 +4381,10 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params) | |||
4340 | 4381 | ||
4341 | bond->params = *params; /* copy params struct */ | 4382 | bond->params = *params; /* copy params struct */ |
4342 | 4383 | ||
4384 | bond->wq = create_singlethread_workqueue(bond_dev->name); | ||
4385 | if (!bond->wq) | ||
4386 | return -ENOMEM; | ||
4387 | |||
4343 | /* Initialize pointers */ | 4388 | /* Initialize pointers */ |
4344 | bond->first_slave = NULL; | 4389 | bond->first_slave = NULL; |
4345 | bond->curr_active_slave = NULL; | 4390 | bond->curr_active_slave = NULL; |
@@ -4428,8 +4473,8 @@ static void bond_free_all(void) | |||
4428 | bond_mc_list_destroy(bond); | 4473 | bond_mc_list_destroy(bond); |
4429 | /* Release the bonded slaves */ | 4474 | /* Release the bonded slaves */ |
4430 | bond_release_all(bond_dev); | 4475 | bond_release_all(bond_dev); |
4431 | bond_deinit(bond_dev); | ||
4432 | unregister_netdevice(bond_dev); | 4476 | unregister_netdevice(bond_dev); |
4477 | bond_deinit(bond_dev); | ||
4433 | } | 4478 | } |
4434 | 4479 | ||
4435 | #ifdef CONFIG_PROC_FS | 4480 | #ifdef CONFIG_PROC_FS |
@@ -4826,10 +4871,32 @@ out_rtnl: | |||
4826 | return res; | 4871 | return res; |
4827 | } | 4872 | } |
4828 | 4873 | ||
4874 | static void bond_work_cancel_all(struct bonding *bond) | ||
4875 | { | ||
4876 | write_lock_bh(&bond->lock); | ||
4877 | bond->kill_timers = 1; | ||
4878 | write_unlock_bh(&bond->lock); | ||
4879 | |||
4880 | if (bond->params.miimon && delayed_work_pending(&bond->mii_work)) | ||
4881 | cancel_delayed_work(&bond->mii_work); | ||
4882 | |||
4883 | if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work)) | ||
4884 | cancel_delayed_work(&bond->arp_work); | ||
4885 | |||
4886 | if (bond->params.mode == BOND_MODE_ALB && | ||
4887 | delayed_work_pending(&bond->alb_work)) | ||
4888 | cancel_delayed_work(&bond->alb_work); | ||
4889 | |||
4890 | if (bond->params.mode == BOND_MODE_8023AD && | ||
4891 | delayed_work_pending(&bond->ad_work)) | ||
4892 | cancel_delayed_work(&bond->ad_work); | ||
4893 | } | ||
4894 | |||
4829 | static int __init bonding_init(void) | 4895 | static int __init bonding_init(void) |
4830 | { | 4896 | { |
4831 | int i; | 4897 | int i; |
4832 | int res; | 4898 | int res; |
4899 | struct bonding *bond, *nxt; | ||
4833 | 4900 | ||
4834 | printk(KERN_INFO "%s", version); | 4901 | printk(KERN_INFO "%s", version); |
4835 | 4902 | ||
@@ -4856,6 +4923,11 @@ static int __init bonding_init(void) | |||
4856 | 4923 | ||
4857 | goto out; | 4924 | goto out; |
4858 | err: | 4925 | err: |
4926 | list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) { | ||
4927 | bond_work_cancel_all(bond); | ||
4928 | destroy_workqueue(bond->wq); | ||
4929 | } | ||
4930 | |||
4859 | rtnl_lock(); | 4931 | rtnl_lock(); |
4860 | bond_free_all(); | 4932 | bond_free_all(); |
4861 | bond_destroy_sysfs(); | 4933 | bond_destroy_sysfs(); |
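bond_work_cancel_all() first raises kill_timers under the bond lock, so a monitor callback that is already executing will notice the flag and stop re-arming itself, and only then cancels whatever delayed work is still pending; the bonding_init() error path applies it to every bond before the devices are freed. A hedged sketch of the same cancel-then-flush idiom as it might look on a close path (the actual bond_close() changes are not shown in this section):

```c
/* Sketch, not the driver's real close routine. Guards with
 * delayed_work_pending() as the patch does elsewhere. */
static void example_stop_monitors(struct bonding *bond)
{
	write_lock_bh(&bond->lock);
	bond->kill_timers = 1;		/* a running callback sees this and does not re-queue */
	write_unlock_bh(&bond->lock);

	if (delayed_work_pending(&bond->mii_work))
		cancel_delayed_work(&bond->mii_work);
	if (delayed_work_pending(&bond->arp_work))
		cancel_delayed_work(&bond->arp_work);

	flush_workqueue(bond->wq);	/* wait out any callback already running */
}
```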
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 855dc10ffa1b..7a06ade85b02 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -229,7 +229,7 @@ static ssize_t bonding_show_slaves(struct device *d, | |||
229 | int i, res = 0; | 229 | int i, res = 0; |
230 | struct bonding *bond = to_bond(d); | 230 | struct bonding *bond = to_bond(d); |
231 | 231 | ||
232 | read_lock_bh(&bond->lock); | 232 | read_lock(&bond->lock); |
233 | bond_for_each_slave(bond, slave, i) { | 233 | bond_for_each_slave(bond, slave, i) { |
234 | if (res > (PAGE_SIZE - IFNAMSIZ)) { | 234 | if (res > (PAGE_SIZE - IFNAMSIZ)) { |
235 | /* not enough space for another interface name */ | 235 | /* not enough space for another interface name */ |
@@ -240,7 +240,7 @@ static ssize_t bonding_show_slaves(struct device *d, | |||
240 | } | 240 | } |
241 | res += sprintf(buf + res, "%s ", slave->dev->name); | 241 | res += sprintf(buf + res, "%s ", slave->dev->name); |
242 | } | 242 | } |
243 | read_unlock_bh(&bond->lock); | 243 | read_unlock(&bond->lock); |
244 | res += sprintf(buf + res, "\n"); | 244 | res += sprintf(buf + res, "\n"); |
245 | res++; | 245 | res++; |
246 | return res; | 246 | return res; |
@@ -282,18 +282,18 @@ static ssize_t bonding_store_slaves(struct device *d, | |||
282 | 282 | ||
283 | /* Got a slave name in ifname. Is it already in the list? */ | 283 | /* Got a slave name in ifname. Is it already in the list? */ |
284 | found = 0; | 284 | found = 0; |
285 | read_lock_bh(&bond->lock); | 285 | read_lock(&bond->lock); |
286 | bond_for_each_slave(bond, slave, i) | 286 | bond_for_each_slave(bond, slave, i) |
287 | if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { | 287 | if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { |
288 | printk(KERN_ERR DRV_NAME | 288 | printk(KERN_ERR DRV_NAME |
289 | ": %s: Interface %s is already enslaved!\n", | 289 | ": %s: Interface %s is already enslaved!\n", |
290 | bond->dev->name, ifname); | 290 | bond->dev->name, ifname); |
291 | ret = -EPERM; | 291 | ret = -EPERM; |
292 | read_unlock_bh(&bond->lock); | 292 | read_unlock(&bond->lock); |
293 | goto out; | 293 | goto out; |
294 | } | 294 | } |
295 | 295 | ||
296 | read_unlock_bh(&bond->lock); | 296 | read_unlock(&bond->lock); |
297 | printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n", | 297 | printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n", |
298 | bond->dev->name, ifname); | 298 | bond->dev->name, ifname); |
299 | dev = dev_get_by_name(&init_net, ifname); | 299 | dev = dev_get_by_name(&init_net, ifname); |
@@ -662,12 +662,9 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
662 | "%s Disabling MII monitoring.\n", | 662 | "%s Disabling MII monitoring.\n", |
663 | bond->dev->name, bond->dev->name); | 663 | bond->dev->name, bond->dev->name); |
664 | bond->params.miimon = 0; | 664 | bond->params.miimon = 0; |
665 | /* Kill MII timer, else it brings bond's link down */ | 665 | if (delayed_work_pending(&bond->mii_work)) { |
666 | if (bond->arp_timer.function) { | 666 | cancel_delayed_work(&bond->mii_work); |
667 | printk(KERN_INFO DRV_NAME | 667 | flush_workqueue(bond->wq); |
668 | ": %s: Kill MII timer, else it brings bond's link down...\n", | ||
669 | bond->dev->name); | ||
670 | del_timer_sync(&bond->mii_timer); | ||
671 | } | 668 | } |
672 | } | 669 | } |
673 | if (!bond->params.arp_targets[0]) { | 670 | if (!bond->params.arp_targets[0]) { |
@@ -682,25 +679,15 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
682 | * timer will get fired off when the open function | 679 | * timer will get fired off when the open function |
683 | * is called. | 680 | * is called. |
684 | */ | 681 | */ |
685 | if (bond->arp_timer.function) { | 682 | if (!delayed_work_pending(&bond->arp_work)) { |
686 | /* The timer's already set up, so fire it off */ | 683 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) |
687 | mod_timer(&bond->arp_timer, jiffies + 1); | 684 | INIT_DELAYED_WORK(&bond->arp_work, |
688 | } else { | 685 | bond_activebackup_arp_mon); |
689 | /* Set up the timer. */ | 686 | else |
690 | init_timer(&bond->arp_timer); | 687 | INIT_DELAYED_WORK(&bond->arp_work, |
691 | bond->arp_timer.expires = jiffies + 1; | 688 | bond_loadbalance_arp_mon); |
692 | bond->arp_timer.data = | 689 | |
693 | (unsigned long) bond->dev; | 690 | queue_delayed_work(bond->wq, &bond->arp_work, 0); |
694 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { | ||
695 | bond->arp_timer.function = | ||
696 | (void *) | ||
697 | &bond_activebackup_arp_mon; | ||
698 | } else { | ||
699 | bond->arp_timer.function = | ||
700 | (void *) | ||
701 | &bond_loadbalance_arp_mon; | ||
702 | } | ||
703 | add_timer(&bond->arp_timer); | ||
704 | } | 691 | } |
705 | } | 692 | } |
706 | 693 | ||
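The store handler for arp_interval now arms the monitor through the workqueue rather than mod_timer()/add_timer(): if no ARP work is pending, it binds the delayed work to the mode-appropriate handler and queues it with a zero delay, which plays the role of the old "expires = jiffies + 1". Condensed, the conversion pattern looks like this (names as in the hunk; a sketch, not the full handler):

```c
/* Timer style being removed:
 *	bond->arp_timer.expires  = jiffies + 1;
 *	bond->arp_timer.function = (void *)&bond_loadbalance_arp_mon;
 *	add_timer(&bond->arp_timer);
 * Workqueue style being added: */
if (!delayed_work_pending(&bond->arp_work)) {
	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
		INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
	else
		INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);

	queue_delayed_work(bond->wq, &bond->arp_work, 0);	/* delay 0: run as soon as possible */
}
```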
@@ -1056,12 +1043,9 @@ static ssize_t bonding_store_miimon(struct device *d, | |||
1056 | bond->params.arp_validate = | 1043 | bond->params.arp_validate = |
1057 | BOND_ARP_VALIDATE_NONE; | 1044 | BOND_ARP_VALIDATE_NONE; |
1058 | } | 1045 | } |
1059 | /* Kill ARP timer, else it brings bond's link down */ | 1046 | if (delayed_work_pending(&bond->arp_work)) { |
1060 | if (bond->mii_timer.function) { | 1047 | cancel_delayed_work(&bond->arp_work); |
1061 | printk(KERN_INFO DRV_NAME | 1048 | flush_workqueue(bond->wq); |
1062 | ": %s: Kill ARP timer, else it brings bond's link down...\n", | ||
1063 | bond->dev->name); | ||
1064 | del_timer_sync(&bond->arp_timer); | ||
1065 | } | 1049 | } |
1066 | } | 1050 | } |
1067 | 1051 | ||
@@ -1071,18 +1055,11 @@ static ssize_t bonding_store_miimon(struct device *d, | |||
1071 | * timer will get fired off when the open function | 1055 | * timer will get fired off when the open function |
1072 | * is called. | 1056 | * is called. |
1073 | */ | 1057 | */ |
1074 | if (bond->mii_timer.function) { | 1058 | if (!delayed_work_pending(&bond->mii_work)) { |
1075 | /* The timer's already set up, so fire it off */ | 1059 | INIT_DELAYED_WORK(&bond->mii_work, |
1076 | mod_timer(&bond->mii_timer, jiffies + 1); | 1060 | bond_mii_monitor); |
1077 | } else { | 1061 | queue_delayed_work(bond->wq, |
1078 | /* Set up the timer. */ | 1062 | &bond->mii_work, 0); |
1079 | init_timer(&bond->mii_timer); | ||
1080 | bond->mii_timer.expires = jiffies + 1; | ||
1081 | bond->mii_timer.data = | ||
1082 | (unsigned long) bond->dev; | ||
1083 | bond->mii_timer.function = | ||
1084 | (void *) &bond_mii_monitor; | ||
1085 | add_timer(&bond->mii_timer); | ||
1086 | } | 1063 | } |
1087 | } | 1064 | } |
1088 | } | 1065 | } |
@@ -1156,6 +1133,9 @@ static ssize_t bonding_store_primary(struct device *d, | |||
1156 | } | 1133 | } |
1157 | out: | 1134 | out: |
1158 | write_unlock_bh(&bond->lock); | 1135 | write_unlock_bh(&bond->lock); |
1136 | |||
1137 | rtnl_unlock(); | ||
1138 | |||
1159 | return count; | 1139 | return count; |
1160 | } | 1140 | } |
1161 | static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary); | 1141 | static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary); |
@@ -1213,6 +1193,7 @@ static ssize_t bonding_show_active_slave(struct device *d, | |||
1213 | struct bonding *bond = to_bond(d); | 1193 | struct bonding *bond = to_bond(d); |
1214 | int count; | 1194 | int count; |
1215 | 1195 | ||
1196 | rtnl_lock(); | ||
1216 | 1197 | ||
1217 | read_lock(&bond->curr_slave_lock); | 1198 | read_lock(&bond->curr_slave_lock); |
1218 | curr = bond->curr_active_slave; | 1199 | curr = bond->curr_active_slave; |
@@ -1292,6 +1273,8 @@ static ssize_t bonding_store_active_slave(struct device *d, | |||
1292 | } | 1273 | } |
1293 | out: | 1274 | out: |
1294 | write_unlock_bh(&bond->lock); | 1275 | write_unlock_bh(&bond->lock); |
1276 | rtnl_unlock(); | ||
1277 | |||
1295 | return count; | 1278 | return count; |
1296 | 1279 | ||
1297 | } | 1280 | } |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index b8180600a309..d1ed14bf1ccb 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -184,8 +184,6 @@ struct bonding { | |||
184 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ | 184 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ |
185 | rwlock_t lock; | 185 | rwlock_t lock; |
186 | rwlock_t curr_slave_lock; | 186 | rwlock_t curr_slave_lock; |
187 | struct timer_list mii_timer; | ||
188 | struct timer_list arp_timer; | ||
189 | s8 kill_timers; | 187 | s8 kill_timers; |
190 | s8 send_grat_arp; | 188 | s8 send_grat_arp; |
191 | s8 setup_by_slave; | 189 | s8 setup_by_slave; |
@@ -199,12 +197,18 @@ struct bonding { | |||
199 | int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); | 197 | int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); |
200 | __be32 master_ip; | 198 | __be32 master_ip; |
201 | u16 flags; | 199 | u16 flags; |
200 | u16 rr_tx_counter; | ||
202 | struct ad_bond_info ad_info; | 201 | struct ad_bond_info ad_info; |
203 | struct alb_bond_info alb_info; | 202 | struct alb_bond_info alb_info; |
204 | struct bond_params params; | 203 | struct bond_params params; |
205 | struct list_head vlan_list; | 204 | struct list_head vlan_list; |
206 | struct vlan_group *vlgrp; | 205 | struct vlan_group *vlgrp; |
207 | struct packet_type arp_mon_pt; | 206 | struct packet_type arp_mon_pt; |
207 | struct workqueue_struct *wq; | ||
208 | struct delayed_work mii_work; | ||
209 | struct delayed_work arp_work; | ||
210 | struct delayed_work alb_work; | ||
211 | struct delayed_work ad_work; | ||
208 | }; | 212 | }; |
209 | 213 | ||
210 | /** | 214 | /** |
@@ -307,9 +311,9 @@ int bond_create_slave_symlinks(struct net_device *master, struct net_device *sla | |||
307 | void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave); | 311 | void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave); |
308 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); | 312 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); |
309 | int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); | 313 | int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); |
310 | void bond_mii_monitor(struct net_device *bond_dev); | 314 | void bond_mii_monitor(struct work_struct *); |
311 | void bond_loadbalance_arp_mon(struct net_device *bond_dev); | 315 | void bond_loadbalance_arp_mon(struct work_struct *); |
312 | void bond_activebackup_arp_mon(struct net_device *bond_dev); | 316 | void bond_activebackup_arp_mon(struct work_struct *); |
313 | void bond_set_mode_ops(struct bonding *bond, int mode); | 317 | void bond_set_mode_ops(struct bonding *bond, int mode); |
314 | int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl); | 318 | int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl); |
315 | void bond_select_active_slave(struct bonding *bond); | 319 | void bond_select_active_slave(struct bonding *bond); |
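With the monitor prototypes changed to take a struct work_struct *, each handler has to recover its bonding context itself instead of receiving the net_device as an argument. The usual idiom, given the delayed_work members added to struct bonding above, is container_of() on the embedded work item; a hedged sketch of the new handler shape (the real handlers live in bond_main.c and are not shown in this section):

```c
/* Sketch of the handler shape the new prototype implies; the re-arm
 * interval and the kill_timers guard follow fields shown in this patch. */
void bond_mii_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);

	/* ... link-state monitoring ... */

	if (!bond->kill_timers)		/* set by bond_work_cancel_all() */
		queue_delayed_work(bond->wq, &bond->mii_work,
				   msecs_to_jiffies(bond->params.miimon));
}
```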
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index ae419736158e..57541d2d9e1e 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -460,18 +460,11 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
460 | struct cpmac_desc *desc; | 460 | struct cpmac_desc *desc; |
461 | struct cpmac_priv *priv = netdev_priv(dev); | 461 | struct cpmac_priv *priv = netdev_priv(dev); |
462 | 462 | ||
463 | if (unlikely(skb_padto(skb, ETH_ZLEN))) { | 463 | if (unlikely(skb_padto(skb, ETH_ZLEN))) |
464 | if (netif_msg_tx_err(priv) && net_ratelimit()) | 464 | return NETDEV_TX_OK; |
465 | printk(KERN_WARNING | ||
466 | "%s: tx: padding failed, dropping\n", dev->name); | ||
467 | spin_lock(&priv->lock); | ||
468 | dev->stats.tx_dropped++; | ||
469 | spin_unlock(&priv->lock); | ||
470 | return -ENOMEM; | ||
471 | } | ||
472 | 465 | ||
473 | len = max(skb->len, ETH_ZLEN); | 466 | len = max(skb->len, ETH_ZLEN); |
474 | queue = skb_get_queue_mapping(skb); | 467 | queue = skb->queue_mapping; |
475 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 468 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE |
476 | netif_stop_subqueue(dev, queue); | 469 | netif_stop_subqueue(dev, queue); |
477 | #else | 470 | #else |
@@ -481,13 +474,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
481 | desc = &priv->desc_ring[queue]; | 474 | desc = &priv->desc_ring[queue]; |
482 | if (unlikely(desc->dataflags & CPMAC_OWN)) { | 475 | if (unlikely(desc->dataflags & CPMAC_OWN)) { |
483 | if (netif_msg_tx_err(priv) && net_ratelimit()) | 476 | if (netif_msg_tx_err(priv) && net_ratelimit()) |
484 | printk(KERN_WARNING "%s: tx dma ring full, dropping\n", | 477 | printk(KERN_WARNING "%s: tx dma ring full\n", |
485 | dev->name); | 478 | dev->name); |
486 | spin_lock(&priv->lock); | 479 | return NETDEV_TX_BUSY; |
487 | dev->stats.tx_dropped++; | ||
488 | spin_unlock(&priv->lock); | ||
489 | dev_kfree_skb_any(skb); | ||
490 | return -ENOMEM; | ||
491 | } | 480 | } |
492 | 481 | ||
493 | spin_lock(&priv->lock); | 482 | spin_lock(&priv->lock); |
@@ -509,7 +498,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
509 | cpmac_dump_skb(dev, skb); | 498 | cpmac_dump_skb(dev, skb); |
510 | cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); | 499 | cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); |
511 | 500 | ||
512 | return 0; | 501 | return NETDEV_TX_OK; |
513 | } | 502 | } |
514 | 503 | ||
515 | static void cpmac_end_xmit(struct net_device *dev, int queue) | 504 | static void cpmac_end_xmit(struct net_device *dev, int queue) |
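The cpmac transmit path now reports status with the NETDEV_TX_* codes rather than -ENOMEM: NETDEV_TX_OK means the skb was consumed (transmitted, or already freed, as skb_padto() does on failure), while NETDEV_TX_BUSY tells the core the skb was not taken and should be requeued. A small sketch of that convention, with a hypothetical ring-full helper standing in for the CPMAC_OWN check:

```c
/* Sketch of the hard_start_xmit return convention adopted above.
 * example_ring_full() is a hypothetical stand-in for the driver's
 * (desc->dataflags & CPMAC_OWN) test. */
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;	/* skb_padto() already freed the skb */

	if (example_ring_full(dev))
		return NETDEV_TX_BUSY;	/* skb not consumed; core will retry */

	/* ... map the buffer and hand it to the hardware ... */
	return NETDEV_TX_OK;
}
```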
@@ -646,12 +635,14 @@ static void cpmac_clear_tx(struct net_device *dev) | |||
646 | int i; | 635 | int i; |
647 | if (unlikely(!priv->desc_ring)) | 636 | if (unlikely(!priv->desc_ring)) |
648 | return; | 637 | return; |
649 | for (i = 0; i < CPMAC_QUEUES; i++) | 638 | for (i = 0; i < CPMAC_QUEUES; i++) { |
639 | priv->desc_ring[i].dataflags = 0; | ||
650 | if (priv->desc_ring[i].skb) { | 640 | if (priv->desc_ring[i].skb) { |
651 | dev_kfree_skb_any(priv->desc_ring[i].skb); | 641 | dev_kfree_skb_any(priv->desc_ring[i].skb); |
652 | if (netif_subqueue_stopped(dev, i)) | 642 | if (netif_subqueue_stopped(dev, i)) |
653 | netif_wake_subqueue(dev, i); | 643 | netif_wake_subqueue(dev, i); |
654 | } | 644 | } |
645 | } | ||
655 | } | 646 | } |
656 | 647 | ||
657 | static void cpmac_hw_error(struct work_struct *work) | 648 | static void cpmac_hw_error(struct work_struct *work) |
@@ -727,11 +718,13 @@ static void cpmac_tx_timeout(struct net_device *dev) | |||
727 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 718 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE |
728 | for (i = 0; i < CPMAC_QUEUES; i++) | 719 | for (i = 0; i < CPMAC_QUEUES; i++) |
729 | if (priv->desc_ring[i].skb) { | 720 | if (priv->desc_ring[i].skb) { |
721 | priv->desc_ring[i].dataflags = 0; | ||
730 | dev_kfree_skb_any(priv->desc_ring[i].skb); | 722 | dev_kfree_skb_any(priv->desc_ring[i].skb); |
731 | netif_wake_subqueue(dev, i); | 723 | netif_wake_subqueue(dev, i); |
732 | break; | 724 | break; |
733 | } | 725 | } |
734 | #else | 726 | #else |
727 | priv->desc_ring[0].dataflags = 0; | ||
735 | if (priv->desc_ring[0].skb) | 728 | if (priv->desc_ring[0].skb) |
736 | dev_kfree_skb_any(priv->desc_ring[0].skb); | 729 | dev_kfree_skb_any(priv->desc_ring[0].skb); |
737 | netif_wake_queue(dev); | 730 | netif_wake_queue(dev); |
@@ -794,7 +787,7 @@ static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam* | |||
794 | { | 787 | { |
795 | struct cpmac_priv *priv = netdev_priv(dev); | 788 | struct cpmac_priv *priv = netdev_priv(dev); |
796 | 789 | ||
797 | if (dev->flags && IFF_UP) | 790 | if (netif_running(dev)) |
798 | return -EBUSY; | 791 | return -EBUSY; |
799 | priv->ring_size = ring->rx_pending; | 792 | priv->ring_size = ring->rx_pending; |
800 | return 0; | 793 | return 0; |
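The ringparam fix replaces a logical AND with the proper check: "dev->flags && IFF_UP" is true whenever any flag at all is set, which for a registered device is essentially always, so the old test refused the change unconditionally. Testing the IFF_UP bit would need "&", and netif_running() is the conventional helper. A brief illustration of the corrected shape (sketch only, mirroring the hunk):

```c
static int example_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ring)
{
	if (netif_running(dev))		/* not: dev->flags && IFF_UP (logical-AND bug) */
		return -EBUSY;

	/* safe to resize while the interface is down */
	return 0;
}
```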
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index b07613e61f53..ddc30c4bf34a 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c | |||
@@ -805,7 +805,7 @@ static void __devinit dfx_bus_init(struct net_device *dev) | |||
805 | * Interrupts are disabled at the adapter bus-specific logic. | 805 | * Interrupts are disabled at the adapter bus-specific logic. |
806 | */ | 806 | */ |
807 | 807 | ||
808 | static void __devinit dfx_bus_uninit(struct net_device *dev) | 808 | static void __devexit dfx_bus_uninit(struct net_device *dev) |
809 | { | 809 | { |
810 | DFX_board_t *bp = netdev_priv(dev); | 810 | DFX_board_t *bp = netdev_priv(dev); |
811 | struct device *bdev = bp->bus_dev; | 811 | struct device *bdev = bp->bus_dev; |
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 9c85e50014b4..70509ed6c11d 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c | |||
@@ -651,9 +651,9 @@ static void eexp_timeout(struct net_device *dev) | |||
651 | */ | 651 | */ |
652 | static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) | 652 | static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) |
653 | { | 653 | { |
654 | struct net_local *lp = netdev_priv(dev); | ||
655 | short length = buf->len; | 654 | short length = buf->len; |
656 | #ifdef CONFIG_SMP | 655 | #ifdef CONFIG_SMP |
656 | struct net_local *lp = netdev_priv(dev); | ||
657 | unsigned long flags; | 657 | unsigned long flags; |
658 | #endif | 658 | #endif |
659 | 659 | ||
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index 37707a0c0498..aafc3ce59cbb 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c | |||
@@ -30,6 +30,7 @@ static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, | |||
30 | int len) | 30 | int len) |
31 | { | 31 | { |
32 | uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount)); | 32 | uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount)); |
33 | |||
33 | if (available_len < len) | 34 | if (available_len < len) |
34 | return -EFAULT; | 35 | return -EFAULT; |
35 | 36 | ||
@@ -45,14 +46,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, | |||
45 | int count_to_go = skb->len; | 46 | int count_to_go = skb->len; |
46 | char *buf_ptr = skb->data; | 47 | char *buf_ptr = skb->data; |
47 | 48 | ||
48 | pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", | ||
49 | dev->name, __FUNCTION__, skb->len); | ||
50 | |||
51 | outl(skb->len, mipsnet_reg_address(dev, txDataCount)); | 49 | outl(skb->len, mipsnet_reg_address(dev, txDataCount)); |
52 | 50 | ||
53 | pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n", | ||
54 | dev->name, __FUNCTION__, skb->len); | ||
55 | |||
56 | for (; count_to_go; buf_ptr++, count_to_go--) | 51 | for (; count_to_go; buf_ptr++, count_to_go--) |
57 | outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); | 52 | outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); |
58 | 53 | ||
@@ -64,10 +59,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, | |||
64 | 59 | ||
65 | static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) | 60 | static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) |
66 | { | 61 | { |
67 | pr_debug("%s:%s(): transmitting %d bytes\n", | 62 | /* |
68 | dev->name, __FUNCTION__, skb->len); | 63 | * Only one packet at a time. Once TXDONE interrupt is serviced, the |
69 | |||
70 | /* Only one packet at a time. Once TXDONE interrupt is serviced, the | ||
71 | * queue will be restarted. | 64 | * queue will be restarted. |
72 | */ | 65 | */ |
73 | netif_stop_queue(dev); | 66 | netif_stop_queue(dev); |
@@ -94,8 +87,6 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) | |||
94 | skb->protocol = eth_type_trans(skb, dev); | 87 | skb->protocol = eth_type_trans(skb, dev); |
95 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 88 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
96 | 89 | ||
97 | pr_debug("%s:%s(): pushing RXed data to kernel\n", | ||
98 | dev->name, __FUNCTION__); | ||
99 | netif_rx(skb); | 90 | netif_rx(skb); |
100 | 91 | ||
101 | dev->stats.rx_packets++; | 92 | dev->stats.rx_packets++; |
@@ -112,44 +103,29 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) | |||
112 | uint64_t interruptFlags; | 103 | uint64_t interruptFlags; |
113 | 104 | ||
114 | if (irq == dev->irq) { | 105 | if (irq == dev->irq) { |
115 | pr_debug("%s:%s(): irq %d for device\n", | ||
116 | dev->name, __FUNCTION__, irq); | ||
117 | |||
118 | retval = IRQ_HANDLED; | 106 | retval = IRQ_HANDLED; |
119 | 107 | ||
120 | interruptFlags = | 108 | interruptFlags = |
121 | inl(mipsnet_reg_address(dev, interruptControl)); | 109 | inl(mipsnet_reg_address(dev, interruptControl)); |
122 | pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name, | ||
123 | __FUNCTION__, interruptFlags); | ||
124 | 110 | ||
125 | if (interruptFlags & MIPSNET_INTCTL_TXDONE) { | 111 | if (interruptFlags & MIPSNET_INTCTL_TXDONE) { |
126 | pr_debug("%s:%s(): got TXDone\n", | ||
127 | dev->name, __FUNCTION__); | ||
128 | outl(MIPSNET_INTCTL_TXDONE, | 112 | outl(MIPSNET_INTCTL_TXDONE, |
129 | mipsnet_reg_address(dev, interruptControl)); | 113 | mipsnet_reg_address(dev, interruptControl)); |
130 | /* only one packet at a time, we are done. */ | 114 | /* only one packet at a time, we are done. */ |
131 | netif_wake_queue(dev); | 115 | netif_wake_queue(dev); |
132 | } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { | 116 | } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { |
133 | pr_debug("%s:%s(): got RX data\n", | ||
134 | dev->name, __FUNCTION__); | ||
135 | mipsnet_get_fromdev(dev, | 117 | mipsnet_get_fromdev(dev, |
136 | inl(mipsnet_reg_address(dev, rxDataCount))); | 118 | inl(mipsnet_reg_address(dev, rxDataCount))); |
137 | pr_debug("%s:%s(): clearing RX int\n", | ||
138 | dev->name, __FUNCTION__); | ||
139 | outl(MIPSNET_INTCTL_RXDONE, | 119 | outl(MIPSNET_INTCTL_RXDONE, |
140 | mipsnet_reg_address(dev, interruptControl)); | 120 | mipsnet_reg_address(dev, interruptControl)); |
141 | 121 | ||
142 | } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { | 122 | } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { |
143 | pr_debug("%s:%s(): got test interrupt\n", | ||
144 | dev->name, __FUNCTION__); | ||
145 | /* | 123 | /* |
146 | * TESTBIT is cleared on read. | 124 | * TESTBIT is cleared on read. |
147 | * And takes effect after a write with 0 | 125 | * And takes effect after a write with 0 |
148 | */ | 126 | */ |
149 | outl(0, mipsnet_reg_address(dev, interruptControl)); | 127 | outl(0, mipsnet_reg_address(dev, interruptControl)); |
150 | } else { | 128 | } else { |
151 | pr_debug("%s:%s(): no valid fags 0x%016llx\n", | ||
152 | dev->name, __FUNCTION__, interruptFlags); | ||
153 | /* Maybe shared IRQ, just ignore, no clearing. */ | 129 | /* Maybe shared IRQ, just ignore, no clearing. */ |
154 | retval = IRQ_NONE; | 130 | retval = IRQ_NONE; |
155 | } | 131 | } |
@@ -165,22 +141,15 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) | |||
165 | static int mipsnet_open(struct net_device *dev) | 141 | static int mipsnet_open(struct net_device *dev) |
166 | { | 142 | { |
167 | int err; | 143 | int err; |
168 | pr_debug("%s: mipsnet_open\n", dev->name); | ||
169 | 144 | ||
170 | err = request_irq(dev->irq, &mipsnet_interrupt, | 145 | err = request_irq(dev->irq, &mipsnet_interrupt, |
171 | IRQF_SHARED, dev->name, (void *) dev); | 146 | IRQF_SHARED, dev->name, (void *) dev); |
172 | 147 | ||
173 | if (err) { | 148 | if (err) { |
174 | pr_debug("%s: %s(): can't get irq %d\n", | ||
175 | dev->name, __FUNCTION__, dev->irq); | ||
176 | release_region(dev->base_addr, MIPSNET_IO_EXTENT); | 149 | release_region(dev->base_addr, MIPSNET_IO_EXTENT); |
177 | return err; | 150 | return err; |
178 | } | 151 | } |
179 | 152 | ||
180 | pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n", | ||
181 | dev->name, __FUNCTION__, dev->base_addr, dev->irq); | ||
182 | |||
183 | |||
184 | netif_start_queue(dev); | 153 | netif_start_queue(dev); |
185 | 154 | ||
186 | /* test interrupt handler */ | 155 | /* test interrupt handler */ |
@@ -193,8 +162,8 @@ static int mipsnet_open(struct net_device *dev) | |||
193 | 162 | ||
194 | static int mipsnet_close(struct net_device *dev) | 163 | static int mipsnet_close(struct net_device *dev) |
195 | { | 164 | { |
196 | pr_debug("%s: %s()\n", dev->name, __FUNCTION__); | ||
197 | netif_stop_queue(dev); | 165 | netif_stop_queue(dev); |
166 | |||
198 | return 0; | 167 | return 0; |
199 | } | 168 | } |
200 | 169 | ||
@@ -229,9 +198,6 @@ static int __init mipsnet_probe(struct device *dev) | |||
229 | 198 | ||
230 | /* Get the io region now, get irq on open() */ | 199 | /* Get the io region now, get irq on open() */ |
231 | if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { | 200 | if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { |
232 | pr_debug("%s: %s(): IO region {start: 0x%04lux, len: %d} " | ||
233 | "for dev is not availble.\n", netdev->name, | ||
234 | __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT); | ||
235 | err = -EBUSY; | 201 | err = -EBUSY; |
236 | goto out_free_netdev; | 202 | goto out_free_netdev; |
237 | } | 203 | } |
@@ -295,8 +261,6 @@ static int __init mipsnet_init_module(void) | |||
295 | 261 | ||
296 | static void __exit mipsnet_exit_module(void) | 262 | static void __exit mipsnet_exit_module(void) |
297 | { | 263 | { |
298 | pr_debug("MIPSNet Ethernet driver exiting\n"); | ||
299 | |||
300 | driver_unregister(&mipsnet_driver); | 264 | driver_unregister(&mipsnet_driver); |
301 | } | 265 | } |
302 | 266 | ||
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 84f2d6382f1e..651c2699d5e1 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports | 2 | * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports |
3 | * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> | 3 | * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> |
4 | * | 4 | * |
5 | * Based on the 64360 driver from: | 5 | * Based on the 64360 driver from: |
@@ -43,14 +43,567 @@ | |||
43 | #include <linux/ethtool.h> | 43 | #include <linux/ethtool.h> |
44 | #include <linux/platform_device.h> | 44 | #include <linux/platform_device.h> |
45 | 45 | ||
46 | #include <linux/module.h> | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/spinlock.h> | ||
49 | #include <linux/workqueue.h> | ||
50 | #include <linux/mii.h> | ||
51 | |||
52 | #include <linux/mv643xx_eth.h> | ||
53 | |||
46 | #include <asm/io.h> | 54 | #include <asm/io.h> |
47 | #include <asm/types.h> | 55 | #include <asm/types.h> |
48 | #include <asm/pgtable.h> | 56 | #include <asm/pgtable.h> |
49 | #include <asm/system.h> | 57 | #include <asm/system.h> |
50 | #include <asm/delay.h> | 58 | #include <asm/delay.h> |
51 | #include "mv643xx_eth.h" | 59 | #include <asm/dma-mapping.h> |
60 | |||
61 | #define MV643XX_CHECKSUM_OFFLOAD_TX | ||
62 | #define MV643XX_NAPI | ||
63 | #define MV643XX_TX_FAST_REFILL | ||
64 | #undef MV643XX_COAL | ||
65 | |||
66 | /* | ||
67 | * Number of RX / TX descriptors on RX / TX rings. | ||
68 | * Note that allocating RX descriptors is done by allocating the RX | ||
69 | * ring AND preallocated RX buffers (skb's) for each descriptor. | ||
70 | * The TX descriptors only allocate the TX descriptor ring, | ||
71 | * with no preallocated TX buffers (skb's are allocated by higher layers). | ||
72 | */ | ||
73 | |||
74 | /* Default TX ring size is 1000 descriptors */ | ||
75 | #define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000 | ||
76 | |||
77 | /* Default RX ring size is 400 descriptors */ | ||
78 | #define MV643XX_DEFAULT_RX_QUEUE_SIZE 400 | ||
79 | |||
80 | #define MV643XX_TX_COAL 100 | ||
81 | #ifdef MV643XX_COAL | ||
82 | #define MV643XX_RX_COAL 100 | ||
83 | #endif | ||
84 | |||
85 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
86 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) | ||
87 | #else | ||
88 | #define MAX_DESCS_PER_SKB 1 | ||
89 | #endif | ||
90 | |||
91 | #define ETH_VLAN_HLEN 4 | ||
92 | #define ETH_FCS_LEN 4 | ||
93 | #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ | ||
94 | #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ | ||
95 | ETH_VLAN_HLEN + ETH_FCS_LEN) | ||
96 | #define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + \ | ||
97 | dma_get_cache_alignment()) | ||
98 | |||
99 | /* | ||
100 | * Registers shared between all ports. | ||
101 | */ | ||
102 | #define PHY_ADDR_REG 0x0000 | ||
103 | #define SMI_REG 0x0004 | ||
104 | |||
105 | /* | ||
106 | * Per-port registers. | ||
107 | */ | ||
108 | #define PORT_CONFIG_REG(p) (0x0400 + ((p) << 10)) | ||
109 | #define PORT_CONFIG_EXTEND_REG(p) (0x0404 + ((p) << 10)) | ||
110 | #define MAC_ADDR_LOW(p) (0x0414 + ((p) << 10)) | ||
111 | #define MAC_ADDR_HIGH(p) (0x0418 + ((p) << 10)) | ||
112 | #define SDMA_CONFIG_REG(p) (0x041c + ((p) << 10)) | ||
113 | #define PORT_SERIAL_CONTROL_REG(p) (0x043c + ((p) << 10)) | ||
114 | #define PORT_STATUS_REG(p) (0x0444 + ((p) << 10)) | ||
115 | #define TRANSMIT_QUEUE_COMMAND_REG(p) (0x0448 + ((p) << 10)) | ||
116 | #define MAXIMUM_TRANSMIT_UNIT(p) (0x0458 + ((p) << 10)) | ||
117 | #define INTERRUPT_CAUSE_REG(p) (0x0460 + ((p) << 10)) | ||
118 | #define INTERRUPT_CAUSE_EXTEND_REG(p) (0x0464 + ((p) << 10)) | ||
119 | #define INTERRUPT_MASK_REG(p) (0x0468 + ((p) << 10)) | ||
120 | #define INTERRUPT_EXTEND_MASK_REG(p) (0x046c + ((p) << 10)) | ||
121 | #define TX_FIFO_URGENT_THRESHOLD_REG(p) (0x0474 + ((p) << 10)) | ||
122 | #define RX_CURRENT_QUEUE_DESC_PTR_0(p) (0x060c + ((p) << 10)) | ||
123 | #define RECEIVE_QUEUE_COMMAND_REG(p) (0x0680 + ((p) << 10)) | ||
124 | #define TX_CURRENT_QUEUE_DESC_PTR_0(p) (0x06c0 + ((p) << 10)) | ||
125 | #define MIB_COUNTERS_BASE(p) (0x1000 + ((p) << 7)) | ||
126 | #define DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(p) (0x1400 + ((p) << 10)) | ||
127 | #define DA_FILTER_OTHER_MULTICAST_TABLE_BASE(p) (0x1500 + ((p) << 10)) | ||
128 | #define DA_FILTER_UNICAST_TABLE_BASE(p) (0x1600 + ((p) << 10)) | ||
129 | |||
130 | /* These macros describe Ethernet Port configuration reg (Px_cR) bits */ | ||
131 | #define UNICAST_NORMAL_MODE (0 << 0) | ||
132 | #define UNICAST_PROMISCUOUS_MODE (1 << 0) | ||
133 | #define DEFAULT_RX_QUEUE(queue) ((queue) << 1) | ||
134 | #define DEFAULT_RX_ARP_QUEUE(queue) ((queue) << 4) | ||
135 | #define RECEIVE_BC_IF_NOT_IP_OR_ARP (0 << 7) | ||
136 | #define REJECT_BC_IF_NOT_IP_OR_ARP (1 << 7) | ||
137 | #define RECEIVE_BC_IF_IP (0 << 8) | ||
138 | #define REJECT_BC_IF_IP (1 << 8) | ||
139 | #define RECEIVE_BC_IF_ARP (0 << 9) | ||
140 | #define REJECT_BC_IF_ARP (1 << 9) | ||
141 | #define TX_AM_NO_UPDATE_ERROR_SUMMARY (1 << 12) | ||
142 | #define CAPTURE_TCP_FRAMES_DIS (0 << 14) | ||
143 | #define CAPTURE_TCP_FRAMES_EN (1 << 14) | ||
144 | #define CAPTURE_UDP_FRAMES_DIS (0 << 15) | ||
145 | #define CAPTURE_UDP_FRAMES_EN (1 << 15) | ||
146 | #define DEFAULT_RX_TCP_QUEUE(queue) ((queue) << 16) | ||
147 | #define DEFAULT_RX_UDP_QUEUE(queue) ((queue) << 19) | ||
148 | #define DEFAULT_RX_BPDU_QUEUE(queue) ((queue) << 22) | ||
149 | |||
150 | #define PORT_CONFIG_DEFAULT_VALUE \ | ||
151 | UNICAST_NORMAL_MODE | \ | ||
152 | DEFAULT_RX_QUEUE(0) | \ | ||
153 | DEFAULT_RX_ARP_QUEUE(0) | \ | ||
154 | RECEIVE_BC_IF_NOT_IP_OR_ARP | \ | ||
155 | RECEIVE_BC_IF_IP | \ | ||
156 | RECEIVE_BC_IF_ARP | \ | ||
157 | CAPTURE_TCP_FRAMES_DIS | \ | ||
158 | CAPTURE_UDP_FRAMES_DIS | \ | ||
159 | DEFAULT_RX_TCP_QUEUE(0) | \ | ||
160 | DEFAULT_RX_UDP_QUEUE(0) | \ | ||
161 | DEFAULT_RX_BPDU_QUEUE(0) | ||
162 | |||
163 | /* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/ | ||
164 | #define CLASSIFY_EN (1 << 0) | ||
165 | #define SPAN_BPDU_PACKETS_AS_NORMAL (0 << 1) | ||
166 | #define SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 (1 << 1) | ||
167 | #define PARTITION_DISABLE (0 << 2) | ||
168 | #define PARTITION_ENABLE (1 << 2) | ||
169 | |||
170 | #define PORT_CONFIG_EXTEND_DEFAULT_VALUE \ | ||
171 | SPAN_BPDU_PACKETS_AS_NORMAL | \ | ||
172 | PARTITION_DISABLE | ||
173 | |||
174 | /* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */ | ||
175 | #define RIFB (1 << 0) | ||
176 | #define RX_BURST_SIZE_1_64BIT (0 << 1) | ||
177 | #define RX_BURST_SIZE_2_64BIT (1 << 1) | ||
178 | #define RX_BURST_SIZE_4_64BIT (2 << 1) | ||
179 | #define RX_BURST_SIZE_8_64BIT (3 << 1) | ||
180 | #define RX_BURST_SIZE_16_64BIT (4 << 1) | ||
181 | #define BLM_RX_NO_SWAP (1 << 4) | ||
182 | #define BLM_RX_BYTE_SWAP (0 << 4) | ||
183 | #define BLM_TX_NO_SWAP (1 << 5) | ||
184 | #define BLM_TX_BYTE_SWAP (0 << 5) | ||
185 | #define DESCRIPTORS_BYTE_SWAP (1 << 6) | ||
186 | #define DESCRIPTORS_NO_SWAP (0 << 6) | ||
187 | #define IPG_INT_RX(value) (((value) & 0x3fff) << 8) | ||
188 | #define TX_BURST_SIZE_1_64BIT (0 << 22) | ||
189 | #define TX_BURST_SIZE_2_64BIT (1 << 22) | ||
190 | #define TX_BURST_SIZE_4_64BIT (2 << 22) | ||
191 | #define TX_BURST_SIZE_8_64BIT (3 << 22) | ||
192 | #define TX_BURST_SIZE_16_64BIT (4 << 22) | ||
193 | |||
194 | #if defined(__BIG_ENDIAN) | ||
195 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ | ||
196 | RX_BURST_SIZE_4_64BIT | \ | ||
197 | IPG_INT_RX(0) | \ | ||
198 | TX_BURST_SIZE_4_64BIT | ||
199 | #elif defined(__LITTLE_ENDIAN) | ||
200 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ | ||
201 | RX_BURST_SIZE_4_64BIT | \ | ||
202 | BLM_RX_NO_SWAP | \ | ||
203 | BLM_TX_NO_SWAP | \ | ||
204 | IPG_INT_RX(0) | \ | ||
205 | TX_BURST_SIZE_4_64BIT | ||
206 | #else | ||
207 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined | ||
208 | #endif | ||
209 | |||
210 | /* These macros describe Ethernet Port serial control reg (PSCR) bits */ | ||
211 | #define SERIAL_PORT_DISABLE (0 << 0) | ||
212 | #define SERIAL_PORT_ENABLE (1 << 0) | ||
213 | #define DO_NOT_FORCE_LINK_PASS (0 << 1) | ||
214 | #define FORCE_LINK_PASS (1 << 1) | ||
215 | #define ENABLE_AUTO_NEG_FOR_DUPLX (0 << 2) | ||
216 | #define DISABLE_AUTO_NEG_FOR_DUPLX (1 << 2) | ||
217 | #define ENABLE_AUTO_NEG_FOR_FLOW_CTRL (0 << 3) | ||
218 | #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3) | ||
219 | #define ADV_NO_FLOW_CTRL (0 << 4) | ||
220 | #define ADV_SYMMETRIC_FLOW_CTRL (1 << 4) | ||
221 | #define FORCE_FC_MODE_NO_PAUSE_DIS_TX (0 << 5) | ||
222 | #define FORCE_FC_MODE_TX_PAUSE_DIS (1 << 5) | ||
223 | #define FORCE_BP_MODE_NO_JAM (0 << 7) | ||
224 | #define FORCE_BP_MODE_JAM_TX (1 << 7) | ||
225 | #define FORCE_BP_MODE_JAM_TX_ON_RX_ERR (2 << 7) | ||
226 | #define SERIAL_PORT_CONTROL_RESERVED (1 << 9) | ||
227 | #define FORCE_LINK_FAIL (0 << 10) | ||
228 | #define DO_NOT_FORCE_LINK_FAIL (1 << 10) | ||
229 | #define RETRANSMIT_16_ATTEMPTS (0 << 11) | ||
230 | #define RETRANSMIT_FOREVER (1 << 11) | ||
231 | #define ENABLE_AUTO_NEG_SPEED_GMII (0 << 13) | ||
232 | #define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13) | ||
233 | #define DTE_ADV_0 (0 << 14) | ||
234 | #define DTE_ADV_1 (1 << 14) | ||
235 | #define DISABLE_AUTO_NEG_BYPASS (0 << 15) | ||
236 | #define ENABLE_AUTO_NEG_BYPASS (1 << 15) | ||
237 | #define AUTO_NEG_NO_CHANGE (0 << 16) | ||
238 | #define RESTART_AUTO_NEG (1 << 16) | ||
239 | #define MAX_RX_PACKET_1518BYTE (0 << 17) | ||
240 | #define MAX_RX_PACKET_1522BYTE (1 << 17) | ||
241 | #define MAX_RX_PACKET_1552BYTE (2 << 17) | ||
242 | #define MAX_RX_PACKET_9022BYTE (3 << 17) | ||
243 | #define MAX_RX_PACKET_9192BYTE (4 << 17) | ||
244 | #define MAX_RX_PACKET_9700BYTE (5 << 17) | ||
245 | #define MAX_RX_PACKET_MASK (7 << 17) | ||
246 | #define CLR_EXT_LOOPBACK (0 << 20) | ||
247 | #define SET_EXT_LOOPBACK (1 << 20) | ||
248 | #define SET_HALF_DUPLEX_MODE (0 << 21) | ||
249 | #define SET_FULL_DUPLEX_MODE (1 << 21) | ||
250 | #define DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (0 << 22) | ||
251 | #define ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1 << 22) | ||
252 | #define SET_GMII_SPEED_TO_10_100 (0 << 23) | ||
253 | #define SET_GMII_SPEED_TO_1000 (1 << 23) | ||
254 | #define SET_MII_SPEED_TO_10 (0 << 24) | ||
255 | #define SET_MII_SPEED_TO_100 (1 << 24) | ||
256 | |||
257 | #define PORT_SERIAL_CONTROL_DEFAULT_VALUE \ | ||
258 | DO_NOT_FORCE_LINK_PASS | \ | ||
259 | ENABLE_AUTO_NEG_FOR_DUPLX | \ | ||
260 | DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \ | ||
261 | ADV_SYMMETRIC_FLOW_CTRL | \ | ||
262 | FORCE_FC_MODE_NO_PAUSE_DIS_TX | \ | ||
263 | FORCE_BP_MODE_NO_JAM | \ | ||
264 | (1 << 9) /* reserved */ | \ | ||
265 | DO_NOT_FORCE_LINK_FAIL | \ | ||
266 | RETRANSMIT_16_ATTEMPTS | \ | ||
267 | ENABLE_AUTO_NEG_SPEED_GMII | \ | ||
268 | DTE_ADV_0 | \ | ||
269 | DISABLE_AUTO_NEG_BYPASS | \ | ||
270 | AUTO_NEG_NO_CHANGE | \ | ||
271 | MAX_RX_PACKET_9700BYTE | \ | ||
272 | CLR_EXT_LOOPBACK | \ | ||
273 | SET_FULL_DUPLEX_MODE | \ | ||
274 | ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX | ||
275 | |||
276 | /* These macros describe Ethernet Serial Status reg (PSR) bits */ | ||
277 | #define PORT_STATUS_MODE_10_BIT (1 << 0) | ||
278 | #define PORT_STATUS_LINK_UP (1 << 1) | ||
279 | #define PORT_STATUS_FULL_DUPLEX (1 << 2) | ||
280 | #define PORT_STATUS_FLOW_CONTROL (1 << 3) | ||
281 | #define PORT_STATUS_GMII_1000 (1 << 4) | ||
282 | #define PORT_STATUS_MII_100 (1 << 5) | ||
283 | /* PSR bit 6 is undocumented */ | ||
284 | #define PORT_STATUS_TX_IN_PROGRESS (1 << 7) | ||
285 | #define PORT_STATUS_AUTONEG_BYPASSED (1 << 8) | ||
286 | #define PORT_STATUS_PARTITION (1 << 9) | ||
287 | #define PORT_STATUS_TX_FIFO_EMPTY (1 << 10) | ||
288 | /* PSR bits 11-31 are reserved */ | ||
289 | |||
290 | #define PORT_DEFAULT_TRANSMIT_QUEUE_SIZE 800 | ||
291 | #define PORT_DEFAULT_RECEIVE_QUEUE_SIZE 400 | ||
292 | |||
293 | #define DESC_SIZE 64 | ||
294 | |||
295 | #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ | ||
296 | #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ | ||
297 | |||
298 | #define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2) | ||
299 | #define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9) | ||
300 | #define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR) | ||
301 | #define ETH_INT_CAUSE_EXT 0x00000002 | ||
302 | #define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT) | ||
303 | |||
304 | #define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0) | ||
305 | #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) | ||
306 | #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) | ||
307 | #define ETH_INT_CAUSE_PHY 0x00010000 | ||
308 | #define ETH_INT_CAUSE_STATE 0x00100000 | ||
309 | #define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \ | ||
310 | ETH_INT_CAUSE_STATE) | ||
311 | |||
312 | #define ETH_INT_MASK_ALL 0x00000000 | ||
313 | #define ETH_INT_MASK_ALL_EXT 0x00000000 | ||
314 | |||
315 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
316 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
317 | |||
318 | /* Buffer offset from buffer pointer */ | ||
319 | #define RX_BUF_OFFSET 0x2 | ||
320 | |||
321 | /* Gigabit Ethernet Unit Global Registers */ | ||
322 | |||
323 | /* MIB Counters register definitions */ | ||
324 | #define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0 | ||
325 | #define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4 | ||
326 | #define ETH_MIB_BAD_OCTETS_RECEIVED 0x8 | ||
327 | #define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc | ||
328 | #define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10 | ||
329 | #define ETH_MIB_BAD_FRAMES_RECEIVED 0x14 | ||
330 | #define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18 | ||
331 | #define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c | ||
332 | #define ETH_MIB_FRAMES_64_OCTETS 0x20 | ||
333 | #define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24 | ||
334 | #define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28 | ||
335 | #define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c | ||
336 | #define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30 | ||
337 | #define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 | ||
338 | #define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38 | ||
339 | #define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c | ||
340 | #define ETH_MIB_GOOD_FRAMES_SENT 0x40 | ||
341 | #define ETH_MIB_EXCESSIVE_COLLISION 0x44 | ||
342 | #define ETH_MIB_MULTICAST_FRAMES_SENT 0x48 | ||
343 | #define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c | ||
344 | #define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50 | ||
345 | #define ETH_MIB_FC_SENT 0x54 | ||
346 | #define ETH_MIB_GOOD_FC_RECEIVED 0x58 | ||
347 | #define ETH_MIB_BAD_FC_RECEIVED 0x5c | ||
348 | #define ETH_MIB_UNDERSIZE_RECEIVED 0x60 | ||
349 | #define ETH_MIB_FRAGMENTS_RECEIVED 0x64 | ||
350 | #define ETH_MIB_OVERSIZE_RECEIVED 0x68 | ||
351 | #define ETH_MIB_JABBER_RECEIVED 0x6c | ||
352 | #define ETH_MIB_MAC_RECEIVE_ERROR 0x70 | ||
353 | #define ETH_MIB_BAD_CRC_EVENT 0x74 | ||
354 | #define ETH_MIB_COLLISION 0x78 | ||
355 | #define ETH_MIB_LATE_COLLISION 0x7c | ||
356 | |||
357 | /* Port serial status reg (PSR) */ | ||
358 | #define ETH_INTERFACE_PCM 0x00000001 | ||
359 | #define ETH_LINK_IS_UP 0x00000002 | ||
360 | #define ETH_PORT_AT_FULL_DUPLEX 0x00000004 | ||
361 | #define ETH_RX_FLOW_CTRL_ENABLED 0x00000008 | ||
362 | #define ETH_GMII_SPEED_1000 0x00000010 | ||
363 | #define ETH_MII_SPEED_100 0x00000020 | ||
364 | #define ETH_TX_IN_PROGRESS 0x00000080 | ||
365 | #define ETH_BYPASS_ACTIVE 0x00000100 | ||
366 | #define ETH_PORT_AT_PARTITION_STATE 0x00000200 | ||
367 | #define ETH_PORT_TX_FIFO_EMPTY 0x00000400 | ||
368 | |||
369 | /* SMI reg */ | ||
370 | #define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */ | ||
371 | #define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */ | ||
372 | #define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */ | ||
373 | #define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */ | ||
374 | |||
375 | /* Interrupt Cause Register Bit Definitions */ | ||
376 | |||
377 | /* SDMA command status fields macros */ | ||
378 | |||
379 | /* Tx & Rx descriptors status */ | ||
380 | #define ETH_ERROR_SUMMARY 0x00000001 | ||
381 | |||
382 | /* Tx & Rx descriptors command */ | ||
383 | #define ETH_BUFFER_OWNED_BY_DMA 0x80000000 | ||
384 | |||
385 | /* Tx descriptors status */ | ||
386 | #define ETH_LC_ERROR 0 | ||
387 | #define ETH_UR_ERROR 0x00000002 | ||
388 | #define ETH_RL_ERROR 0x00000004 | ||
389 | #define ETH_LLC_SNAP_FORMAT 0x00000200 | ||
390 | |||
391 | /* Rx descriptors status */ | ||
392 | #define ETH_OVERRUN_ERROR 0x00000002 | ||
393 | #define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004 | ||
394 | #define ETH_RESOURCE_ERROR 0x00000006 | ||
395 | #define ETH_VLAN_TAGGED 0x00080000 | ||
396 | #define ETH_BPDU_FRAME 0x00100000 | ||
397 | #define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000 | ||
398 | #define ETH_OTHER_FRAME_TYPE 0x00400000 | ||
399 | #define ETH_LAYER_2_IS_ETH_V_2 0x00800000 | ||
400 | #define ETH_FRAME_TYPE_IP_V_4 0x01000000 | ||
401 | #define ETH_FRAME_HEADER_OK 0x02000000 | ||
402 | #define ETH_RX_LAST_DESC 0x04000000 | ||
403 | #define ETH_RX_FIRST_DESC 0x08000000 | ||
404 | #define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000 | ||
405 | #define ETH_RX_ENABLE_INTERRUPT 0x20000000 | ||
406 | #define ETH_LAYER_4_CHECKSUM_OK 0x40000000 | ||
407 | |||
408 | /* Rx descriptors byte count */ | ||
409 | #define ETH_FRAME_FRAGMENTED 0x00000004 | ||
410 | |||
411 | /* Tx descriptors command */ | ||
412 | #define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400 | ||
413 | #define ETH_FRAME_SET_TO_VLAN 0x00008000 | ||
414 | #define ETH_UDP_FRAME 0x00010000 | ||
415 | #define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000 | ||
416 | #define ETH_GEN_IP_V_4_CHECKSUM 0x00040000 | ||
417 | #define ETH_ZERO_PADDING 0x00080000 | ||
418 | #define ETH_TX_LAST_DESC 0x00100000 | ||
419 | #define ETH_TX_FIRST_DESC 0x00200000 | ||
420 | #define ETH_GEN_CRC 0x00400000 | ||
421 | #define ETH_TX_ENABLE_INTERRUPT 0x00800000 | ||
422 | #define ETH_AUTO_MODE 0x40000000 | ||
423 | |||
424 | #define ETH_TX_IHL_SHIFT 11 | ||
425 | |||
426 | /* typedefs */ | ||
427 | |||
428 | typedef enum _eth_func_ret_status { | ||
429 | ETH_OK, /* Returned as expected. */ | ||
430 | ETH_ERROR, /* Fundamental error. */ | ||
431 | ETH_RETRY, /* Could not process request. Try later.*/ | ||
432 | ETH_END_OF_JOB, /* Ring has nothing to process. */ | ||
433 | ETH_QUEUE_FULL, /* Ring resource error. */ | ||
434 | ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */ | ||
435 | } ETH_FUNC_RET_STATUS; | ||
436 | |||
437 | typedef enum _eth_target { | ||
438 | ETH_TARGET_DRAM, | ||
439 | ETH_TARGET_DEVICE, | ||
440 | ETH_TARGET_CBS, | ||
441 | ETH_TARGET_PCI0, | ||
442 | ETH_TARGET_PCI1 | ||
443 | } ETH_TARGET; | ||
444 | |||
445 | /* These are for big-endian machines. Little endian needs different | ||
446 | * definitions. | ||
447 | */ | ||
448 | #if defined(__BIG_ENDIAN) | ||
449 | struct eth_rx_desc { | ||
450 | u16 byte_cnt; /* Descriptor buffer byte count */ | ||
451 | u16 buf_size; /* Buffer size */ | ||
452 | u32 cmd_sts; /* Descriptor command status */ | ||
453 | u32 next_desc_ptr; /* Next descriptor pointer */ | ||
454 | u32 buf_ptr; /* Descriptor buffer pointer */ | ||
455 | }; | ||
456 | |||
457 | struct eth_tx_desc { | ||
458 | u16 byte_cnt; /* buffer byte count */ | ||
459 | u16 l4i_chk; /* CPU provided TCP checksum */ | ||
460 | u32 cmd_sts; /* Command/status field */ | ||
461 | u32 next_desc_ptr; /* Pointer to next descriptor */ | ||
462 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | ||
463 | }; | ||
464 | #elif defined(__LITTLE_ENDIAN) | ||
465 | struct eth_rx_desc { | ||
466 | u32 cmd_sts; /* Descriptor command status */ | ||
467 | u16 buf_size; /* Buffer size */ | ||
468 | u16 byte_cnt; /* Descriptor buffer byte count */ | ||
469 | u32 buf_ptr; /* Descriptor buffer pointer */ | ||
470 | u32 next_desc_ptr; /* Next descriptor pointer */ | ||
471 | }; | ||
472 | |||
473 | struct eth_tx_desc { | ||
474 | u32 cmd_sts; /* Command/status field */ | ||
475 | u16 l4i_chk; /* CPU provided TCP checksum */ | ||
476 | u16 byte_cnt; /* buffer byte count */ | ||
477 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | ||
478 | u32 next_desc_ptr; /* Pointer to next descriptor */ | ||
479 | }; | ||
480 | #else | ||
481 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined | ||
482 | #endif | ||
483 | |||
484 | /* Unified struct for Rx and Tx operations. The user is not required to */ | ||
485 | /* be familiar with either Tx or Rx descriptors. */ | ||
486 | struct pkt_info { | ||
487 | unsigned short byte_cnt; /* Descriptor buffer byte count */ | ||
488 | unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */ | ||
489 | unsigned int cmd_sts; /* Descriptor command status */ | ||
490 | dma_addr_t buf_ptr; /* Descriptor buffer pointer */ | ||
491 | struct sk_buff *return_info; /* User resource return information */ | ||
492 | }; | ||
493 | |||
494 | /* Ethernet port specific information */ | ||
495 | struct mv643xx_mib_counters { | ||
496 | u64 good_octets_received; | ||
497 | u32 bad_octets_received; | ||
498 | u32 internal_mac_transmit_err; | ||
499 | u32 good_frames_received; | ||
500 | u32 bad_frames_received; | ||
501 | u32 broadcast_frames_received; | ||
502 | u32 multicast_frames_received; | ||
503 | u32 frames_64_octets; | ||
504 | u32 frames_65_to_127_octets; | ||
505 | u32 frames_128_to_255_octets; | ||
506 | u32 frames_256_to_511_octets; | ||
507 | u32 frames_512_to_1023_octets; | ||
508 | u32 frames_1024_to_max_octets; | ||
509 | u64 good_octets_sent; | ||
510 | u32 good_frames_sent; | ||
511 | u32 excessive_collision; | ||
512 | u32 multicast_frames_sent; | ||
513 | u32 broadcast_frames_sent; | ||
514 | u32 unrec_mac_control_received; | ||
515 | u32 fc_sent; | ||
516 | u32 good_fc_received; | ||
517 | u32 bad_fc_received; | ||
518 | u32 undersize_received; | ||
519 | u32 fragments_received; | ||
520 | u32 oversize_received; | ||
521 | u32 jabber_received; | ||
522 | u32 mac_receive_error; | ||
523 | u32 bad_crc_event; | ||
524 | u32 collision; | ||
525 | u32 late_collision; | ||
526 | }; | ||
527 | |||
528 | struct mv643xx_private { | ||
529 | int port_num; /* User Ethernet port number */ | ||
530 | |||
531 | u32 rx_sram_addr; /* Base address of rx sram area */ | ||
532 | u32 rx_sram_size; /* Size of rx sram area */ | ||
533 | u32 tx_sram_addr; /* Base address of tx sram area */ | ||
534 | u32 tx_sram_size; /* Size of tx sram area */ | ||
535 | |||
536 | int rx_resource_err; /* Rx ring resource error flag */ | ||
537 | |||
538 | /* Tx/Rx ring management index fields. For driver use */ | ||
539 | |||
540 | /* Next available and first returning Rx resource */ | ||
541 | int rx_curr_desc_q, rx_used_desc_q; | ||
542 | |||
543 | /* Next available and first returning Tx resource */ | ||
544 | int tx_curr_desc_q, tx_used_desc_q; | ||
545 | |||
546 | #ifdef MV643XX_TX_FAST_REFILL | ||
547 | u32 tx_clean_threshold; | ||
548 | #endif | ||
549 | |||
550 | struct eth_rx_desc *p_rx_desc_area; | ||
551 | dma_addr_t rx_desc_dma; | ||
552 | int rx_desc_area_size; | ||
553 | struct sk_buff **rx_skb; | ||
554 | |||
555 | struct eth_tx_desc *p_tx_desc_area; | ||
556 | dma_addr_t tx_desc_dma; | ||
557 | int tx_desc_area_size; | ||
558 | struct sk_buff **tx_skb; | ||
559 | |||
560 | struct work_struct tx_timeout_task; | ||
561 | |||
562 | struct net_device *dev; | ||
563 | struct napi_struct napi; | ||
564 | struct net_device_stats stats; | ||
565 | struct mv643xx_mib_counters mib_counters; | ||
566 | spinlock_t lock; | ||
567 | /* Size of Tx Ring per queue */ | ||
568 | int tx_ring_size; | ||
569 | /* Number of tx descriptors in use */ | ||
570 | int tx_desc_count; | ||
571 | /* Size of Rx Ring per queue */ | ||
572 | int rx_ring_size; | ||
573 | /* Number of rx descriptors in use */ | ||
574 | int rx_desc_count; | ||
575 | |||
576 | /* | ||
577 | * Used in case RX Ring is empty, which can be caused when | ||
578 | * system does not have resources (skb's) | ||
579 | */ | ||
580 | struct timer_list timeout; | ||
581 | |||
582 | u32 rx_int_coal; | ||
583 | u32 tx_int_coal; | ||
584 | struct mii_if_info mii; | ||
585 | }; | ||
52 | 586 | ||
53 | /* Static function declarations */ | 587 | /* Static function declarations */ |
588 | static void eth_port_init(struct mv643xx_private *mp); | ||
589 | static void eth_port_reset(unsigned int eth_port_num); | ||
590 | static void eth_port_start(struct net_device *dev); | ||
591 | |||
592 | static void ethernet_phy_reset(unsigned int eth_port_num); | ||
593 | |||
594 | static void eth_port_write_smi_reg(unsigned int eth_port_num, | ||
595 | unsigned int phy_reg, unsigned int value); | ||
596 | |||
597 | static void eth_port_read_smi_reg(unsigned int eth_port_num, | ||
598 | unsigned int phy_reg, unsigned int *value); | ||
599 | |||
600 | static void eth_clear_mib_counters(unsigned int eth_port_num); | ||
601 | |||
602 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | ||
603 | struct pkt_info *p_pkt_info); | ||
604 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | ||
605 | struct pkt_info *p_pkt_info); | ||
606 | |||
54 | static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); | 607 | static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); |
55 | static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); | 608 | static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); |
56 | static void eth_port_set_multicast_list(struct net_device *); | 609 | static void eth_port_set_multicast_list(struct net_device *); |
@@ -78,26 +631,19 @@ static const struct ethtool_ops mv643xx_ethtool_ops; | |||
78 | static char mv643xx_driver_name[] = "mv643xx_eth"; | 631 | static char mv643xx_driver_name[] = "mv643xx_eth"; |
79 | static char mv643xx_driver_version[] = "1.0"; | 632 | static char mv643xx_driver_version[] = "1.0"; |
80 | 633 | ||
81 | static void __iomem *mv643xx_eth_shared_base; | 634 | static void __iomem *mv643xx_eth_base; |
82 | 635 | ||
83 | /* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */ | 636 | /* used to protect SMI_REG, which is shared across ports */ |
84 | static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); | 637 | static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); |
85 | 638 | ||
86 | static inline u32 mv_read(int offset) | 639 | static inline u32 mv_read(int offset) |
87 | { | 640 | { |
88 | void __iomem *reg_base; | 641 | return readl(mv643xx_eth_base + offset); |
89 | |||
90 | reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS; | ||
91 | |||
92 | return readl(reg_base + offset); | ||
93 | } | 642 | } |
94 | 643 | ||
95 | static inline void mv_write(int offset, u32 data) | 644 | static inline void mv_write(int offset, u32 data) |
96 | { | 645 | { |
97 | void __iomem *reg_base; | 646 | writel(data, mv643xx_eth_base + offset); |
98 | |||
99 | reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS; | ||
100 | writel(data, reg_base + offset); | ||
101 | } | 647 | } |
102 | 648 | ||
103 | /* | 649 | /* |
@@ -221,12 +767,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev) | |||
221 | struct mv643xx_private *mp = netdev_priv(dev); | 767 | struct mv643xx_private *mp = netdev_priv(dev); |
222 | u32 config_reg; | 768 | u32 config_reg; |
223 | 769 | ||
224 | config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num)); | 770 | config_reg = mv_read(PORT_CONFIG_REG(mp->port_num)); |
225 | if (dev->flags & IFF_PROMISC) | 771 | if (dev->flags & IFF_PROMISC) |
226 | config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 772 | config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; |
227 | else | 773 | else |
228 | config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 774 | config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; |
229 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg); | 775 | mv_write(PORT_CONFIG_REG(mp->port_num), config_reg); |
230 | 776 | ||
231 | eth_port_set_multicast_list(dev); | 777 | eth_port_set_multicast_list(dev); |
232 | } | 778 | } |
@@ -462,41 +1008,37 @@ static void mv643xx_eth_update_pscr(struct net_device *dev, | |||
462 | u32 o_pscr, n_pscr; | 1008 | u32 o_pscr, n_pscr; |
463 | unsigned int queues; | 1009 | unsigned int queues; |
464 | 1010 | ||
465 | o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | 1011 | o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); |
466 | n_pscr = o_pscr; | 1012 | n_pscr = o_pscr; |
467 | 1013 | ||
468 | /* clear speed, duplex and rx buffer size fields */ | 1014 | /* clear speed, duplex and rx buffer size fields */ |
469 | n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 | | 1015 | n_pscr &= ~(SET_MII_SPEED_TO_100 | |
470 | MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | 1016 | SET_GMII_SPEED_TO_1000 | |
471 | MV643XX_ETH_SET_FULL_DUPLEX_MODE | | 1017 | SET_FULL_DUPLEX_MODE | |
472 | MV643XX_ETH_MAX_RX_PACKET_MASK); | 1018 | MAX_RX_PACKET_MASK); |
473 | 1019 | ||
474 | if (ecmd->duplex == DUPLEX_FULL) | 1020 | if (ecmd->duplex == DUPLEX_FULL) |
475 | n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE; | 1021 | n_pscr |= SET_FULL_DUPLEX_MODE; |
476 | 1022 | ||
477 | if (ecmd->speed == SPEED_1000) | 1023 | if (ecmd->speed == SPEED_1000) |
478 | n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | 1024 | n_pscr |= SET_GMII_SPEED_TO_1000 | |
479 | MV643XX_ETH_MAX_RX_PACKET_9700BYTE; | 1025 | MAX_RX_PACKET_9700BYTE; |
480 | else { | 1026 | else { |
481 | if (ecmd->speed == SPEED_100) | 1027 | if (ecmd->speed == SPEED_100) |
482 | n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100; | 1028 | n_pscr |= SET_MII_SPEED_TO_100; |
483 | n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE; | 1029 | n_pscr |= MAX_RX_PACKET_1522BYTE; |
484 | } | 1030 | } |
485 | 1031 | ||
486 | if (n_pscr != o_pscr) { | 1032 | if (n_pscr != o_pscr) { |
487 | if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0) | 1033 | if ((o_pscr & SERIAL_PORT_ENABLE) == 0) |
488 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1034 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); |
489 | n_pscr); | ||
490 | else { | 1035 | else { |
491 | queues = mv643xx_eth_port_disable_tx(port_num); | 1036 | queues = mv643xx_eth_port_disable_tx(port_num); |
492 | 1037 | ||
493 | o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | 1038 | o_pscr &= ~SERIAL_PORT_ENABLE; |
494 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1039 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr); |
495 | o_pscr); | 1040 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); |
496 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1041 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); |
497 | n_pscr); | ||
498 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
499 | n_pscr); | ||
500 | if (queues) | 1042 | if (queues) |
501 | mv643xx_eth_port_enable_tx(port_num, queues); | 1043 | mv643xx_eth_port_enable_tx(port_num, queues); |
502 | } | 1044 | } |
@@ -522,13 +1064,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
522 | unsigned int port_num = mp->port_num; | 1064 | unsigned int port_num = mp->port_num; |
523 | 1065 | ||
524 | /* Read interrupt cause registers */ | 1066 | /* Read interrupt cause registers */ |
525 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & | 1067 | eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) & |
526 | ETH_INT_UNMASK_ALL; | 1068 | ETH_INT_UNMASK_ALL; |
527 | if (eth_int_cause & ETH_INT_CAUSE_EXT) { | 1069 | if (eth_int_cause & ETH_INT_CAUSE_EXT) { |
528 | eth_int_cause_ext = mv_read( | 1070 | eth_int_cause_ext = mv_read( |
529 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & | 1071 | INTERRUPT_CAUSE_EXTEND_REG(port_num)) & |
530 | ETH_INT_UNMASK_ALL_EXT; | 1072 | ETH_INT_UNMASK_ALL_EXT; |
531 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), | 1073 | mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), |
532 | ~eth_int_cause_ext); | 1074 | ~eth_int_cause_ext); |
533 | } | 1075 | } |
534 | 1076 | ||
@@ -556,10 +1098,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
556 | #ifdef MV643XX_NAPI | 1098 | #ifdef MV643XX_NAPI |
557 | if (eth_int_cause & ETH_INT_CAUSE_RX) { | 1099 | if (eth_int_cause & ETH_INT_CAUSE_RX) { |
558 | /* schedule the NAPI poll routine to maintain port */ | 1100 | /* schedule the NAPI poll routine to maintain port */ |
559 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 1101 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
560 | ETH_INT_MASK_ALL); | 1102 | |
561 | /* wait for previous write to complete */ | 1103 | /* wait for previous write to complete */ |
562 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 1104 | mv_read(INTERRUPT_MASK_REG(port_num)); |
563 | 1105 | ||
564 | netif_rx_schedule(dev, &mp->napi); | 1106 | netif_rx_schedule(dev, &mp->napi); |
565 | } | 1107 | } |
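
Editor's note: the hunk above keeps the driver's usual ordering, mask the port's interrupts, read the mask register back so the posted MMIO write actually reaches the chip, and only then schedule NAPI. A minimal sketch of that read-back flush idiom, written against generic readl/writel accessors rather than the driver's mv_read/mv_write (the offset parameter is a placeholder):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Sketch only: flush a posted register write by reading the same register
     * back. Many interconnects buffer ("post") MMIO writes, so the read-back
     * forces the preceding write to complete before the CPU continues, e.g.
     * before the interrupt mask could otherwise be observed stale.
     */
    static inline void reg_write_and_flush(void __iomem *base, unsigned int off,
                                           u32 val)
    {
            writel(val, base + off);        /* may be posted by the bus */
            (void)readl(base + off);        /* read-back: write is now visible */
    }
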
@@ -611,9 +1153,9 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num, | |||
611 | unsigned int coal = ((t_clk / 1000000) * delay) / 64; | 1153 | unsigned int coal = ((t_clk / 1000000) * delay) / 64; |
612 | 1154 | ||
613 | /* Set RX Coalescing mechanism */ | 1155 | /* Set RX Coalescing mechanism */ |
614 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num), | 1156 | mv_write(SDMA_CONFIG_REG(eth_port_num), |
615 | ((coal & 0x3fff) << 8) | | 1157 | ((coal & 0x3fff) << 8) | |
616 | (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num)) | 1158 | (mv_read(SDMA_CONFIG_REG(eth_port_num)) |
617 | & 0xffc000ff)); | 1159 | & 0xffc000ff)); |
618 | 1160 | ||
619 | return coal; | 1161 | return coal; |
@@ -649,8 +1191,7 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num, | |||
649 | unsigned int coal; | 1191 | unsigned int coal; |
650 | coal = ((t_clk / 1000000) * delay) / 64; | 1192 | coal = ((t_clk / 1000000) * delay) / 64; |
651 | /* Set TX Coalescing mechanism */ | 1193 | /* Set TX Coalescing mechanism */ |
652 | mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), | 1194 | mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4); |
653 | coal << 4); | ||
654 | return coal; | 1195 | return coal; |
655 | } | 1196 | } |
656 | 1197 | ||
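
Editor's note: the collapsed mv_write() above does not change the arithmetic, the coalescing value is still ((t_clk / 1000000) * delay) / 64, i.e. the requested delay expressed in units of 64 t_clk cycles. A standalone check of that formula, using the 133 MHz clock and the MV643XX_TX_COAL value of 100 that mv643xx_eth_open() passes in:

    #include <stdio.h>

    int main(void)
    {
            unsigned int t_clk = 133000000; /* core clock in Hz, as used in mv643xx_eth_open() */
            unsigned int delay = 100;       /* MV643XX_TX_COAL: desired delay in microseconds */

            /* delay [us] * cycles-per-us, scaled into the register's 64-cycle units */
            unsigned int coal = ((t_clk / 1000000) * delay) / 64;

            printf("coal = %u\n", coal);    /* 133 * 100 / 64 = 207 */
            return 0;
    }
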
@@ -786,10 +1327,10 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
786 | int err; | 1327 | int err; |
787 | 1328 | ||
788 | /* Clear any pending ethernet port interrupts */ | 1329 | /* Clear any pending ethernet port interrupts */ |
789 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | 1330 | mv_write(INTERRUPT_CAUSE_REG(port_num), 0); |
790 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1331 | mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
791 | /* wait for previous write to complete */ | 1332 | /* wait for previous write to complete */ |
792 | mv_read (MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)); | 1333 | mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num)); |
793 | 1334 | ||
794 | err = request_irq(dev->irq, mv643xx_eth_int_handler, | 1335 | err = request_irq(dev->irq, mv643xx_eth_int_handler, |
795 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); | 1336 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); |
@@ -896,11 +1437,10 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
896 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); | 1437 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); |
897 | 1438 | ||
898 | /* Unmask phy and link status changes interrupts */ | 1439 | /* Unmask phy and link status changes interrupts */ |
899 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), | 1440 | mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); |
900 | ETH_INT_UNMASK_ALL_EXT); | ||
901 | 1441 | ||
902 | /* Unmask RX buffer and TX end interrupt */ | 1442 | /* Unmask RX buffer and TX end interrupt */ |
903 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | 1443 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
904 | 1444 | ||
905 | return 0; | 1445 | return 0; |
906 | 1446 | ||
@@ -980,9 +1520,9 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
980 | unsigned int port_num = mp->port_num; | 1520 | unsigned int port_num = mp->port_num; |
981 | 1521 | ||
982 | /* Mask all interrupts on ethernet port */ | 1522 | /* Mask all interrupts on ethernet port */ |
983 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); | 1523 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
984 | /* wait for previous write to complete */ | 1524 | /* wait for previous write to complete */ |
985 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 1525 | mv_read(INTERRUPT_MASK_REG(port_num)); |
986 | 1526 | ||
987 | #ifdef MV643XX_NAPI | 1527 | #ifdef MV643XX_NAPI |
988 | napi_disable(&mp->napi); | 1528 | napi_disable(&mp->napi); |
@@ -1021,16 +1561,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget) | |||
1021 | #endif | 1561 | #endif |
1022 | 1562 | ||
1023 | work_done = 0; | 1563 | work_done = 0; |
1024 | if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) | 1564 | if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) |
1025 | != (u32) mp->rx_used_desc_q) | 1565 | != (u32) mp->rx_used_desc_q) |
1026 | work_done = mv643xx_eth_receive_queue(dev, budget); | 1566 | work_done = mv643xx_eth_receive_queue(dev, budget); |
1027 | 1567 | ||
1028 | if (work_done < budget) { | 1568 | if (work_done < budget) { |
1029 | netif_rx_complete(dev, napi); | 1569 | netif_rx_complete(dev, napi); |
1030 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | 1570 | mv_write(INTERRUPT_CAUSE_REG(port_num), 0); |
1031 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1571 | mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
1032 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 1572 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1033 | ETH_INT_UNMASK_ALL); | ||
1034 | } | 1573 | } |
1035 | 1574 | ||
1036 | return work_done; | 1575 | return work_done; |
@@ -1233,13 +1772,13 @@ static void mv643xx_netpoll(struct net_device *netdev) | |||
1233 | struct mv643xx_private *mp = netdev_priv(netdev); | 1772 | struct mv643xx_private *mp = netdev_priv(netdev); |
1234 | int port_num = mp->port_num; | 1773 | int port_num = mp->port_num; |
1235 | 1774 | ||
1236 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); | 1775 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
1237 | /* wait for previous write to complete */ | 1776 | /* wait for previous write to complete */ |
1238 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 1777 | mv_read(INTERRUPT_MASK_REG(port_num)); |
1239 | 1778 | ||
1240 | mv643xx_eth_int_handler(netdev->irq, netdev); | 1779 | mv643xx_eth_int_handler(netdev->irq, netdev); |
1241 | 1780 | ||
1242 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | 1781 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1243 | } | 1782 | } |
1244 | #endif | 1783 | #endif |
1245 | 1784 | ||
@@ -1357,8 +1896,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1357 | 1896 | ||
1358 | /* set default config values */ | 1897 | /* set default config values */ |
1359 | eth_port_uc_addr_get(port_num, dev->dev_addr); | 1898 | eth_port_uc_addr_get(port_num, dev->dev_addr); |
1360 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 1899 | mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; |
1361 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 1900 | mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; |
1362 | 1901 | ||
1363 | if (is_valid_ether_addr(pd->mac_addr)) | 1902 | if (is_valid_ether_addr(pd->mac_addr)) |
1364 | memcpy(dev->dev_addr, pd->mac_addr, 6); | 1903 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
@@ -1470,9 +2009,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
1470 | if (res == NULL) | 2009 | if (res == NULL) |
1471 | return -ENODEV; | 2010 | return -ENODEV; |
1472 | 2011 | ||
1473 | mv643xx_eth_shared_base = ioremap(res->start, | 2012 | mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1); |
1474 | MV643XX_ETH_SHARED_REGS_SIZE); | 2013 | if (mv643xx_eth_base == NULL) |
1475 | if (mv643xx_eth_shared_base == NULL) | ||
1476 | return -ENOMEM; | 2014 | return -ENOMEM; |
1477 | 2015 | ||
1478 | return 0; | 2016 | return 0; |
@@ -1481,8 +2019,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
1481 | 2019 | ||
1482 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) | 2020 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) |
1483 | { | 2021 | { |
1484 | iounmap(mv643xx_eth_shared_base); | 2022 | iounmap(mv643xx_eth_base); |
1485 | mv643xx_eth_shared_base = NULL; | 2023 | mv643xx_eth_base = NULL; |
1486 | 2024 | ||
1487 | return 0; | 2025 | return 0; |
1488 | } | 2026 | } |
@@ -1494,8 +2032,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev) | |||
1494 | unsigned int port_num = mp->port_num; | 2032 | unsigned int port_num = mp->port_num; |
1495 | 2033 | ||
1496 | /* Mask all interrupts on ethernet port */ | 2034 | /* Mask all interrupts on ethernet port */ |
1497 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); | 2035 | mv_write(INTERRUPT_MASK_REG(port_num), 0); |
1498 | mv_read (MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 2036 | mv_read (INTERRUPT_MASK_REG(port_num)); |
1499 | 2037 | ||
1500 | eth_port_reset(port_num); | 2038 | eth_port_reset(port_num); |
1501 | } | 2039 | } |
@@ -1762,49 +2300,49 @@ static void eth_port_start(struct net_device *dev) | |||
1762 | 2300 | ||
1763 | /* Assignment of Tx CTRP of given queue */ | 2301 | /* Assignment of Tx CTRP of given queue */ |
1764 | tx_curr_desc = mp->tx_curr_desc_q; | 2302 | tx_curr_desc = mp->tx_curr_desc_q; |
1765 | mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num), | 2303 | mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num), |
1766 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); | 2304 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); |
1767 | 2305 | ||
1768 | /* Assignment of Rx CRDP of given queue */ | 2306 | /* Assignment of Rx CRDP of given queue */ |
1769 | rx_curr_desc = mp->rx_curr_desc_q; | 2307 | rx_curr_desc = mp->rx_curr_desc_q; |
1770 | mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num), | 2308 | mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num), |
1771 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | 2309 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); |
1772 | 2310 | ||
1773 | /* Add the assigned Ethernet address to the port's address table */ | 2311 | /* Add the assigned Ethernet address to the port's address table */ |
1774 | eth_port_uc_addr_set(port_num, dev->dev_addr); | 2312 | eth_port_uc_addr_set(port_num, dev->dev_addr); |
1775 | 2313 | ||
1776 | /* Assign port configuration and command. */ | 2314 | /* Assign port configuration and command. */ |
1777 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), | 2315 | mv_write(PORT_CONFIG_REG(port_num), |
1778 | MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE); | 2316 | PORT_CONFIG_DEFAULT_VALUE); |
1779 | 2317 | ||
1780 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), | 2318 | mv_write(PORT_CONFIG_EXTEND_REG(port_num), |
1781 | MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE); | 2319 | PORT_CONFIG_EXTEND_DEFAULT_VALUE); |
1782 | 2320 | ||
1783 | pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | 2321 | pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); |
1784 | 2322 | ||
1785 | pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS); | 2323 | pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); |
1786 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); | 2324 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1787 | 2325 | ||
1788 | pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | | 2326 | pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | |
1789 | MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII | | 2327 | DISABLE_AUTO_NEG_SPEED_GMII | |
1790 | MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX | | 2328 | DISABLE_AUTO_NEG_FOR_DUPLX | |
1791 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | | 2329 | DO_NOT_FORCE_LINK_FAIL | |
1792 | MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED; | 2330 | SERIAL_PORT_CONTROL_RESERVED; |
1793 | 2331 | ||
1794 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); | 2332 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1795 | 2333 | ||
1796 | pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE; | 2334 | pscr |= SERIAL_PORT_ENABLE; |
1797 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); | 2335 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1798 | 2336 | ||
1799 | /* Assign port SDMA configuration */ | 2337 | /* Assign port SDMA configuration */ |
1800 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), | 2338 | mv_write(SDMA_CONFIG_REG(port_num), |
1801 | MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE); | 2339 | PORT_SDMA_CONFIG_DEFAULT_VALUE); |
1802 | 2340 | ||
1803 | /* Enable port Rx. */ | 2341 | /* Enable port Rx. */ |
1804 | mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); | 2342 | mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); |
1805 | 2343 | ||
1806 | /* Disable port bandwidth limits by clearing MTU register */ | 2344 | /* Disable port bandwidth limits by clearing MTU register */ |
1807 | mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); | 2345 | mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0); |
1808 | 2346 | ||
1809 | /* save phy settings across reset */ | 2347 | /* save phy settings across reset */ |
1810 | mv643xx_get_settings(dev, ðtool_cmd); | 2348 | mv643xx_get_settings(dev, ðtool_cmd); |
@@ -1825,11 +2363,11 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr) | |||
1825 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | 2363 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | |
1826 | (p_addr[3] << 0); | 2364 | (p_addr[3] << 0); |
1827 | 2365 | ||
1828 | mv_write(MV643XX_ETH_MAC_ADDR_LOW(port_num), mac_l); | 2366 | mv_write(MAC_ADDR_LOW(port_num), mac_l); |
1829 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(port_num), mac_h); | 2367 | mv_write(MAC_ADDR_HIGH(port_num), mac_h); |
1830 | 2368 | ||
1831 | /* Accept frames with this address */ | 2369 | /* Accept frames with this address */ |
1832 | table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num); | 2370 | table = DA_FILTER_UNICAST_TABLE_BASE(port_num); |
1833 | eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); | 2371 | eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); |
1834 | } | 2372 | } |
1835 | 2373 | ||
@@ -1841,8 +2379,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr) | |||
1841 | unsigned int mac_h; | 2379 | unsigned int mac_h; |
1842 | unsigned int mac_l; | 2380 | unsigned int mac_l; |
1843 | 2381 | ||
1844 | mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(port_num)); | 2382 | mac_h = mv_read(MAC_ADDR_HIGH(port_num)); |
1845 | mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(port_num)); | 2383 | mac_l = mv_read(MAC_ADDR_LOW(port_num)); |
1846 | 2384 | ||
1847 | p_addr[0] = (mac_h >> 24) & 0xff; | 2385 | p_addr[0] = (mac_h >> 24) & 0xff; |
1848 | p_addr[1] = (mac_h >> 16) & 0xff; | 2386 | p_addr[1] = (mac_h >> 16) & 0xff; |
@@ -1902,7 +2440,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) | |||
1902 | 2440 | ||
1903 | if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && | 2441 | if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && |
1904 | (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { | 2442 | (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { |
1905 | table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | 2443 | table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE |
1906 | (eth_port_num); | 2444 | (eth_port_num); |
1907 | eth_port_set_filter_table_entry(table, p_addr[5]); | 2445 | eth_port_set_filter_table_entry(table, p_addr[5]); |
1908 | return; | 2446 | return; |
@@ -1976,7 +2514,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) | |||
1976 | for (i = 0; i < 8; i++) | 2514 | for (i = 0; i < 8; i++) |
1977 | crc_result = crc_result | (crc[i] << i); | 2515 | crc_result = crc_result | (crc[i] << i); |
1978 | 2516 | ||
1979 | table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); | 2517 | table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); |
1980 | eth_port_set_filter_table_entry(table, crc_result); | 2518 | eth_port_set_filter_table_entry(table, crc_result); |
1981 | } | 2519 | } |
1982 | 2520 | ||
@@ -2006,7 +2544,7 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2006 | * 3-1 Queue ETH_Q0=0 | 2544 | * 3-1 Queue ETH_Q0=0 |
2007 | * 7-4 Reserved = 0; | 2545 | * 7-4 Reserved = 0; |
2008 | */ | 2546 | */ |
2009 | mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | 2547 | mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); |
2010 | 2548 | ||
2011 | /* Set all entries in DA filter other multicast | 2549 | /* Set all entries in DA filter other multicast |
2012 | * table (Ex_dFOMT) | 2550 | * table (Ex_dFOMT) |
@@ -2016,7 +2554,7 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2016 | * 3-1 Queue ETH_Q0=0 | 2554 | * 3-1 Queue ETH_Q0=0 |
2017 | * 7-4 Reserved = 0; | 2555 | * 7-4 Reserved = 0; |
2018 | */ | 2556 | */ |
2019 | mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | 2557 | mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); |
2020 | } | 2558 | } |
2021 | return; | 2559 | return; |
2022 | } | 2560 | } |
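
Editor's note: the 0x01010101 written above packs the same byte into all four filter-table entries that share one 32-bit word: bit 0 (Accept) set, bits 3-1 (queue number) zero, bits 7-4 reserved as zero, exactly as the comments in the hunk describe. A quick illustration of that packing, assuming the one-byte-per-entry layout:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* one filter-table entry: accept bit set, queue 0, reserved bits clear */
            uint8_t entry = 0x01;

            /* four consecutive entries share one 32-bit register word */
            uint32_t word = entry | ((uint32_t)entry << 8) |
                            ((uint32_t)entry << 16) | ((uint32_t)entry << 24);

            printf("0x%08x\n", word);       /* prints 0x01010101 */
            return 0;
    }
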
@@ -2026,11 +2564,11 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2026 | */ | 2564 | */ |
2027 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | 2565 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { |
2028 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | 2566 | /* Clear DA filter special multicast table (Ex_dFSMT) */ |
2029 | mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | 2567 | mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE |
2030 | (eth_port_num) + table_index, 0); | 2568 | (eth_port_num) + table_index, 0); |
2031 | 2569 | ||
2032 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | 2570 | /* Clear DA filter other multicast table (Ex_dFOMT) */ |
2033 | mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE | 2571 | mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE |
2034 | (eth_port_num) + table_index, 0); | 2572 | (eth_port_num) + table_index, 0); |
2035 | } | 2573 | } |
2036 | 2574 | ||
@@ -2064,15 +2602,15 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num) | |||
2064 | 2602 | ||
2065 | /* Clear DA filter unicast table (Ex_dFUT) */ | 2603 | /* Clear DA filter unicast table (Ex_dFUT) */ |
2066 | for (table_index = 0; table_index <= 0xC; table_index += 4) | 2604 | for (table_index = 0; table_index <= 0xC; table_index += 4) |
2067 | mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | 2605 | mv_write(DA_FILTER_UNICAST_TABLE_BASE |
2068 | (eth_port_num) + table_index, 0); | 2606 | (eth_port_num) + table_index, 0); |
2069 | 2607 | ||
2070 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | 2608 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { |
2071 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | 2609 | /* Clear DA filter special multicast table (Ex_dFSMT) */ |
2072 | mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | 2610 | mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE |
2073 | (eth_port_num) + table_index, 0); | 2611 | (eth_port_num) + table_index, 0); |
2074 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | 2612 | /* Clear DA filter other multicast table (Ex_dFOMT) */ |
2075 | mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE | 2613 | mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE |
2076 | (eth_port_num) + table_index, 0); | 2614 | (eth_port_num) + table_index, 0); |
2077 | } | 2615 | } |
2078 | } | 2616 | } |
@@ -2101,12 +2639,12 @@ static void eth_clear_mib_counters(unsigned int eth_port_num) | |||
2101 | /* Perform dummy reads from MIB counters */ | 2639 | /* Perform dummy reads from MIB counters */ |
2102 | for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; | 2640 | for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; |
2103 | i += 4) | 2641 | i += 4) |
2104 | mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i); | 2642 | mv_read(MIB_COUNTERS_BASE(eth_port_num) + i); |
2105 | } | 2643 | } |
2106 | 2644 | ||
2107 | static inline u32 read_mib(struct mv643xx_private *mp, int offset) | 2645 | static inline u32 read_mib(struct mv643xx_private *mp, int offset) |
2108 | { | 2646 | { |
2109 | return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset); | 2647 | return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset); |
2110 | } | 2648 | } |
2111 | 2649 | ||
2112 | static void eth_update_mib_counters(struct mv643xx_private *mp) | 2650 | static void eth_update_mib_counters(struct mv643xx_private *mp) |
@@ -2191,7 +2729,7 @@ static int ethernet_phy_get(unsigned int eth_port_num) | |||
2191 | { | 2729 | { |
2192 | unsigned int reg_data; | 2730 | unsigned int reg_data; |
2193 | 2731 | ||
2194 | reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); | 2732 | reg_data = mv_read(PHY_ADDR_REG); |
2195 | 2733 | ||
2196 | return ((reg_data >> (5 * eth_port_num)) & 0x1f); | 2734 | return ((reg_data >> (5 * eth_port_num)) & 0x1f); |
2197 | } | 2735 | } |
@@ -2218,10 +2756,10 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) | |||
2218 | u32 reg_data; | 2756 | u32 reg_data; |
2219 | int addr_shift = 5 * eth_port_num; | 2757 | int addr_shift = 5 * eth_port_num; |
2220 | 2758 | ||
2221 | reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); | 2759 | reg_data = mv_read(PHY_ADDR_REG); |
2222 | reg_data &= ~(0x1f << addr_shift); | 2760 | reg_data &= ~(0x1f << addr_shift); |
2223 | reg_data |= (phy_addr & 0x1f) << addr_shift; | 2761 | reg_data |= (phy_addr & 0x1f) << addr_shift; |
2224 | mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data); | 2762 | mv_write(PHY_ADDR_REG, reg_data); |
2225 | } | 2763 | } |
2226 | 2764 | ||
2227 | /* | 2765 | /* |
@@ -2259,13 +2797,13 @@ static void ethernet_phy_reset(unsigned int eth_port_num) | |||
2259 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | 2797 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, |
2260 | unsigned int queues) | 2798 | unsigned int queues) |
2261 | { | 2799 | { |
2262 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); | 2800 | mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); |
2263 | } | 2801 | } |
2264 | 2802 | ||
2265 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | 2803 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, |
2266 | unsigned int queues) | 2804 | unsigned int queues) |
2267 | { | 2805 | { |
2268 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues); | 2806 | mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues); |
2269 | } | 2807 | } |
2270 | 2808 | ||
2271 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) | 2809 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) |
@@ -2273,21 +2811,18 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) | |||
2273 | u32 queues; | 2811 | u32 queues; |
2274 | 2812 | ||
2275 | /* Stop Tx port activity. Check port Tx activity. */ | 2813 | /* Stop Tx port activity. Check port Tx activity. */ |
2276 | queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | 2814 | queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; |
2277 | & 0xFF; | ||
2278 | if (queues) { | 2815 | if (queues) { |
2279 | /* Issue stop command for active queues only */ | 2816 | /* Issue stop command for active queues only */ |
2280 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | 2817 | mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); |
2281 | (queues << 8)); | ||
2282 | 2818 | ||
2283 | /* Wait for all Tx activity to terminate. */ | 2819 | /* Wait for all Tx activity to terminate. */ |
2284 | /* Check port cause register that all Tx queues are stopped */ | 2820 | /* Check port cause register that all Tx queues are stopped */ |
2285 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | 2821 | while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) |
2286 | & 0xFF) | ||
2287 | udelay(PHY_WAIT_MICRO_SECONDS); | 2822 | udelay(PHY_WAIT_MICRO_SECONDS); |
2288 | 2823 | ||
2289 | /* Wait for Tx FIFO to empty */ | 2824 | /* Wait for Tx FIFO to empty */ |
2290 | while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) & | 2825 | while (mv_read(PORT_STATUS_REG(port_num)) & |
2291 | ETH_PORT_TX_FIFO_EMPTY) | 2826 | ETH_PORT_TX_FIFO_EMPTY) |
2292 | udelay(PHY_WAIT_MICRO_SECONDS); | 2827 | udelay(PHY_WAIT_MICRO_SECONDS); |
2293 | } | 2828 | } |
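
Editor's note: the reworked mv643xx_eth_port_disable_tx() keeps the same command-register convention visible above: the low byte reports the enabled queues, writing those bits shifted into the high byte requests their shutdown, and the code then polls the low byte until it clears. A compact sketch of that sequence, with a hypothetical queue_cmd_read()/queue_cmd_write() pair standing in for the TRANSMIT_QUEUE_COMMAND_REG accessors (the extra wait for the Tx FIFO to drain via the port status register is omitted here):

    #include <linux/delay.h>
    #include <linux/types.h>

    #define PHY_WAIT_MICRO_SECONDS  10

    /* Hypothetical accessors for TRANSMIT_QUEUE_COMMAND_REG(port). */
    extern u32 queue_cmd_read(unsigned int port);
    extern void queue_cmd_write(unsigned int port, u32 val);

    /* Returns the bitmask of queues that were running, so the caller can
     * re-enable exactly those queues later.
     */
    static u32 port_disable_tx(unsigned int port)
    {
            u32 queues = queue_cmd_read(port) & 0xff;       /* low byte: enabled queues */

            if (queues) {
                    queue_cmd_write(port, queues << 8);     /* high byte: disable request */

                    /* poll until the hardware reports all Tx queues stopped */
                    while (queue_cmd_read(port) & 0xff)
                            udelay(PHY_WAIT_MICRO_SECONDS);
            }
            return queues;
    }
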
@@ -2300,17 +2835,14 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) | |||
2300 | u32 queues; | 2835 | u32 queues; |
2301 | 2836 | ||
2302 | /* Stop Rx port activity. Check port Rx activity. */ | 2837 | /* Stop Rx port activity. Check port Rx activity. */ |
2303 | queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | 2838 | queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; |
2304 | & 0xFF; | ||
2305 | if (queues) { | 2839 | if (queues) { |
2306 | /* Issue stop command for active queues only */ | 2840 | /* Issue stop command for active queues only */ |
2307 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | 2841 | mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); |
2308 | (queues << 8)); | ||
2309 | 2842 | ||
2310 | /* Wait for all Rx activity to terminate. */ | 2843 | /* Wait for all Rx activity to terminate. */ |
2311 | /* Check port cause register that all Rx queues are stopped */ | 2844 | /* Check port cause register that all Rx queues are stopped */ |
2312 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | 2845 | while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) |
2313 | & 0xFF) | ||
2314 | udelay(PHY_WAIT_MICRO_SECONDS); | 2846 | udelay(PHY_WAIT_MICRO_SECONDS); |
2315 | } | 2847 | } |
2316 | 2848 | ||
@@ -2346,11 +2878,11 @@ static void eth_port_reset(unsigned int port_num) | |||
2346 | eth_clear_mib_counters(port_num); | 2878 | eth_clear_mib_counters(port_num); |
2347 | 2879 | ||
2348 | /* Reset the Enable bit in the Configuration Register */ | 2880 | /* Reset the Enable bit in the Configuration Register */ |
2349 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | 2881 | reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); |
2350 | reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | | 2882 | reg_data &= ~(SERIAL_PORT_ENABLE | |
2351 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | | 2883 | DO_NOT_FORCE_LINK_FAIL | |
2352 | MV643XX_ETH_FORCE_LINK_PASS); | 2884 | FORCE_LINK_PASS); |
2353 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); | 2885 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data); |
2354 | } | 2886 | } |
2355 | 2887 | ||
2356 | 2888 | ||
@@ -2385,7 +2917,7 @@ static void eth_port_read_smi_reg(unsigned int port_num, | |||
2385 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); | 2917 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); |
2386 | 2918 | ||
2387 | /* wait for the SMI register to become available */ | 2919 | /* wait for the SMI register to become available */ |
2388 | for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { | 2920 | for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { |
2389 | if (i == PHY_WAIT_ITERATIONS) { | 2921 | if (i == PHY_WAIT_ITERATIONS) { |
2390 | printk("mv643xx PHY busy timeout, port %d\n", port_num); | 2922 | printk("mv643xx PHY busy timeout, port %d\n", port_num); |
2391 | goto out; | 2923 | goto out; |
@@ -2393,11 +2925,11 @@ static void eth_port_read_smi_reg(unsigned int port_num, | |||
2393 | udelay(PHY_WAIT_MICRO_SECONDS); | 2925 | udelay(PHY_WAIT_MICRO_SECONDS); |
2394 | } | 2926 | } |
2395 | 2927 | ||
2396 | mv_write(MV643XX_ETH_SMI_REG, | 2928 | mv_write(SMI_REG, |
2397 | (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); | 2929 | (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); |
2398 | 2930 | ||
2399 | /* now wait for the data to be valid */ | 2931 | /* now wait for the data to be valid */ |
2400 | for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) { | 2932 | for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) { |
2401 | if (i == PHY_WAIT_ITERATIONS) { | 2933 | if (i == PHY_WAIT_ITERATIONS) { |
2402 | printk("mv643xx PHY read timeout, port %d\n", port_num); | 2934 | printk("mv643xx PHY read timeout, port %d\n", port_num); |
2403 | goto out; | 2935 | goto out; |
@@ -2405,7 +2937,7 @@ static void eth_port_read_smi_reg(unsigned int port_num, | |||
2405 | udelay(PHY_WAIT_MICRO_SECONDS); | 2937 | udelay(PHY_WAIT_MICRO_SECONDS); |
2406 | } | 2938 | } |
2407 | 2939 | ||
2408 | *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff; | 2940 | *value = mv_read(SMI_REG) & 0xffff; |
2409 | out: | 2941 | out: |
2410 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); | 2942 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); |
2411 | } | 2943 | } |
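
Editor's note: the hunks above only shorten the register names; the SMI access protocol itself is unchanged: poll until the interface is idle, write the PHY address, register number and read opcode, poll for the read-valid flag, then take the low 16 bits. A condensed sketch of that sequence, with smi_reg_read()/smi_reg_write() as hypothetical stand-ins for mv_read(SMI_REG)/mv_write(SMI_REG, ...):

    #include <linux/delay.h>

    #define ETH_SMI_BUSY            0x10000000
    #define ETH_SMI_READ_VALID      0x08000000
    #define ETH_SMI_OPCODE_READ     0x04000000
    #define PHY_WAIT_ITERATIONS     1000
    #define PHY_WAIT_MICRO_SECONDS  10

    /* Hypothetical accessors for the single SMI register. */
    extern unsigned int smi_reg_read(void);
    extern void smi_reg_write(unsigned int val);

    /* Returns 0 on success, -1 on timeout; *value receives the 16-bit PHY register. */
    static int smi_phy_read(unsigned int phy_addr, unsigned int phy_reg,
                            unsigned int *value)
    {
            int i;

            /* 1. wait for any previous SMI transaction to finish */
            for (i = 0; smi_reg_read() & ETH_SMI_BUSY; i++) {
                    if (i == PHY_WAIT_ITERATIONS)
                            return -1;
                    udelay(PHY_WAIT_MICRO_SECONDS);
            }

            /* 2. issue the read: PHY address, register number, read opcode */
            smi_reg_write((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);

            /* 3. wait until the result is flagged valid, then fetch it */
            for (i = 0; !(smi_reg_read() & ETH_SMI_READ_VALID); i++) {
                    if (i == PHY_WAIT_ITERATIONS)
                            return -1;
                    udelay(PHY_WAIT_MICRO_SECONDS);
            }
            *value = smi_reg_read() & 0xffff;
            return 0;
    }
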
@@ -2443,7 +2975,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num, | |||
2443 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); | 2975 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); |
2444 | 2976 | ||
2445 | /* wait for the SMI register to become available */ | 2977 | /* wait for the SMI register to become available */ |
2446 | for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { | 2978 | for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { |
2447 | if (i == PHY_WAIT_ITERATIONS) { | 2979 | if (i == PHY_WAIT_ITERATIONS) { |
2448 | printk("mv643xx PHY busy timeout, port %d\n", | 2980 | printk("mv643xx PHY busy timeout, port %d\n", |
2449 | eth_port_num); | 2981 | eth_port_num); |
@@ -2452,7 +2984,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num, | |||
2452 | udelay(PHY_WAIT_MICRO_SECONDS); | 2984 | udelay(PHY_WAIT_MICRO_SECONDS); |
2453 | } | 2985 | } |
2454 | 2986 | ||
2455 | mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) | | 2987 | mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) | |
2456 | ETH_SMI_OPCODE_WRITE | (value & 0xffff)); | 2988 | ETH_SMI_OPCODE_WRITE | (value & 0xffff)); |
2457 | out: | 2989 | out: |
2458 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); | 2990 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); |
@@ -2742,6 +3274,7 @@ static const struct ethtool_ops mv643xx_ethtool_ops = { | |||
2742 | .get_drvinfo = mv643xx_get_drvinfo, | 3274 | .get_drvinfo = mv643xx_get_drvinfo, |
2743 | .get_link = mv643xx_eth_get_link, | 3275 | .get_link = mv643xx_eth_get_link, |
2744 | .set_sg = ethtool_op_set_sg, | 3276 | .set_sg = ethtool_op_set_sg, |
3277 | .get_sset_count = mv643xx_get_sset_count, | ||
2745 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | 3278 | .get_ethtool_stats = mv643xx_get_ethtool_stats, |
2746 | .get_strings = mv643xx_get_strings, | 3279 | .get_strings = mv643xx_get_strings, |
2747 | .nway_reset = mv643xx_eth_nway_restart, | 3280 | .nway_reset = mv643xx_eth_nway_restart, |
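
Editor's note: taken together, these hunks drop the MV643XX_ETH_ prefix from the per-port register macros and remap the whole register resource into a single mv643xx_eth_base rather than a fixed MV643XX_ETH_SHARED_REGS_SIZE window. A rough sketch of how such short macros and accessors can sit on one ioremap()ed base; the offsets and stride below are placeholders for illustration, not the MV643xx datasheet values:

    #include <linux/io.h>
    #include <linux/types.h>

    static void __iomem *mv643xx_eth_base;  /* set up by the shared platform probe */

    /* Placeholder layout: one register window per port at a fixed stride. */
    #define PORT_STRIDE                     0x0400
    #define PORT_CONFIG_REG(p)              (0x0400 + (p) * PORT_STRIDE)
    #define INTERRUPT_MASK_REG(p)           (0x0468 + (p) * PORT_STRIDE)

    static inline u32 mv_read(unsigned int offset)
    {
            return readl(mv643xx_eth_base + offset);
    }

    static inline void mv_write(unsigned int offset, u32 data)
    {
            writel(data, mv643xx_eth_base + offset);
    }

    /* usage, mirroring the driver: mv_write(INTERRUPT_MASK_REG(port), 0); */
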
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h deleted file mode 100644 index be669eb23788..000000000000 --- a/drivers/net/mv643xx_eth.h +++ /dev/null | |||
@@ -1,370 +0,0 @@ | |||
1 | #ifndef __MV643XX_ETH_H__ | ||
2 | #define __MV643XX_ETH_H__ | ||
3 | |||
4 | #include <linux/module.h> | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/workqueue.h> | ||
8 | #include <linux/mii.h> | ||
9 | |||
10 | #include <linux/mv643xx.h> | ||
11 | |||
12 | #include <asm/dma-mapping.h> | ||
13 | |||
14 | /* Checksum offload for Tx works for most packets, but | ||
15 | * fails if the previous packet sent did not use hw csum | ||
16 | */ | ||
17 | #define MV643XX_CHECKSUM_OFFLOAD_TX | ||
18 | #define MV643XX_NAPI | ||
19 | #define MV643XX_TX_FAST_REFILL | ||
20 | #undef MV643XX_COAL | ||
21 | |||
22 | /* | ||
23 | * Number of RX / TX descriptors on RX / TX rings. | ||
24 | * Note that allocating RX descriptors is done by allocating the RX | ||
25 | * ring AND preallocated RX buffers (skb's) for each descriptor. | ||
26 | * The TX descriptor allocation only allocates the TX descriptor ring, | ||
27 | * with no preallocated TX buffers (skb's are allocated by higher layers). | ||
28 | */ | ||
29 | |||
30 | /* Default TX ring size is 1000 descriptors */ | ||
31 | #define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000 | ||
32 | |||
33 | /* Default RX ring size is 400 descriptors */ | ||
34 | #define MV643XX_DEFAULT_RX_QUEUE_SIZE 400 | ||
35 | |||
36 | #define MV643XX_TX_COAL 100 | ||
37 | #ifdef MV643XX_COAL | ||
38 | #define MV643XX_RX_COAL 100 | ||
39 | #endif | ||
40 | |||
41 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
42 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) | ||
43 | #else | ||
44 | #define MAX_DESCS_PER_SKB 1 | ||
45 | #endif | ||
46 | |||
47 | #define ETH_VLAN_HLEN 4 | ||
48 | #define ETH_FCS_LEN 4 | ||
49 | #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ | ||
50 | #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ | ||
51 | ETH_VLAN_HLEN + ETH_FCS_LEN) | ||
52 | #define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment()) | ||
53 | |||
54 | #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ | ||
55 | #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ | ||
56 | |||
57 | #define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2) | ||
58 | #define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9) | ||
59 | #define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR) | ||
60 | #define ETH_INT_CAUSE_EXT 0x00000002 | ||
61 | #define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT) | ||
62 | |||
63 | #define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0) | ||
64 | #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) | ||
65 | #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) | ||
66 | #define ETH_INT_CAUSE_PHY 0x00010000 | ||
67 | #define ETH_INT_CAUSE_STATE 0x00100000 | ||
68 | #define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \ | ||
69 | ETH_INT_CAUSE_STATE) | ||
70 | |||
71 | #define ETH_INT_MASK_ALL 0x00000000 | ||
72 | #define ETH_INT_MASK_ALL_EXT 0x00000000 | ||
73 | |||
74 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
75 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
76 | |||
77 | /* Buffer offset from buffer pointer */ | ||
78 | #define RX_BUF_OFFSET 0x2 | ||
79 | |||
80 | /* Gigabit Ethernet Unit Global Registers */ | ||
81 | |||
82 | /* MIB Counters register definitions */ | ||
83 | #define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0 | ||
84 | #define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4 | ||
85 | #define ETH_MIB_BAD_OCTETS_RECEIVED 0x8 | ||
86 | #define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc | ||
87 | #define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10 | ||
88 | #define ETH_MIB_BAD_FRAMES_RECEIVED 0x14 | ||
89 | #define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18 | ||
90 | #define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c | ||
91 | #define ETH_MIB_FRAMES_64_OCTETS 0x20 | ||
92 | #define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24 | ||
93 | #define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28 | ||
94 | #define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c | ||
95 | #define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30 | ||
96 | #define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 | ||
97 | #define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38 | ||
98 | #define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c | ||
99 | #define ETH_MIB_GOOD_FRAMES_SENT 0x40 | ||
100 | #define ETH_MIB_EXCESSIVE_COLLISION 0x44 | ||
101 | #define ETH_MIB_MULTICAST_FRAMES_SENT 0x48 | ||
102 | #define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c | ||
103 | #define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50 | ||
104 | #define ETH_MIB_FC_SENT 0x54 | ||
105 | #define ETH_MIB_GOOD_FC_RECEIVED 0x58 | ||
106 | #define ETH_MIB_BAD_FC_RECEIVED 0x5c | ||
107 | #define ETH_MIB_UNDERSIZE_RECEIVED 0x60 | ||
108 | #define ETH_MIB_FRAGMENTS_RECEIVED 0x64 | ||
109 | #define ETH_MIB_OVERSIZE_RECEIVED 0x68 | ||
110 | #define ETH_MIB_JABBER_RECEIVED 0x6c | ||
111 | #define ETH_MIB_MAC_RECEIVE_ERROR 0x70 | ||
112 | #define ETH_MIB_BAD_CRC_EVENT 0x74 | ||
113 | #define ETH_MIB_COLLISION 0x78 | ||
114 | #define ETH_MIB_LATE_COLLISION 0x7c | ||
115 | |||
116 | /* Port serial status reg (PSR) */ | ||
117 | #define ETH_INTERFACE_PCM 0x00000001 | ||
118 | #define ETH_LINK_IS_UP 0x00000002 | ||
119 | #define ETH_PORT_AT_FULL_DUPLEX 0x00000004 | ||
120 | #define ETH_RX_FLOW_CTRL_ENABLED 0x00000008 | ||
121 | #define ETH_GMII_SPEED_1000 0x00000010 | ||
122 | #define ETH_MII_SPEED_100 0x00000020 | ||
123 | #define ETH_TX_IN_PROGRESS 0x00000080 | ||
124 | #define ETH_BYPASS_ACTIVE 0x00000100 | ||
125 | #define ETH_PORT_AT_PARTITION_STATE 0x00000200 | ||
126 | #define ETH_PORT_TX_FIFO_EMPTY 0x00000400 | ||
127 | |||
128 | /* SMI reg */ | ||
129 | #define ETH_SMI_BUSY 0x10000000 /* Operation is in progress */ | ||
130 | #define ETH_SMI_READ_VALID 0x08000000 /* Completion of a read */ | ||
131 | #define ETH_SMI_OPCODE_WRITE 0 /* Opcode bit: 0 - Write */ | ||
132 | #define ETH_SMI_OPCODE_READ 0x04000000 /* Opcode bit: 1 - Read */ | ||
133 | |||
134 | /* Interrupt Cause Register Bit Definitions */ | ||
135 | |||
136 | /* SDMA command status fields macros */ | ||
137 | |||
138 | /* Tx & Rx descriptors status */ | ||
139 | #define ETH_ERROR_SUMMARY 0x00000001 | ||
140 | |||
141 | /* Tx & Rx descriptors command */ | ||
142 | #define ETH_BUFFER_OWNED_BY_DMA 0x80000000 | ||
143 | |||
144 | /* Tx descriptors status */ | ||
145 | #define ETH_LC_ERROR 0 | ||
146 | #define ETH_UR_ERROR 0x00000002 | ||
147 | #define ETH_RL_ERROR 0x00000004 | ||
148 | #define ETH_LLC_SNAP_FORMAT 0x00000200 | ||
149 | |||
150 | /* Rx descriptors status */ | ||
151 | #define ETH_OVERRUN_ERROR 0x00000002 | ||
152 | #define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004 | ||
153 | #define ETH_RESOURCE_ERROR 0x00000006 | ||
154 | #define ETH_VLAN_TAGGED 0x00080000 | ||
155 | #define ETH_BPDU_FRAME 0x00100000 | ||
156 | #define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000 | ||
157 | #define ETH_OTHER_FRAME_TYPE 0x00400000 | ||
158 | #define ETH_LAYER_2_IS_ETH_V_2 0x00800000 | ||
159 | #define ETH_FRAME_TYPE_IP_V_4 0x01000000 | ||
160 | #define ETH_FRAME_HEADER_OK 0x02000000 | ||
161 | #define ETH_RX_LAST_DESC 0x04000000 | ||
162 | #define ETH_RX_FIRST_DESC 0x08000000 | ||
163 | #define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000 | ||
164 | #define ETH_RX_ENABLE_INTERRUPT 0x20000000 | ||
165 | #define ETH_LAYER_4_CHECKSUM_OK 0x40000000 | ||
166 | |||
167 | /* Rx descriptors byte count */ | ||
168 | #define ETH_FRAME_FRAGMENTED 0x00000004 | ||
169 | |||
170 | /* Tx descriptors command */ | ||
171 | #define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400 | ||
172 | #define ETH_FRAME_SET_TO_VLAN 0x00008000 | ||
173 | #define ETH_UDP_FRAME 0x00010000 | ||
174 | #define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000 | ||
175 | #define ETH_GEN_IP_V_4_CHECKSUM 0x00040000 | ||
176 | #define ETH_ZERO_PADDING 0x00080000 | ||
177 | #define ETH_TX_LAST_DESC 0x00100000 | ||
178 | #define ETH_TX_FIRST_DESC 0x00200000 | ||
179 | #define ETH_GEN_CRC 0x00400000 | ||
180 | #define ETH_TX_ENABLE_INTERRUPT 0x00800000 | ||
181 | #define ETH_AUTO_MODE 0x40000000 | ||
182 | |||
183 | #define ETH_TX_IHL_SHIFT 11 | ||
184 | |||
185 | /* typedefs */ | ||
186 | |||
187 | typedef enum _eth_func_ret_status { | ||
188 | ETH_OK, /* Returned as expected. */ | ||
189 | ETH_ERROR, /* Fundamental error. */ | ||
190 | ETH_RETRY, /* Could not process request. Try later.*/ | ||
191 | ETH_END_OF_JOB, /* Ring has nothing to process. */ | ||
192 | ETH_QUEUE_FULL, /* Ring resource error. */ | ||
193 | ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */ | ||
194 | } ETH_FUNC_RET_STATUS; | ||
195 | |||
196 | typedef enum _eth_target { | ||
197 | ETH_TARGET_DRAM, | ||
198 | ETH_TARGET_DEVICE, | ||
199 | ETH_TARGET_CBS, | ||
200 | ETH_TARGET_PCI0, | ||
201 | ETH_TARGET_PCI1 | ||
202 | } ETH_TARGET; | ||
203 | |||
204 | /* These are for big-endian machines. Little endian needs different | ||
205 | * definitions. | ||
206 | */ | ||
207 | #if defined(__BIG_ENDIAN) | ||
208 | struct eth_rx_desc { | ||
209 | u16 byte_cnt; /* Descriptor buffer byte count */ | ||
210 | u16 buf_size; /* Buffer size */ | ||
211 | u32 cmd_sts; /* Descriptor command status */ | ||
212 | u32 next_desc_ptr; /* Next descriptor pointer */ | ||
213 | u32 buf_ptr; /* Descriptor buffer pointer */ | ||
214 | }; | ||
215 | |||
216 | struct eth_tx_desc { | ||
217 | u16 byte_cnt; /* buffer byte count */ | ||
218 | u16 l4i_chk; /* CPU provided TCP checksum */ | ||
219 | u32 cmd_sts; /* Command/status field */ | ||
220 | u32 next_desc_ptr; /* Pointer to next descriptor */ | ||
221 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | ||
222 | }; | ||
223 | |||
224 | #elif defined(__LITTLE_ENDIAN) | ||
225 | struct eth_rx_desc { | ||
226 | u32 cmd_sts; /* Descriptor command status */ | ||
227 | u16 buf_size; /* Buffer size */ | ||
228 | u16 byte_cnt; /* Descriptor buffer byte count */ | ||
229 | u32 buf_ptr; /* Descriptor buffer pointer */ | ||
230 | u32 next_desc_ptr; /* Next descriptor pointer */ | ||
231 | }; | ||
232 | |||
233 | struct eth_tx_desc { | ||
234 | u32 cmd_sts; /* Command/status field */ | ||
235 | u16 l4i_chk; /* CPU provided TCP checksum */ | ||
236 | u16 byte_cnt; /* buffer byte count */ | ||
237 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | ||
238 | u32 next_desc_ptr; /* Pointer to next descriptor */ | ||
239 | }; | ||
240 | #else | ||
241 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined | ||
242 | #endif | ||
243 | |||
244 | /* Unified struct for Rx and Tx operations. The user is not required to */ | ||
245 | /* be familiar with either Tx or Rx descriptors. */ | ||
246 | struct pkt_info { | ||
247 | unsigned short byte_cnt; /* Descriptor buffer byte count */ | ||
248 | unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */ | ||
249 | unsigned int cmd_sts; /* Descriptor command status */ | ||
250 | dma_addr_t buf_ptr; /* Descriptor buffer pointer */ | ||
251 | struct sk_buff *return_info; /* User resource return information */ | ||
252 | }; | ||
253 | |||
254 | /* Ethernet port specific information */ | ||
255 | |||
256 | struct mv643xx_mib_counters { | ||
257 | u64 good_octets_received; | ||
258 | u32 bad_octets_received; | ||
259 | u32 internal_mac_transmit_err; | ||
260 | u32 good_frames_received; | ||
261 | u32 bad_frames_received; | ||
262 | u32 broadcast_frames_received; | ||
263 | u32 multicast_frames_received; | ||
264 | u32 frames_64_octets; | ||
265 | u32 frames_65_to_127_octets; | ||
266 | u32 frames_128_to_255_octets; | ||
267 | u32 frames_256_to_511_octets; | ||
268 | u32 frames_512_to_1023_octets; | ||
269 | u32 frames_1024_to_max_octets; | ||
270 | u64 good_octets_sent; | ||
271 | u32 good_frames_sent; | ||
272 | u32 excessive_collision; | ||
273 | u32 multicast_frames_sent; | ||
274 | u32 broadcast_frames_sent; | ||
275 | u32 unrec_mac_control_received; | ||
276 | u32 fc_sent; | ||
277 | u32 good_fc_received; | ||
278 | u32 bad_fc_received; | ||
279 | u32 undersize_received; | ||
280 | u32 fragments_received; | ||
281 | u32 oversize_received; | ||
282 | u32 jabber_received; | ||
283 | u32 mac_receive_error; | ||
284 | u32 bad_crc_event; | ||
285 | u32 collision; | ||
286 | u32 late_collision; | ||
287 | }; | ||
288 | |||
289 | struct mv643xx_private { | ||
290 | int port_num; /* User Ethernet port number */ | ||
291 | |||
292 | u32 rx_sram_addr; /* Base address of rx sram area */ | ||
293 | u32 rx_sram_size; /* Size of rx sram area */ | ||
294 | u32 tx_sram_addr; /* Base address of tx sram area */ | ||
295 | u32 tx_sram_size; /* Size of tx sram area */ | ||
296 | |||
297 | int rx_resource_err; /* Rx ring resource error flag */ | ||
298 | |||
299 | /* Tx/Rx ring management index fields. For driver use */ | ||
300 | |||
301 | /* Next available and first returning Rx resource */ | ||
302 | int rx_curr_desc_q, rx_used_desc_q; | ||
303 | |||
304 | /* Next available and first returning Tx resource */ | ||
305 | int tx_curr_desc_q, tx_used_desc_q; | ||
306 | |||
307 | #ifdef MV643XX_TX_FAST_REFILL | ||
308 | u32 tx_clean_threshold; | ||
309 | #endif | ||
310 | |||
311 | struct eth_rx_desc *p_rx_desc_area; | ||
312 | dma_addr_t rx_desc_dma; | ||
313 | int rx_desc_area_size; | ||
314 | struct sk_buff **rx_skb; | ||
315 | |||
316 | struct eth_tx_desc *p_tx_desc_area; | ||
317 | dma_addr_t tx_desc_dma; | ||
318 | int tx_desc_area_size; | ||
319 | struct sk_buff **tx_skb; | ||
320 | |||
321 | struct work_struct tx_timeout_task; | ||
322 | |||
323 | struct net_device *dev; | ||
324 | struct napi_struct napi; | ||
325 | struct net_device_stats stats; | ||
326 | struct mv643xx_mib_counters mib_counters; | ||
327 | spinlock_t lock; | ||
328 | /* Size of Tx Ring per queue */ | ||
329 | int tx_ring_size; | ||
330 | /* Number of tx descriptors in use */ | ||
331 | int tx_desc_count; | ||
332 | /* Size of Rx Ring per queue */ | ||
333 | int rx_ring_size; | ||
334 | /* Number of rx descriptors in use */ | ||
335 | int rx_desc_count; | ||
336 | |||
337 | /* | ||
338 | * Used in case the RX ring is empty, which can happen when the | ||
339 | * system does not have resources (skb's) | ||
340 | */ | ||
341 | struct timer_list timeout; | ||
342 | |||
343 | u32 rx_int_coal; | ||
344 | u32 tx_int_coal; | ||
345 | struct mii_if_info mii; | ||
346 | }; | ||
347 | |||
348 | /* Port operation control routines */ | ||
349 | static void eth_port_init(struct mv643xx_private *mp); | ||
350 | static void eth_port_reset(unsigned int eth_port_num); | ||
351 | static void eth_port_start(struct net_device *dev); | ||
352 | |||
353 | /* PHY and MIB routines */ | ||
354 | static void ethernet_phy_reset(unsigned int eth_port_num); | ||
355 | |||
356 | static void eth_port_write_smi_reg(unsigned int eth_port_num, | ||
357 | unsigned int phy_reg, unsigned int value); | ||
358 | |||
359 | static void eth_port_read_smi_reg(unsigned int eth_port_num, | ||
360 | unsigned int phy_reg, unsigned int *value); | ||
361 | |||
362 | static void eth_clear_mib_counters(unsigned int eth_port_num); | ||
363 | |||
364 | /* Port data flow control routines */ | ||
365 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | ||
366 | struct pkt_info *p_pkt_info); | ||
367 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | ||
368 | struct pkt_info *p_pkt_info); | ||
369 | |||
370 | #endif /* __MV643XX_ETH_H__ */ | ||
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index 14a768fbce2e..a20005c09e07 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c | |||
@@ -518,7 +518,6 @@ static void dump_packet(void *buf, int len) | |||
518 | /* We have a good packet, get it out of the buffer. */ | 518 | /* We have a good packet, get it out of the buffer. */ |
519 | static void ni5010_rx(struct net_device *dev) | 519 | static void ni5010_rx(struct net_device *dev) |
520 | { | 520 | { |
521 | struct ni5010_local *lp = netdev_priv(dev); | ||
522 | int ioaddr = dev->base_addr; | 521 | int ioaddr = dev->base_addr; |
523 | unsigned char rcv_stat; | 522 | unsigned char rcv_stat; |
524 | struct sk_buff *skb; | 523 | struct sk_buff *skb; |
@@ -577,7 +576,6 @@ static void ni5010_rx(struct net_device *dev) | |||
577 | 576 | ||
578 | PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n", | 577 | PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n", |
579 | dev->name, i_pkt_size)); | 578 | dev->name, i_pkt_size)); |
580 | |||
581 | } | 579 | } |
582 | 580 | ||
583 | static int process_xmt_interrupt(struct net_device *dev) | 581 | static int process_xmt_interrupt(struct net_device *dev) |
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 9f9a421c99b3..ab4d309a858f 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -550,7 +550,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) | |||
550 | 550 | ||
551 | n = mac->rx->next_to_clean; | 551 | n = mac->rx->next_to_clean; |
552 | 552 | ||
553 | prefetch(RX_RING(mac, n)); | 553 | prefetch(&RX_RING(mac, n)); |
554 | 554 | ||
555 | for (count = 0; count < limit; count++) { | 555 | for (count = 0; count < limit; count++) { |
556 | macrx = RX_RING(mac, n); | 556 | macrx = RX_RING(mac, n); |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 419c00cbe6e9..e8960f294a6e 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -44,7 +44,8 @@ | |||
44 | printk( "Assertion failed! %s,%s,%s,line=%d\n", \ | 44 | printk( "Assertion failed! %s,%s,%s,line=%d\n", \ |
45 | #expr,__FILE__,__FUNCTION__,__LINE__); \ | 45 | #expr,__FILE__,__FUNCTION__,__LINE__); \ |
46 | } | 46 | } |
47 | #define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0) | 47 | #define dprintk(fmt, args...) \ |
48 | do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) | ||
48 | #else | 49 | #else |
49 | #define assert(expr) do {} while (0) | 50 | #define assert(expr) do {} while (0) |
50 | #define dprintk(fmt, args...) do {} while (0) | 51 | #define dprintk(fmt, args...) do {} while (0) |
@@ -111,19 +112,15 @@ enum mac_version { | |||
111 | RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd | 112 | RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd |
112 | RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe | 113 | RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe |
113 | RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb | 114 | RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb |
114 | RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 8168Bf | 115 | RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be |
115 | RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 8101Ec | 116 | RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb |
116 | RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 | 117 | RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ? |
117 | RTL_GIGA_MAC_VER_15 = 0x0f // 8101 | 118 | RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ? |
118 | }; | 119 | RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec |
119 | 120 | RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf | |
120 | enum phy_version { | 121 | RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP |
121 | RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */ | 122 | RTL_GIGA_MAC_VER_19 = 0x13, // 8168C |
122 | RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */ | 123 | RTL_GIGA_MAC_VER_20 = 0x14 // 8168C |
123 | RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */ | ||
124 | RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */ | ||
125 | RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */ | ||
126 | RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */ | ||
127 | }; | 124 | }; |
128 | 125 | ||
129 | #define _R(NAME,MAC,MASK) \ | 126 | #define _R(NAME,MAC,MASK) \ |
@@ -144,7 +141,12 @@ static const struct { | |||
144 | _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E | 141 | _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E |
145 | _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 | 142 | _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 |
146 | _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139 | 143 | _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139 |
147 | _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139 | 144 | _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139 |
145 | _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E | ||
146 | _R("RTL8101e", RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E | ||
147 | _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E | ||
148 | _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E | ||
149 | _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_20, 0xff7e1880) // PCI-E | ||
148 | }; | 150 | }; |
149 | #undef _R | 151 | #undef _R |
150 | 152 | ||
@@ -165,7 +167,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = { | |||
165 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, | 167 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, |
166 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, | 168 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, |
167 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, | 169 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, |
168 | { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 }, | 170 | { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, |
169 | { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, | 171 | { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, |
170 | { PCI_VENDOR_ID_LINKSYS, 0x1032, | 172 | { PCI_VENDOR_ID_LINKSYS, 0x1032, |
171 | PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, | 173 | PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, |
@@ -277,6 +279,7 @@ enum rtl_register_content { | |||
277 | TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ | 279 | TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ |
278 | 280 | ||
279 | /* Config1 register p.24 */ | 281 | /* Config1 register p.24 */ |
282 | MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ | ||
280 | PMEnable = (1 << 0), /* Power Management Enable */ | 283 | PMEnable = (1 << 0), /* Power Management Enable */ |
281 | 284 | ||
282 | /* Config2 register p. 25 */ | 285 | /* Config2 register p. 25 */ |
@@ -380,17 +383,20 @@ struct ring_info { | |||
380 | u8 __pad[sizeof(void *) - sizeof(u32)]; | 383 | u8 __pad[sizeof(void *) - sizeof(u32)]; |
381 | }; | 384 | }; |
382 | 385 | ||
386 | enum features { | ||
387 | RTL_FEATURE_WOL = (1 << 0), | ||
388 | RTL_FEATURE_MSI = (1 << 1), | ||
389 | }; | ||
390 | |||
383 | struct rtl8169_private { | 391 | struct rtl8169_private { |
384 | void __iomem *mmio_addr; /* memory map physical address */ | 392 | void __iomem *mmio_addr; /* memory map physical address */ |
385 | struct pci_dev *pci_dev; /* Index of PCI device */ | 393 | struct pci_dev *pci_dev; /* Index of PCI device */ |
386 | struct net_device *dev; | 394 | struct net_device *dev; |
387 | struct napi_struct napi; | 395 | struct napi_struct napi; |
388 | struct net_device_stats stats; /* statistics of net device */ | ||
389 | spinlock_t lock; /* spin lock flag */ | 396 | spinlock_t lock; /* spin lock flag */ |
390 | u32 msg_enable; | 397 | u32 msg_enable; |
391 | int chipset; | 398 | int chipset; |
392 | int mac_version; | 399 | int mac_version; |
393 | int phy_version; | ||
394 | u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ | 400 | u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ |
395 | u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ | 401 | u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ |
396 | u32 dirty_rx; | 402 | u32 dirty_rx; |
@@ -420,7 +426,7 @@ struct rtl8169_private { | |||
420 | unsigned int (*phy_reset_pending)(void __iomem *); | 426 | unsigned int (*phy_reset_pending)(void __iomem *); |
421 | unsigned int (*link_ok)(void __iomem *); | 427 | unsigned int (*link_ok)(void __iomem *); |
422 | struct delayed_work task; | 428 | struct delayed_work task; |
423 | unsigned wol_enabled : 1; | 429 | unsigned features; |
424 | }; | 430 | }; |
425 | 431 | ||
426 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); | 432 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); |
@@ -626,7 +632,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
626 | 632 | ||
627 | RTL_W8(Cfg9346, Cfg9346_Lock); | 633 | RTL_W8(Cfg9346, Cfg9346_Lock); |
628 | 634 | ||
629 | tp->wol_enabled = (wol->wolopts) ? 1 : 0; | 635 | if (wol->wolopts) |
636 | tp->features |= RTL_FEATURE_WOL; | ||
637 | else | ||
638 | tp->features &= ~RTL_FEATURE_WOL; | ||
630 | 639 | ||
631 | spin_unlock_irq(&tp->lock); | 640 | spin_unlock_irq(&tp->lock); |
632 | 641 | ||
@@ -707,7 +716,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, | |||
707 | 716 | ||
708 | /* This tweak comes straight from Realtek's driver. */ | 717 | /* This tweak comes straight from Realtek's driver. */ |
709 | if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) && | 718 | if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) && |
710 | (tp->mac_version == RTL_GIGA_MAC_VER_13)) { | 719 | ((tp->mac_version == RTL_GIGA_MAC_VER_13) || |
720 | (tp->mac_version == RTL_GIGA_MAC_VER_16))) { | ||
711 | auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA; | 721 | auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA; |
712 | } | 722 | } |
713 | } | 723 | } |
@@ -715,7 +725,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, | |||
715 | /* The 8100e/8101e do Fast Ethernet only. */ | 725 | /* The 8100e/8101e do Fast Ethernet only. */ |
716 | if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || | 726 | if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || |
717 | (tp->mac_version == RTL_GIGA_MAC_VER_14) || | 727 | (tp->mac_version == RTL_GIGA_MAC_VER_14) || |
718 | (tp->mac_version == RTL_GIGA_MAC_VER_15)) { | 728 | (tp->mac_version == RTL_GIGA_MAC_VER_15) || |
729 | (tp->mac_version == RTL_GIGA_MAC_VER_16)) { | ||
719 | if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) && | 730 | if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) && |
720 | netif_msg_link(tp)) { | 731 | netif_msg_link(tp)) { |
721 | printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n", | 732 | printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n", |
@@ -726,7 +737,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, | |||
726 | 737 | ||
727 | auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | 738 | auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
728 | 739 | ||
729 | if (tp->mac_version == RTL_GIGA_MAC_VER_12) { | 740 | if ((tp->mac_version == RTL_GIGA_MAC_VER_12) || |
741 | (tp->mac_version == RTL_GIGA_MAC_VER_17)) { | ||
730 | /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */ | 742 | /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */ |
731 | mdio_write(ioaddr, 0x1f, 0x0000); | 743 | mdio_write(ioaddr, 0x1f, 0x0000); |
732 | mdio_write(ioaddr, 0x0e, 0x0000); | 744 | mdio_write(ioaddr, 0x0e, 0x0000); |
@@ -1104,26 +1116,51 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, | |||
1104 | */ | 1116 | */ |
1105 | const struct { | 1117 | const struct { |
1106 | u32 mask; | 1118 | u32 mask; |
1119 | u32 val; | ||
1107 | int mac_version; | 1120 | int mac_version; |
1108 | } mac_info[] = { | 1121 | } mac_info[] = { |
1109 | { 0x38800000, RTL_GIGA_MAC_VER_15 }, | 1122 | /* 8168C family. */ |
1110 | { 0x38000000, RTL_GIGA_MAC_VER_12 }, | 1123 | { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, |
1111 | { 0x34000000, RTL_GIGA_MAC_VER_13 }, | 1124 | { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, |
1112 | { 0x30800000, RTL_GIGA_MAC_VER_14 }, | 1125 | { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, |
1113 | { 0x30000000, RTL_GIGA_MAC_VER_11 }, | 1126 | { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_20 }, |
1114 | { 0x98000000, RTL_GIGA_MAC_VER_06 }, | 1127 | |
1115 | { 0x18000000, RTL_GIGA_MAC_VER_05 }, | 1128 | /* 8168B family. */ |
1116 | { 0x10000000, RTL_GIGA_MAC_VER_04 }, | 1129 | { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, |
1117 | { 0x04000000, RTL_GIGA_MAC_VER_03 }, | 1130 | { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, |
1118 | { 0x00800000, RTL_GIGA_MAC_VER_02 }, | 1131 | { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, |
1119 | { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */ | 1132 | { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, |
1133 | |||
1134 | /* 8101 family. */ | ||
1135 | { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, | ||
1136 | { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, | ||
1137 | { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, | ||
1138 | /* FIXME: where did these entries come from ? -- FR */ | ||
1139 | { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, | ||
1140 | { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, | ||
1141 | |||
1142 | /* 8110 family. */ | ||
1143 | { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, | ||
1144 | { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, | ||
1145 | { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, | ||
1146 | { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, | ||
1147 | { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, | ||
1148 | { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, | ||
1149 | |||
1150 | { 0x00000000, 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */ | ||
1120 | }, *p = mac_info; | 1151 | }, *p = mac_info; |
1121 | u32 reg; | 1152 | u32 reg; |
1122 | 1153 | ||
1123 | reg = RTL_R32(TxConfig) & 0xfc800000; | 1154 | reg = RTL_R32(TxConfig); |
1124 | while ((reg & p->mask) != p->mask) | 1155 | while ((reg & p->mask) != p->val) |
1125 | p++; | 1156 | p++; |
1126 | tp->mac_version = p->mac_version; | 1157 | tp->mac_version = p->mac_version; |
1158 | |||
1159 | if (p->mask == 0x00000000) { | ||
1160 | struct pci_dev *pdev = tp->pci_dev; | ||
1161 | |||
1162 | dev_info(&pdev->dev, "unknown MAC (%08x)\n", reg); | ||
1163 | } | ||
1127 | } | 1164 | } |
1128 | 1165 | ||
1129 | static void rtl8169_print_mac_version(struct rtl8169_private *tp) | 1166 | static void rtl8169_print_mac_version(struct rtl8169_private *tp) |
@@ -1131,54 +1168,21 @@ static void rtl8169_print_mac_version(struct rtl8169_private *tp) | |||
1131 | dprintk("mac_version = 0x%02x\n", tp->mac_version); | 1168 | dprintk("mac_version = 0x%02x\n", tp->mac_version); |
1132 | } | 1169 | } |
1133 | 1170 | ||
1134 | static void rtl8169_get_phy_version(struct rtl8169_private *tp, | 1171 | struct phy_reg { |
1135 | void __iomem *ioaddr) | ||
1136 | { | ||
1137 | const struct { | ||
1138 | u16 mask; | ||
1139 | u16 set; | ||
1140 | int phy_version; | ||
1141 | } phy_info[] = { | ||
1142 | { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G }, | ||
1143 | { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F }, | ||
1144 | { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E }, | ||
1145 | { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */ | ||
1146 | }, *p = phy_info; | ||
1147 | u16 reg; | 1172 | u16 reg; |
1173 | u16 val; | ||
1174 | }; | ||
1148 | 1175 | ||
1149 | reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff; | 1176 | static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len) |
1150 | while ((reg & p->mask) != p->set) | ||
1151 | p++; | ||
1152 | tp->phy_version = p->phy_version; | ||
1153 | } | ||
1154 | |||
1155 | static void rtl8169_print_phy_version(struct rtl8169_private *tp) | ||
1156 | { | 1177 | { |
1157 | struct { | 1178 | while (len-- > 0) { |
1158 | int version; | 1179 | mdio_write(ioaddr, regs->reg, regs->val); |
1159 | char *msg; | 1180 | regs++; |
1160 | u32 reg; | ||
1161 | } phy_print[] = { | ||
1162 | { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 }, | ||
1163 | { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 }, | ||
1164 | { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 }, | ||
1165 | { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 }, | ||
1166 | { 0, NULL, 0x0000 } | ||
1167 | }, *p; | ||
1168 | |||
1169 | for (p = phy_print; p->msg; p++) { | ||
1170 | if (tp->phy_version == p->version) { | ||
1171 | dprintk("phy_version == %s (%04x)\n", p->msg, p->reg); | ||
1172 | return; | ||
1173 | } | ||
1174 | } | 1181 | } |
1175 | dprintk("phy_version == Unknown\n"); | ||
1176 | } | 1182 | } |
1177 | 1183 | ||
1178 | static void rtl8169_hw_phy_config(struct net_device *dev) | 1184 | static void rtl8169s_hw_phy_config(void __iomem *ioaddr) |
1179 | { | 1185 | { |
1180 | struct rtl8169_private *tp = netdev_priv(dev); | ||
1181 | void __iomem *ioaddr = tp->mmio_addr; | ||
1182 | struct { | 1186 | struct { |
1183 | u16 regs[5]; /* Beware of bit-sign propagation */ | 1187 | u16 regs[5]; /* Beware of bit-sign propagation */ |
1184 | } phy_magic[5] = { { | 1188 | } phy_magic[5] = { { |
@@ -1211,33 +1215,9 @@ static void rtl8169_hw_phy_config(struct net_device *dev) | |||
1211 | }, *p = phy_magic; | 1215 | }, *p = phy_magic; |
1212 | unsigned int i; | 1216 | unsigned int i; |
1213 | 1217 | ||
1214 | rtl8169_print_mac_version(tp); | 1218 | mdio_write(ioaddr, 0x1f, 0x0001); //w 31 2 0 1 |
1215 | rtl8169_print_phy_version(tp); | 1219 | mdio_write(ioaddr, 0x15, 0x1000); //w 21 15 0 1000 |
1216 | 1220 | mdio_write(ioaddr, 0x18, 0x65c7); //w 24 15 0 65c7 | |
1217 | if (tp->mac_version <= RTL_GIGA_MAC_VER_01) | ||
1218 | return; | ||
1219 | if (tp->phy_version >= RTL_GIGA_PHY_VER_H) | ||
1220 | return; | ||
1221 | |||
1222 | dprintk("MAC version != 0 && PHY version == 0 or 1\n"); | ||
1223 | dprintk("Do final_reg2.cfg\n"); | ||
1224 | |||
1225 | /* Shazam ! */ | ||
1226 | |||
1227 | if (tp->mac_version == RTL_GIGA_MAC_VER_04) { | ||
1228 | mdio_write(ioaddr, 31, 0x0002); | ||
1229 | mdio_write(ioaddr, 1, 0x90d0); | ||
1230 | mdio_write(ioaddr, 31, 0x0000); | ||
1231 | return; | ||
1232 | } | ||
1233 | |||
1234 | if ((tp->mac_version != RTL_GIGA_MAC_VER_02) && | ||
1235 | (tp->mac_version != RTL_GIGA_MAC_VER_03)) | ||
1236 | return; | ||
1237 | |||
1238 | mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 | ||
1239 | mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 | ||
1240 | mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 | ||
1241 | rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 | 1221 | rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 |
1242 | 1222 | ||
1243 | for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) { | 1223 | for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) { |
@@ -1250,7 +1230,115 @@ static void rtl8169_hw_phy_config(struct net_device *dev) | |||
1250 | rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1 | 1230 | rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1 |
1251 | rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 | 1231 | rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 |
1252 | } | 1232 | } |
1253 | mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0 | 1233 | mdio_write(ioaddr, 0x1f, 0x0000); //w 31 2 0 0 |
1234 | } | ||
1235 | |||
1236 | static void rtl8169sb_hw_phy_config(void __iomem *ioaddr) | ||
1237 | { | ||
1238 | struct phy_reg phy_reg_init[] = { | ||
1239 | { 0x1f, 0x0002 }, | ||
1240 | { 0x01, 0x90d0 }, | ||
1241 | { 0x1f, 0x0000 } | ||
1242 | }; | ||
1243 | |||
1244 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
1245 | } | ||
1246 | static void rtl8168b_hw_phy_config(void __iomem *ioaddr) | ||
1247 | { | ||
1248 | struct phy_reg phy_reg_init[] = { | ||
1249 | { 0x1f, 0x0000 }, | ||
1250 | { 0x10, 0xf41b }, | ||
1251 | { 0x1f, 0x0000 } | ||
1252 | }; | ||
1253 | |||
1254 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
1255 | } | ||
1256 | |||
1257 | static void rtl8168cp_hw_phy_config(void __iomem *ioaddr) | ||
1258 | { | ||
1259 | struct phy_reg phy_reg_init[] = { | ||
1260 | { 0x1f, 0x0000 }, | ||
1261 | { 0x1d, 0x0f00 }, | ||
1262 | { 0x1f, 0x0002 }, | ||
1263 | { 0x0c, 0x1ec8 }, | ||
1264 | { 0x1f, 0x0000 } | ||
1265 | }; | ||
1266 | |||
1267 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
1268 | } | ||
1269 | |||
1270 | static void rtl8168c_hw_phy_config(void __iomem *ioaddr) | ||
1271 | { | ||
1272 | struct phy_reg phy_reg_init[] = { | ||
1273 | { 0x1f, 0x0001 }, | ||
1274 | { 0x12, 0x2300 }, | ||
1275 | { 0x1f, 0x0002 }, | ||
1276 | { 0x00, 0x88d4 }, | ||
1277 | { 0x01, 0x82b1 }, | ||
1278 | { 0x03, 0x7002 }, | ||
1279 | { 0x08, 0x9e30 }, | ||
1280 | { 0x09, 0x01f0 }, | ||
1281 | { 0x0a, 0x5500 }, | ||
1282 | { 0x0c, 0x00c8 }, | ||
1283 | { 0x1f, 0x0003 }, | ||
1284 | { 0x12, 0xc096 }, | ||
1285 | { 0x16, 0x000a }, | ||
1286 | { 0x1f, 0x0000 } | ||
1287 | }; | ||
1288 | |||
1289 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
1290 | } | ||
1291 | |||
1292 | static void rtl8168cx_hw_phy_config(void __iomem *ioaddr) | ||
1293 | { | ||
1294 | struct phy_reg phy_reg_init[] = { | ||
1295 | { 0x1f, 0x0000 }, | ||
1296 | { 0x12, 0x2300 }, | ||
1297 | { 0x1f, 0x0003 }, | ||
1298 | { 0x16, 0x0f0a }, | ||
1299 | { 0x1f, 0x0000 }, | ||
1300 | { 0x1f, 0x0002 }, | ||
1301 | { 0x0c, 0x7eb8 }, | ||
1302 | { 0x1f, 0x0000 } | ||
1303 | }; | ||
1304 | |||
1305 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
1306 | } | ||
1307 | |||
1308 | static void rtl_hw_phy_config(struct net_device *dev) | ||
1309 | { | ||
1310 | struct rtl8169_private *tp = netdev_priv(dev); | ||
1311 | void __iomem *ioaddr = tp->mmio_addr; | ||
1312 | |||
1313 | rtl8169_print_mac_version(tp); | ||
1314 | |||
1315 | switch (tp->mac_version) { | ||
1316 | case RTL_GIGA_MAC_VER_01: | ||
1317 | break; | ||
1318 | case RTL_GIGA_MAC_VER_02: | ||
1319 | case RTL_GIGA_MAC_VER_03: | ||
1320 | rtl8169s_hw_phy_config(ioaddr); | ||
1321 | break; | ||
1322 | case RTL_GIGA_MAC_VER_04: | ||
1323 | rtl8169sb_hw_phy_config(ioaddr); | ||
1324 | break; | ||
1325 | case RTL_GIGA_MAC_VER_11: | ||
1326 | case RTL_GIGA_MAC_VER_12: | ||
1327 | case RTL_GIGA_MAC_VER_17: | ||
1328 | rtl8168b_hw_phy_config(ioaddr); | ||
1329 | break; | ||
1330 | case RTL_GIGA_MAC_VER_18: | ||
1331 | rtl8168cp_hw_phy_config(ioaddr); | ||
1332 | break; | ||
1333 | case RTL_GIGA_MAC_VER_19: | ||
1334 | rtl8168c_hw_phy_config(ioaddr); | ||
1335 | break; | ||
1336 | case RTL_GIGA_MAC_VER_20: | ||
1337 | rtl8168cx_hw_phy_config(ioaddr); | ||
1338 | break; | ||
1339 | default: | ||
1340 | break; | ||
1341 | } | ||
1254 | } | 1342 | } |
1255 | 1343 | ||
1256 | static void rtl8169_phy_timer(unsigned long __opaque) | 1344 | static void rtl8169_phy_timer(unsigned long __opaque) |
@@ -1262,7 +1350,6 @@ static void rtl8169_phy_timer(unsigned long __opaque) | |||
1262 | unsigned long timeout = RTL8169_PHY_TIMEOUT; | 1350 | unsigned long timeout = RTL8169_PHY_TIMEOUT; |
1263 | 1351 | ||
1264 | assert(tp->mac_version > RTL_GIGA_MAC_VER_01); | 1352 | assert(tp->mac_version > RTL_GIGA_MAC_VER_01); |
1265 | assert(tp->phy_version < RTL_GIGA_PHY_VER_H); | ||
1266 | 1353 | ||
1267 | if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) | 1354 | if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) |
1268 | return; | 1355 | return; |
@@ -1297,8 +1384,7 @@ static inline void rtl8169_delete_timer(struct net_device *dev) | |||
1297 | struct rtl8169_private *tp = netdev_priv(dev); | 1384 | struct rtl8169_private *tp = netdev_priv(dev); |
1298 | struct timer_list *timer = &tp->timer; | 1385 | struct timer_list *timer = &tp->timer; |
1299 | 1386 | ||
1300 | if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || | 1387 | if (tp->mac_version <= RTL_GIGA_MAC_VER_01) |
1301 | (tp->phy_version >= RTL_GIGA_PHY_VER_H)) | ||
1302 | return; | 1388 | return; |
1303 | 1389 | ||
1304 | del_timer_sync(timer); | 1390 | del_timer_sync(timer); |
@@ -1309,8 +1395,7 @@ static inline void rtl8169_request_timer(struct net_device *dev) | |||
1309 | struct rtl8169_private *tp = netdev_priv(dev); | 1395 | struct rtl8169_private *tp = netdev_priv(dev); |
1310 | struct timer_list *timer = &tp->timer; | 1396 | struct timer_list *timer = &tp->timer; |
1311 | 1397 | ||
1312 | if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || | 1398 | if (tp->mac_version <= RTL_GIGA_MAC_VER_01) |
1313 | (tp->phy_version >= RTL_GIGA_PHY_VER_H)) | ||
1314 | return; | 1399 | return; |
1315 | 1400 | ||
1316 | mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT); | 1401 | mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT); |
@@ -1362,7 +1447,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
1362 | { | 1447 | { |
1363 | void __iomem *ioaddr = tp->mmio_addr; | 1448 | void __iomem *ioaddr = tp->mmio_addr; |
1364 | 1449 | ||
1365 | rtl8169_hw_phy_config(dev); | 1450 | rtl_hw_phy_config(dev); |
1366 | 1451 | ||
1367 | dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); | 1452 | dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); |
1368 | RTL_W8(0x82, 0x01); | 1453 | RTL_W8(0x82, 0x01); |
@@ -1457,6 +1542,7 @@ static const struct rtl_cfg_info { | |||
1457 | unsigned int align; | 1542 | unsigned int align; |
1458 | u16 intr_event; | 1543 | u16 intr_event; |
1459 | u16 napi_event; | 1544 | u16 napi_event; |
1545 | unsigned msi; | ||
1460 | } rtl_cfg_infos [] = { | 1546 | } rtl_cfg_infos [] = { |
1461 | [RTL_CFG_0] = { | 1547 | [RTL_CFG_0] = { |
1462 | .hw_start = rtl_hw_start_8169, | 1548 | .hw_start = rtl_hw_start_8169, |
@@ -1464,7 +1550,8 @@ static const struct rtl_cfg_info { | |||
1464 | .align = 0, | 1550 | .align = 0, |
1465 | .intr_event = SYSErr | LinkChg | RxOverflow | | 1551 | .intr_event = SYSErr | LinkChg | RxOverflow | |
1466 | RxFIFOOver | TxErr | TxOK | RxOK | RxErr, | 1552 | RxFIFOOver | TxErr | TxOK | RxOK | RxErr, |
1467 | .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow | 1553 | .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, |
1554 | .msi = 0 | ||
1468 | }, | 1555 | }, |
1469 | [RTL_CFG_1] = { | 1556 | [RTL_CFG_1] = { |
1470 | .hw_start = rtl_hw_start_8168, | 1557 | .hw_start = rtl_hw_start_8168, |
@@ -1472,7 +1559,8 @@ static const struct rtl_cfg_info { | |||
1472 | .align = 8, | 1559 | .align = 8, |
1473 | .intr_event = SYSErr | LinkChg | RxOverflow | | 1560 | .intr_event = SYSErr | LinkChg | RxOverflow | |
1474 | TxErr | TxOK | RxOK | RxErr, | 1561 | TxErr | TxOK | RxOK | RxErr, |
1475 | .napi_event = TxErr | TxOK | RxOK | RxOverflow | 1562 | .napi_event = TxErr | TxOK | RxOK | RxOverflow, |
1563 | .msi = RTL_FEATURE_MSI | ||
1476 | }, | 1564 | }, |
1477 | [RTL_CFG_2] = { | 1565 | [RTL_CFG_2] = { |
1478 | .hw_start = rtl_hw_start_8101, | 1566 | .hw_start = rtl_hw_start_8101, |
@@ -1480,10 +1568,39 @@ static const struct rtl_cfg_info { | |||
1480 | .align = 8, | 1568 | .align = 8, |
1481 | .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | | 1569 | .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | |
1482 | RxFIFOOver | TxErr | TxOK | RxOK | RxErr, | 1570 | RxFIFOOver | TxErr | TxOK | RxOK | RxErr, |
1483 | .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow | 1571 | .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, |
1572 | .msi = RTL_FEATURE_MSI | ||
1484 | } | 1573 | } |
1485 | }; | 1574 | }; |
1486 | 1575 | ||
1576 | /* Cfg9346_Unlock assumed. */ | ||
1577 | static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, | ||
1578 | const struct rtl_cfg_info *cfg) | ||
1579 | { | ||
1580 | unsigned msi = 0; | ||
1581 | u8 cfg2; | ||
1582 | |||
1583 | cfg2 = RTL_R8(Config2) & ~MSIEnable; | ||
1584 | if (cfg->msi) { | ||
1585 | if (pci_enable_msi(pdev)) { | ||
1586 | dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); | ||
1587 | } else { | ||
1588 | cfg2 |= MSIEnable; | ||
1589 | msi = RTL_FEATURE_MSI; | ||
1590 | } | ||
1591 | } | ||
1592 | RTL_W8(Config2, cfg2); | ||
1593 | return msi; | ||
1594 | } | ||
1595 | |||
1596 | static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) | ||
1597 | { | ||
1598 | if (tp->features & RTL_FEATURE_MSI) { | ||
1599 | pci_disable_msi(pdev); | ||
1600 | tp->features &= ~RTL_FEATURE_MSI; | ||
1601 | } | ||
1602 | } | ||
1603 | |||
1487 | static int __devinit | 1604 | static int __devinit |
1488 | rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1605 | rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1489 | { | 1606 | { |
@@ -1596,10 +1713,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1596 | 1713 | ||
1597 | /* Identify chip attached to board */ | 1714 | /* Identify chip attached to board */ |
1598 | rtl8169_get_mac_version(tp, ioaddr); | 1715 | rtl8169_get_mac_version(tp, ioaddr); |
1599 | rtl8169_get_phy_version(tp, ioaddr); | ||
1600 | 1716 | ||
1601 | rtl8169_print_mac_version(tp); | 1717 | rtl8169_print_mac_version(tp); |
1602 | rtl8169_print_phy_version(tp); | ||
1603 | 1718 | ||
1604 | for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { | 1719 | for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { |
1605 | if (tp->mac_version == rtl_chip_info[i].mac_version) | 1720 | if (tp->mac_version == rtl_chip_info[i].mac_version) |
@@ -1619,6 +1734,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1619 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 1734 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
1620 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); | 1735 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); |
1621 | RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); | 1736 | RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); |
1737 | tp->features |= rtl_try_msi(pdev, ioaddr, cfg); | ||
1622 | RTL_W8(Cfg9346, Cfg9346_Lock); | 1738 | RTL_W8(Cfg9346, Cfg9346_Lock); |
1623 | 1739 | ||
1624 | if (RTL_R8(PHYstatus) & TBI_Enable) { | 1740 | if (RTL_R8(PHYstatus) & TBI_Enable) { |
@@ -1686,7 +1802,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1686 | 1802 | ||
1687 | rc = register_netdev(dev); | 1803 | rc = register_netdev(dev); |
1688 | if (rc < 0) | 1804 | if (rc < 0) |
1689 | goto err_out_unmap_5; | 1805 | goto err_out_msi_5; |
1690 | 1806 | ||
1691 | pci_set_drvdata(pdev, dev); | 1807 | pci_set_drvdata(pdev, dev); |
1692 | 1808 | ||
@@ -1709,7 +1825,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1709 | out: | 1825 | out: |
1710 | return rc; | 1826 | return rc; |
1711 | 1827 | ||
1712 | err_out_unmap_5: | 1828 | err_out_msi_5: |
1829 | rtl_disable_msi(pdev, tp); | ||
1713 | iounmap(ioaddr); | 1830 | iounmap(ioaddr); |
1714 | err_out_free_res_4: | 1831 | err_out_free_res_4: |
1715 | pci_release_regions(pdev); | 1832 | pci_release_regions(pdev); |
@@ -1730,6 +1847,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) | |||
1730 | flush_scheduled_work(); | 1847 | flush_scheduled_work(); |
1731 | 1848 | ||
1732 | unregister_netdev(dev); | 1849 | unregister_netdev(dev); |
1850 | rtl_disable_msi(pdev, tp); | ||
1733 | rtl8169_release_board(pdev, dev, tp->mmio_addr); | 1851 | rtl8169_release_board(pdev, dev, tp->mmio_addr); |
1734 | pci_set_drvdata(pdev, NULL); | 1852 | pci_set_drvdata(pdev, NULL); |
1735 | } | 1853 | } |
@@ -1773,7 +1891,8 @@ static int rtl8169_open(struct net_device *dev) | |||
1773 | 1891 | ||
1774 | smp_mb(); | 1892 | smp_mb(); |
1775 | 1893 | ||
1776 | retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED, | 1894 | retval = request_irq(dev->irq, rtl8169_interrupt, |
1895 | (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, | ||
1777 | dev->name, dev); | 1896 | dev->name, dev); |
1778 | if (retval < 0) | 1897 | if (retval < 0) |
1779 | goto err_release_ring_2; | 1898 | goto err_release_ring_2; |
@@ -1933,7 +2052,7 @@ static void rtl_hw_start_8169(struct net_device *dev) | |||
1933 | 2052 | ||
1934 | if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || | 2053 | if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || |
1935 | (tp->mac_version == RTL_GIGA_MAC_VER_03)) { | 2054 | (tp->mac_version == RTL_GIGA_MAC_VER_03)) { |
1936 | dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. " | 2055 | dprintk("Set MAC Reg C+CR Offset 0xE0. " |
1937 | "Bit-3 and bit-14 MUST be 1\n"); | 2056 | "Bit-3 and bit-14 MUST be 1\n"); |
1938 | tp->cp_cmd |= (1 << 14); | 2057 | tp->cp_cmd |= (1 << 14); |
1939 | } | 2058 | } |
@@ -2029,7 +2148,8 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
2029 | void __iomem *ioaddr = tp->mmio_addr; | 2148 | void __iomem *ioaddr = tp->mmio_addr; |
2030 | struct pci_dev *pdev = tp->pci_dev; | 2149 | struct pci_dev *pdev = tp->pci_dev; |
2031 | 2150 | ||
2032 | if (tp->mac_version == RTL_GIGA_MAC_VER_13) { | 2151 | if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || |
2152 | (tp->mac_version == RTL_GIGA_MAC_VER_16)) { | ||
2033 | pci_write_config_word(pdev, 0x68, 0x00); | 2153 | pci_write_config_word(pdev, 0x68, 0x00); |
2034 | pci_write_config_word(pdev, 0x69, 0x08); | 2154 | pci_write_config_word(pdev, 0x69, 0x08); |
2035 | } | 2155 | } |
@@ -2259,7 +2379,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) | |||
2259 | dev_kfree_skb(skb); | 2379 | dev_kfree_skb(skb); |
2260 | tx_skb->skb = NULL; | 2380 | tx_skb->skb = NULL; |
2261 | } | 2381 | } |
2262 | tp->stats.tx_dropped++; | 2382 | tp->dev->stats.tx_dropped++; |
2263 | } | 2383 | } |
2264 | } | 2384 | } |
2265 | tp->cur_tx = tp->dirty_tx = 0; | 2385 | tp->cur_tx = tp->dirty_tx = 0; |
@@ -2310,7 +2430,7 @@ static void rtl8169_reinit_task(struct work_struct *work) | |||
2310 | ret = rtl8169_open(dev); | 2430 | ret = rtl8169_open(dev); |
2311 | if (unlikely(ret < 0)) { | 2431 | if (unlikely(ret < 0)) { |
2312 | if (net_ratelimit() && netif_msg_drv(tp)) { | 2432 | if (net_ratelimit() && netif_msg_drv(tp)) { |
2313 | printk(PFX KERN_ERR "%s: reinit failure (status = %d)." | 2433 | printk(KERN_ERR PFX "%s: reinit failure (status = %d)." |
2314 | " Rescheduling.\n", dev->name, ret); | 2434 | " Rescheduling.\n", dev->name, ret); |
2315 | } | 2435 | } |
2316 | rtl8169_schedule_work(dev, rtl8169_reinit_task); | 2436 | rtl8169_schedule_work(dev, rtl8169_reinit_task); |
@@ -2340,9 +2460,10 @@ static void rtl8169_reset_task(struct work_struct *work) | |||
2340 | rtl8169_init_ring_indexes(tp); | 2460 | rtl8169_init_ring_indexes(tp); |
2341 | rtl_hw_start(dev); | 2461 | rtl_hw_start(dev); |
2342 | netif_wake_queue(dev); | 2462 | netif_wake_queue(dev); |
2463 | rtl8169_check_link_status(dev, tp, tp->mmio_addr); | ||
2343 | } else { | 2464 | } else { |
2344 | if (net_ratelimit() && netif_msg_intr(tp)) { | 2465 | if (net_ratelimit() && netif_msg_intr(tp)) { |
2345 | printk(PFX KERN_EMERG "%s: Rx buffers shortage\n", | 2466 | printk(KERN_EMERG PFX "%s: Rx buffers shortage\n", |
2346 | dev->name); | 2467 | dev->name); |
2347 | } | 2468 | } |
2348 | rtl8169_schedule_work(dev, rtl8169_reset_task); | 2469 | rtl8169_schedule_work(dev, rtl8169_reset_task); |
@@ -2496,7 +2617,7 @@ err_stop: | |||
2496 | netif_stop_queue(dev); | 2617 | netif_stop_queue(dev); |
2497 | ret = NETDEV_TX_BUSY; | 2618 | ret = NETDEV_TX_BUSY; |
2498 | err_update_stats: | 2619 | err_update_stats: |
2499 | tp->stats.tx_dropped++; | 2620 | dev->stats.tx_dropped++; |
2500 | goto out; | 2621 | goto out; |
2501 | } | 2622 | } |
2502 | 2623 | ||
@@ -2571,8 +2692,8 @@ static void rtl8169_tx_interrupt(struct net_device *dev, | |||
2571 | if (status & DescOwn) | 2692 | if (status & DescOwn) |
2572 | break; | 2693 | break; |
2573 | 2694 | ||
2574 | tp->stats.tx_bytes += len; | 2695 | dev->stats.tx_bytes += len; |
2575 | tp->stats.tx_packets++; | 2696 | dev->stats.tx_packets++; |
2576 | 2697 | ||
2577 | rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); | 2698 | rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); |
2578 | 2699 | ||
@@ -2672,14 +2793,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
2672 | "%s: Rx ERROR. status = %08x\n", | 2793 | "%s: Rx ERROR. status = %08x\n", |
2673 | dev->name, status); | 2794 | dev->name, status); |
2674 | } | 2795 | } |
2675 | tp->stats.rx_errors++; | 2796 | dev->stats.rx_errors++; |
2676 | if (status & (RxRWT | RxRUNT)) | 2797 | if (status & (RxRWT | RxRUNT)) |
2677 | tp->stats.rx_length_errors++; | 2798 | dev->stats.rx_length_errors++; |
2678 | if (status & RxCRC) | 2799 | if (status & RxCRC) |
2679 | tp->stats.rx_crc_errors++; | 2800 | dev->stats.rx_crc_errors++; |
2680 | if (status & RxFOVF) { | 2801 | if (status & RxFOVF) { |
2681 | rtl8169_schedule_work(dev, rtl8169_reset_task); | 2802 | rtl8169_schedule_work(dev, rtl8169_reset_task); |
2682 | tp->stats.rx_fifo_errors++; | 2803 | dev->stats.rx_fifo_errors++; |
2683 | } | 2804 | } |
2684 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); | 2805 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); |
2685 | } else { | 2806 | } else { |
@@ -2694,8 +2815,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
2694 | * sized frames. | 2815 | * sized frames. |
2695 | */ | 2816 | */ |
2696 | if (unlikely(rtl8169_fragmented_frame(status))) { | 2817 | if (unlikely(rtl8169_fragmented_frame(status))) { |
2697 | tp->stats.rx_dropped++; | 2818 | dev->stats.rx_dropped++; |
2698 | tp->stats.rx_length_errors++; | 2819 | dev->stats.rx_length_errors++; |
2699 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); | 2820 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); |
2700 | continue; | 2821 | continue; |
2701 | } | 2822 | } |
@@ -2719,8 +2840,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
2719 | rtl8169_rx_skb(skb); | 2840 | rtl8169_rx_skb(skb); |
2720 | 2841 | ||
2721 | dev->last_rx = jiffies; | 2842 | dev->last_rx = jiffies; |
2722 | tp->stats.rx_bytes += pkt_size; | 2843 | dev->stats.rx_bytes += pkt_size; |
2723 | tp->stats.rx_packets++; | 2844 | dev->stats.rx_packets++; |
2724 | } | 2845 | } |
2725 | 2846 | ||
2726 | /* Workaround for AMD platform. */ | 2847 | /* Workaround for AMD platform. */ |
@@ -2881,7 +3002,7 @@ core_down: | |||
2881 | rtl8169_asic_down(ioaddr); | 3002 | rtl8169_asic_down(ioaddr); |
2882 | 3003 | ||
2883 | /* Update the error counts. */ | 3004 | /* Update the error counts. */ |
2884 | tp->stats.rx_missed_errors += RTL_R32(RxMissed); | 3005 | dev->stats.rx_missed_errors += RTL_R32(RxMissed); |
2885 | RTL_W32(RxMissed, 0); | 3006 | RTL_W32(RxMissed, 0); |
2886 | 3007 | ||
2887 | spin_unlock_irq(&tp->lock); | 3008 | spin_unlock_irq(&tp->lock); |
@@ -2984,7 +3105,9 @@ static void rtl_set_rx_mode(struct net_device *dev) | |||
2984 | (tp->mac_version == RTL_GIGA_MAC_VER_12) || | 3105 | (tp->mac_version == RTL_GIGA_MAC_VER_12) || |
2985 | (tp->mac_version == RTL_GIGA_MAC_VER_13) || | 3106 | (tp->mac_version == RTL_GIGA_MAC_VER_13) || |
2986 | (tp->mac_version == RTL_GIGA_MAC_VER_14) || | 3107 | (tp->mac_version == RTL_GIGA_MAC_VER_14) || |
2987 | (tp->mac_version == RTL_GIGA_MAC_VER_15)) { | 3108 | (tp->mac_version == RTL_GIGA_MAC_VER_15) || |
3109 | (tp->mac_version == RTL_GIGA_MAC_VER_16) || | ||
3110 | (tp->mac_version == RTL_GIGA_MAC_VER_17)) { | ||
2988 | mc_filter[0] = 0xffffffff; | 3111 | mc_filter[0] = 0xffffffff; |
2989 | mc_filter[1] = 0xffffffff; | 3112 | mc_filter[1] = 0xffffffff; |
2990 | } | 3113 | } |
@@ -3011,12 +3134,12 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) | |||
3011 | 3134 | ||
3012 | if (netif_running(dev)) { | 3135 | if (netif_running(dev)) { |
3013 | spin_lock_irqsave(&tp->lock, flags); | 3136 | spin_lock_irqsave(&tp->lock, flags); |
3014 | tp->stats.rx_missed_errors += RTL_R32(RxMissed); | 3137 | dev->stats.rx_missed_errors += RTL_R32(RxMissed); |
3015 | RTL_W32(RxMissed, 0); | 3138 | RTL_W32(RxMissed, 0); |
3016 | spin_unlock_irqrestore(&tp->lock, flags); | 3139 | spin_unlock_irqrestore(&tp->lock, flags); |
3017 | } | 3140 | } |
3018 | 3141 | ||
3019 | return &tp->stats; | 3142 | return &dev->stats; |
3020 | } | 3143 | } |
3021 | 3144 | ||
3022 | #ifdef CONFIG_PM | 3145 | #ifdef CONFIG_PM |
@@ -3037,14 +3160,15 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3037 | 3160 | ||
3038 | rtl8169_asic_down(ioaddr); | 3161 | rtl8169_asic_down(ioaddr); |
3039 | 3162 | ||
3040 | tp->stats.rx_missed_errors += RTL_R32(RxMissed); | 3163 | dev->stats.rx_missed_errors += RTL_R32(RxMissed); |
3041 | RTL_W32(RxMissed, 0); | 3164 | RTL_W32(RxMissed, 0); |
3042 | 3165 | ||
3043 | spin_unlock_irq(&tp->lock); | 3166 | spin_unlock_irq(&tp->lock); |
3044 | 3167 | ||
3045 | out_pci_suspend: | 3168 | out_pci_suspend: |
3046 | pci_save_state(pdev); | 3169 | pci_save_state(pdev); |
3047 | pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); | 3170 | pci_enable_wake(pdev, pci_choose_state(pdev, state), |
3171 | (tp->features & RTL_FEATURE_WOL) ? 1 : 0); | ||
3048 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 3172 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
3049 | 3173 | ||
3050 | return 0; | 3174 | return 0; |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 24cfb6275d9b..c27c7d63b6a5 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -4271,7 +4271,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) | |||
4271 | del_timer_sync(&hw->watchdog_timer); | 4271 | del_timer_sync(&hw->watchdog_timer); |
4272 | cancel_work_sync(&hw->restart_work); | 4272 | cancel_work_sync(&hw->restart_work); |
4273 | 4273 | ||
4274 | for (i = hw->ports; i >= 0; --i) | 4274 | for (i = hw->ports-1; i >= 0; --i) |
4275 | unregister_netdev(hw->dev[i]); | 4275 | unregister_netdev(hw->dev[i]); |
4276 | 4276 | ||
4277 | sky2_write32(hw, B0_IMSK, 0); | 4277 | sky2_write32(hw, B0_IMSK, 0); |
@@ -4289,7 +4289,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) | |||
4289 | pci_release_regions(pdev); | 4289 | pci_release_regions(pdev); |
4290 | pci_disable_device(pdev); | 4290 | pci_disable_device(pdev); |
4291 | 4291 | ||
4292 | for (i = hw->ports; i >= 0; --i) | 4292 | for (i = hw->ports-1; i >= 0; --i) |
4293 | free_netdev(hw->dev[i]); | 4293 | free_netdev(hw->dev[i]); |
4294 | 4294 | ||
4295 | iounmap(hw->regs); | 4295 | iounmap(hw->regs); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 59d4da2734c1..d0bb5b9d2120 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1230,7 +1230,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) | |||
1230 | case PCI_DEVICE_ID_JMICRON_JMB363: | 1230 | case PCI_DEVICE_ID_JMICRON_JMB363: |
1231 | /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ | 1231 | /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ |
1232 | /* Set the class codes correctly and then direct IDE 0 */ | 1232 | /* Set the class codes correctly and then direct IDE 0 */ |
1233 | conf1 |= 0x00C2A102; /* Set 1, 8, 13, 15, 17, 22, 23 */ | 1233 | conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */ |
1234 | break; | 1234 | break; |
1235 | 1235 | ||
1236 | case PCI_DEVICE_ID_JMICRON_JMB368: | 1236 | case PCI_DEVICE_ID_JMICRON_JMB368: |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 6141389dcdb2..2e6129f13d38 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -115,7 +115,6 @@ obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o | |||
115 | obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o | 115 | obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o |
116 | obj-$(CONFIG_SCSI_MESH) += mesh.o | 116 | obj-$(CONFIG_SCSI_MESH) += mesh.o |
117 | obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o | 117 | obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o |
118 | obj-$(CONFIG_SCSI_PLUTO) += pluto.o | ||
119 | obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o | 118 | obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o |
120 | obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o | 119 | obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o |
121 | obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o | 120 | obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o |
@@ -123,7 +122,6 @@ obj-$(CONFIG_SCSI_PPA) += ppa.o | |||
123 | obj-$(CONFIG_SCSI_IMM) += imm.o | 122 | obj-$(CONFIG_SCSI_IMM) += imm.o |
124 | obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o | 123 | obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o |
125 | obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o | 124 | obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o |
126 | obj-$(CONFIG_SCSI_FCAL) += fcal.o | ||
127 | obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o | 125 | obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o |
128 | obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o | 126 | obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o |
129 | obj-$(CONFIG_SCSI_NSP32) += nsp32.o | 127 | obj-$(CONFIG_SCSI_NSP32) += nsp32.o |
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c index c4d17231c828..6d86a9be538f 100644 --- a/drivers/scsi/aic7xxx/aic7770.c +++ b/drivers/scsi/aic7xxx/aic7770.c | |||
@@ -60,8 +60,6 @@ | |||
60 | #define ID_OLV_274xD 0x04907783 /* Olivetti OEM (Differential) */ | 60 | #define ID_OLV_274xD 0x04907783 /* Olivetti OEM (Differential) */ |
61 | 61 | ||
62 | static int aic7770_chip_init(struct ahc_softc *ahc); | 62 | static int aic7770_chip_init(struct ahc_softc *ahc); |
63 | static int aic7770_suspend(struct ahc_softc *ahc); | ||
64 | static int aic7770_resume(struct ahc_softc *ahc); | ||
65 | static int aha2840_load_seeprom(struct ahc_softc *ahc); | 63 | static int aha2840_load_seeprom(struct ahc_softc *ahc); |
66 | static ahc_device_setup_t ahc_aic7770_VL_setup; | 64 | static ahc_device_setup_t ahc_aic7770_VL_setup; |
67 | static ahc_device_setup_t ahc_aic7770_EISA_setup; | 65 | static ahc_device_setup_t ahc_aic7770_EISA_setup; |
@@ -155,8 +153,6 @@ aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) | |||
155 | return (error); | 153 | return (error); |
156 | 154 | ||
157 | ahc->bus_chip_init = aic7770_chip_init; | 155 | ahc->bus_chip_init = aic7770_chip_init; |
158 | ahc->bus_suspend = aic7770_suspend; | ||
159 | ahc->bus_resume = aic7770_resume; | ||
160 | 156 | ||
161 | error = ahc_reset(ahc, /*reinit*/FALSE); | 157 | error = ahc_reset(ahc, /*reinit*/FALSE); |
162 | if (error != 0) | 158 | if (error != 0) |
@@ -272,18 +268,6 @@ aic7770_chip_init(struct ahc_softc *ahc) | |||
272 | return (ahc_chip_init(ahc)); | 268 | return (ahc_chip_init(ahc)); |
273 | } | 269 | } |
274 | 270 | ||
275 | static int | ||
276 | aic7770_suspend(struct ahc_softc *ahc) | ||
277 | { | ||
278 | return (ahc_suspend(ahc)); | ||
279 | } | ||
280 | |||
281 | static int | ||
282 | aic7770_resume(struct ahc_softc *ahc) | ||
283 | { | ||
284 | return (ahc_resume(ahc)); | ||
285 | } | ||
286 | |||
287 | /* | 271 | /* |
288 | * Read the 284x SEEPROM. | 272 | * Read the 284x SEEPROM. |
289 | */ | 273 | */ |
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index 27adbb294ac1..ce638aa6005a 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h | |||
@@ -1003,8 +1003,15 @@ struct ahd_suspend_channel_state { | |||
1003 | uint8_t seqctl; | 1003 | uint8_t seqctl; |
1004 | }; | 1004 | }; |
1005 | 1005 | ||
1006 | struct ahd_suspend_pci_state { | ||
1007 | uint32_t devconfig; | ||
1008 | uint8_t command; | ||
1009 | uint8_t csize_lattime; | ||
1010 | }; | ||
1011 | |||
1006 | struct ahd_suspend_state { | 1012 | struct ahd_suspend_state { |
1007 | struct ahd_suspend_channel_state channel[2]; | 1013 | struct ahd_suspend_channel_state channel[2]; |
1014 | struct ahd_suspend_pci_state pci_state; | ||
1008 | uint8_t optionmode; | 1015 | uint8_t optionmode; |
1009 | uint8_t dscommand0; | 1016 | uint8_t dscommand0; |
1010 | uint8_t dspcistatus; | 1017 | uint8_t dspcistatus; |
@@ -1333,6 +1340,8 @@ struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t); | |||
1333 | int ahd_pci_config(struct ahd_softc *, | 1340 | int ahd_pci_config(struct ahd_softc *, |
1334 | struct ahd_pci_identity *); | 1341 | struct ahd_pci_identity *); |
1335 | int ahd_pci_test_register_access(struct ahd_softc *); | 1342 | int ahd_pci_test_register_access(struct ahd_softc *); |
1343 | void ahd_pci_suspend(struct ahd_softc *); | ||
1344 | void ahd_pci_resume(struct ahd_softc *); | ||
1336 | 1345 | ||
1337 | /************************** SCB and SCB queue management **********************/ | 1346 | /************************** SCB and SCB queue management **********************/ |
1338 | void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, | 1347 | void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, |
@@ -1343,6 +1352,8 @@ struct ahd_softc *ahd_alloc(void *platform_arg, char *name); | |||
1343 | int ahd_softc_init(struct ahd_softc *); | 1352 | int ahd_softc_init(struct ahd_softc *); |
1344 | void ahd_controller_info(struct ahd_softc *ahd, char *buf); | 1353 | void ahd_controller_info(struct ahd_softc *ahd, char *buf); |
1345 | int ahd_init(struct ahd_softc *ahd); | 1354 | int ahd_init(struct ahd_softc *ahd); |
1355 | int ahd_suspend(struct ahd_softc *ahd); | ||
1356 | void ahd_resume(struct ahd_softc *ahd); | ||
1346 | int ahd_default_config(struct ahd_softc *ahd); | 1357 | int ahd_default_config(struct ahd_softc *ahd); |
1347 | int ahd_parse_vpddata(struct ahd_softc *ahd, | 1358 | int ahd_parse_vpddata(struct ahd_softc *ahd, |
1348 | struct vpd_config *vpd); | 1359 | struct vpd_config *vpd); |
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 05f692bd0adc..a7dd8cdda472 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c | |||
@@ -7175,7 +7175,6 @@ ahd_pause_and_flushwork(struct ahd_softc *ahd) | |||
7175 | ahd->flags &= ~AHD_ALL_INTERRUPTS; | 7175 | ahd->flags &= ~AHD_ALL_INTERRUPTS; |
7176 | } | 7176 | } |
7177 | 7177 | ||
7178 | #if 0 | ||
7179 | int | 7178 | int |
7180 | ahd_suspend(struct ahd_softc *ahd) | 7179 | ahd_suspend(struct ahd_softc *ahd) |
7181 | { | 7180 | { |
@@ -7189,19 +7188,15 @@ ahd_suspend(struct ahd_softc *ahd) | |||
7189 | ahd_shutdown(ahd); | 7188 | ahd_shutdown(ahd); |
7190 | return (0); | 7189 | return (0); |
7191 | } | 7190 | } |
7192 | #endif /* 0 */ | ||
7193 | 7191 | ||
7194 | #if 0 | 7192 | void |
7195 | int | ||
7196 | ahd_resume(struct ahd_softc *ahd) | 7193 | ahd_resume(struct ahd_softc *ahd) |
7197 | { | 7194 | { |
7198 | 7195 | ||
7199 | ahd_reset(ahd, /*reinit*/TRUE); | 7196 | ahd_reset(ahd, /*reinit*/TRUE); |
7200 | ahd_intr_enable(ahd, TRUE); | 7197 | ahd_intr_enable(ahd, TRUE); |
7201 | ahd_restart(ahd); | 7198 | ahd_restart(ahd); |
7202 | return (0); | ||
7203 | } | 7199 | } |
7204 | #endif /* 0 */ | ||
7205 | 7200 | ||
7206 | /************************** Busy Target Table *********************************/ | 7201 | /************************** Busy Target Table *********************************/ |
7207 | /* | 7202 | /* |
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 42c0f14a262c..2d020405480c 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c | |||
@@ -315,8 +315,8 @@ uint32_t aic79xx_slowcrc; | |||
315 | */ | 315 | */ |
316 | static char *aic79xx = NULL; | 316 | static char *aic79xx = NULL; |
317 | 317 | ||
318 | MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>"); | 318 | MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>"); |
319 | MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver"); | 319 | MODULE_DESCRIPTION("Adaptec AIC790X U320 SCSI Host Bus Adapter driver"); |
320 | MODULE_LICENSE("Dual BSD/GPL"); | 320 | MODULE_LICENSE("Dual BSD/GPL"); |
321 | MODULE_VERSION(AIC79XX_DRIVER_VERSION); | 321 | MODULE_VERSION(AIC79XX_DRIVER_VERSION); |
322 | module_param(aic79xx, charp, 0444); | 322 | module_param(aic79xx, charp, 0444); |
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c index c62ce41f2793..66f0259edb69 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c | |||
@@ -50,6 +50,8 @@ static int ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, | |||
50 | static int ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, | 50 | static int ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, |
51 | u_long *bus_addr, | 51 | u_long *bus_addr, |
52 | uint8_t __iomem **maddr); | 52 | uint8_t __iomem **maddr); |
53 | static int ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
54 | static int ahd_linux_pci_dev_resume(struct pci_dev *pdev); | ||
53 | static void ahd_linux_pci_dev_remove(struct pci_dev *pdev); | 55 | static void ahd_linux_pci_dev_remove(struct pci_dev *pdev); |
54 | 56 | ||
55 | /* Define the macro locally since it's different for different class of chips. | 57 | /* Define the macro locally since it's different for different class of chips. |
@@ -86,10 +88,58 @@ MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table); | |||
86 | static struct pci_driver aic79xx_pci_driver = { | 88 | static struct pci_driver aic79xx_pci_driver = { |
87 | .name = "aic79xx", | 89 | .name = "aic79xx", |
88 | .probe = ahd_linux_pci_dev_probe, | 90 | .probe = ahd_linux_pci_dev_probe, |
91 | #ifdef CONFIG_PM | ||
92 | .suspend = ahd_linux_pci_dev_suspend, | ||
93 | .resume = ahd_linux_pci_dev_resume, | ||
94 | #endif | ||
89 | .remove = ahd_linux_pci_dev_remove, | 95 | .remove = ahd_linux_pci_dev_remove, |
90 | .id_table = ahd_linux_pci_id_table | 96 | .id_table = ahd_linux_pci_id_table |
91 | }; | 97 | }; |
92 | 98 | ||
99 | static int | ||
100 | ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg) | ||
101 | { | ||
102 | struct ahd_softc *ahd = pci_get_drvdata(pdev); | ||
103 | int rc; | ||
104 | |||
105 | if ((rc = ahd_suspend(ahd))) | ||
106 | return rc; | ||
107 | |||
108 | ahd_pci_suspend(ahd); | ||
109 | |||
110 | pci_save_state(pdev); | ||
111 | pci_disable_device(pdev); | ||
112 | |||
113 | if (mesg.event == PM_EVENT_SUSPEND) | ||
114 | pci_set_power_state(pdev, PCI_D3hot); | ||
115 | |||
116 | return rc; | ||
117 | } | ||
118 | |||
119 | static int | ||
120 | ahd_linux_pci_dev_resume(struct pci_dev *pdev) | ||
121 | { | ||
122 | struct ahd_softc *ahd = pci_get_drvdata(pdev); | ||
123 | int rc; | ||
124 | |||
125 | pci_set_power_state(pdev, PCI_D0); | ||
126 | pci_restore_state(pdev); | ||
127 | |||
128 | if ((rc = pci_enable_device(pdev))) { | ||
129 | dev_printk(KERN_ERR, &pdev->dev, | ||
130 | "failed to enable device after resume (%d)\n", rc); | ||
131 | return rc; | ||
132 | } | ||
133 | |||
134 | pci_set_master(pdev); | ||
135 | |||
136 | ahd_pci_resume(ahd); | ||
137 | |||
138 | ahd_resume(ahd); | ||
139 | |||
140 | return rc; | ||
141 | } | ||
142 | |||
93 | static void | 143 | static void |
94 | ahd_linux_pci_dev_remove(struct pci_dev *pdev) | 144 | ahd_linux_pci_dev_remove(struct pci_dev *pdev) |
95 | { | 145 | { |
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c index 0bada0028aa0..7a203a90601a 100644 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c | |||
@@ -389,6 +389,33 @@ ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) | |||
389 | return error; | 389 | return error; |
390 | } | 390 | } |
391 | 391 | ||
392 | void | ||
393 | ahd_pci_suspend(struct ahd_softc *ahd) | ||
394 | { | ||
395 | /* | ||
396 | * Save chip register configuration data for chip resets | ||
397 | * that occur during runtime and resume events. | ||
398 | */ | ||
399 | ahd->suspend_state.pci_state.devconfig = | ||
400 | ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); | ||
401 | ahd->suspend_state.pci_state.command = | ||
402 | ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1); | ||
403 | ahd->suspend_state.pci_state.csize_lattime = | ||
404 | ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, /*bytes*/1); | ||
405 | |||
406 | } | ||
407 | |||
408 | void | ||
409 | ahd_pci_resume(struct ahd_softc *ahd) | ||
410 | { | ||
411 | ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, | ||
412 | ahd->suspend_state.pci_state.devconfig, /*bytes*/4); | ||
413 | ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, | ||
414 | ahd->suspend_state.pci_state.command, /*bytes*/1); | ||
415 | ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME, | ||
416 | ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1); | ||
417 | } | ||
418 | |||
392 | /* | 419 | /* |
393 | * Perform some simple tests that should catch situations where | 420 | * Perform some simple tests that should catch situations where |
394 | * our registers are invalidly mapped. | 421 | * our registers are invalidly mapped. |
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index e1bd57b9f23d..3d4e42d90452 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h | |||
@@ -962,16 +962,6 @@ struct ahc_softc { | |||
962 | ahc_bus_chip_init_t bus_chip_init; | 962 | ahc_bus_chip_init_t bus_chip_init; |
963 | 963 | ||
964 | /* | 964 | /* |
965 | * Bus specific suspend routine. | ||
966 | */ | ||
967 | ahc_bus_suspend_t bus_suspend; | ||
968 | |||
969 | /* | ||
970 | * Bus specific resume routine. | ||
971 | */ | ||
972 | ahc_bus_resume_t bus_resume; | ||
973 | |||
974 | /* | ||
975 | * Target mode related state kept on a per enabled lun basis. | 965 | * Target mode related state kept on a per enabled lun basis. |
976 | * Targets that are not enabled will have null entries. | 966 | * Targets that are not enabled will have null entries. |
977 | * As an initiator, we keep one target entry for our initiator | 967 | * As an initiator, we keep one target entry for our initiator |
@@ -1153,6 +1143,7 @@ struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t); | |||
1153 | int ahc_pci_config(struct ahc_softc *, | 1143 | int ahc_pci_config(struct ahc_softc *, |
1154 | struct ahc_pci_identity *); | 1144 | struct ahc_pci_identity *); |
1155 | int ahc_pci_test_register_access(struct ahc_softc *); | 1145 | int ahc_pci_test_register_access(struct ahc_softc *); |
1146 | void ahc_pci_resume(struct ahc_softc *ahc); | ||
1156 | 1147 | ||
1157 | /*************************** EISA/VL Front End ********************************/ | 1148 | /*************************** EISA/VL Front End ********************************/ |
1158 | struct aic7770_identity *aic7770_find_device(uint32_t); | 1149 | struct aic7770_identity *aic7770_find_device(uint32_t); |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 7770befbf50c..390b0fc991c5 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c | |||
@@ -335,8 +335,8 @@ static uint32_t aic7xxx_periodic_otag; | |||
335 | */ | 335 | */ |
336 | static char *aic7xxx = NULL; | 336 | static char *aic7xxx = NULL; |
337 | 337 | ||
338 | MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>"); | 338 | MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>"); |
339 | MODULE_DESCRIPTION("Adaptec Aic77XX/78XX SCSI Host Bus Adapter driver"); | 339 | MODULE_DESCRIPTION("Adaptec AIC77XX/78XX SCSI Host Bus Adapter driver"); |
340 | MODULE_LICENSE("Dual BSD/GPL"); | 340 | MODULE_LICENSE("Dual BSD/GPL"); |
341 | MODULE_VERSION(AIC7XXX_DRIVER_VERSION); | 341 | MODULE_VERSION(AIC7XXX_DRIVER_VERSION); |
342 | module_param(aic7xxx, charp, 0444); | 342 | module_param(aic7xxx, charp, 0444); |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index ea5687df732d..4488946cff2e 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | |||
@@ -49,6 +49,8 @@ static int ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, | |||
49 | static int ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, | 49 | static int ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, |
50 | u_long *bus_addr, | 50 | u_long *bus_addr, |
51 | uint8_t __iomem **maddr); | 51 | uint8_t __iomem **maddr); |
52 | static int ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
53 | static int ahc_linux_pci_dev_resume(struct pci_dev *pdev); | ||
52 | static void ahc_linux_pci_dev_remove(struct pci_dev *pdev); | 54 | static void ahc_linux_pci_dev_remove(struct pci_dev *pdev); |
53 | 55 | ||
54 | /* Define the macro locally since it's different for different class of chips. | 56 | /* Define the macro locally since it's different for different class of chips. |
@@ -133,10 +135,54 @@ MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table); | |||
133 | static struct pci_driver aic7xxx_pci_driver = { | 135 | static struct pci_driver aic7xxx_pci_driver = { |
134 | .name = "aic7xxx", | 136 | .name = "aic7xxx", |
135 | .probe = ahc_linux_pci_dev_probe, | 137 | .probe = ahc_linux_pci_dev_probe, |
138 | #ifdef CONFIG_PM | ||
139 | .suspend = ahc_linux_pci_dev_suspend, | ||
140 | .resume = ahc_linux_pci_dev_resume, | ||
141 | #endif | ||
136 | .remove = ahc_linux_pci_dev_remove, | 142 | .remove = ahc_linux_pci_dev_remove, |
137 | .id_table = ahc_linux_pci_id_table | 143 | .id_table = ahc_linux_pci_id_table |
138 | }; | 144 | }; |
139 | 145 | ||
146 | static int | ||
147 | ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg) | ||
148 | { | ||
149 | struct ahc_softc *ahc = pci_get_drvdata(pdev); | ||
150 | int rc; | ||
151 | |||
152 | if ((rc = ahc_suspend(ahc))) | ||
153 | return rc; | ||
154 | |||
155 | pci_save_state(pdev); | ||
156 | pci_disable_device(pdev); | ||
157 | |||
158 | if (mesg.event == PM_EVENT_SUSPEND) | ||
159 | pci_set_power_state(pdev, PCI_D3hot); | ||
160 | |||
161 | return rc; | ||
162 | } | ||
163 | |||
164 | static int | ||
165 | ahc_linux_pci_dev_resume(struct pci_dev *pdev) | ||
166 | { | ||
167 | struct ahc_softc *ahc = pci_get_drvdata(pdev); | ||
168 | int rc; | ||
169 | |||
170 | pci_set_power_state(pdev, PCI_D0); | ||
171 | pci_restore_state(pdev); | ||
172 | |||
173 | if ((rc = pci_enable_device(pdev))) { | ||
174 | dev_printk(KERN_ERR, &pdev->dev, | ||
175 | "failed to enable device after resume (%d)\n", rc); | ||
176 | return rc; | ||
177 | } | ||
178 | |||
179 | pci_set_master(pdev); | ||
180 | |||
181 | ahc_pci_resume(ahc); | ||
182 | |||
183 | return (ahc_resume(ahc)); | ||
184 | } | ||
185 | |||
140 | static void | 186 | static void |
141 | ahc_linux_pci_dev_remove(struct pci_dev *pdev) | 187 | ahc_linux_pci_dev_remove(struct pci_dev *pdev) |
142 | { | 188 | { |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c index 09c8172c9e5e..ae35937b8055 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c | |||
@@ -633,8 +633,6 @@ static void write_brdctl(struct ahc_softc *ahc, uint8_t value); | |||
633 | static uint8_t read_brdctl(struct ahc_softc *ahc); | 633 | static uint8_t read_brdctl(struct ahc_softc *ahc); |
634 | static void ahc_pci_intr(struct ahc_softc *ahc); | 634 | static void ahc_pci_intr(struct ahc_softc *ahc); |
635 | static int ahc_pci_chip_init(struct ahc_softc *ahc); | 635 | static int ahc_pci_chip_init(struct ahc_softc *ahc); |
636 | static int ahc_pci_suspend(struct ahc_softc *ahc); | ||
637 | static int ahc_pci_resume(struct ahc_softc *ahc); | ||
638 | 636 | ||
639 | static int | 637 | static int |
640 | ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, | 638 | ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, |
@@ -791,8 +789,6 @@ ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry) | |||
791 | 789 | ||
792 | ahc->bus_intr = ahc_pci_intr; | 790 | ahc->bus_intr = ahc_pci_intr; |
793 | ahc->bus_chip_init = ahc_pci_chip_init; | 791 | ahc->bus_chip_init = ahc_pci_chip_init; |
794 | ahc->bus_suspend = ahc_pci_suspend; | ||
795 | ahc->bus_resume = ahc_pci_resume; | ||
796 | 792 | ||
797 | /* Remember how the card was set up in case there is no SEEPROM */ | 793 | /* Remember how the card was set up in case there is no SEEPROM */ |
798 | if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) { | 794 | if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) { |
@@ -2024,18 +2020,9 @@ ahc_pci_chip_init(struct ahc_softc *ahc) | |||
2024 | return (ahc_chip_init(ahc)); | 2020 | return (ahc_chip_init(ahc)); |
2025 | } | 2021 | } |
2026 | 2022 | ||
2027 | static int | 2023 | void |
2028 | ahc_pci_suspend(struct ahc_softc *ahc) | ||
2029 | { | ||
2030 | return (ahc_suspend(ahc)); | ||
2031 | } | ||
2032 | |||
2033 | static int | ||
2034 | ahc_pci_resume(struct ahc_softc *ahc) | 2024 | ahc_pci_resume(struct ahc_softc *ahc) |
2035 | { | 2025 | { |
2036 | |||
2037 | pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0); | ||
2038 | |||
2039 | /* | 2026 | /* |
2040 | * We assume that the OS has restored our register | 2027 | * We assume that the OS has restored our register |
2041 | * mappings, etc. Just update the config space registers | 2028 | * mappings, etc. Just update the config space registers |
@@ -2063,7 +2050,6 @@ ahc_pci_resume(struct ahc_softc *ahc) | |||
2063 | &sxfrctl1); | 2050 | &sxfrctl1); |
2064 | ahc_release_seeprom(&sd); | 2051 | ahc_release_seeprom(&sd); |
2065 | } | 2052 | } |
2066 | return (ahc_resume(ahc)); | ||
2067 | } | 2053 | } |
2068 | 2054 | ||
2069 | static int | 2055 | static int |
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l index f06e7035cb35..c0457b8c3b77 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l | |||
@@ -66,6 +66,7 @@ static char string_buf[MAX_STR_CONST]; | |||
66 | static char *string_buf_ptr; | 66 | static char *string_buf_ptr; |
67 | static int parren_count; | 67 | static int parren_count; |
68 | static char buf[255]; | 68 | static char buf[255]; |
69 | int mmlineno; | ||
69 | %} | 70 | %} |
70 | 71 | ||
71 | WORD [A-Za-z_][-A-Za-z_0-9]* | 72 | WORD [A-Za-z_][-A-Za-z_0-9]* |
@@ -76,7 +77,7 @@ MCARG [^(), \t]+ | |||
76 | 77 | ||
77 | %% | 78 | %% |
78 | \n { | 79 | \n { |
79 | ++yylineno; | 80 | ++mmlineno; |
80 | } | 81 | } |
81 | \r ; | 82 | \r ; |
82 | <ARGLIST>{SPACE} ; | 83 | <ARGLIST>{SPACE} ; |
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c index 4025608d6964..8f8db5f0aef7 100644 --- a/drivers/scsi/aic7xxx_old.c +++ b/drivers/scsi/aic7xxx_old.c | |||
@@ -8417,7 +8417,7 @@ aic7xxx_alloc(struct scsi_host_template *sht, struct aic7xxx_host *temp) | |||
8417 | p->host = host; | 8417 | p->host = host; |
8418 | 8418 | ||
8419 | p->scb_data = kzalloc(sizeof(scb_data_type), GFP_ATOMIC); | 8419 | p->scb_data = kzalloc(sizeof(scb_data_type), GFP_ATOMIC); |
8420 | if (!p->scb_data) | 8420 | if (p->scb_data) |
8421 | { | 8421 | { |
8422 | scbq_init (&p->scb_data->free_scbs); | 8422 | scbq_init (&p->scb_data->free_scbs); |
8423 | } | 8423 | } |
diff --git a/drivers/scsi/fcal.c b/drivers/scsi/fcal.c deleted file mode 100644 index c4e16c0775de..000000000000 --- a/drivers/scsi/fcal.c +++ /dev/null | |||
@@ -1,317 +0,0 @@ | |||
1 | /* fcal.c: Fibre Channel Arbitrated Loop SCSI host adapter driver. | ||
2 | * | ||
3 | * Copyright (C) 1998,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | * | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/delay.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/blkdev.h> | ||
13 | #include <linux/proc_fs.h> | ||
14 | #include <linux/stat.h> | ||
15 | #include <linux/init.h> | ||
16 | #ifdef CONFIG_KMOD | ||
17 | #include <linux/kmod.h> | ||
18 | #endif | ||
19 | |||
20 | #include <asm/irq.h> | ||
21 | |||
22 | #include "scsi.h" | ||
23 | #include <scsi/scsi_host.h> | ||
24 | #include "../fc4/fcp_impl.h" | ||
25 | #include "fcal.h" | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | |||
29 | /* #define FCAL_DEBUG */ | ||
30 | |||
31 | #define fcal_printk printk ("FCAL %s: ", fc->name); printk | ||
32 | |||
33 | #ifdef FCAL_DEBUG | ||
34 | #define FCALD(x) fcal_printk x; | ||
35 | #define FCALND(x) printk ("FCAL: "); printk x; | ||
36 | #else | ||
37 | #define FCALD(x) | ||
38 | #define FCALND(x) | ||
39 | #endif | ||
40 | |||
41 | static unsigned char alpa2target[] = { | ||
42 | 0x7e, 0x7d, 0x7c, 0xff, 0x7b, 0xff, 0xff, 0xff, 0x7a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x79, | ||
43 | 0x78, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x77, 0x76, 0xff, 0xff, 0x75, 0xff, 0x74, 0x73, 0x72, | ||
44 | 0xff, 0xff, 0xff, 0x71, 0xff, 0x70, 0x6f, 0x6e, 0xff, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0xff, | ||
45 | 0xff, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0xff, 0xff, 0x61, 0x60, 0xff, 0x5f, 0xff, 0xff, 0xff, | ||
46 | 0xff, 0xff, 0xff, 0x5e, 0xff, 0x5d, 0x5c, 0x5b, 0xff, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0xff, | ||
47 | 0xff, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0xff, 0xff, 0x4e, 0x4d, 0xff, 0x4c, 0xff, 0xff, 0xff, | ||
48 | 0xff, 0xff, 0xff, 0x4b, 0xff, 0x4a, 0x49, 0x48, 0xff, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0xff, | ||
49 | 0xff, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0xff, 0xff, 0x3b, 0x3a, 0xff, 0x39, 0xff, 0xff, 0xff, | ||
50 | 0x38, 0x37, 0x36, 0xff, 0x35, 0xff, 0xff, 0xff, 0x34, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x33, | ||
51 | 0x32, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x31, 0x30, 0xff, 0xff, 0x2f, 0xff, 0x2e, 0x2d, 0x2c, | ||
52 | 0xff, 0xff, 0xff, 0x2b, 0xff, 0x2a, 0x29, 0x28, 0xff, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0xff, | ||
53 | 0xff, 0x21, 0x20, 0x1f, 0x1e, 0x1d, 0x1c, 0xff, 0xff, 0x1b, 0x1a, 0xff, 0x19, 0xff, 0xff, 0xff, | ||
54 | 0xff, 0xff, 0xff, 0x18, 0xff, 0x17, 0x16, 0x15, 0xff, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0xff, | ||
55 | 0xff, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0xff, 0xff, 0x08, 0x07, 0xff, 0x06, 0xff, 0xff, 0xff, | ||
56 | 0x05, 0x04, 0x03, 0xff, 0x02, 0xff, 0xff, 0xff, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 | ||
57 | }; | ||
58 | |||
59 | static unsigned char target2alpa[] = { | ||
60 | 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6, 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, | ||
61 | 0xcd, 0xcc, 0xcb, 0xca, 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5, 0xb4, 0xb3, | ||
62 | 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9, 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, | ||
63 | 0x98, 0x97, 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79, 0x76, 0x75, 0x74, 0x73, | ||
64 | 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56, | ||
65 | 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, | ||
66 | 0x3a, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x27, 0x26, | ||
67 | 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17, 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01, 0x00 | ||
68 | }; | ||
69 | |||
70 | static int fcal_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd); | ||
71 | |||
72 | int fcal_slave_configure(struct scsi_device *device) | ||
73 | { | ||
74 | int depth_to_use; | ||
75 | |||
76 | if (device->tagged_supported) | ||
77 | depth_to_use = /* 254 */ 8; | ||
78 | else | ||
79 | depth_to_use = 2; | ||
80 | |||
81 | scsi_adjust_queue_depth(device, | ||
82 | (device->tagged_supported ? | ||
83 | MSG_SIMPLE_TAG : 0), | ||
84 | depth_to_use); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* Detect all FC Arbitrated Loops attached to the machine. | ||
90 | fc4 module has done all the work for us... */ | ||
91 | int __init fcal_detect(struct scsi_host_template *tpnt) | ||
92 | { | ||
93 | int nfcals = 0; | ||
94 | fc_channel *fc; | ||
95 | int fcalcount; | ||
96 | int i; | ||
97 | |||
98 | tpnt->proc_name = "fcal"; | ||
99 | fcalcount = 0; | ||
100 | for_each_online_fc_channel(fc) | ||
101 | if (fc->posmap) | ||
102 | fcalcount++; | ||
103 | FCALND(("%d channels online\n", fcalcount)) | ||
104 | if (!fcalcount) { | ||
105 | #if defined(MODULE) && defined(CONFIG_FC4_SOCAL_MODULE) && defined(CONFIG_KMOD) | ||
106 | request_module("socal"); | ||
107 | |||
108 | for_each_online_fc_channel(fc) | ||
109 | if (fc->posmap) | ||
110 | fcalcount++; | ||
111 | if (!fcalcount) | ||
112 | #endif | ||
113 | return 0; | ||
114 | } | ||
115 | for_each_online_fc_channel(fc) { | ||
116 | struct Scsi_Host *host; | ||
117 | long *ages; | ||
118 | struct fcal *fcal; | ||
119 | |||
120 | if (!fc->posmap) continue; | ||
121 | |||
122 | /* Strange, this is already registered to some other SCSI host, then it cannot be fcal */ | ||
123 | if (fc->scsi_name[0]) continue; | ||
124 | memcpy (fc->scsi_name, "FCAL", 4); | ||
125 | |||
126 | fc->can_queue = FCAL_CAN_QUEUE; | ||
127 | fc->rsp_size = 64; | ||
128 | fc->encode_addr = fcal_encode_addr; | ||
129 | |||
130 | ages = kmalloc (128 * sizeof(long), GFP_KERNEL); | ||
131 | if (!ages) continue; | ||
132 | |||
133 | host = scsi_register (tpnt, sizeof (struct fcal)); | ||
134 | if (!host) | ||
135 | { | ||
136 | kfree(ages); | ||
137 | continue; | ||
138 | } | ||
139 | |||
140 | if (!try_module_get(fc->module)) { | ||
141 | kfree(ages); | ||
142 | scsi_unregister(host); | ||
143 | continue; | ||
144 | } | ||
145 | |||
146 | nfcals++; | ||
147 | |||
148 | fcal = (struct fcal *)host->hostdata; | ||
149 | |||
150 | fc->fcp_register(fc, TYPE_SCSI_FCP, 0); | ||
151 | |||
152 | for (i = 0; i < fc->posmap->len; i++) { | ||
153 | int status, target, alpa; | ||
154 | |||
155 | alpa = fc->posmap->list[i]; | ||
156 | FCALD(("Sending PLOGI to %02x\n", alpa)) | ||
157 | target = alpa2target[alpa]; | ||
158 | status = fc_do_plogi(fc, alpa, fcal->node_wwn + target, | ||
159 | fcal->nport_wwn + target); | ||
160 | FCALD(("PLOGI returned with status %d\n", status)) | ||
161 | if (status != FC_STATUS_OK) | ||
162 | continue; | ||
163 | FCALD(("Sending PRLI to %02x\n", alpa)) | ||
164 | status = fc_do_prli(fc, alpa); | ||
165 | FCALD(("PRLI returned with status %d\n", status)) | ||
166 | if (status == FC_STATUS_OK) | ||
167 | fcal->map[target] = 1; | ||
168 | } | ||
169 | |||
170 | host->max_id = 127; | ||
171 | host->irq = fc->irq; | ||
172 | #ifdef __sparc_v9__ | ||
173 | host->unchecked_isa_dma = 1; | ||
174 | #endif | ||
175 | |||
176 | fc->channels = 1; | ||
177 | fc->targets = 127; | ||
178 | fc->ages = ages; | ||
179 | memset (ages, 0, 128 * sizeof(long)); | ||
180 | |||
181 | fcal->fc = fc; | ||
182 | |||
183 | FCALD(("Found FCAL\n")) | ||
184 | } | ||
185 | if (nfcals) | ||
186 | #ifdef __sparc__ | ||
187 | printk ("FCAL: Total of %d Sun Enterprise Network Array (A5000 or EX500) channels found\n", nfcals); | ||
188 | #else | ||
189 | printk ("FCAL: Total of %d Fibre Channel Arbitrated Loops found\n", nfcals); | ||
190 | #endif | ||
191 | return nfcals; | ||
192 | } | ||
193 | |||
194 | int fcal_release(struct Scsi_Host *host) | ||
195 | { | ||
196 | struct fcal *fcal = (struct fcal *)host->hostdata; | ||
197 | fc_channel *fc = fcal->fc; | ||
198 | |||
199 | module_put(fc->module); | ||
200 | |||
201 | fc->fcp_register(fc, TYPE_SCSI_FCP, 1); | ||
202 | FCALND((" releasing fcal.\n")); | ||
203 | kfree (fc->ages); | ||
204 | FCALND(("released fcal!\n")); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | #undef SPRINTF | ||
209 | #define SPRINTF(args...) { if (pos < (buffer + length)) pos += sprintf (pos, ## args); } | ||
210 | |||
211 | int fcal_proc_info (struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) | ||
212 | { | ||
213 | struct fcal *fcal; | ||
214 | fc_channel *fc; | ||
215 | char *pos = buffer; | ||
216 | int i, j; | ||
217 | |||
218 | if (inout) return length; | ||
219 | |||
220 | fcal = (struct fcal *)host->hostdata; | ||
221 | fc = fcal->fc; | ||
222 | |||
223 | #ifdef __sparc__ | ||
224 | SPRINTF ("Sun Enterprise Network Array (A5000 or E?500) on %s PROM node %x\n", fc->name, fc->dev->prom_node); | ||
225 | #else | ||
226 | SPRINTF ("Fibre Channel Arbitrated Loop on %s\n", fc->name); | ||
227 | #endif | ||
228 | SPRINTF ("Initiator AL-PA: %02x\n", fc->sid); | ||
229 | |||
230 | SPRINTF ("\nAttached devices:\n"); | ||
231 | |||
232 | for (i = 0; i < fc->posmap->len; i++) { | ||
233 | unsigned char alpa = fc->posmap->list[i]; | ||
234 | unsigned char target; | ||
235 | u32 *u1, *u2; | ||
236 | |||
237 | target = alpa2target[alpa]; | ||
238 | u1 = (u32 *)&fcal->nport_wwn[target]; | ||
239 | u2 = (u32 *)&fcal->node_wwn[target]; | ||
240 | if (!u1[0] && !u1[1]) { | ||
241 | SPRINTF (" [AL-PA: %02x] Not responded to PLOGI\n", alpa); | ||
242 | } else if (!fcal->map[target]) { | ||
243 | SPRINTF (" [AL-PA: %02x, Port WWN: %08x%08x, Node WWN: %08x%08x] Not responded to PRLI\n", | ||
244 | alpa, u1[0], u1[1], u2[0], u2[1]); | ||
245 | } else { | ||
246 | struct scsi_device *scd; | ||
247 | shost_for_each_device(scd, host) | ||
248 | if (scd->id == target) { | ||
249 | SPRINTF (" [AL-PA: %02x, Id: %02d, Port WWN: %08x%08x, Node WWN: %08x%08x] ", | ||
250 | alpa, target, u1[0], u1[1], u2[0], u2[1]); | ||
251 | SPRINTF ("%s ", scsi_device_type(scd->type)); | ||
252 | |||
253 | for (j = 0; (j < 8) && (scd->vendor[j] >= 0x20); j++) | ||
254 | SPRINTF ("%c", scd->vendor[j]); | ||
255 | SPRINTF (" "); | ||
256 | |||
257 | for (j = 0; (j < 16) && (scd->model[j] >= 0x20); j++) | ||
258 | SPRINTF ("%c", scd->model[j]); | ||
259 | |||
260 | SPRINTF ("\n"); | ||
261 | } | ||
262 | } | ||
263 | } | ||
264 | SPRINTF ("\n"); | ||
265 | |||
266 | *start = buffer + offset; | ||
267 | |||
268 | if ((pos - buffer) < offset) | ||
269 | return 0; | ||
270 | else if (pos - buffer - offset < length) | ||
271 | return pos - buffer - offset; | ||
272 | else | ||
273 | return length; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | For FC-AL, we use a simple addressing: we have just one channel 0, | ||
278 | and all AL-PAs are mapped to targets 0..0x7e | ||
279 | */ | ||
280 | static int fcal_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd) | ||
281 | { | ||
282 | struct fcal *f; | ||
283 | |||
284 | /* We don't support LUNs yet - I'm not sure if LUN should be in SCSI fcp_cdb, or in second byte of addr[0] */ | ||
285 | if (SCpnt->cmnd[1] & 0xe0) return -EINVAL; | ||
286 | /* FC-PLDA tells us... */ | ||
287 | memset(addr, 0, 8); | ||
288 | f = (struct fcal *)SCpnt->device->host->hostdata; | ||
289 | if (!f->map[SCpnt->device->id]) | ||
290 | return -EINVAL; | ||
291 | /* Now, determine DID: It will be Native Identifier, so we zero upper | ||
292 | 2 bytes of the 3 byte DID, lowest byte will be AL-PA */ | ||
293 | fcmd->did = target2alpa[SCpnt->device->id]; | ||
294 | FCALD(("trying DID %06x\n", fcmd->did)) | ||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static struct scsi_host_template driver_template = { | ||
299 | .name = "Fibre Channel Arbitrated Loop", | ||
300 | .detect = fcal_detect, | ||
301 | .release = fcal_release, | ||
302 | .proc_info = fcal_proc_info, | ||
303 | .queuecommand = fcp_scsi_queuecommand, | ||
304 | .slave_configure = fcal_slave_configure, | ||
305 | .can_queue = FCAL_CAN_QUEUE, | ||
306 | .this_id = -1, | ||
307 | .sg_tablesize = 1, | ||
308 | .cmd_per_lun = 1, | ||
309 | .use_clustering = ENABLE_CLUSTERING, | ||
310 | .eh_abort_handler = fcp_scsi_abort, | ||
311 | .eh_device_reset_handler = fcp_scsi_dev_reset, | ||
312 | .eh_host_reset_handler = fcp_scsi_host_reset, | ||
313 | }; | ||
314 | #include "scsi_module.c" | ||
315 | |||
316 | MODULE_LICENSE("GPL"); | ||
317 | |||
diff --git a/drivers/scsi/fcal.h b/drivers/scsi/fcal.h deleted file mode 100644 index 7ff2c3494f9e..000000000000 --- a/drivers/scsi/fcal.h +++ /dev/null | |||
@@ -1,27 +0,0 @@ | |||
1 | /* fcal.h: Generic Fibre Channel Arbitrated Loop SCSI host adapter driver definitions. | ||
2 | * | ||
3 | * Copyright (C) 1998,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
4 | */ | ||
5 | |||
6 | #ifndef _FCAL_H | ||
7 | #define _FCAL_H | ||
8 | |||
9 | #include "../fc4/fcp_impl.h" | ||
10 | |||
11 | struct fcal { | ||
12 | /* fc must be first */ | ||
13 | fc_channel *fc; | ||
14 | unsigned char map[128]; | ||
15 | fc_wwn nport_wwn[128]; | ||
16 | fc_wwn node_wwn[128]; | ||
17 | }; | ||
18 | |||
19 | /* Arbitrary constant. Cannot be too large, as fc4 layer has limitations | ||
20 | for a particular channel */ | ||
21 | #define FCAL_CAN_QUEUE 512 | ||
22 | |||
23 | int fcal_detect(struct scsi_host_template *); | ||
24 | int fcal_release(struct Scsi_Host *); | ||
25 | int fcal_slave_configure(struct scsi_device *); | ||
26 | |||
27 | #endif /* !(_FCAL_H) */ | ||
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 5ab3ce762485..b253b8c718d3 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -4734,7 +4734,7 @@ static struct scsi_host_template gdth_template = { | |||
4734 | }; | 4734 | }; |
4735 | 4735 | ||
4736 | #ifdef CONFIG_ISA | 4736 | #ifdef CONFIG_ISA |
4737 | static int gdth_isa_probe_one(ulong32 isa_bios) | 4737 | static int __init gdth_isa_probe_one(ulong32 isa_bios) |
4738 | { | 4738 | { |
4739 | struct Scsi_Host *shp; | 4739 | struct Scsi_Host *shp; |
4740 | gdth_ha_str *ha; | 4740 | gdth_ha_str *ha; |
@@ -4862,7 +4862,7 @@ static int gdth_isa_probe_one(ulong32 isa_bios) | |||
4862 | #endif /* CONFIG_ISA */ | 4862 | #endif /* CONFIG_ISA */ |
4863 | 4863 | ||
4864 | #ifdef CONFIG_EISA | 4864 | #ifdef CONFIG_EISA |
4865 | static int gdth_eisa_probe_one(ushort eisa_slot) | 4865 | static int __init gdth_eisa_probe_one(ushort eisa_slot) |
4866 | { | 4866 | { |
4867 | struct Scsi_Host *shp; | 4867 | struct Scsi_Host *shp; |
4868 | gdth_ha_str *ha; | 4868 | gdth_ha_str *ha; |
@@ -4991,7 +4991,7 @@ static int gdth_eisa_probe_one(ushort eisa_slot) | |||
4991 | #endif /* CONFIG_EISA */ | 4991 | #endif /* CONFIG_EISA */ |
4992 | 4992 | ||
4993 | #ifdef CONFIG_PCI | 4993 | #ifdef CONFIG_PCI |
4994 | static int gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | 4994 | static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) |
4995 | { | 4995 | { |
4996 | struct Scsi_Host *shp; | 4996 | struct Scsi_Host *shp; |
4997 | gdth_ha_str *ha; | 4997 | gdth_ha_str *ha; |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 112ab6abe62b..24271a871b8c 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -345,6 +345,12 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
345 | shost->active_mode = sht->supported_mode; | 345 | shost->active_mode = sht->supported_mode; |
346 | shost->use_sg_chaining = sht->use_sg_chaining; | 346 | shost->use_sg_chaining = sht->use_sg_chaining; |
347 | 347 | ||
348 | if (sht->supported_mode == MODE_UNKNOWN) | ||
349 | /* means we didn't set it ... default to INITIATOR */ | ||
350 | shost->active_mode = MODE_INITIATOR; | ||
351 | else | ||
352 | shost->active_mode = sht->supported_mode; | ||
353 | |||
348 | if (sht->max_host_blocked) | 354 | if (sht->max_host_blocked) |
349 | shost->max_host_blocked = sht->max_host_blocked; | 355 | shost->max_host_blocked = sht->max_host_blocked; |
350 | else | 356 | else |
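The hosts.c hunk above makes an unset supported_mode behave as initiator when the active mode is computed. A minimal sketch of that fallback, assuming only the MODE_* constants from scsi_host.h (MODE_UNKNOWN is the zero value a template gets when the field is never set):

#include <scsi/scsi_host.h>

/* Sketch: templates that never set .supported_mode default to initiator. */
static unsigned int effective_host_mode(const struct scsi_host_template *sht)
{
	if (sht->supported_mode == MODE_UNKNOWN)
		return MODE_INITIATOR;
	return sht->supported_mode;
}

The scsi_sysfs.c hunk later in this series applies the same default when the supported mode is reported through sysfs.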
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 8515054cdf70..0844331abb87 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -375,8 +375,9 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag) | |||
375 | scp->result = SAM_STAT_CHECK_CONDITION; | 375 | scp->result = SAM_STAT_CHECK_CONDITION; |
376 | memset(&scp->sense_buffer, | 376 | memset(&scp->sense_buffer, |
377 | 0, sizeof(scp->sense_buffer)); | 377 | 0, sizeof(scp->sense_buffer)); |
378 | memcpy(&scp->sense_buffer, | 378 | memcpy(&scp->sense_buffer, &req->sg_list, |
379 | &req->sg_list, le32_to_cpu(req->dataxfer_length)); | 379 | min(sizeof(scp->sense_buffer), |
380 | le32_to_cpu(req->dataxfer_length))); | ||
380 | break; | 381 | break; |
381 | 382 | ||
382 | default: | 383 | default: |
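The hptiop fix above stops trusting the controller-reported dataxfer_length when copying sense data into the fixed-size sense_buffer. A generic sketch of that clamping pattern (the helper name and types are illustrative, not the driver's):

#include <linux/kernel.h>	/* min_t() */
#include <linux/string.h>	/* memset(), memcpy() */
#include <linux/types.h>

/* Copy at most dst_len bytes even if the device reports more. */
static void copy_sense_clamped(u8 *dst, size_t dst_len,
			       const u8 *src, u32 reported_len)
{
	size_t n = min_t(size_t, dst_len, reported_len);

	memset(dst, 0, dst_len);
	memcpy(dst, src, n);
}

min_t() is used here because the two lengths have different types; the patch itself uses min() with a sizeof operand, which works as long as both operands end up with the same type.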
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 22d40fd5845b..4c4465d39a1d 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c | |||
@@ -665,7 +665,7 @@ static void initio_init(struct initio_host * host, u8 *bios_addr) | |||
665 | host->max_tags[i] = 0xFF; | 665 | host->max_tags[i] = 0xFF; |
666 | } /* for */ | 666 | } /* for */ |
667 | printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n", | 667 | printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n", |
668 | host->addr, host->irq, | 668 | host->addr, host->pci_dev->irq, |
669 | host->bios_addr, host->scsi_id); | 669 | host->bios_addr, host->scsi_id); |
670 | /* Reset SCSI Bus */ | 670 | /* Reset SCSI Bus */ |
671 | if (host->config & HCC_SCSI_RESET) { | 671 | if (host->config & HCC_SCSI_RESET) { |
@@ -2891,6 +2891,8 @@ static int initio_probe_one(struct pci_dev *pdev, | |||
2891 | goto out_release_region; | 2891 | goto out_release_region; |
2892 | } | 2892 | } |
2893 | 2893 | ||
2894 | host->pci_dev = pdev; | ||
2895 | |||
2894 | host->num_scbs = num_scb; | 2896 | host->num_scbs = num_scb; |
2895 | host->scb = scb; | 2897 | host->scb = scb; |
2896 | host->next_pending = scb; | 2898 | host->next_pending = scb; |
@@ -2905,6 +2907,7 @@ static int initio_probe_one(struct pci_dev *pdev, | |||
2905 | host->scb_end = tmp; | 2907 | host->scb_end = tmp; |
2906 | host->first_avail = scb; | 2908 | host->first_avail = scb; |
2907 | host->last_avail = prev; | 2909 | host->last_avail = prev; |
2910 | spin_lock_init(&host->avail_lock); | ||
2908 | 2911 | ||
2909 | initio_init(host, phys_to_virt(bios_seg << 4)); | 2912 | initio_init(host, phys_to_virt(bios_seg << 4)); |
2910 | 2913 | ||
@@ -2928,7 +2931,6 @@ static int initio_probe_one(struct pci_dev *pdev, | |||
2928 | } | 2931 | } |
2929 | 2932 | ||
2930 | pci_set_drvdata(pdev, shost); | 2933 | pci_set_drvdata(pdev, shost); |
2931 | host->pci_dev = pdev; | ||
2932 | 2934 | ||
2933 | error = scsi_add_host(shost, &pdev->dev); | 2935 | error = scsi_add_host(shost, &pdev->dev); |
2934 | if (error) | 2936 | if (error) |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 149fdd25f8e8..d6a98bc970ff 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -901,7 +901,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
901 | } | 901 | } |
902 | } | 902 | } |
903 | 903 | ||
904 | vport->disc_trc = kmzlloc( | 904 | vport->disc_trc = kzalloc( |
905 | (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc), | 905 | (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc), |
906 | GFP_KERNEL); | 906 | GFP_KERNEL); |
907 | 907 | ||
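The lpfc hunk above replaces the misspelled allocator with kzalloc(), which returns zeroed memory. As a small sketch of the equivalence (placeholder function and length, not lpfc code):

#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: both forms yield a zeroed buffer; kzalloc() is the idiomatic
 * single call. */
static void *alloc_zeroed(size_t len)
{
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (buf)
		memset(buf, 0, len);
	/* equivalent: buf = kzalloc(len, GFP_KERNEL); */
	return buf;
}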
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c deleted file mode 100644 index 0363c1cd68c1..000000000000 --- a/drivers/scsi/pluto.c +++ /dev/null | |||
@@ -1,349 +0,0 @@ | |||
1 | /* pluto.c: SparcSTORAGE Array SCSI host adapter driver. | ||
2 | * | ||
3 | * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
4 | * | ||
5 | */ | ||
6 | |||
7 | #include <linux/completion.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/blkdev.h> | ||
14 | #include <linux/proc_fs.h> | ||
15 | #include <linux/stat.h> | ||
16 | #include <linux/init.h> | ||
17 | #ifdef CONFIG_KMOD | ||
18 | #include <linux/kmod.h> | ||
19 | #endif | ||
20 | |||
21 | #include <asm/irq.h> | ||
22 | |||
23 | #include "scsi.h" | ||
24 | #include <scsi/scsi_host.h> | ||
25 | #include "../fc4/fcp_impl.h" | ||
26 | #include "pluto.h" | ||
27 | |||
28 | #include <linux/module.h> | ||
29 | |||
30 | #define RQ_SCSI_BUSY 0xffff | ||
31 | #define RQ_SCSI_DONE 0xfffe | ||
32 | |||
33 | /* #define PLUTO_DEBUG */ | ||
34 | |||
35 | #define pluto_printk printk ("PLUTO %s: ", fc->name); printk | ||
36 | |||
37 | #ifdef PLUTO_DEBUG | ||
38 | #define PLD(x) pluto_printk x; | ||
39 | #define PLND(x) printk ("PLUTO: "); printk x; | ||
40 | #else | ||
41 | #define PLD(x) | ||
42 | #define PLND(x) | ||
43 | #endif | ||
44 | |||
45 | static struct ctrl_inquiry { | ||
46 | struct Scsi_Host host; | ||
47 | struct pluto pluto; | ||
48 | Scsi_Cmnd cmd; | ||
49 | char inquiry[256]; | ||
50 | fc_channel *fc; | ||
51 | } *fcs __initdata; | ||
52 | static int fcscount __initdata = 0; | ||
53 | static atomic_t fcss __initdata = ATOMIC_INIT(0); | ||
54 | static DECLARE_COMPLETION(fc_detect_complete); | ||
55 | |||
56 | static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd); | ||
57 | |||
58 | static void __init pluto_detect_done(Scsi_Cmnd *SCpnt) | ||
59 | { | ||
60 | /* Do nothing */ | ||
61 | } | ||
62 | |||
63 | static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt) | ||
64 | { | ||
65 | PLND(("Detect done %08lx\n", (long)SCpnt)) | ||
66 | if (atomic_dec_and_test (&fcss)) | ||
67 | complete(&fc_detect_complete); | ||
68 | } | ||
69 | |||
70 | int pluto_slave_configure(struct scsi_device *device) | ||
71 | { | ||
72 | int depth_to_use; | ||
73 | |||
74 | if (device->tagged_supported) | ||
75 | depth_to_use = /* 254 */ 8; | ||
76 | else | ||
77 | depth_to_use = 2; | ||
78 | |||
79 | scsi_adjust_queue_depth(device, | ||
80 | (device->tagged_supported ? | ||
81 | MSG_SIMPLE_TAG : 0), | ||
82 | depth_to_use); | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | /* Detect all SSAs attached to the machine. | ||
88 | To be fast, do it on all online FC channels at the same time. */ | ||
89 | int __init pluto_detect(struct scsi_host_template *tpnt) | ||
90 | { | ||
91 | int i, retry, nplutos; | ||
92 | fc_channel *fc; | ||
93 | struct scsi_device dev; | ||
94 | |||
95 | tpnt->proc_name = "pluto"; | ||
96 | fcscount = 0; | ||
97 | for_each_online_fc_channel(fc) { | ||
98 | if (!fc->posmap) | ||
99 | fcscount++; | ||
100 | } | ||
101 | PLND(("%d channels online\n", fcscount)) | ||
102 | if (!fcscount) { | ||
103 | #if defined(MODULE) && defined(CONFIG_FC4_SOC_MODULE) && defined(CONFIG_KMOD) | ||
104 | request_module("soc"); | ||
105 | |||
106 | for_each_online_fc_channel(fc) { | ||
107 | if (!fc->posmap) | ||
108 | fcscount++; | ||
109 | } | ||
110 | if (!fcscount) | ||
111 | #endif | ||
112 | return 0; | ||
113 | } | ||
114 | fcs = kcalloc(fcscount, sizeof (struct ctrl_inquiry), GFP_DMA); | ||
115 | if (!fcs) { | ||
116 | printk ("PLUTO: Not enough memory to probe\n"); | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | memset (&dev, 0, sizeof(dev)); | ||
121 | atomic_set (&fcss, fcscount); | ||
122 | |||
123 | i = 0; | ||
124 | for_each_online_fc_channel(fc) { | ||
125 | Scsi_Cmnd *SCpnt; | ||
126 | struct Scsi_Host *host; | ||
127 | struct pluto *pluto; | ||
128 | |||
129 | if (i == fcscount) break; | ||
130 | if (fc->posmap) continue; | ||
131 | |||
132 | PLD(("trying to find SSA\n")) | ||
133 | |||
134 | /* If this is already registered to some other SCSI host, then it cannot be pluto */ | ||
135 | if (fc->scsi_name[0]) continue; | ||
136 | memcpy (fc->scsi_name, "SSA", 4); | ||
137 | |||
138 | fcs[i].fc = fc; | ||
139 | |||
140 | fc->can_queue = PLUTO_CAN_QUEUE; | ||
141 | fc->rsp_size = 64; | ||
142 | fc->encode_addr = pluto_encode_addr; | ||
143 | |||
144 | fc->fcp_register(fc, TYPE_SCSI_FCP, 0); | ||
145 | |||
146 | SCpnt = &(fcs[i].cmd); | ||
147 | host = &(fcs[i].host); | ||
148 | pluto = (struct pluto *)host->hostdata; | ||
149 | |||
150 | pluto->fc = fc; | ||
151 | |||
152 | SCpnt->cmnd[0] = INQUIRY; | ||
153 | SCpnt->cmnd[4] = 255; | ||
154 | |||
155 | /* FC layer requires this, so that SCpnt->device->tagged_supported is initially 0 */ | ||
156 | SCpnt->device = &dev; | ||
157 | dev.host = host; | ||
158 | |||
159 | SCpnt->cmd_len = COMMAND_SIZE(INQUIRY); | ||
160 | |||
161 | SCpnt->request->cmd_flags &= ~REQ_STARTED; | ||
162 | |||
163 | SCpnt->request_bufflen = 256; | ||
164 | SCpnt->request_buffer = fcs[i].inquiry; | ||
165 | PLD(("set up %d %08lx\n", i, (long)SCpnt)) | ||
166 | i++; | ||
167 | } | ||
168 | |||
169 | for (retry = 0; retry < 5; retry++) { | ||
170 | for (i = 0; i < fcscount; i++) { | ||
171 | if (!fcs[i].fc) break; | ||
172 | if (!(fcs[i].cmd.request->cmd_flags & REQ_STARTED)) { | ||
173 | fcs[i].cmd.request->cmd_flags |= REQ_STARTED; | ||
174 | disable_irq(fcs[i].fc->irq); | ||
175 | PLND(("queuecommand %d %d\n", retry, i)) | ||
176 | fcp_scsi_queuecommand (&(fcs[i].cmd), | ||
177 | pluto_detect_scsi_done); | ||
178 | enable_irq(fcs[i].fc->irq); | ||
179 | } | ||
180 | } | ||
181 | |||
182 | wait_for_completion_timeout(&fc_detect_complete, 10 * HZ); | ||
183 | PLND(("Woken up\n")) | ||
184 | if (!atomic_read(&fcss)) | ||
185 | break; /* All fc channels have answered us */ | ||
186 | } | ||
187 | |||
188 | PLND(("Finished search\n")) | ||
189 | for (i = 0, nplutos = 0; i < fcscount; i++) { | ||
190 | Scsi_Cmnd *SCpnt; | ||
191 | |||
192 | if (!(fc = fcs[i].fc)) break; | ||
193 | |||
194 | SCpnt = &(fcs[i].cmd); | ||
195 | |||
196 | /* Let FC mid-level free allocated resources */ | ||
197 | pluto_detect_scsi_done(SCpnt); | ||
198 | |||
199 | if (!SCpnt->result) { | ||
200 | struct pluto_inquiry *inq; | ||
201 | struct pluto *pluto; | ||
202 | struct Scsi_Host *host; | ||
203 | |||
204 | inq = (struct pluto_inquiry *)fcs[i].inquiry; | ||
205 | |||
206 | if ((inq->dtype & 0x1f) == TYPE_PROCESSOR && | ||
207 | !strncmp (inq->vendor_id, "SUN", 3) && | ||
208 | !strncmp (inq->product_id, "SSA", 3)) { | ||
209 | char *p; | ||
210 | long *ages; | ||
211 | |||
212 | ages = kcalloc((inq->channels + 1) * inq->targets, sizeof(long), GFP_KERNEL); | ||
213 | if (!ages) continue; | ||
214 | |||
215 | host = scsi_register (tpnt, sizeof (struct pluto)); | ||
216 | if(!host) | ||
217 | { | ||
218 | kfree(ages); | ||
219 | continue; | ||
220 | } | ||
221 | |||
222 | if (!try_module_get(fc->module)) { | ||
223 | kfree(ages); | ||
224 | scsi_unregister(host); | ||
225 | continue; | ||
226 | } | ||
227 | |||
228 | nplutos++; | ||
229 | |||
230 | pluto = (struct pluto *)host->hostdata; | ||
231 | |||
232 | host->max_id = inq->targets; | ||
233 | host->max_channel = inq->channels; | ||
234 | host->irq = fc->irq; | ||
235 | |||
236 | fc->channels = inq->channels + 1; | ||
237 | fc->targets = inq->targets; | ||
238 | fc->ages = ages; | ||
239 | |||
240 | pluto->fc = fc; | ||
241 | memcpy (pluto->rev_str, inq->revision, 4); | ||
242 | pluto->rev_str[4] = 0; | ||
243 | p = strchr (pluto->rev_str, ' '); | ||
244 | if (p) *p = 0; | ||
245 | memcpy (pluto->fw_rev_str, inq->fw_revision, 4); | ||
246 | pluto->fw_rev_str[4] = 0; | ||
247 | p = strchr (pluto->fw_rev_str, ' '); | ||
248 | if (p) *p = 0; | ||
249 | memcpy (pluto->serial_str, inq->serial, 12); | ||
250 | pluto->serial_str[12] = 0; | ||
251 | p = strchr (pluto->serial_str, ' '); | ||
252 | if (p) *p = 0; | ||
253 | |||
254 | PLD(("Found SSA rev %s fw rev %s serial %s %dx%d\n", pluto->rev_str, pluto->fw_rev_str, pluto->serial_str, host->max_channel, host->max_id)) | ||
255 | } else | ||
256 | fc->fcp_register(fc, TYPE_SCSI_FCP, 1); | ||
257 | } else | ||
258 | fc->fcp_register(fc, TYPE_SCSI_FCP, 1); | ||
259 | } | ||
260 | kfree(fcs); | ||
261 | if (nplutos) | ||
262 | printk ("PLUTO: Total of %d SparcSTORAGE Arrays found\n", nplutos); | ||
263 | return nplutos; | ||
264 | } | ||
265 | |||
266 | int pluto_release(struct Scsi_Host *host) | ||
267 | { | ||
268 | struct pluto *pluto = (struct pluto *)host->hostdata; | ||
269 | fc_channel *fc = pluto->fc; | ||
270 | |||
271 | module_put(fc->module); | ||
272 | |||
273 | fc->fcp_register(fc, TYPE_SCSI_FCP, 1); | ||
274 | PLND((" releasing pluto.\n")); | ||
275 | kfree (fc->ages); | ||
276 | PLND(("released pluto!\n")); | ||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | const char *pluto_info(struct Scsi_Host *host) | ||
281 | { | ||
282 | static char buf[128], *p; | ||
283 | struct pluto *pluto = (struct pluto *) host->hostdata; | ||
284 | |||
285 | sprintf(buf, "SUN SparcSTORAGE Array %s fw %s serial %s %dx%d on %s", | ||
286 | pluto->rev_str, pluto->fw_rev_str, pluto->serial_str, | ||
287 | host->max_channel, host->max_id, pluto->fc->name); | ||
288 | #ifdef __sparc__ | ||
289 | p = strchr(buf, 0); | ||
290 | sprintf(p, " PROM node %x", pluto->fc->dev->prom_node); | ||
291 | #endif | ||
292 | return buf; | ||
293 | } | ||
294 | |||
295 | /* SSA uses this FC4S addressing: | ||
296 | switch (addr[0]) | ||
297 | { | ||
298 | case 0: CONTROLLER - All of addr[1]..addr[3] has to be 0 | ||
299 | case 1: SINGLE DISK - addr[1] channel, addr[2] id, addr[3] 0 | ||
300 | case 2: DISK GROUP - ??? | ||
301 | } | ||
302 | |||
303 | So that SCSI mid-layer can access to these, we reserve | ||
304 | channel 0 id 0 lun 0 for CONTROLLER | ||
305 | and channels 1 .. max_channel are normal single disks. | ||
306 | */ | ||
307 | static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd) | ||
308 | { | ||
309 | PLND(("encode addr %d %d %d\n", SCpnt->device->channel, SCpnt->device->id, SCpnt->cmnd[1] & 0xe0)) | ||
310 | /* We don't support LUNs - neither does SSA :) */ | ||
311 | if (SCpnt->cmnd[1] & 0xe0) | ||
312 | return -EINVAL; | ||
313 | if (!SCpnt->device->channel) { | ||
314 | if (SCpnt->device->id) | ||
315 | return -EINVAL; | ||
316 | memset (addr, 0, 4 * sizeof(u16)); | ||
317 | } else { | ||
318 | addr[0] = 1; | ||
319 | addr[1] = SCpnt->device->channel - 1; | ||
320 | addr[2] = SCpnt->device->id; | ||
321 | addr[3] = 0; | ||
322 | } | ||
323 | /* We're Point-to-Point, so target it to the default DID */ | ||
324 | fcmd->did = fc->did; | ||
325 | PLND(("trying %04x%04x%04x%04x\n", addr[0], addr[1], addr[2], addr[3])) | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static struct scsi_host_template driver_template = { | ||
330 | .name = "Sparc Storage Array 100/200", | ||
331 | .detect = pluto_detect, | ||
332 | .release = pluto_release, | ||
333 | .info = pluto_info, | ||
334 | .queuecommand = fcp_scsi_queuecommand, | ||
335 | .slave_configure = pluto_slave_configure, | ||
336 | .can_queue = PLUTO_CAN_QUEUE, | ||
337 | .this_id = -1, | ||
338 | .sg_tablesize = 1, | ||
339 | .cmd_per_lun = 1, | ||
340 | .use_clustering = ENABLE_CLUSTERING, | ||
341 | .eh_abort_handler = fcp_scsi_abort, | ||
342 | .eh_device_reset_handler = fcp_scsi_dev_reset, | ||
343 | .eh_host_reset_handler = fcp_scsi_host_reset, | ||
344 | }; | ||
345 | |||
346 | #include "scsi_module.c" | ||
347 | |||
348 | MODULE_LICENSE("GPL"); | ||
349 | |||
diff --git a/drivers/scsi/pluto.h b/drivers/scsi/pluto.h deleted file mode 100644 index 5da20616ac36..000000000000 --- a/drivers/scsi/pluto.h +++ /dev/null | |||
@@ -1,47 +0,0 @@ | |||
1 | /* pluto.h: SparcSTORAGE Array SCSI host adapter driver definitions. | ||
2 | * | ||
3 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
4 | */ | ||
5 | |||
6 | #ifndef _PLUTO_H | ||
7 | #define _PLUTO_H | ||
8 | |||
9 | #include "../fc4/fcp_impl.h" | ||
10 | |||
11 | struct pluto { | ||
12 | /* This must be first */ | ||
13 | fc_channel *fc; | ||
14 | char rev_str[5]; | ||
15 | char fw_rev_str[5]; | ||
16 | char serial_str[13]; | ||
17 | }; | ||
18 | |||
19 | struct pluto_inquiry { | ||
20 | u8 dtype; | ||
21 | u8 removable:1, qualifier:7; | ||
22 | u8 iso:2, ecma:3, ansi:3; | ||
23 | u8 aenc:1, trmiop:1, :2, rdf:4; | ||
24 | u8 len; | ||
25 | u8 xxx1; | ||
26 | u8 xxx2; | ||
27 | u8 reladdr:1, wbus32:1, wbus16:1, sync:1, linked:1, :1, cmdque:1, softreset:1; | ||
28 | u8 vendor_id[8]; | ||
29 | u8 product_id[16]; | ||
30 | u8 revision[4]; | ||
31 | u8 fw_revision[4]; | ||
32 | u8 serial[12]; | ||
33 | u8 xxx3[2]; | ||
34 | u8 channels; | ||
35 | u8 targets; | ||
36 | }; | ||
37 | |||
38 | /* This is the max number of outstanding SCSI commands per pluto */ | ||
39 | #define PLUTO_CAN_QUEUE 254 | ||
40 | |||
41 | int pluto_detect(struct scsi_host_template *); | ||
42 | int pluto_release(struct Scsi_Host *); | ||
43 | const char * pluto_info(struct Scsi_Host *); | ||
44 | int pluto_slave_configure(struct scsi_device *); | ||
45 | |||
46 | #endif /* !(_PLUTO_H) */ | ||
47 | |||
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 76089cf55f4e..3aeb68bcb7ac 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -4310,7 +4310,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
4310 | printk(KERN_WARNING "scsi(%li): Unable to set a " | 4310 | printk(KERN_WARNING "scsi(%li): Unable to set a " |
4311 | "suitable DMA mask - aborting\n", ha->host_no); | 4311 | "suitable DMA mask - aborting\n", ha->host_no); |
4312 | error = -ENODEV; | 4312 | error = -ENODEV; |
4313 | goto error_free_irq; | 4313 | goto error_put_host; |
4314 | } | 4314 | } |
4315 | } else | 4315 | } else |
4316 | dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n", | 4316 | dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n", |
@@ -4320,7 +4320,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
4320 | printk(KERN_WARNING "scsi(%li): Unable to set a " | 4320 | printk(KERN_WARNING "scsi(%li): Unable to set a " |
4321 | "suitable DMA mask - aborting\n", ha->host_no); | 4321 | "suitable DMA mask - aborting\n", ha->host_no); |
4322 | error = -ENODEV; | 4322 | error = -ENODEV; |
4323 | goto error_free_irq; | 4323 | goto error_put_host; |
4324 | } | 4324 | } |
4325 | #endif | 4325 | #endif |
4326 | 4326 | ||
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index 59915fb70301..ff2c363ead26 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h | |||
@@ -91,8 +91,8 @@ | |||
91 | #define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) | 91 | #define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) |
92 | 92 | ||
93 | /* ISP request and response entry counts (37-65535) */ | 93 | /* ISP request and response entry counts (37-65535) */ |
94 | #define REQUEST_ENTRY_CNT 256 /* Number of request entries. */ | 94 | #define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ |
95 | #define RESPONSE_ENTRY_CNT 16 /* Number of response entries. */ | 95 | #define RESPONSE_ENTRY_CNT 63 /* Number of response entries. */ |
96 | 96 | ||
97 | /* | 97 | /* |
98 | * SCSI Request Block structure (sp) that is placed | 98 | * SCSI Request Block structure (sp) that is placed |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 05fa7796a559..fb388b8c07cf 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -114,7 +114,6 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj, | |||
114 | { | 114 | { |
115 | struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, | 115 | struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, |
116 | struct device, kobj))); | 116 | struct device, kobj))); |
117 | unsigned long flags; | ||
118 | uint16_t cnt; | 117 | uint16_t cnt; |
119 | 118 | ||
120 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) | 119 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) |
@@ -144,11 +143,9 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj, | |||
144 | } | 143 | } |
145 | 144 | ||
146 | /* Write NVRAM. */ | 145 | /* Write NVRAM. */ |
147 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
148 | ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); | 146 | ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); |
149 | ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base, | 147 | ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base, |
150 | count); | 148 | count); |
151 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
152 | 149 | ||
153 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 150 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); |
154 | 151 | ||
@@ -397,16 +394,13 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj, | |||
397 | { | 394 | { |
398 | struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, | 395 | struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, |
399 | struct device, kobj))); | 396 | struct device, kobj))); |
400 | unsigned long flags; | ||
401 | 397 | ||
402 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size) | 398 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size) |
403 | return 0; | 399 | return 0; |
404 | 400 | ||
405 | /* Write NVRAM. */ | 401 | /* Write NVRAM. */ |
406 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
407 | ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); | 402 | ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); |
408 | ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count); | 403 | ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count); |
409 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
410 | 404 | ||
411 | return count; | 405 | return count; |
412 | } | 406 | } |
@@ -544,6 +538,9 @@ qla2x00_serial_num_show(struct class_device *cdev, char *buf) | |||
544 | scsi_qla_host_t *ha = shost_priv(class_to_shost(cdev)); | 538 | scsi_qla_host_t *ha = shost_priv(class_to_shost(cdev)); |
545 | uint32_t sn; | 539 | uint32_t sn; |
546 | 540 | ||
541 | if (IS_FWI2_CAPABLE(ha)) | ||
542 | return snprintf(buf, PAGE_SIZE, "\n"); | ||
543 | |||
547 | sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; | 544 | sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; |
548 | return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, | 545 | return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, |
549 | sn % 100000); | 546 | sn % 100000); |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 1900fbf6cd74..04e8cbca4c0d 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2271,6 +2271,7 @@ typedef struct scsi_qla_host { | |||
2271 | 2271 | ||
2272 | spinlock_t hardware_lock ____cacheline_aligned; | 2272 | spinlock_t hardware_lock ____cacheline_aligned; |
2273 | 2273 | ||
2274 | int bars; | ||
2274 | device_reg_t __iomem *iobase; /* Base I/O address */ | 2275 | device_reg_t __iomem *iobase; /* Base I/O address */ |
2275 | unsigned long pio_address; | 2276 | unsigned long pio_address; |
2276 | unsigned long pio_length; | 2277 | unsigned long pio_length; |
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h index dd435410dfa2..d78d35e681ab 100644 --- a/drivers/scsi/qla2xxx/qla_devtbl.h +++ b/drivers/scsi/qla2xxx/qla_devtbl.h | |||
@@ -1,4 +1,4 @@ | |||
1 | #define QLA_MODEL_NAMES 0x57 | 1 | #define QLA_MODEL_NAMES 0x5C |
2 | 2 | ||
3 | /* | 3 | /* |
4 | * Adapter model names and descriptions. | 4 | * Adapter model names and descriptions. |
@@ -91,4 +91,9 @@ static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = { | |||
91 | " ", " ", /* 0x154 */ | 91 | " ", " ", /* 0x154 */ |
92 | "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x155 */ | 92 | "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x155 */ |
93 | "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x156 */ | 93 | "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x156 */ |
94 | " ", " ", /* 0x157 */ | ||
95 | " ", " ", /* 0x158 */ | ||
96 | " ", " ", /* 0x159 */ | ||
97 | " ", " ", /* 0x15a */ | ||
98 | "QME2472", "Dell BS PCI-Express to 4Gb FC, Dual Channel", /* 0x15b */ | ||
94 | }; | 99 | }; |
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 7f6a89bd94f3..024c662ec34d 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -525,7 +525,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha) | |||
525 | 525 | ||
526 | /* Check for pending interrupts. */ | 526 | /* Check for pending interrupts. */ |
527 | /* During init we issue marker directly */ | 527 | /* During init we issue marker directly */ |
528 | if (!ha->marker_needed) | 528 | if (!ha->marker_needed && !ha->flags.init_done) |
529 | qla2x00_poll(ha); | 529 | qla2x00_poll(ha); |
530 | 530 | ||
531 | spin_lock_irq(&ha->hardware_lock); | 531 | spin_lock_irq(&ha->hardware_lock); |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index c4768c4f3990..1104bd2eed40 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1012,8 +1012,14 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1012 | case CS_DATA_UNDERRUN: | 1012 | case CS_DATA_UNDERRUN: |
1013 | resid = resid_len; | 1013 | resid = resid_len; |
1014 | /* Use F/W calculated residual length. */ | 1014 | /* Use F/W calculated residual length. */ |
1015 | if (IS_FWI2_CAPABLE(ha)) | 1015 | if (IS_FWI2_CAPABLE(ha)) { |
1016 | if (scsi_status & SS_RESIDUAL_UNDER && | ||
1017 | resid != fw_resid_len) { | ||
1018 | scsi_status &= ~SS_RESIDUAL_UNDER; | ||
1019 | lscsi_status = 0; | ||
1020 | } | ||
1016 | resid = fw_resid_len; | 1021 | resid = fw_resid_len; |
1022 | } | ||
1017 | 1023 | ||
1018 | if (scsi_status & SS_RESIDUAL_UNDER) { | 1024 | if (scsi_status & SS_RESIDUAL_UNDER) { |
1019 | scsi_set_resid(cp, resid); | 1025 | scsi_set_resid(cp, resid); |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index c53ec67c47f4..ccd662a6f5dc 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -252,7 +252,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp) | |||
252 | /* Clean up */ | 252 | /* Clean up */ |
253 | ha->mcp = NULL; | 253 | ha->mcp = NULL; |
254 | 254 | ||
255 | if (!abort_active) { | 255 | if (abort_active || !io_lock_on) { |
256 | DEBUG11(printk("%s(%ld): checking for additional resp " | 256 | DEBUG11(printk("%s(%ld): checking for additional resp " |
257 | "interrupt.\n", __func__, ha->host_no)); | 257 | "interrupt.\n", __func__, ha->host_no)); |
258 | 258 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 0351d380c2d7..a5bcf1f390b3 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1482,6 +1482,17 @@ qla2x00_iospace_config(scsi_qla_host_t *ha) | |||
1482 | unsigned long pio, pio_len, pio_flags; | 1482 | unsigned long pio, pio_len, pio_flags; |
1483 | unsigned long mmio, mmio_len, mmio_flags; | 1483 | unsigned long mmio, mmio_len, mmio_flags; |
1484 | 1484 | ||
1485 | if (pci_request_selected_regions(ha->pdev, ha->bars, | ||
1486 | QLA2XXX_DRIVER_NAME)) { | ||
1487 | qla_printk(KERN_WARNING, ha, | ||
1488 | "Failed to reserve PIO/MMIO regions (%s)\n", | ||
1489 | pci_name(ha->pdev)); | ||
1490 | |||
1491 | goto iospace_error_exit; | ||
1492 | } | ||
1493 | if (!(ha->bars & 1)) | ||
1494 | goto skip_pio; | ||
1495 | |||
1485 | /* We only need PIO for Flash operations on ISP2312 v2 chips. */ | 1496 | /* We only need PIO for Flash operations on ISP2312 v2 chips. */ |
1486 | pio = pci_resource_start(ha->pdev, 0); | 1497 | pio = pci_resource_start(ha->pdev, 0); |
1487 | pio_len = pci_resource_len(ha->pdev, 0); | 1498 | pio_len = pci_resource_len(ha->pdev, 0); |
@@ -1499,7 +1510,10 @@ qla2x00_iospace_config(scsi_qla_host_t *ha) | |||
1499 | pci_name(ha->pdev)); | 1510 | pci_name(ha->pdev)); |
1500 | pio = 0; | 1511 | pio = 0; |
1501 | } | 1512 | } |
1513 | ha->pio_address = pio; | ||
1514 | ha->pio_length = pio_len; | ||
1502 | 1515 | ||
1516 | skip_pio: | ||
1503 | /* Use MMIO operations for all accesses. */ | 1517 | /* Use MMIO operations for all accesses. */ |
1504 | mmio = pci_resource_start(ha->pdev, 1); | 1518 | mmio = pci_resource_start(ha->pdev, 1); |
1505 | mmio_len = pci_resource_len(ha->pdev, 1); | 1519 | mmio_len = pci_resource_len(ha->pdev, 1); |
@@ -1518,16 +1532,6 @@ qla2x00_iospace_config(scsi_qla_host_t *ha) | |||
1518 | goto iospace_error_exit; | 1532 | goto iospace_error_exit; |
1519 | } | 1533 | } |
1520 | 1534 | ||
1521 | if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { | ||
1522 | qla_printk(KERN_WARNING, ha, | ||
1523 | "Failed to reserve PIO/MMIO regions (%s)\n", | ||
1524 | pci_name(ha->pdev)); | ||
1525 | |||
1526 | goto iospace_error_exit; | ||
1527 | } | ||
1528 | |||
1529 | ha->pio_address = pio; | ||
1530 | ha->pio_length = pio_len; | ||
1531 | ha->iobase = ioremap(mmio, MIN_IOBASE_LEN); | 1535 | ha->iobase = ioremap(mmio, MIN_IOBASE_LEN); |
1532 | if (!ha->iobase) { | 1536 | if (!ha->iobase) { |
1533 | qla_printk(KERN_ERR, ha, | 1537 | qla_printk(KERN_ERR, ha, |
@@ -1579,21 +1583,26 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1579 | char pci_info[30]; | 1583 | char pci_info[30]; |
1580 | char fw_str[30]; | 1584 | char fw_str[30]; |
1581 | struct scsi_host_template *sht; | 1585 | struct scsi_host_template *sht; |
1586 | int bars; | ||
1582 | 1587 | ||
1583 | if (pci_enable_device(pdev)) | 1588 | bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); |
1584 | goto probe_out; | ||
1585 | |||
1586 | if (pci_find_aer_capability(pdev)) | ||
1587 | if (pci_enable_pcie_error_reporting(pdev)) | ||
1588 | goto probe_out; | ||
1589 | |||
1590 | sht = &qla2x00_driver_template; | 1589 | sht = &qla2x00_driver_template; |
1591 | if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || | 1590 | if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || |
1592 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || | 1591 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || |
1593 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || | 1592 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || |
1594 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || | 1593 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || |
1595 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) | 1594 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { |
1595 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
1596 | sht = &qla24xx_driver_template; | 1596 | sht = &qla24xx_driver_template; |
1597 | } | ||
1598 | |||
1599 | if (pci_enable_device_bars(pdev, bars)) | ||
1600 | goto probe_out; | ||
1601 | |||
1602 | if (pci_find_aer_capability(pdev)) | ||
1603 | if (pci_enable_pcie_error_reporting(pdev)) | ||
1604 | goto probe_out; | ||
1605 | |||
1597 | host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); | 1606 | host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); |
1598 | if (host == NULL) { | 1607 | if (host == NULL) { |
1599 | printk(KERN_WARNING | 1608 | printk(KERN_WARNING |
@@ -1610,6 +1619,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1610 | ha->host_no = host->host_no; | 1619 | ha->host_no = host->host_no; |
1611 | sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no); | 1620 | sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no); |
1612 | ha->parent = NULL; | 1621 | ha->parent = NULL; |
1622 | ha->bars = bars; | ||
1613 | 1623 | ||
1614 | /* Set ISP-type information. */ | 1624 | /* Set ISP-type information. */ |
1615 | qla2x00_set_isp_flags(ha); | 1625 | qla2x00_set_isp_flags(ha); |
@@ -1880,7 +1890,7 @@ qla2x00_free_device(scsi_qla_host_t *ha) | |||
1880 | /* release io space registers */ | 1890 | /* release io space registers */ |
1881 | if (ha->iobase) | 1891 | if (ha->iobase) |
1882 | iounmap(ha->iobase); | 1892 | iounmap(ha->iobase); |
1883 | pci_release_regions(ha->pdev); | 1893 | pci_release_selected_regions(ha->pdev, ha->bars); |
1884 | } | 1894 | } |
1885 | 1895 | ||
1886 | static inline void | 1896 | static inline void |
@@ -2890,7 +2900,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) | |||
2890 | pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; | 2900 | pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; |
2891 | scsi_qla_host_t *ha = pci_get_drvdata(pdev); | 2901 | scsi_qla_host_t *ha = pci_get_drvdata(pdev); |
2892 | 2902 | ||
2893 | if (pci_enable_device(pdev)) { | 2903 | if (pci_enable_device_bars(pdev, ha->bars)) { |
2894 | qla_printk(KERN_WARNING, ha, | 2904 | qla_printk(KERN_WARNING, ha, |
2895 | "Can't re-enable PCI device after reset.\n"); | 2905 | "Can't re-enable PCI device after reset.\n"); |
2896 | 2906 | ||
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 40b059fc1981..ad2fa01bd233 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | 8 | ||
9 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
10 | #include <linux/vmalloc.h> | ||
10 | #include <asm/uaccess.h> | 11 | #include <asm/uaccess.h> |
11 | 12 | ||
12 | static uint16_t qla2x00_nvram_request(scsi_qla_host_t *, uint32_t); | 13 | static uint16_t qla2x00_nvram_request(scsi_qla_host_t *, uint32_t); |
@@ -642,7 +643,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, | |||
642 | } | 643 | } |
643 | 644 | ||
644 | /* Go with burst-write. */ | 645 | /* Go with burst-write. */ |
645 | if (optrom && (liter + OPTROM_BURST_DWORDS) < dwords) { | 646 | if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { |
646 | /* Copy data to DMA'ble buffer. */ | 647 | /* Copy data to DMA'ble buffer. */ |
647 | for (miter = 0, s = optrom, d = dwptr; | 648 | for (miter = 0, s = optrom, d = dwptr; |
648 | miter < OPTROM_BURST_DWORDS; miter++, s++, d++) | 649 | miter < OPTROM_BURST_DWORDS; miter++, s++, d++) |
@@ -656,7 +657,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, | |||
656 | "Unable to burst-write optrom segment " | 657 | "Unable to burst-write optrom segment " |
657 | "(%x/%x/%llx).\n", ret, | 658 | "(%x/%x/%llx).\n", ret, |
658 | flash_data_to_access_addr(faddr), | 659 | flash_data_to_access_addr(faddr), |
659 | optrom_dma); | 660 | (unsigned long long)optrom_dma); |
660 | qla_printk(KERN_WARNING, ha, | 661 | qla_printk(KERN_WARNING, ha, |
661 | "Reverting to slow-write.\n"); | 662 | "Reverting to slow-write.\n"); |
662 | 663 | ||
@@ -745,9 +746,11 @@ qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, | |||
745 | int ret, stat; | 746 | int ret, stat; |
746 | uint32_t i; | 747 | uint32_t i; |
747 | uint16_t *wptr; | 748 | uint16_t *wptr; |
749 | unsigned long flags; | ||
748 | 750 | ||
749 | ret = QLA_SUCCESS; | 751 | ret = QLA_SUCCESS; |
750 | 752 | ||
753 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
751 | qla2x00_lock_nvram_access(ha); | 754 | qla2x00_lock_nvram_access(ha); |
752 | 755 | ||
753 | /* Disable NVRAM write-protection. */ | 756 | /* Disable NVRAM write-protection. */ |
@@ -764,6 +767,7 @@ qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, | |||
764 | qla2x00_set_nvram_protection(ha, stat); | 767 | qla2x00_set_nvram_protection(ha, stat); |
765 | 768 | ||
766 | qla2x00_unlock_nvram_access(ha); | 769 | qla2x00_unlock_nvram_access(ha); |
770 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
767 | 771 | ||
768 | return ret; | 772 | return ret; |
769 | } | 773 | } |
@@ -776,9 +780,11 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, | |||
776 | uint32_t i; | 780 | uint32_t i; |
777 | uint32_t *dwptr; | 781 | uint32_t *dwptr; |
778 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 782 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
783 | unsigned long flags; | ||
779 | 784 | ||
780 | ret = QLA_SUCCESS; | 785 | ret = QLA_SUCCESS; |
781 | 786 | ||
787 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
782 | /* Enable flash write. */ | 788 | /* Enable flash write. */ |
783 | WRT_REG_DWORD(®->ctrl_status, | 789 | WRT_REG_DWORD(®->ctrl_status, |
784 | RD_REG_DWORD(®->ctrl_status) | CSRX_FLASH_ENABLE); | 790 | RD_REG_DWORD(®->ctrl_status) | CSRX_FLASH_ENABLE); |
@@ -812,6 +818,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, | |||
812 | WRT_REG_DWORD(®->ctrl_status, | 818 | WRT_REG_DWORD(®->ctrl_status, |
813 | RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE); | 819 | RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE); |
814 | RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ | 820 | RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ |
821 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
815 | 822 | ||
816 | return ret; | 823 | return ret; |
817 | } | 824 | } |
@@ -836,8 +843,20 @@ int | |||
836 | qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, | 843 | qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, |
837 | uint32_t bytes) | 844 | uint32_t bytes) |
838 | { | 845 | { |
839 | return qla24xx_write_flash_data(ha, (uint32_t *)buf, | 846 | #define RMW_BUFFER_SIZE (64 * 1024) |
840 | FA_VPD_NVRAM_ADDR | naddr, bytes >> 2); | 847 | uint8_t *dbuf; |
848 | |||
849 | dbuf = vmalloc(RMW_BUFFER_SIZE); | ||
850 | if (!dbuf) | ||
851 | return QLA_MEMORY_ALLOC_FAILED; | ||
852 | ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, | ||
853 | RMW_BUFFER_SIZE); | ||
854 | memcpy(dbuf + (naddr << 2), buf, bytes); | ||
855 | ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, | ||
856 | RMW_BUFFER_SIZE); | ||
857 | vfree(dbuf); | ||
858 | |||
859 | return QLA_SUCCESS; | ||
841 | } | 860 | } |
842 | 861 | ||
843 | static inline void | 862 | static inline void |
@@ -1853,7 +1872,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, | |||
1853 | qla_printk(KERN_WARNING, ha, | 1872 | qla_printk(KERN_WARNING, ha, |
1854 | "Unable to burst-read optrom segment " | 1873 | "Unable to burst-read optrom segment " |
1855 | "(%x/%x/%llx).\n", rval, | 1874 | "(%x/%x/%llx).\n", rval, |
1856 | flash_data_to_access_addr(faddr), optrom_dma); | 1875 | flash_data_to_access_addr(faddr), |
1876 | (unsigned long long)optrom_dma); | ||
1857 | qla_printk(KERN_WARNING, ha, | 1877 | qla_printk(KERN_WARNING, ha, |
1858 | "Reverting to slow-read.\n"); | 1878 | "Reverting to slow-read.\n"); |
1859 | 1879 | ||
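The qla_sup.c hunks move the hardware_lock acquisition out of the sysfs write paths and into the NVRAM write helpers themselves, so every caller gets the same protection. A minimal sketch of guarding a register sequence with spin_lock_irqsave(), using placeholder structure and register names rather than the qla2xxx ones:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Placeholder device state; only the lock and MMIO base matter here. */
struct my_hba {
	spinlock_t hardware_lock;
	void __iomem *regs;
};

/* Serialize the register access against the interrupt handler. */
static void my_write_reg_locked(struct my_hba *hba, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&hba->hardware_lock, flags);
	writel(val, hba->regs + off);
	readl(hba->regs + off);		/* flush PCI posting */
	spin_unlock_irqrestore(&hba->hardware_lock, flags);
}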
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 2d551a3006f6..ae6f7a2fb19f 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.00-k4" | 10 | #define QLA2XXX_VERSION "8.02.00-k5" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index d29f8464b74f..ebaca4ca4a13 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/blkdev.h> | 25 | #include <linux/blkdev.h> |
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/scatterlist.h> | ||
28 | 27 | ||
29 | #include <scsi/scsi.h> | 28 | #include <scsi/scsi.h> |
30 | #include <scsi/scsi_cmnd.h> | 29 | #include <scsi/scsi_cmnd.h> |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index daed37df00b1..d531ceeb0d8c 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -209,11 +209,13 @@ show_shost_mode(unsigned int mode, char *buf) | |||
209 | static ssize_t show_shost_supported_mode(struct class_device *class_dev, char *buf) | 209 | static ssize_t show_shost_supported_mode(struct class_device *class_dev, char *buf) |
210 | { | 210 | { |
211 | struct Scsi_Host *shost = class_to_shost(class_dev); | 211 | struct Scsi_Host *shost = class_to_shost(class_dev); |
212 | unsigned int supported_mode = shost->hostt->supported_mode; | ||
212 | 213 | ||
213 | if (shost->hostt->supported_mode == MODE_UNKNOWN) | 214 | if (supported_mode == MODE_UNKNOWN) |
214 | return snprintf(buf, 20, "unknown\n"); | 215 | /* by default this should be initiator */ |
215 | else | 216 | supported_mode = MODE_INITIATOR; |
216 | return show_shost_mode(shost->hostt->supported_mode, buf); | 217 | |
218 | return show_shost_mode(supported_mode, buf); | ||
217 | } | 219 | } |
218 | 220 | ||
219 | static CLASS_DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); | 221 | static CLASS_DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); |
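The scsi_sysfs change above makes a host template that never filled in supported_mode report "Initiator" instead of "unknown": an unset (MODE_UNKNOWN) value is folded to MODE_INITIATOR before formatting. A tiny stand-alone sketch of that fallback, with illustrative MODE_* values rather than the kernel's definitions:

#include <stdio.h>

enum { MODE_UNKNOWN = 0, MODE_INITIATOR = 1, MODE_TARGET = 2 };

static const char *mode_name(unsigned int mode)
{
	if (mode == MODE_UNKNOWN)
		mode = MODE_INITIATOR;	/* default when the LLD left it unset */
	return (mode & MODE_TARGET) ? "Target" : "Initiator";
}

int main(void)
{
	printf("%s\n", mode_name(MODE_UNKNOWN));	/* prints "Initiator" */
	return 0;
}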
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h index 7519728dfc38..62d29cfac9e4 100644 --- a/drivers/scsi/sym53c8xx_2/sym53c8xx.h +++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h | |||
@@ -127,7 +127,6 @@ struct sym_driver_setup { | |||
127 | u_char settle_delay; | 127 | u_char settle_delay; |
128 | u_char use_nvram; | 128 | u_char use_nvram; |
129 | u_long excludes[8]; | 129 | u_long excludes[8]; |
130 | char tag_ctrl[100]; | ||
131 | }; | 130 | }; |
132 | 131 | ||
133 | #define SYM_SETUP_MAX_TAG sym_driver_setup.max_tag | 132 | #define SYM_SETUP_MAX_TAG sym_driver_setup.max_tag |
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c index 9916a2a22558..190770bdc194 100644 --- a/drivers/scsi/sym53c8xx_2/sym_fw.c +++ b/drivers/scsi/sym53c8xx_2/sym_fw.c | |||
@@ -104,8 +104,9 @@ static struct sym_fwz_ofs sym_fw2z_ofs = { | |||
104 | * Patch routine for firmware #1. | 104 | * Patch routine for firmware #1. |
105 | */ | 105 | */ |
106 | static void | 106 | static void |
107 | sym_fw1_patch(struct sym_hcb *np) | 107 | sym_fw1_patch(struct Scsi_Host *shost) |
108 | { | 108 | { |
109 | struct sym_hcb *np = sym_get_hcb(shost); | ||
109 | struct sym_fw1a_scr *scripta0; | 110 | struct sym_fw1a_scr *scripta0; |
110 | struct sym_fw1b_scr *scriptb0; | 111 | struct sym_fw1b_scr *scriptb0; |
111 | 112 | ||
@@ -145,8 +146,11 @@ sym_fw1_patch(struct sym_hcb *np) | |||
145 | * Patch routine for firmware #2. | 146 | * Patch routine for firmware #2. |
146 | */ | 147 | */ |
147 | static void | 148 | static void |
148 | sym_fw2_patch(struct sym_hcb *np) | 149 | sym_fw2_patch(struct Scsi_Host *shost) |
149 | { | 150 | { |
151 | struct sym_data *sym_data = shost_priv(shost); | ||
152 | struct pci_dev *pdev = sym_data->pdev; | ||
153 | struct sym_hcb *np = sym_data->ncb; | ||
150 | struct sym_fw2a_scr *scripta0; | 154 | struct sym_fw2a_scr *scripta0; |
151 | struct sym_fw2b_scr *scriptb0; | 155 | struct sym_fw2b_scr *scriptb0; |
152 | 156 | ||
@@ -167,7 +171,7 @@ sym_fw2_patch(struct sym_hcb *np) | |||
167 | * Remove useless 64 bit DMA specific SCRIPTS, | 171 | * Remove useless 64 bit DMA specific SCRIPTS, |
168 | * when this feature is not available. | 172 | * when this feature is not available. |
169 | */ | 173 | */ |
170 | if (!np->use_dac) { | 174 | if (!use_dac(np)) { |
171 | scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP); | 175 | scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP); |
172 | scripta0->is_dmap_dirty[1] = 0; | 176 | scripta0->is_dmap_dirty[1] = 0; |
173 | scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP); | 177 | scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP); |
@@ -205,14 +209,14 @@ sym_fw2_patch(struct sym_hcb *np) | |||
205 | * Remove a couple of work-arounds specific to C1010 if | 209 | * Remove a couple of work-arounds specific to C1010 if |
206 | * they are not desirable. See `sym_fw2.h' for more details. | 210 | * they are not desirable. See `sym_fw2.h' for more details. |
207 | */ | 211 | */ |
208 | if (!(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 && | 212 | if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_66 && |
209 | np->revision_id < 0x1 && | 213 | pdev->revision < 0x1 && |
210 | np->pciclk_khz < 60000)) { | 214 | np->pciclk_khz < 60000)) { |
211 | scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); | 215 | scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); |
212 | scripta0->datao_phase[1] = cpu_to_scr(0); | 216 | scripta0->datao_phase[1] = cpu_to_scr(0); |
213 | } | 217 | } |
214 | if (!(np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 && | 218 | if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 /* && |
215 | /* np->revision_id < 0xff */ 1)) { | 219 | pdev->revision < 0xff */)) { |
216 | scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); | 220 | scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); |
217 | scripta0->sel_done[1] = cpu_to_scr(0); | 221 | scripta0->sel_done[1] = cpu_to_scr(0); |
218 | } | 222 | } |
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.h b/drivers/scsi/sym53c8xx_2/sym_fw.h index 66ec35beab5b..ae7e0f9e93fc 100644 --- a/drivers/scsi/sym53c8xx_2/sym_fw.h +++ b/drivers/scsi/sym53c8xx_2/sym_fw.h | |||
@@ -143,7 +143,7 @@ struct sym_fw { | |||
143 | *z_ofs; /* Useful offsets in script Z */ | 143 | *z_ofs; /* Useful offsets in script Z */ |
144 | /* Setup and patch methods for this firmware */ | 144 | /* Setup and patch methods for this firmware */ |
145 | void (*setup)(struct sym_hcb *, struct sym_fw *); | 145 | void (*setup)(struct sym_hcb *, struct sym_fw *); |
146 | void (*patch)(struct sym_hcb *); | 146 | void (*patch)(struct Scsi_Host *); |
147 | }; | 147 | }; |
148 | 148 | ||
149 | /* | 149 | /* |
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index db03c4c8ec1e..0f74aba5b237 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -39,7 +39,6 @@ | |||
39 | */ | 39 | */ |
40 | #include <linux/ctype.h> | 40 | #include <linux/ctype.h> |
41 | #include <linux/init.h> | 41 | #include <linux/init.h> |
42 | #include <linux/interrupt.h> | ||
43 | #include <linux/module.h> | 42 | #include <linux/module.h> |
44 | #include <linux/moduleparam.h> | 43 | #include <linux/moduleparam.h> |
45 | #include <linux/spinlock.h> | 44 | #include <linux/spinlock.h> |
@@ -54,16 +53,12 @@ | |||
54 | #define NAME53C "sym53c" | 53 | #define NAME53C "sym53c" |
55 | #define NAME53C8XX "sym53c8xx" | 54 | #define NAME53C8XX "sym53c8xx" |
56 | 55 | ||
57 | #define IRQ_FMT "%d" | ||
58 | #define IRQ_PRM(x) (x) | ||
59 | |||
60 | struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP; | 56 | struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP; |
61 | unsigned int sym_debug_flags = 0; | 57 | unsigned int sym_debug_flags = 0; |
62 | 58 | ||
63 | static char *excl_string; | 59 | static char *excl_string; |
64 | static char *safe_string; | 60 | static char *safe_string; |
65 | module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0); | 61 | module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0); |
66 | module_param_string(tag_ctrl, sym_driver_setup.tag_ctrl, 100, 0); | ||
67 | module_param_named(burst, sym_driver_setup.burst_order, byte, 0); | 62 | module_param_named(burst, sym_driver_setup.burst_order, byte, 0); |
68 | module_param_named(led, sym_driver_setup.scsi_led, byte, 0); | 63 | module_param_named(led, sym_driver_setup.scsi_led, byte, 0); |
69 | module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0); | 64 | module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0); |
@@ -78,7 +73,6 @@ module_param_named(excl, excl_string, charp, 0); | |||
78 | module_param_named(safe, safe_string, charp, 0); | 73 | module_param_named(safe, safe_string, charp, 0); |
79 | 74 | ||
80 | MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default"); | 75 | MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default"); |
81 | MODULE_PARM_DESC(tag_ctrl, "More detailed control over tags per LUN"); | ||
82 | MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers"); | 76 | MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers"); |
83 | MODULE_PARM_DESC(led, "Set to 1 to enable LED support"); | 77 | MODULE_PARM_DESC(led, "Set to 1 to enable LED support"); |
84 | MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3"); | 78 | MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3"); |
@@ -134,56 +128,26 @@ static struct scsi_transport_template *sym2_transport_template = NULL; | |||
134 | * Driver private area in the SCSI command structure. | 128 | * Driver private area in the SCSI command structure. |
135 | */ | 129 | */ |
136 | struct sym_ucmd { /* Override the SCSI pointer structure */ | 130 | struct sym_ucmd { /* Override the SCSI pointer structure */ |
137 | dma_addr_t data_mapping; | 131 | struct completion *eh_done; /* SCSI error handling */ |
138 | unsigned char data_mapped; | ||
139 | unsigned char to_do; /* For error handling */ | ||
140 | void (*old_done)(struct scsi_cmnd *); /* For error handling */ | ||
141 | struct completion *eh_done; /* For error handling */ | ||
142 | }; | 132 | }; |
143 | 133 | ||
144 | #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp)) | 134 | #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp)) |
145 | #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host) | 135 | #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host) |
146 | 136 | ||
147 | static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) | ||
148 | { | ||
149 | if (SYM_UCMD_PTR(cmd)->data_mapped) | ||
150 | scsi_dma_unmap(cmd); | ||
151 | |||
152 | SYM_UCMD_PTR(cmd)->data_mapped = 0; | ||
153 | } | ||
154 | |||
155 | static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) | ||
156 | { | ||
157 | int use_sg; | ||
158 | |||
159 | use_sg = scsi_dma_map(cmd); | ||
160 | if (use_sg > 0) { | ||
161 | SYM_UCMD_PTR(cmd)->data_mapped = 2; | ||
162 | SYM_UCMD_PTR(cmd)->data_mapping = use_sg; | ||
163 | } | ||
164 | |||
165 | return use_sg; | ||
166 | } | ||
167 | |||
168 | #define unmap_scsi_data(np, cmd) \ | ||
169 | __unmap_scsi_data(np->s.device, cmd) | ||
170 | #define map_scsi_sg_data(np, cmd) \ | ||
171 | __map_scsi_sg_data(np->s.device, cmd) | ||
172 | /* | 137 | /* |
173 | * Complete a pending CAM CCB. | 138 | * Complete a pending CAM CCB. |
174 | */ | 139 | */ |
175 | void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) | 140 | void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) |
176 | { | 141 | { |
177 | unmap_scsi_data(np, cmd); | 142 | struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); |
178 | cmd->scsi_done(cmd); | 143 | BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd)); |
179 | } | ||
180 | 144 | ||
181 | static void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *cmd, int cam_status) | 145 | if (ucmd->eh_done) |
182 | { | 146 | complete(ucmd->eh_done); |
183 | sym_set_cam_status(cmd, cam_status); | ||
184 | sym_xpt_done(np, cmd); | ||
185 | } | ||
186 | 147 | ||
148 | scsi_dma_unmap(cmd); | ||
149 | cmd->scsi_done(cmd); | ||
150 | } | ||
187 | 151 | ||
188 | /* | 152 | /* |
189 | * Tell the SCSI layer about a BUS RESET. | 153 | * Tell the SCSI layer about a BUS RESET. |
@@ -199,14 +163,6 @@ void sym_xpt_async_bus_reset(struct sym_hcb *np) | |||
199 | } | 163 | } |
200 | 164 | ||
201 | /* | 165 | /* |
202 | * Tell the SCSI layer about a BUS DEVICE RESET message sent. | ||
203 | */ | ||
204 | void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target) | ||
205 | { | ||
206 | printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Choose the more appropriate CAM status if | 166 | * Choose the more appropriate CAM status if |
211 | * the IO encountered an extended error. | 167 | * the IO encountered an extended error. |
212 | */ | 168 | */ |
@@ -307,14 +263,14 @@ static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd | |||
307 | 263 | ||
308 | cp->data_len = 0; | 264 | cp->data_len = 0; |
309 | 265 | ||
310 | use_sg = map_scsi_sg_data(np, cmd); | 266 | use_sg = scsi_dma_map(cmd); |
311 | if (use_sg > 0) { | 267 | if (use_sg > 0) { |
312 | struct scatterlist *sg; | 268 | struct scatterlist *sg; |
313 | struct sym_tcb *tp = &np->target[cp->target]; | 269 | struct sym_tcb *tp = &np->target[cp->target]; |
314 | struct sym_tblmove *data; | 270 | struct sym_tblmove *data; |
315 | 271 | ||
316 | if (use_sg > SYM_CONF_MAX_SG) { | 272 | if (use_sg > SYM_CONF_MAX_SG) { |
317 | unmap_scsi_data(np, cmd); | 273 | scsi_dma_unmap(cmd); |
318 | return -1; | 274 | return -1; |
319 | } | 275 | } |
320 | 276 | ||
@@ -351,15 +307,6 @@ static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) | |||
351 | int order; | 307 | int order; |
352 | 308 | ||
353 | /* | 309 | /* |
354 | * Minimal checkings, so that we will not | ||
355 | * go outside our tables. | ||
356 | */ | ||
357 | if (sdev->id == np->myaddr) { | ||
358 | sym_xpt_done2(np, cmd, DID_NO_CONNECT); | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Retrieve the target descriptor. | 310 | * Retrieve the target descriptor. |
364 | */ | 311 | */ |
365 | tp = &np->target[sdev->id]; | 312 | tp = &np->target[sdev->id]; |
@@ -433,7 +380,7 @@ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct s | |||
433 | */ | 380 | */ |
434 | switch (dir) { | 381 | switch (dir) { |
435 | case DMA_BIDIRECTIONAL: | 382 | case DMA_BIDIRECTIONAL: |
436 | printk("%s: got DMA_BIDIRECTIONAL command", sym_name(np)); | 383 | scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command"); |
437 | sym_set_cam_status(cmd, DID_ERROR); | 384 | sym_set_cam_status(cmd, DID_ERROR); |
438 | goto out_abort; | 385 | goto out_abort; |
439 | case DMA_TO_DEVICE: | 386 | case DMA_TO_DEVICE: |
@@ -542,14 +489,16 @@ static void sym_timer(struct sym_hcb *np) | |||
542 | /* | 489 | /* |
543 | * PCI BUS error handler. | 490 | * PCI BUS error handler. |
544 | */ | 491 | */ |
545 | void sym_log_bus_error(struct sym_hcb *np) | 492 | void sym_log_bus_error(struct Scsi_Host *shost) |
546 | { | 493 | { |
547 | u_short pci_sts; | 494 | struct sym_data *sym_data = shost_priv(shost); |
548 | pci_read_config_word(np->s.device, PCI_STATUS, &pci_sts); | 495 | struct pci_dev *pdev = sym_data->pdev; |
496 | unsigned short pci_sts; | ||
497 | pci_read_config_word(pdev, PCI_STATUS, &pci_sts); | ||
549 | if (pci_sts & 0xf900) { | 498 | if (pci_sts & 0xf900) { |
550 | pci_write_config_word(np->s.device, PCI_STATUS, pci_sts); | 499 | pci_write_config_word(pdev, PCI_STATUS, pci_sts); |
551 | printf("%s: PCI STATUS = 0x%04x\n", | 500 | shost_printk(KERN_WARNING, shost, |
552 | sym_name(np), pci_sts & 0xf900); | 501 | "PCI bus error: status = 0x%04x\n", pci_sts & 0xf900); |
553 | } | 502 | } |
554 | } | 503 | } |
555 | 504 | ||
@@ -564,7 +513,7 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | |||
564 | struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); | 513 | struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); |
565 | int sts = 0; | 514 | int sts = 0; |
566 | 515 | ||
567 | cmd->scsi_done = done; | 516 | cmd->scsi_done = done; |
568 | memset(ucp, 0, sizeof(*ucp)); | 517 | memset(ucp, 0, sizeof(*ucp)); |
569 | 518 | ||
570 | /* | 519 | /* |
@@ -593,18 +542,23 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | |||
593 | */ | 542 | */ |
594 | static irqreturn_t sym53c8xx_intr(int irq, void *dev_id) | 543 | static irqreturn_t sym53c8xx_intr(int irq, void *dev_id) |
595 | { | 544 | { |
596 | unsigned long flags; | 545 | struct Scsi_Host *shost = dev_id; |
597 | struct sym_hcb *np = (struct sym_hcb *)dev_id; | 546 | struct sym_data *sym_data = shost_priv(shost); |
547 | irqreturn_t result; | ||
548 | |||
549 | /* Avoid spinloop trying to handle interrupts on frozen device */ | ||
550 | if (pci_channel_offline(sym_data->pdev)) | ||
551 | return IRQ_NONE; | ||
598 | 552 | ||
599 | if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("["); | 553 | if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("["); |
600 | 554 | ||
601 | spin_lock_irqsave(np->s.host->host_lock, flags); | 555 | spin_lock(shost->host_lock); |
602 | sym_interrupt(np); | 556 | result = sym_interrupt(shost); |
603 | spin_unlock_irqrestore(np->s.host->host_lock, flags); | 557 | spin_unlock(shost->host_lock); |
604 | 558 | ||
605 | if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n"); | 559 | if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n"); |
606 | 560 | ||
607 | return IRQ_HANDLED; | 561 | return result; |
608 | } | 562 | } |
609 | 563 | ||
610 | /* | 564 | /* |
@@ -630,59 +584,61 @@ static void sym53c8xx_timer(unsigned long npref) | |||
630 | #define SYM_EH_HOST_RESET 3 | 584 | #define SYM_EH_HOST_RESET 3 |
631 | 585 | ||
632 | /* | 586 | /* |
633 | * What we will do regarding the involved SCSI command. | ||
634 | */ | ||
635 | #define SYM_EH_DO_IGNORE 0 | ||
636 | #define SYM_EH_DO_WAIT 2 | ||
637 | |||
638 | /* | ||
639 | * scsi_done() alias when error recovery is in progress. | ||
640 | */ | ||
641 | static void sym_eh_done(struct scsi_cmnd *cmd) | ||
642 | { | ||
643 | struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); | ||
644 | BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd)); | ||
645 | |||
646 | cmd->scsi_done = ucmd->old_done; | ||
647 | |||
648 | if (ucmd->to_do == SYM_EH_DO_WAIT) | ||
649 | complete(ucmd->eh_done); | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * Generic method for our eh processing. | 587 | * Generic method for our eh processing. |
654 | * The 'op' argument tells what we have to do. | 588 | * The 'op' argument tells what we have to do. |
655 | */ | 589 | */ |
656 | static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) | 590 | static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) |
657 | { | 591 | { |
658 | struct sym_hcb *np = SYM_SOFTC_PTR(cmd); | ||
659 | struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); | 592 | struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); |
660 | struct Scsi_Host *host = cmd->device->host; | 593 | struct Scsi_Host *shost = cmd->device->host; |
594 | struct sym_data *sym_data = shost_priv(shost); | ||
595 | struct pci_dev *pdev = sym_data->pdev; | ||
596 | struct sym_hcb *np = sym_data->ncb; | ||
661 | SYM_QUEHEAD *qp; | 597 | SYM_QUEHEAD *qp; |
662 | int to_do = SYM_EH_DO_IGNORE; | 598 | int cmd_queued = 0; |
663 | int sts = -1; | 599 | int sts = -1; |
664 | struct completion eh_done; | 600 | struct completion eh_done; |
665 | 601 | ||
666 | dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname); | 602 | scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname); |
603 | |||
604 | /* We may be in an error condition because the PCI bus | ||
605 | * went down. In this case, we need to wait until the | ||
606 | * PCI bus is reset, the card is reset, and only then | ||
607 | * proceed with the scsi error recovery. There's no | ||
608 | * point in hurrying; take a leisurely wait. | ||
609 | */ | ||
610 | #define WAIT_FOR_PCI_RECOVERY 35 | ||
611 | if (pci_channel_offline(pdev)) { | ||
612 | struct completion *io_reset; | ||
613 | int finished_reset = 0; | ||
614 | init_completion(&eh_done); | ||
615 | spin_lock_irq(shost->host_lock); | ||
616 | /* Make sure we didn't race */ | ||
617 | if (pci_channel_offline(pdev)) { | ||
618 | if (!sym_data->io_reset) | ||
619 | sym_data->io_reset = &eh_done; | ||
620 | io_reset = sym_data->io_reset; | ||
621 | } else { | ||
622 | finished_reset = 1; | ||
623 | } | ||
624 | spin_unlock_irq(shost->host_lock); | ||
625 | if (!finished_reset) | ||
626 | finished_reset = wait_for_completion_timeout(io_reset, | ||
627 | WAIT_FOR_PCI_RECOVERY*HZ); | ||
628 | if (!finished_reset) | ||
629 | return SCSI_FAILED; | ||
630 | } | ||
667 | 631 | ||
668 | spin_lock_irq(host->host_lock); | 632 | spin_lock_irq(shost->host_lock); |
669 | /* This one is queued in some place -> to wait for completion */ | 633 | /* This one is queued in some place -> to wait for completion */ |
670 | FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { | 634 | FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { |
671 | struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); | 635 | struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); |
672 | if (cp->cmd == cmd) { | 636 | if (cp->cmd == cmd) { |
673 | to_do = SYM_EH_DO_WAIT; | 637 | cmd_queued = 1; |
674 | break; | 638 | break; |
675 | } | 639 | } |
676 | } | 640 | } |
677 | 641 | ||
678 | if (to_do == SYM_EH_DO_WAIT) { | ||
679 | init_completion(&eh_done); | ||
680 | ucmd->old_done = cmd->scsi_done; | ||
681 | ucmd->eh_done = &eh_done; | ||
682 | wmb(); | ||
683 | cmd->scsi_done = sym_eh_done; | ||
684 | } | ||
685 | |||
686 | /* Try to proceed the operation we have been asked for */ | 642 | /* Try to proceed the operation we have been asked for */ |
687 | sts = -1; | 643 | sts = -1; |
688 | switch(op) { | 644 | switch(op) { |
@@ -698,7 +654,7 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) | |||
698 | break; | 654 | break; |
699 | case SYM_EH_HOST_RESET: | 655 | case SYM_EH_HOST_RESET: |
700 | sym_reset_scsi_bus(np, 0); | 656 | sym_reset_scsi_bus(np, 0); |
701 | sym_start_up (np, 1); | 657 | sym_start_up(shost, 1); |
702 | sts = 0; | 658 | sts = 0; |
703 | break; | 659 | break; |
704 | default: | 660 | default: |
@@ -706,21 +662,21 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) | |||
706 | } | 662 | } |
707 | 663 | ||
708 | /* On error, restore everything and cross fingers :) */ | 664 | /* On error, restore everything and cross fingers :) */ |
709 | if (sts) { | 665 | if (sts) |
710 | cmd->scsi_done = ucmd->old_done; | 666 | cmd_queued = 0; |
711 | to_do = SYM_EH_DO_IGNORE; | ||
712 | } | ||
713 | |||
714 | ucmd->to_do = to_do; | ||
715 | spin_unlock_irq(host->host_lock); | ||
716 | 667 | ||
717 | if (to_do == SYM_EH_DO_WAIT) { | 668 | if (cmd_queued) { |
669 | init_completion(&eh_done); | ||
670 | ucmd->eh_done = &eh_done; | ||
671 | spin_unlock_irq(shost->host_lock); | ||
718 | if (!wait_for_completion_timeout(&eh_done, 5*HZ)) { | 672 | if (!wait_for_completion_timeout(&eh_done, 5*HZ)) { |
719 | ucmd->to_do = SYM_EH_DO_IGNORE; | 673 | ucmd->eh_done = NULL; |
720 | wmb(); | ||
721 | sts = -2; | 674 | sts = -2; |
722 | } | 675 | } |
676 | } else { | ||
677 | spin_unlock_irq(shost->host_lock); | ||
723 | } | 678 | } |
679 | |||
724 | dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, | 680 | dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, |
725 | sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); | 681 | sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); |
726 | return sts ? SCSI_FAILED : SCSI_SUCCESS; | 682 | return sts ? SCSI_FAILED : SCSI_SUCCESS; |
@@ -775,59 +731,6 @@ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) | |||
775 | } | 731 | } |
776 | } | 732 | } |
777 | 733 | ||
778 | /* | ||
779 | * Linux select queue depths function | ||
780 | */ | ||
781 | #define DEF_DEPTH (sym_driver_setup.max_tag) | ||
782 | #define ALL_TARGETS -2 | ||
783 | #define NO_TARGET -1 | ||
784 | #define ALL_LUNS -2 | ||
785 | #define NO_LUN -1 | ||
786 | |||
787 | static int device_queue_depth(struct sym_hcb *np, int target, int lun) | ||
788 | { | ||
789 | int c, h, t, u, v; | ||
790 | char *p = sym_driver_setup.tag_ctrl; | ||
791 | char *ep; | ||
792 | |||
793 | h = -1; | ||
794 | t = NO_TARGET; | ||
795 | u = NO_LUN; | ||
796 | while ((c = *p++) != 0) { | ||
797 | v = simple_strtoul(p, &ep, 0); | ||
798 | switch(c) { | ||
799 | case '/': | ||
800 | ++h; | ||
801 | t = ALL_TARGETS; | ||
802 | u = ALL_LUNS; | ||
803 | break; | ||
804 | case 't': | ||
805 | if (t != target) | ||
806 | t = (target == v) ? v : NO_TARGET; | ||
807 | u = ALL_LUNS; | ||
808 | break; | ||
809 | case 'u': | ||
810 | if (u != lun) | ||
811 | u = (lun == v) ? v : NO_LUN; | ||
812 | break; | ||
813 | case 'q': | ||
814 | if (h == np->s.unit && | ||
815 | (t == ALL_TARGETS || t == target) && | ||
816 | (u == ALL_LUNS || u == lun)) | ||
817 | return v; | ||
818 | break; | ||
819 | case '-': | ||
820 | t = ALL_TARGETS; | ||
821 | u = ALL_LUNS; | ||
822 | break; | ||
823 | default: | ||
824 | break; | ||
825 | } | ||
826 | p = ep; | ||
827 | } | ||
828 | return DEF_DEPTH; | ||
829 | } | ||
830 | |||
831 | static int sym53c8xx_slave_alloc(struct scsi_device *sdev) | 734 | static int sym53c8xx_slave_alloc(struct scsi_device *sdev) |
832 | { | 735 | { |
833 | struct sym_hcb *np = sym_get_hcb(sdev->host); | 736 | struct sym_hcb *np = sym_get_hcb(sdev->host); |
@@ -892,21 +795,16 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev) | |||
892 | * Use at least 2. | 795 | * Use at least 2. |
893 | * Donnot use more than our maximum. | 796 | * Donnot use more than our maximum. |
894 | */ | 797 | */ |
895 | reqtags = device_queue_depth(np, sdev->id, sdev->lun); | 798 | reqtags = sym_driver_setup.max_tag; |
896 | if (reqtags > tp->usrtags) | 799 | if (reqtags > tp->usrtags) |
897 | reqtags = tp->usrtags; | 800 | reqtags = tp->usrtags; |
898 | if (!sdev->tagged_supported) | 801 | if (!sdev->tagged_supported) |
899 | reqtags = 0; | 802 | reqtags = 0; |
900 | #if 1 /* Avoid to locally queue commands for no good reasons */ | ||
901 | if (reqtags > SYM_CONF_MAX_TAG) | 803 | if (reqtags > SYM_CONF_MAX_TAG) |
902 | reqtags = SYM_CONF_MAX_TAG; | 804 | reqtags = SYM_CONF_MAX_TAG; |
903 | depth_to_use = (reqtags ? reqtags : 2); | 805 | depth_to_use = reqtags ? reqtags : 2; |
904 | #else | ||
905 | depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2); | ||
906 | #endif | ||
907 | scsi_adjust_queue_depth(sdev, | 806 | scsi_adjust_queue_depth(sdev, |
908 | (sdev->tagged_supported ? | 807 | sdev->tagged_supported ? MSG_SIMPLE_TAG : 0, |
909 | MSG_SIMPLE_TAG : 0), | ||
910 | depth_to_use); | 808 | depth_to_use); |
911 | lp->s.scdev_depth = depth_to_use; | 809 | lp->s.scdev_depth = depth_to_use; |
912 | sym_tune_dev_queuing(tp, sdev->lun, reqtags); | 810 | sym_tune_dev_queuing(tp, sdev->lun, reqtags); |
@@ -1089,8 +987,9 @@ static int is_keyword(char *ptr, int len, char *verb) | |||
1089 | * Parse a control command | 987 | * Parse a control command |
1090 | */ | 988 | */ |
1091 | 989 | ||
1092 | static int sym_user_command(struct sym_hcb *np, char *buffer, int length) | 990 | static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length) |
1093 | { | 991 | { |
992 | struct sym_hcb *np = sym_get_hcb(shost); | ||
1094 | char *ptr = buffer; | 993 | char *ptr = buffer; |
1095 | int len = length; | 994 | int len = length; |
1096 | struct sym_usrcmd cmd, *uc = &cmd; | 995 | struct sym_usrcmd cmd, *uc = &cmd; |
@@ -1217,9 +1116,9 @@ printk("sym_user_command: data=%ld\n", uc->data); | |||
1217 | else { | 1116 | else { |
1218 | unsigned long flags; | 1117 | unsigned long flags; |
1219 | 1118 | ||
1220 | spin_lock_irqsave(np->s.host->host_lock, flags); | 1119 | spin_lock_irqsave(shost->host_lock, flags); |
1221 | sym_exec_user_command (np, uc); | 1120 | sym_exec_user_command(np, uc); |
1222 | spin_unlock_irqrestore(np->s.host->host_lock, flags); | 1121 | spin_unlock_irqrestore(shost->host_lock, flags); |
1223 | } | 1122 | } |
1224 | return length; | 1123 | return length; |
1225 | } | 1124 | } |
@@ -1275,8 +1174,11 @@ static int copy_info(struct info_str *info, char *fmt, ...) | |||
1275 | /* | 1174 | /* |
1276 | * Copy formatted information into the input buffer. | 1175 | * Copy formatted information into the input buffer. |
1277 | */ | 1176 | */ |
1278 | static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len) | 1177 | static int sym_host_info(struct Scsi_Host *shost, char *ptr, off_t offset, int len) |
1279 | { | 1178 | { |
1179 | struct sym_data *sym_data = shost_priv(shost); | ||
1180 | struct pci_dev *pdev = sym_data->pdev; | ||
1181 | struct sym_hcb *np = sym_data->ncb; | ||
1280 | struct info_str info; | 1182 | struct info_str info; |
1281 | 1183 | ||
1282 | info.buffer = ptr; | 1184 | info.buffer = ptr; |
@@ -1285,10 +1187,10 @@ static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len) | |||
1285 | info.pos = 0; | 1187 | info.pos = 0; |
1286 | 1188 | ||
1287 | copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, " | 1189 | copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, " |
1288 | "revision id 0x%x\n", | 1190 | "revision id 0x%x\n", np->s.chip_name, |
1289 | np->s.chip_name, np->device_id, np->revision_id); | 1191 | pdev->device, pdev->revision); |
1290 | copy_info(&info, "At PCI address %s, IRQ " IRQ_FMT "\n", | 1192 | copy_info(&info, "At PCI address %s, IRQ %u\n", |
1291 | pci_name(np->s.device), IRQ_PRM(np->s.irq)); | 1193 | pci_name(pdev), pdev->irq); |
1292 | copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n", | 1194 | copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n", |
1293 | (int) (np->minsync_dt ? np->minsync_dt : np->minsync), | 1195 | (int) (np->minsync_dt ? np->minsync_dt : np->minsync), |
1294 | np->maxwide ? "Wide" : "Narrow", | 1196 | np->maxwide ? "Wide" : "Narrow", |
@@ -1307,15 +1209,14 @@ static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len) | |||
1307 | * - func = 0 means read (returns adapter infos) | 1209 | * - func = 0 means read (returns adapter infos) |
1308 | * - func = 1 means write (not yet merget from sym53c8xx) | 1210 | * - func = 1 means write (not yet merget from sym53c8xx) |
1309 | */ | 1211 | */ |
1310 | static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer, | 1212 | static int sym53c8xx_proc_info(struct Scsi_Host *shost, char *buffer, |
1311 | char **start, off_t offset, int length, int func) | 1213 | char **start, off_t offset, int length, int func) |
1312 | { | 1214 | { |
1313 | struct sym_hcb *np = sym_get_hcb(host); | ||
1314 | int retv; | 1215 | int retv; |
1315 | 1216 | ||
1316 | if (func) { | 1217 | if (func) { |
1317 | #ifdef SYM_LINUX_USER_COMMAND_SUPPORT | 1218 | #ifdef SYM_LINUX_USER_COMMAND_SUPPORT |
1318 | retv = sym_user_command(np, buffer, length); | 1219 | retv = sym_user_command(shost, buffer, length); |
1319 | #else | 1220 | #else |
1320 | retv = -EINVAL; | 1221 | retv = -EINVAL; |
1321 | #endif | 1222 | #endif |
@@ -1323,7 +1224,7 @@ static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer, | |||
1323 | if (start) | 1224 | if (start) |
1324 | *start = buffer; | 1225 | *start = buffer; |
1325 | #ifdef SYM_LINUX_USER_INFO_SUPPORT | 1226 | #ifdef SYM_LINUX_USER_INFO_SUPPORT |
1326 | retv = sym_host_info(np, buffer, offset, length); | 1227 | retv = sym_host_info(shost, buffer, offset, length); |
1327 | #else | 1228 | #else |
1328 | retv = -EINVAL; | 1229 | retv = -EINVAL; |
1329 | #endif | 1230 | #endif |
@@ -1341,8 +1242,8 @@ static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev) | |||
1341 | /* | 1242 | /* |
1342 | * Free O/S specific resources. | 1243 | * Free O/S specific resources. |
1343 | */ | 1244 | */ |
1344 | if (np->s.irq) | 1245 | if (pdev->irq) |
1345 | free_irq(np->s.irq, np); | 1246 | free_irq(pdev->irq, np); |
1346 | if (np->s.ioaddr) | 1247 | if (np->s.ioaddr) |
1347 | pci_iounmap(pdev, np->s.ioaddr); | 1248 | pci_iounmap(pdev, np->s.ioaddr); |
1348 | if (np->s.ramaddr) | 1249 | if (np->s.ramaddr) |
@@ -1356,31 +1257,6 @@ static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev) | |||
1356 | } | 1257 | } |
1357 | 1258 | ||
1358 | /* | 1259 | /* |
1359 | * Ask/tell the system about DMA addressing. | ||
1360 | */ | ||
1361 | static int sym_setup_bus_dma_mask(struct sym_hcb *np) | ||
1362 | { | ||
1363 | #if SYM_CONF_DMA_ADDRESSING_MODE > 0 | ||
1364 | #if SYM_CONF_DMA_ADDRESSING_MODE == 1 | ||
1365 | #define DMA_DAC_MASK DMA_40BIT_MASK | ||
1366 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 | ||
1367 | #define DMA_DAC_MASK DMA_64BIT_MASK | ||
1368 | #endif | ||
1369 | if ((np->features & FE_DAC) && | ||
1370 | !pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) { | ||
1371 | np->use_dac = 1; | ||
1372 | return 0; | ||
1373 | } | ||
1374 | #endif | ||
1375 | |||
1376 | if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK)) | ||
1377 | return 0; | ||
1378 | |||
1379 | printf_warning("%s: No suitable DMA available\n", sym_name(np)); | ||
1380 | return -1; | ||
1381 | } | ||
1382 | |||
1383 | /* | ||
1384 | * Host attach and initialisations. | 1260 | * Host attach and initialisations. |
1385 | * | 1261 | * |
1386 | * Allocate host data and ncb structure. | 1262 | * Allocate host data and ncb structure. |
@@ -1392,32 +1268,28 @@ static int sym_setup_bus_dma_mask(struct sym_hcb *np) | |||
1392 | static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, | 1268 | static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, |
1393 | int unit, struct sym_device *dev) | 1269 | int unit, struct sym_device *dev) |
1394 | { | 1270 | { |
1395 | struct host_data *host_data; | 1271 | struct sym_data *sym_data; |
1396 | struct sym_hcb *np = NULL; | 1272 | struct sym_hcb *np = NULL; |
1397 | struct Scsi_Host *instance = NULL; | 1273 | struct Scsi_Host *shost; |
1398 | struct pci_dev *pdev = dev->pdev; | 1274 | struct pci_dev *pdev = dev->pdev; |
1399 | unsigned long flags; | 1275 | unsigned long flags; |
1400 | struct sym_fw *fw; | 1276 | struct sym_fw *fw; |
1401 | 1277 | ||
1402 | printk(KERN_INFO | 1278 | printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n", |
1403 | "sym%d: <%s> rev 0x%x at pci %s irq " IRQ_FMT "\n", | 1279 | unit, dev->chip.name, pdev->revision, pci_name(pdev), |
1404 | unit, dev->chip.name, dev->chip.revision_id, | 1280 | pdev->irq); |
1405 | pci_name(pdev), IRQ_PRM(pdev->irq)); | ||
1406 | 1281 | ||
1407 | /* | 1282 | /* |
1408 | * Get the firmware for this chip. | 1283 | * Get the firmware for this chip. |
1409 | */ | 1284 | */ |
1410 | fw = sym_find_firmware(&dev->chip); | 1285 | fw = sym_find_firmware(&dev->chip); |
1411 | if (!fw) | 1286 | if (!fw) |
1412 | goto attach_failed; | 1287 | return NULL; |
1413 | 1288 | ||
1414 | /* | 1289 | shost = scsi_host_alloc(tpnt, sizeof(*sym_data)); |
1415 | * Allocate host_data structure | 1290 | if (!shost) |
1416 | */ | 1291 | return NULL; |
1417 | instance = scsi_host_alloc(tpnt, sizeof(*host_data)); | 1292 | sym_data = shost_priv(shost); |
1418 | if (!instance) | ||
1419 | goto attach_failed; | ||
1420 | host_data = (struct host_data *) instance->hostdata; | ||
1421 | 1293 | ||
1422 | /* | 1294 | /* |
1423 | * Allocate immediately the host control block, | 1295 | * Allocate immediately the host control block, |
@@ -1428,22 +1300,19 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, | |||
1428 | np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); | 1300 | np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); |
1429 | if (!np) | 1301 | if (!np) |
1430 | goto attach_failed; | 1302 | goto attach_failed; |
1431 | np->s.device = pdev; | ||
1432 | np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ | 1303 | np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ |
1433 | host_data->ncb = np; | 1304 | sym_data->ncb = np; |
1434 | np->s.host = instance; | 1305 | sym_data->pdev = pdev; |
1306 | np->s.host = shost; | ||
1435 | 1307 | ||
1436 | pci_set_drvdata(pdev, np); | 1308 | pci_set_drvdata(pdev, shost); |
1437 | 1309 | ||
1438 | /* | 1310 | /* |
1439 | * Copy some useful infos to the HCB. | 1311 | * Copy some useful infos to the HCB. |
1440 | */ | 1312 | */ |
1441 | np->hcb_ba = vtobus(np); | 1313 | np->hcb_ba = vtobus(np); |
1442 | np->verbose = sym_driver_setup.verbose; | 1314 | np->verbose = sym_driver_setup.verbose; |
1443 | np->s.device = pdev; | ||
1444 | np->s.unit = unit; | 1315 | np->s.unit = unit; |
1445 | np->device_id = dev->chip.device_id; | ||
1446 | np->revision_id = dev->chip.revision_id; | ||
1447 | np->features = dev->chip.features; | 1316 | np->features = dev->chip.features; |
1448 | np->clock_divn = dev->chip.nr_divisor; | 1317 | np->clock_divn = dev->chip.nr_divisor; |
1449 | np->maxoffs = dev->chip.offset_max; | 1318 | np->maxoffs = dev->chip.offset_max; |
@@ -1456,8 +1325,13 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, | |||
1456 | strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); | 1325 | strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); |
1457 | sprintf(np->s.inst_name, "sym%d", np->s.unit); | 1326 | sprintf(np->s.inst_name, "sym%d", np->s.unit); |
1458 | 1327 | ||
1459 | if (sym_setup_bus_dma_mask(np)) | 1328 | if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) && |
1329 | !pci_set_dma_mask(pdev, DMA_DAC_MASK)) { | ||
1330 | set_dac(np); | ||
1331 | } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | ||
1332 | printf_warning("%s: No suitable DMA available\n", sym_name(np)); | ||
1460 | goto attach_failed; | 1333 | goto attach_failed; |
1334 | } | ||
1461 | 1335 | ||
1462 | /* | 1336 | /* |
1463 | * Try to map the controller chip to | 1337 | * Try to map the controller chip to |
@@ -1466,19 +1340,16 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, | |||
1466 | np->mmio_ba = (u32)dev->mmio_base; | 1340 | np->mmio_ba = (u32)dev->mmio_base; |
1467 | np->s.ioaddr = dev->s.ioaddr; | 1341 | np->s.ioaddr = dev->s.ioaddr; |
1468 | np->s.ramaddr = dev->s.ramaddr; | 1342 | np->s.ramaddr = dev->s.ramaddr; |
1469 | np->s.io_ws = (np->features & FE_IO256) ? 256 : 128; | ||
1470 | 1343 | ||
1471 | /* | 1344 | /* |
1472 | * Map on-chip RAM if present and supported. | 1345 | * Map on-chip RAM if present and supported. |
1473 | */ | 1346 | */ |
1474 | if (!(np->features & FE_RAM)) | 1347 | if (!(np->features & FE_RAM)) |
1475 | dev->ram_base = 0; | 1348 | dev->ram_base = 0; |
1476 | if (dev->ram_base) { | 1349 | if (dev->ram_base) |
1477 | np->ram_ba = (u32)dev->ram_base; | 1350 | np->ram_ba = (u32)dev->ram_base; |
1478 | np->ram_ws = (np->features & FE_RAM8K) ? 8192 : 4096; | ||
1479 | } | ||
1480 | 1351 | ||
1481 | if (sym_hcb_attach(instance, fw, dev->nvram)) | 1352 | if (sym_hcb_attach(shost, fw, dev->nvram)) |
1482 | goto attach_failed; | 1353 | goto attach_failed; |
1483 | 1354 | ||
1484 | /* | 1355 | /* |
@@ -1486,25 +1357,25 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, | |||
1486 | * If we synchonize the C code with SCRIPTS on interrupt, | 1357 | * If we synchonize the C code with SCRIPTS on interrupt, |
1487 | * we do not want to share the INTR line at all. | 1358 | * we do not want to share the INTR line at all. |
1488 | */ | 1359 | */ |
1489 | if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, np)) { | 1360 | if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, |
1490 | printf_err("%s: request irq %d failure\n", | 1361 | shost)) { |
1362 | printf_err("%s: request irq %u failure\n", | ||
1491 | sym_name(np), pdev->irq); | 1363 | sym_name(np), pdev->irq); |
1492 | goto attach_failed; | 1364 | goto attach_failed; |
1493 | } | 1365 | } |
1494 | np->s.irq = pdev->irq; | ||
1495 | 1366 | ||
1496 | /* | 1367 | /* |
1497 | * After SCSI devices have been opened, we cannot | 1368 | * After SCSI devices have been opened, we cannot |
1498 | * reset the bus safely, so we do it here. | 1369 | * reset the bus safely, so we do it here. |
1499 | */ | 1370 | */ |
1500 | spin_lock_irqsave(instance->host_lock, flags); | 1371 | spin_lock_irqsave(shost->host_lock, flags); |
1501 | if (sym_reset_scsi_bus(np, 0)) | 1372 | if (sym_reset_scsi_bus(np, 0)) |
1502 | goto reset_failed; | 1373 | goto reset_failed; |
1503 | 1374 | ||
1504 | /* | 1375 | /* |
1505 | * Start the SCRIPTS. | 1376 | * Start the SCRIPTS. |
1506 | */ | 1377 | */ |
1507 | sym_start_up (np, 1); | 1378 | sym_start_up(shost, 1); |
1508 | 1379 | ||
1509 | /* | 1380 | /* |
1510 | * Start the timer daemon | 1381 | * Start the timer daemon |
@@ -1519,33 +1390,37 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, | |||
1519 | * Fill Linux host instance structure | 1390 | * Fill Linux host instance structure |
1520 | * and return success. | 1391 | * and return success. |
1521 | */ | 1392 | */ |
1522 | instance->max_channel = 0; | 1393 | shost->max_channel = 0; |
1523 | instance->this_id = np->myaddr; | 1394 | shost->this_id = np->myaddr; |
1524 | instance->max_id = np->maxwide ? 16 : 8; | 1395 | shost->max_id = np->maxwide ? 16 : 8; |
1525 | instance->max_lun = SYM_CONF_MAX_LUN; | 1396 | shost->max_lun = SYM_CONF_MAX_LUN; |
1526 | instance->unique_id = pci_resource_start(pdev, 0); | 1397 | shost->unique_id = pci_resource_start(pdev, 0); |
1527 | instance->cmd_per_lun = SYM_CONF_MAX_TAG; | 1398 | shost->cmd_per_lun = SYM_CONF_MAX_TAG; |
1528 | instance->can_queue = (SYM_CONF_MAX_START-2); | 1399 | shost->can_queue = (SYM_CONF_MAX_START-2); |
1529 | instance->sg_tablesize = SYM_CONF_MAX_SG; | 1400 | shost->sg_tablesize = SYM_CONF_MAX_SG; |
1530 | instance->max_cmd_len = 16; | 1401 | shost->max_cmd_len = 16; |
1531 | BUG_ON(sym2_transport_template == NULL); | 1402 | BUG_ON(sym2_transport_template == NULL); |
1532 | instance->transportt = sym2_transport_template; | 1403 | shost->transportt = sym2_transport_template; |
1404 | |||
1405 | /* 53c896 rev 1 errata: DMA may not cross 16MB boundary */ | ||
1406 | if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2) | ||
1407 | shost->dma_boundary = 0xFFFFFF; | ||
1533 | 1408 | ||
1534 | spin_unlock_irqrestore(instance->host_lock, flags); | 1409 | spin_unlock_irqrestore(shost->host_lock, flags); |
1535 | 1410 | ||
1536 | return instance; | 1411 | return shost; |
1537 | 1412 | ||
1538 | reset_failed: | 1413 | reset_failed: |
1539 | printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " | 1414 | printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " |
1540 | "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); | 1415 | "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); |
1541 | spin_unlock_irqrestore(instance->host_lock, flags); | 1416 | spin_unlock_irqrestore(shost->host_lock, flags); |
1542 | attach_failed: | 1417 | attach_failed: |
1543 | if (!instance) | 1418 | if (!shost) |
1544 | return NULL; | 1419 | return NULL; |
1545 | printf_info("%s: giving up ...\n", sym_name(np)); | 1420 | printf_info("%s: giving up ...\n", sym_name(np)); |
1546 | if (np) | 1421 | if (np) |
1547 | sym_free_resources(np, pdev); | 1422 | sym_free_resources(np, pdev); |
1548 | scsi_host_put(instance); | 1423 | scsi_host_put(shost); |
1549 | 1424 | ||
1550 | return NULL; | 1425 | return NULL; |
1551 | } | 1426 | } |
@@ -1558,7 +1433,6 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, | |||
1558 | static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) | 1433 | static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) |
1559 | { | 1434 | { |
1560 | devp->nvram = nvp; | 1435 | devp->nvram = nvp; |
1561 | devp->device_id = devp->chip.device_id; | ||
1562 | nvp->type = 0; | 1436 | nvp->type = 0; |
1563 | 1437 | ||
1564 | sym_read_nvram(devp, nvp); | 1438 | sym_read_nvram(devp, nvp); |
@@ -1573,7 +1447,6 @@ static int __devinit sym_check_supported(struct sym_device *device) | |||
1573 | { | 1447 | { |
1574 | struct sym_chip *chip; | 1448 | struct sym_chip *chip; |
1575 | struct pci_dev *pdev = device->pdev; | 1449 | struct pci_dev *pdev = device->pdev; |
1576 | u_char revision; | ||
1577 | unsigned long io_port = pci_resource_start(pdev, 0); | 1450 | unsigned long io_port = pci_resource_start(pdev, 0); |
1578 | int i; | 1451 | int i; |
1579 | 1452 | ||
@@ -1593,14 +1466,12 @@ static int __devinit sym_check_supported(struct sym_device *device) | |||
1593 | * to our device structure so we can make it match the actual device | 1466 | * to our device structure so we can make it match the actual device |
1594 | * and options. | 1467 | * and options. |
1595 | */ | 1468 | */ |
1596 | pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); | 1469 | chip = sym_lookup_chip_table(pdev->device, pdev->revision); |
1597 | chip = sym_lookup_chip_table(pdev->device, revision); | ||
1598 | if (!chip) { | 1470 | if (!chip) { |
1599 | dev_info(&pdev->dev, "device not supported\n"); | 1471 | dev_info(&pdev->dev, "device not supported\n"); |
1600 | return -ENODEV; | 1472 | return -ENODEV; |
1601 | } | 1473 | } |
1602 | memcpy(&device->chip, chip, sizeof(device->chip)); | 1474 | memcpy(&device->chip, chip, sizeof(device->chip)); |
1603 | device->chip.revision_id = revision; | ||
1604 | 1475 | ||
1605 | return 0; | 1476 | return 0; |
1606 | } | 1477 | } |
@@ -1641,7 +1512,7 @@ static int __devinit sym_set_workarounds(struct sym_device *device) | |||
1641 | * We must ensure the chip will use WRITE AND INVALIDATE. | 1512 | * We must ensure the chip will use WRITE AND INVALIDATE. |
1642 | * The revision number limit is for now arbitrary. | 1513 | * The revision number limit is for now arbitrary. |
1643 | */ | 1514 | */ |
1644 | if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && chip->revision_id < 0x4) { | 1515 | if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) { |
1645 | chip->features |= (FE_WRIE | FE_CLSE); | 1516 | chip->features |= (FE_WRIE | FE_CLSE); |
1646 | } | 1517 | } |
1647 | 1518 | ||
@@ -1769,8 +1640,9 @@ static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev) | |||
1769 | * Detach the host. | 1640 | * Detach the host. |
1770 | * We have to free resources and halt the NCR chip. | 1641 | * We have to free resources and halt the NCR chip. |
1771 | */ | 1642 | */ |
1772 | static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev) | 1643 | static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev) |
1773 | { | 1644 | { |
1645 | struct sym_hcb *np = sym_get_hcb(shost); | ||
1774 | printk("%s: detaching ...\n", sym_name(np)); | 1646 | printk("%s: detaching ...\n", sym_name(np)); |
1775 | 1647 | ||
1776 | del_timer_sync(&np->s.timer); | 1648 | del_timer_sync(&np->s.timer); |
@@ -1823,7 +1695,7 @@ static int __devinit sym2_probe(struct pci_dev *pdev, | |||
1823 | { | 1695 | { |
1824 | struct sym_device sym_dev; | 1696 | struct sym_device sym_dev; |
1825 | struct sym_nvram nvram; | 1697 | struct sym_nvram nvram; |
1826 | struct Scsi_Host *instance; | 1698 | struct Scsi_Host *shost; |
1827 | 1699 | ||
1828 | memset(&sym_dev, 0, sizeof(sym_dev)); | 1700 | memset(&sym_dev, 0, sizeof(sym_dev)); |
1829 | memset(&nvram, 0, sizeof(nvram)); | 1701 | memset(&nvram, 0, sizeof(nvram)); |
@@ -1850,13 +1722,13 @@ static int __devinit sym2_probe(struct pci_dev *pdev, | |||
1850 | 1722 | ||
1851 | sym_get_nvram(&sym_dev, &nvram); | 1723 | sym_get_nvram(&sym_dev, &nvram); |
1852 | 1724 | ||
1853 | instance = sym_attach(&sym2_template, attach_count, &sym_dev); | 1725 | shost = sym_attach(&sym2_template, attach_count, &sym_dev); |
1854 | if (!instance) | 1726 | if (!shost) |
1855 | goto free; | 1727 | goto free; |
1856 | 1728 | ||
1857 | if (scsi_add_host(instance, &pdev->dev)) | 1729 | if (scsi_add_host(shost, &pdev->dev)) |
1858 | goto detach; | 1730 | goto detach; |
1859 | scsi_scan_host(instance); | 1731 | scsi_scan_host(shost); |
1860 | 1732 | ||
1861 | attach_count++; | 1733 | attach_count++; |
1862 | 1734 | ||
@@ -1874,20 +1746,143 @@ static int __devinit sym2_probe(struct pci_dev *pdev, | |||
1874 | 1746 | ||
1875 | static void __devexit sym2_remove(struct pci_dev *pdev) | 1747 | static void __devexit sym2_remove(struct pci_dev *pdev) |
1876 | { | 1748 | { |
1877 | struct sym_hcb *np = pci_get_drvdata(pdev); | 1749 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
1878 | struct Scsi_Host *host = np->s.host; | ||
1879 | |||
1880 | scsi_remove_host(host); | ||
1881 | scsi_host_put(host); | ||
1882 | |||
1883 | sym_detach(np, pdev); | ||
1884 | 1750 | ||
1751 | scsi_remove_host(shost); | ||
1752 | scsi_host_put(shost); | ||
1753 | sym_detach(shost, pdev); | ||
1885 | pci_release_regions(pdev); | 1754 | pci_release_regions(pdev); |
1886 | pci_disable_device(pdev); | 1755 | pci_disable_device(pdev); |
1887 | 1756 | ||
1888 | attach_count--; | 1757 | attach_count--; |
1889 | } | 1758 | } |
1890 | 1759 | ||
1760 | /** | ||
1761 | * sym2_io_error_detected() - called when PCI error is detected | ||
1762 | * @pdev: pointer to PCI device | ||
1763 | * @state: current state of the PCI slot | ||
1764 | */ | ||
1765 | static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev, | ||
1766 | enum pci_channel_state state) | ||
1767 | { | ||
1768 | /* If slot is permanently frozen, turn everything off */ | ||
1769 | if (state == pci_channel_io_perm_failure) { | ||
1770 | sym2_remove(pdev); | ||
1771 | return PCI_ERS_RESULT_DISCONNECT; | ||
1772 | } | ||
1773 | |||
1774 | disable_irq(pdev->irq); | ||
1775 | pci_disable_device(pdev); | ||
1776 | |||
1777 | /* Request that MMIO be enabled, so register dump can be taken. */ | ||
1778 | return PCI_ERS_RESULT_CAN_RECOVER; | ||
1779 | } | ||
1780 | |||
1781 | /** | ||
1782 | * sym2_io_slot_dump - Enable MMIO and dump debug registers | ||
1783 | * @pdev: pointer to PCI device | ||
1784 | */ | ||
1785 | static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev) | ||
1786 | { | ||
1787 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
1788 | |||
1789 | sym_dump_registers(shost); | ||
1790 | |||
1791 | /* Request a slot reset. */ | ||
1792 | return PCI_ERS_RESULT_NEED_RESET; | ||
1793 | } | ||
1794 | |||
1795 | /** | ||
1796 | * sym2_reset_workarounds - hardware-specific work-arounds | ||
1797 | * | ||
1798 | * This routine is similar to sym_set_workarounds(), except | ||
1799 | * that, at this point, we already know that the device was | ||
1800 | * successfully initialized at least once before, and so most | ||
1801 | * of the steps taken there are un-needed here. | ||
1802 | */ | ||
1803 | static void sym2_reset_workarounds(struct pci_dev *pdev) | ||
1804 | { | ||
1805 | u_short status_reg; | ||
1806 | struct sym_chip *chip; | ||
1807 | |||
1808 | chip = sym_lookup_chip_table(pdev->device, pdev->revision); | ||
1809 | |||
1810 | /* Work around for errant bit in 895A, in a fashion | ||
1811 | * similar to what is done in sym_set_workarounds(). | ||
1812 | */ | ||
1813 | pci_read_config_word(pdev, PCI_STATUS, &status_reg); | ||
1814 | if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) { | ||
1815 | status_reg = PCI_STATUS_66MHZ; | ||
1816 | pci_write_config_word(pdev, PCI_STATUS, status_reg); | ||
1817 | pci_read_config_word(pdev, PCI_STATUS, &status_reg); | ||
1818 | } | ||
1819 | } | ||
1820 | |||
1821 | /** | ||
1822 | * sym2_io_slot_reset() - called when the pci bus has been reset. | ||
1823 | * @pdev: pointer to PCI device | ||
1824 | * | ||
1825 | * Restart the card from scratch. | ||
1826 | */ | ||
1827 | static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev) | ||
1828 | { | ||
1829 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
1830 | struct sym_hcb *np = sym_get_hcb(shost); | ||
1831 | |||
1832 | printk(KERN_INFO "%s: recovering from a PCI slot reset\n", | ||
1833 | sym_name(np)); | ||
1834 | |||
1835 | if (pci_enable_device(pdev)) { | ||
1836 | printk(KERN_ERR "%s: Unable to enable after PCI reset\n", | ||
1837 | sym_name(np)); | ||
1838 | return PCI_ERS_RESULT_DISCONNECT; | ||
1839 | } | ||
1840 | |||
1841 | pci_set_master(pdev); | ||
1842 | enable_irq(pdev->irq); | ||
1843 | |||
1844 | /* If the chip can do Memory Write Invalidate, enable it */ | ||
1845 | if (np->features & FE_WRIE) { | ||
1846 | if (pci_set_mwi(pdev)) | ||
1847 | return PCI_ERS_RESULT_DISCONNECT; | ||
1848 | } | ||
1849 | |||
1850 | /* Perform work-arounds, analogous to sym_set_workarounds() */ | ||
1851 | sym2_reset_workarounds(pdev); | ||
1852 | |||
1853 | /* Perform host reset only on one instance of the card */ | ||
1854 | if (PCI_FUNC(pdev->devfn) == 0) { | ||
1855 | if (sym_reset_scsi_bus(np, 0)) { | ||
1856 | printk(KERN_ERR "%s: Unable to reset scsi host\n", | ||
1857 | sym_name(np)); | ||
1858 | return PCI_ERS_RESULT_DISCONNECT; | ||
1859 | } | ||
1860 | sym_start_up(shost, 1); | ||
1861 | } | ||
1862 | |||
1863 | return PCI_ERS_RESULT_RECOVERED; | ||
1864 | } | ||
1865 | |||
1866 | /** | ||
1867 | * sym2_io_resume() - resume normal ops after PCI reset | ||
1868 | * @pdev: pointer to PCI device | ||
1869 | * | ||
1870 | * Called when the error recovery driver tells us that it's | ||
1871 | * OK to resume normal operation. Use completion to allow | ||
1872 | * halted scsi ops to resume. | ||
1873 | */ | ||
1874 | static void sym2_io_resume(struct pci_dev *pdev) | ||
1875 | { | ||
1876 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
1877 | struct sym_data *sym_data = shost_priv(shost); | ||
1878 | |||
1879 | spin_lock_irq(shost->host_lock); | ||
1880 | if (sym_data->io_reset) | ||
1881 | complete_all(sym_data->io_reset); | ||
1882 | sym_data->io_reset = NULL; | ||
1883 | spin_unlock_irq(shost->host_lock); | ||
1884 | } | ||
1885 | |||
1891 | static void sym2_get_signalling(struct Scsi_Host *shost) | 1886 | static void sym2_get_signalling(struct Scsi_Host *shost) |
1892 | { | 1887 | { |
1893 | struct sym_hcb *np = sym_get_hcb(shost); | 1888 | struct sym_hcb *np = sym_get_hcb(shost); |
@@ -2050,11 +2045,19 @@ static struct pci_device_id sym2_id_table[] __devinitdata = { | |||
2050 | 2045 | ||
2051 | MODULE_DEVICE_TABLE(pci, sym2_id_table); | 2046 | MODULE_DEVICE_TABLE(pci, sym2_id_table); |
2052 | 2047 | ||
2048 | static struct pci_error_handlers sym2_err_handler = { | ||
2049 | .error_detected = sym2_io_error_detected, | ||
2050 | .mmio_enabled = sym2_io_slot_dump, | ||
2051 | .slot_reset = sym2_io_slot_reset, | ||
2052 | .resume = sym2_io_resume, | ||
2053 | }; | ||
2054 | |||
2053 | static struct pci_driver sym2_driver = { | 2055 | static struct pci_driver sym2_driver = { |
2054 | .name = NAME53C8XX, | 2056 | .name = NAME53C8XX, |
2055 | .id_table = sym2_id_table, | 2057 | .id_table = sym2_id_table, |
2056 | .probe = sym2_probe, | 2058 | .probe = sym2_probe, |
2057 | .remove = __devexit_p(sym2_remove), | 2059 | .remove = __devexit_p(sym2_remove), |
2060 | .err_handler = &sym2_err_handler, | ||
2058 | }; | 2061 | }; |
2059 | 2062 | ||
2060 | static int __init sym2_init(void) | 2063 | static int __init sym2_init(void) |
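Taken together, the sym_glue.c changes wire the driver into the generic PCI error-recovery sequence: error_detected() runs when the channel goes offline, mmio_enabled() (sym2_io_slot_dump) gets a chance to dump registers, slot_reset() re-enables and reinitialises the card, and resume() fires sym_data->io_reset so an error handler parked in sym_eh_handler() can continue. A stripped-down kernel-style sketch of a driver plugging into that sequence is below; the demo_* names are placeholders, not sym2 code, and the callback bodies are trimmed to the decisions that drive the flow.

#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    enum pci_channel_state state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* give up on the device */
	return PCI_ERS_RESULT_CAN_RECOVER;		/* ask for mmio_enabled() */
}

static pci_ers_result_t demo_mmio_enabled(struct pci_dev *pdev)
{
	/* registers are readable again: dump state, then ask for a reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	/* re-apply chip workarounds and restart the firmware here */
	return PCI_ERS_RESULT_RECOVERED;
}

static void demo_resume(struct pci_dev *pdev)
{
	/* wake anything that blocked while the channel was offline */
}

static struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.mmio_enabled	= demo_mmio_enabled,
	.slot_reset	= demo_slot_reset,
	.resume		= demo_resume,
};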
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h index 0f097ba4f712..567fbe0b4f09 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.h +++ b/drivers/scsi/sym53c8xx_2/sym_glue.h | |||
@@ -40,7 +40,9 @@ | |||
40 | #ifndef SYM_GLUE_H | 40 | #ifndef SYM_GLUE_H |
41 | #define SYM_GLUE_H | 41 | #define SYM_GLUE_H |
42 | 42 | ||
43 | #include <linux/completion.h> | ||
43 | #include <linux/delay.h> | 44 | #include <linux/delay.h> |
45 | #include <linux/interrupt.h> | ||
44 | #include <linux/ioport.h> | 46 | #include <linux/ioport.h> |
45 | #include <linux/pci.h> | 47 | #include <linux/pci.h> |
46 | #include <linux/string.h> | 48 | #include <linux/string.h> |
@@ -177,14 +179,11 @@ struct sym_shcb { | |||
177 | int unit; | 179 | int unit; |
178 | char inst_name[16]; | 180 | char inst_name[16]; |
179 | char chip_name[8]; | 181 | char chip_name[8]; |
180 | struct pci_dev *device; | ||
181 | 182 | ||
182 | struct Scsi_Host *host; | 183 | struct Scsi_Host *host; |
183 | 184 | ||
184 | void __iomem * ioaddr; /* MMIO kernel io address */ | 185 | void __iomem * ioaddr; /* MMIO kernel io address */ |
185 | void __iomem * ramaddr; /* RAM kernel io address */ | 186 | void __iomem * ramaddr; /* RAM kernel io address */ |
186 | u_short io_ws; /* IO window size */ | ||
187 | int irq; /* IRQ number */ | ||
188 | 187 | ||
189 | struct timer_list timer; /* Timer handler link header */ | 188 | struct timer_list timer; /* Timer handler link header */ |
190 | u_long lasttime; | 189 | u_long lasttime; |
@@ -212,20 +211,21 @@ struct sym_device { | |||
212 | } s; | 211 | } s; |
213 | struct sym_chip chip; | 212 | struct sym_chip chip; |
214 | struct sym_nvram *nvram; | 213 | struct sym_nvram *nvram; |
215 | u_short device_id; | ||
216 | u_char host_id; | 214 | u_char host_id; |
217 | }; | 215 | }; |
218 | 216 | ||
219 | /* | 217 | /* |
220 | * Driver host data structure. | 218 | * Driver host data structure. |
221 | */ | 219 | */ |
222 | struct host_data { | 220 | struct sym_data { |
223 | struct sym_hcb *ncb; | 221 | struct sym_hcb *ncb; |
222 | struct completion *io_reset; /* PCI error handling */ | ||
223 | struct pci_dev *pdev; | ||
224 | }; | 224 | }; |
225 | 225 | ||
226 | static inline struct sym_hcb * sym_get_hcb(struct Scsi_Host *host) | 226 | static inline struct sym_hcb * sym_get_hcb(struct Scsi_Host *host) |
227 | { | 227 | { |
228 | return ((struct host_data *)host->hostdata)->ncb; | 228 | return ((struct sym_data *)host->hostdata)->ncb; |
229 | } | 229 | } |
230 | 230 | ||
231 | #include "sym_fw.h" | 231 | #include "sym_fw.h" |
@@ -263,8 +263,8 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) | |||
263 | void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *ccb); | 263 | void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *ccb); |
264 | #define sym_print_addr(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg) | 264 | #define sym_print_addr(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg) |
265 | void sym_xpt_async_bus_reset(struct sym_hcb *np); | 265 | void sym_xpt_async_bus_reset(struct sym_hcb *np); |
266 | void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target); | ||
267 | int sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); | 266 | int sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); |
268 | void sym_log_bus_error(struct sym_hcb *np); | 267 | void sym_log_bus_error(struct Scsi_Host *); |
268 | void sym_dump_registers(struct Scsi_Host *); | ||
269 | 269 | ||
270 | #endif /* SYM_GLUE_H */ | 270 | #endif /* SYM_GLUE_H */ |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 21cd4c7f5289..463f119f20e9 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -52,7 +52,7 @@ | |||
52 | * Needed function prototypes. | 52 | * Needed function prototypes. |
53 | */ | 53 | */ |
54 | static void sym_int_ma (struct sym_hcb *np); | 54 | static void sym_int_ma (struct sym_hcb *np); |
55 | static void sym_int_sir (struct sym_hcb *np); | 55 | static void sym_int_sir(struct sym_hcb *); |
56 | static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np); | 56 | static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np); |
57 | static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa); | 57 | static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa); |
58 | static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln); | 58 | static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln); |
@@ -684,6 +684,8 @@ static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram) | |||
684 | */ | 684 | */ |
685 | static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) | 685 | static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) |
686 | { | 686 | { |
687 | struct sym_data *sym_data = shost_priv(shost); | ||
688 | struct pci_dev *pdev = sym_data->pdev; | ||
687 | u_char burst_max; | 689 | u_char burst_max; |
688 | u32 period; | 690 | u32 period; |
689 | int i; | 691 | int i; |
@@ -778,19 +780,12 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru | |||
778 | * 64 bit addressing (895A/896/1010) ? | 780 | * 64 bit addressing (895A/896/1010) ? |
779 | */ | 781 | */ |
780 | if (np->features & FE_DAC) { | 782 | if (np->features & FE_DAC) { |
781 | #if SYM_CONF_DMA_ADDRESSING_MODE == 0 | 783 | if (!use_dac(np)) |
782 | np->rv_ccntl1 |= (DDAC); | 784 | np->rv_ccntl1 |= (DDAC); |
783 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 1 | 785 | else if (SYM_CONF_DMA_ADDRESSING_MODE == 1) |
784 | if (!np->use_dac) | 786 | np->rv_ccntl1 |= (XTIMOD | EXTIBMV); |
785 | np->rv_ccntl1 |= (DDAC); | 787 | else if (SYM_CONF_DMA_ADDRESSING_MODE == 2) |
786 | else | 788 | np->rv_ccntl1 |= (0 | EXTIBMV); |
787 | np->rv_ccntl1 |= (XTIMOD | EXTIBMV); | ||
788 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 | ||
789 | if (!np->use_dac) | ||
790 | np->rv_ccntl1 |= (DDAC); | ||
791 | else | ||
792 | np->rv_ccntl1 |= (0 | EXTIBMV); | ||
793 | #endif | ||
794 | } | 789 | } |
795 | 790 | ||
796 | /* | 791 | /* |
@@ -804,8 +799,8 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru | |||
804 | * In dual channel mode, contention occurs if internal cycles | 799 | * In dual channel mode, contention occurs if internal cycles |
805 | * are used. Disable internal cycles. | 800 | * are used. Disable internal cycles. |
806 | */ | 801 | */ |
807 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 && | 802 | if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && |
808 | np->revision_id < 0x1) | 803 | pdev->revision < 0x1) |
809 | np->rv_ccntl0 |= DILS; | 804 | np->rv_ccntl0 |= DILS; |
810 | 805 | ||
811 | /* | 806 | /* |
@@ -828,10 +823,10 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru | |||
828 | * this driver. The generic ncr driver that does not use | 823 | * this driver. The generic ncr driver that does not use |
829 | * LOAD/STORE instructions does not need this work-around. | 824 | * LOAD/STORE instructions does not need this work-around. |
830 | */ | 825 | */ |
831 | if ((np->device_id == PCI_DEVICE_ID_NCR_53C810 && | 826 | if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 && |
832 | np->revision_id >= 0x10 && np->revision_id <= 0x11) || | 827 | pdev->revision >= 0x10 && pdev->revision <= 0x11) || |
833 | (np->device_id == PCI_DEVICE_ID_NCR_53C860 && | 828 | (pdev->device == PCI_DEVICE_ID_NCR_53C860 && |
834 | np->revision_id <= 0x1)) | 829 | pdev->revision <= 0x1)) |
835 | np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); | 830 | np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); |
836 | 831 | ||
837 | /* | 832 | /* |
@@ -897,7 +892,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru | |||
897 | if ((SYM_SETUP_SCSI_LED || | 892 | if ((SYM_SETUP_SCSI_LED || |
898 | (nvram->type == SYM_SYMBIOS_NVRAM || | 893 | (nvram->type == SYM_SYMBIOS_NVRAM || |
899 | (nvram->type == SYM_TEKRAM_NVRAM && | 894 | (nvram->type == SYM_TEKRAM_NVRAM && |
900 | np->device_id == PCI_DEVICE_ID_NCR_53C895))) && | 895 | pdev->device == PCI_DEVICE_ID_NCR_53C895))) && |
901 | !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) | 896 | !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) |
902 | np->features |= FE_LED0; | 897 | np->features |= FE_LED0; |
903 | 898 | ||
@@ -1135,8 +1130,9 @@ restart_test: | |||
1135 | * First 24 register of the chip: | 1130 | * First 24 register of the chip: |
1136 | * r0..rf | 1131 | * r0..rf |
1137 | */ | 1132 | */ |
1138 | static void sym_log_hard_error(struct sym_hcb *np, u_short sist, u_char dstat) | 1133 | static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat) |
1139 | { | 1134 | { |
1135 | struct sym_hcb *np = sym_get_hcb(shost); | ||
1140 | u32 dsp; | 1136 | u32 dsp; |
1141 | int script_ofs; | 1137 | int script_ofs; |
1142 | int script_size; | 1138 | int script_size; |
@@ -1180,16 +1176,27 @@ static void sym_log_hard_error(struct sym_hcb *np, u_short sist, u_char dstat) | |||
1180 | scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); | 1176 | scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); |
1181 | } | 1177 | } |
1182 | 1178 | ||
1183 | printf ("%s: regdump:", sym_name(np)); | 1179 | printf("%s: regdump:", sym_name(np)); |
1184 | for (i=0; i<24;i++) | 1180 | for (i = 0; i < 24; i++) |
1185 | printf (" %02x", (unsigned)INB_OFF(np, i)); | 1181 | printf(" %02x", (unsigned)INB_OFF(np, i)); |
1186 | printf (".\n"); | 1182 | printf(".\n"); |
1187 | 1183 | ||
1188 | /* | 1184 | /* |
1189 | * PCI BUS error. | 1185 | * PCI BUS error. |
1190 | */ | 1186 | */ |
1191 | if (dstat & (MDPE|BF)) | 1187 | if (dstat & (MDPE|BF)) |
1192 | sym_log_bus_error(np); | 1188 | sym_log_bus_error(shost); |
1189 | } | ||
1190 | |||
1191 | void sym_dump_registers(struct Scsi_Host *shost) | ||
1192 | { | ||
1193 | struct sym_hcb *np = sym_get_hcb(shost); | ||
1194 | u_short sist; | ||
1195 | u_char dstat; | ||
1196 | |||
1197 | sist = INW(np, nc_sist); | ||
1198 | dstat = INB(np, nc_dstat); | ||
1199 | sym_log_hard_error(shost, sist, dstat); | ||
1193 | } | 1200 | } |
1194 | 1201 | ||
1195 | static struct sym_chip sym_dev_table[] = { | 1202 | static struct sym_chip sym_dev_table[] = { |
@@ -1312,7 +1319,7 @@ int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s) | |||
1312 | { | 1319 | { |
1313 | int i; | 1320 | int i; |
1314 | 1321 | ||
1315 | if (!np->use_dac) | 1322 | if (!use_dac(np)) |
1316 | goto weird; | 1323 | goto weird; |
1317 | 1324 | ||
1318 | /* Look up existing mappings */ | 1325 | /* Look up existing mappings */ |
@@ -1519,7 +1526,8 @@ void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp) | |||
1519 | np->squeueput = qidx; | 1526 | np->squeueput = qidx; |
1520 | 1527 | ||
1521 | if (DEBUG_FLAGS & DEBUG_QUEUE) | 1528 | if (DEBUG_FLAGS & DEBUG_QUEUE) |
1522 | printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); | 1529 | scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n", |
1530 | np->squeueput); | ||
1523 | 1531 | ||
1524 | /* | 1532 | /* |
1525 | * Script processor may be waiting for reselect. | 1533 | * Script processor may be waiting for reselect. |
@@ -1696,8 +1704,11 @@ static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status) | |||
1696 | * 1: SCSI BUS RESET delivered or received. | 1704 | * 1: SCSI BUS RESET delivered or received. |
1697 | * 2: SCSI BUS MODE changed. | 1705 | * 2: SCSI BUS MODE changed. |
1698 | */ | 1706 | */ |
1699 | void sym_start_up (struct sym_hcb *np, int reason) | 1707 | void sym_start_up(struct Scsi_Host *shost, int reason) |
1700 | { | 1708 | { |
1709 | struct sym_data *sym_data = shost_priv(shost); | ||
1710 | struct pci_dev *pdev = sym_data->pdev; | ||
1711 | struct sym_hcb *np = sym_data->ncb; | ||
1701 | int i; | 1712 | int i; |
1702 | u32 phys; | 1713 | u32 phys; |
1703 | 1714 | ||
@@ -1746,7 +1757,7 @@ void sym_start_up (struct sym_hcb *np, int reason) | |||
1746 | * This also let point to first position the start | 1757 | * This also let point to first position the start |
1747 | * and done queue pointers used from SCRIPTS. | 1758 | * and done queue pointers used from SCRIPTS. |
1748 | */ | 1759 | */ |
1749 | np->fw_patch(np); | 1760 | np->fw_patch(shost); |
1750 | 1761 | ||
1751 | /* | 1762 | /* |
1752 | * Wakeup all pending jobs. | 1763 | * Wakeup all pending jobs. |
@@ -1788,7 +1799,7 @@ void sym_start_up (struct sym_hcb *np, int reason) | |||
1788 | /* | 1799 | /* |
1789 | * For now, disable AIP generation on C1010-66. | 1800 | * For now, disable AIP generation on C1010-66. |
1790 | */ | 1801 | */ |
1791 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66) | 1802 | if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66) |
1792 | OUTB(np, nc_aipcntl1, DISAIP); | 1803 | OUTB(np, nc_aipcntl1, DISAIP); |
1793 | 1804 | ||
1794 | /* | 1805 | /* |
@@ -1798,8 +1809,8 @@ void sym_start_up (struct sym_hcb *np, int reason) | |||
1798 | * that from SCRIPTS for each selection/reselection, but | 1809 | * that from SCRIPTS for each selection/reselection, but |
1799 | * I just don't want. :) | 1810 | * I just don't want. :) |
1800 | */ | 1811 | */ |
1801 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 && | 1812 | if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && |
1802 | np->revision_id < 1) | 1813 | pdev->revision < 1) |
1803 | OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30); | 1814 | OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30); |
1804 | 1815 | ||
1805 | /* | 1816 | /* |
@@ -1807,9 +1818,9 @@ void sym_start_up (struct sym_hcb *np, int reason) | |||
1807 | * Disable overlapped arbitration for some dual function devices, | 1818 | * Disable overlapped arbitration for some dual function devices, |
1808 | * regardless revision id (kind of post-chip-design feature. ;-)) | 1819 | * regardless revision id (kind of post-chip-design feature. ;-)) |
1809 | */ | 1820 | */ |
1810 | if (np->device_id == PCI_DEVICE_ID_NCR_53C875) | 1821 | if (pdev->device == PCI_DEVICE_ID_NCR_53C875) |
1811 | OUTB(np, nc_ctest0, (1<<5)); | 1822 | OUTB(np, nc_ctest0, (1<<5)); |
1812 | else if (np->device_id == PCI_DEVICE_ID_NCR_53C896) | 1823 | else if (pdev->device == PCI_DEVICE_ID_NCR_53C896) |
1813 | np->rv_ccntl0 |= DPR; | 1824 | np->rv_ccntl0 |= DPR; |
1814 | 1825 | ||
1815 | /* | 1826 | /* |
@@ -1827,7 +1838,7 @@ void sym_start_up (struct sym_hcb *np, int reason) | |||
1827 | * Set up scratch C and DRS IO registers to map the 32 bit | 1838 | * Set up scratch C and DRS IO registers to map the 32 bit |
1828 | * DMA address range our data structures are located in. | 1839 | * DMA address range our data structures are located in. |
1829 | */ | 1840 | */ |
1830 | if (np->use_dac) { | 1841 | if (use_dac(np)) { |
1831 | np->dmap_bah[0] = 0; /* ??? */ | 1842 | np->dmap_bah[0] = 0; /* ??? */ |
1832 | OUTL(np, nc_scrx[0], np->dmap_bah[0]); | 1843 | OUTL(np, nc_scrx[0], np->dmap_bah[0]); |
1833 | OUTL(np, nc_drs, np->dmap_bah[0]); | 1844 | OUTL(np, nc_drs, np->dmap_bah[0]); |
@@ -1900,7 +1911,7 @@ void sym_start_up (struct sym_hcb *np, int reason) | |||
1900 | if (sym_verbose >= 2) | 1911 | if (sym_verbose >= 2) |
1901 | printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np)); | 1912 | printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np)); |
1902 | memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz); | 1913 | memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz); |
1903 | if (np->ram_ws == 8192) { | 1914 | if (np->features & FE_RAM8K) { |
1904 | memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz); | 1915 | memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz); |
1905 | phys = scr_to_cpu(np->scr_ram_seg); | 1916 | phys = scr_to_cpu(np->scr_ram_seg); |
1906 | OUTL(np, nc_mmws, phys); | 1917 | OUTL(np, nc_mmws, phys); |
@@ -2214,8 +2225,9 @@ static void sym_int_udc (struct sym_hcb *np) | |||
2214 | * mode to eight bit asynchronous, etc... | 2225 | * mode to eight bit asynchronous, etc... |
2215 | * So, just reinitializing all except chip should be enough. | 2226 | * So, just reinitializing all except chip should be enough. |
2216 | */ | 2227 | */ |
2217 | static void sym_int_sbmc (struct sym_hcb *np) | 2228 | static void sym_int_sbmc(struct Scsi_Host *shost) |
2218 | { | 2229 | { |
2230 | struct sym_hcb *np = sym_get_hcb(shost); | ||
2219 | u_char scsi_mode = INB(np, nc_stest4) & SMODE; | 2231 | u_char scsi_mode = INB(np, nc_stest4) & SMODE; |
2220 | 2232 | ||
2221 | /* | 2233 | /* |
@@ -2228,7 +2240,7 @@ static void sym_int_sbmc (struct sym_hcb *np) | |||
2228 | * Should suspend command processing for a few seconds and | 2240 | * Should suspend command processing for a few seconds and |
2229 | * reinitialize all except the chip. | 2241 | * reinitialize all except the chip. |
2230 | */ | 2242 | */ |
2231 | sym_start_up (np, 2); | 2243 | sym_start_up(shost, 2); |
2232 | } | 2244 | } |
2233 | 2245 | ||
2234 | /* | 2246 | /* |
@@ -2756,8 +2768,11 @@ reset_all: | |||
2756 | * Use at your own decision and risk. | 2768 | * Use at your own decision and risk. |
2757 | */ | 2769 | */ |
2758 | 2770 | ||
2759 | void sym_interrupt (struct sym_hcb *np) | 2771 | irqreturn_t sym_interrupt(struct Scsi_Host *shost) |
2760 | { | 2772 | { |
2773 | struct sym_data *sym_data = shost_priv(shost); | ||
2774 | struct sym_hcb *np = sym_data->ncb; | ||
2775 | struct pci_dev *pdev = sym_data->pdev; | ||
2761 | u_char istat, istatc; | 2776 | u_char istat, istatc; |
2762 | u_char dstat; | 2777 | u_char dstat; |
2763 | u_short sist; | 2778 | u_short sist; |
@@ -2782,7 +2797,7 @@ void sym_interrupt (struct sym_hcb *np) | |||
2782 | } | 2797 | } |
2783 | 2798 | ||
2784 | if (!(istat & (SIP|DIP))) | 2799 | if (!(istat & (SIP|DIP))) |
2785 | return; | 2800 | return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE; |
2786 | 2801 | ||
2787 | #if 0 /* We should never get this one */ | 2802 | #if 0 /* We should never get this one */ |
2788 | if (istat & CABRT) | 2803 | if (istat & CABRT) |
@@ -2809,6 +2824,13 @@ void sym_interrupt (struct sym_hcb *np) | |||
2809 | dstat |= INB(np, nc_dstat); | 2824 | dstat |= INB(np, nc_dstat); |
2810 | istatc = INB(np, nc_istat); | 2825 | istatc = INB(np, nc_istat); |
2811 | istat |= istatc; | 2826 | istat |= istatc; |
2827 | |||
2828 | /* Prevent deadlock waiting on a condition that may | ||
2829 | * never clear. */ | ||
2830 | if (unlikely(sist == 0xffff && dstat == 0xff)) { | ||
2831 | if (pci_channel_offline(pdev)) | ||
2832 | return IRQ_NONE; | ||
2833 | } | ||
2812 | } while (istatc & (SIP|DIP)); | 2834 | } while (istatc & (SIP|DIP)); |
2813 | 2835 | ||
2814 | if (DEBUG_FLAGS & DEBUG_TINY) | 2836 | if (DEBUG_FLAGS & DEBUG_TINY) |
@@ -2842,10 +2864,10 @@ void sym_interrupt (struct sym_hcb *np) | |||
2842 | !(dstat & (MDPE|BF|ABRT|IID))) { | 2864 | !(dstat & (MDPE|BF|ABRT|IID))) { |
2843 | if (sist & PAR) sym_int_par (np, sist); | 2865 | if (sist & PAR) sym_int_par (np, sist); |
2844 | else if (sist & MA) sym_int_ma (np); | 2866 | else if (sist & MA) sym_int_ma (np); |
2845 | else if (dstat & SIR) sym_int_sir (np); | 2867 | else if (dstat & SIR) sym_int_sir(np); |
2846 | else if (dstat & SSI) OUTONB_STD(); | 2868 | else if (dstat & SSI) OUTONB_STD(); |
2847 | else goto unknown_int; | 2869 | else goto unknown_int; |
2848 | return; | 2870 | return IRQ_HANDLED; |
2849 | } | 2871 | } |
2850 | 2872 | ||
2851 | /* | 2873 | /* |
@@ -2861,8 +2883,8 @@ void sym_interrupt (struct sym_hcb *np) | |||
2861 | */ | 2883 | */ |
2862 | if (sist & RST) { | 2884 | if (sist & RST) { |
2863 | printf("%s: SCSI BUS reset detected.\n", sym_name(np)); | 2885 | printf("%s: SCSI BUS reset detected.\n", sym_name(np)); |
2864 | sym_start_up (np, 1); | 2886 | sym_start_up(shost, 1); |
2865 | return; | 2887 | return IRQ_HANDLED; |
2866 | } | 2888 | } |
2867 | 2889 | ||
2868 | OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ | 2890 | OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ |
@@ -2870,11 +2892,11 @@ void sym_interrupt (struct sym_hcb *np) | |||
2870 | 2892 | ||
2871 | if (!(sist & (GEN|HTH|SGE)) && | 2893 | if (!(sist & (GEN|HTH|SGE)) && |
2872 | !(dstat & (MDPE|BF|ABRT|IID))) { | 2894 | !(dstat & (MDPE|BF|ABRT|IID))) { |
2873 | if (sist & SBMC) sym_int_sbmc (np); | 2895 | if (sist & SBMC) sym_int_sbmc(shost); |
2874 | else if (sist & STO) sym_int_sto (np); | 2896 | else if (sist & STO) sym_int_sto (np); |
2875 | else if (sist & UDC) sym_int_udc (np); | 2897 | else if (sist & UDC) sym_int_udc (np); |
2876 | else goto unknown_int; | 2898 | else goto unknown_int; |
2877 | return; | 2899 | return IRQ_HANDLED; |
2878 | } | 2900 | } |
2879 | 2901 | ||
2880 | /* | 2902 | /* |
@@ -2884,12 +2906,12 @@ void sym_interrupt (struct sym_hcb *np) | |||
2884 | * Reset everything. | 2906 | * Reset everything. |
2885 | */ | 2907 | */ |
2886 | 2908 | ||
2887 | sym_log_hard_error(np, sist, dstat); | 2909 | sym_log_hard_error(shost, sist, dstat); |
2888 | 2910 | ||
2889 | if ((sist & (GEN|HTH|SGE)) || | 2911 | if ((sist & (GEN|HTH|SGE)) || |
2890 | (dstat & (MDPE|BF|ABRT|IID))) { | 2912 | (dstat & (MDPE|BF|ABRT|IID))) { |
2891 | sym_start_reset(np); | 2913 | sym_start_reset(np); |
2892 | return; | 2914 | return IRQ_HANDLED; |
2893 | } | 2915 | } |
2894 | 2916 | ||
2895 | unknown_int: | 2917 | unknown_int: |
@@ -2900,6 +2922,7 @@ unknown_int: | |||
2900 | printf( "%s: unknown interrupt(s) ignored, " | 2922 | printf( "%s: unknown interrupt(s) ignored, " |
2901 | "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", | 2923 | "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", |
2902 | sym_name(np), istat, dstat, sist); | 2924 | sym_name(np), istat, dstat, sist); |
2925 | return IRQ_NONE; | ||
2903 | } | 2926 | } |
2904 | 2927 | ||
2905 | /* | 2928 | /* |
@@ -3520,7 +3543,8 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num) | |||
3520 | * If we sent a BDR, make upper layer aware of that. | 3543 | * If we sent a BDR, make upper layer aware of that. |
3521 | */ | 3544 | */ |
3522 | if (np->abrt_msg[0] == M_RESET) | 3545 | if (np->abrt_msg[0] == M_RESET) |
3523 | sym_xpt_async_sent_bdr(np, target); | 3546 | starget_printk(KERN_NOTICE, starget, |
3547 | "has been reset\n"); | ||
3524 | break; | 3548 | break; |
3525 | } | 3549 | } |
3526 | 3550 | ||
@@ -4304,7 +4328,7 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym | |||
4304 | /* | 4328 | /* |
4305 | * chip exception handler for programmed interrupts. | 4329 | * chip exception handler for programmed interrupts. |
4306 | */ | 4330 | */ |
4307 | static void sym_int_sir (struct sym_hcb *np) | 4331 | static void sym_int_sir(struct sym_hcb *np) |
4308 | { | 4332 | { |
4309 | u_char num = INB(np, nc_dsps); | 4333 | u_char num = INB(np, nc_dsps); |
4310 | u32 dsa = INL(np, nc_dsa); | 4334 | u32 dsa = INL(np, nc_dsa); |
@@ -4343,31 +4367,30 @@ static void sym_int_sir (struct sym_hcb *np) | |||
4343 | return; | 4367 | return; |
4344 | /* | 4368 | /* |
4345 | * The device didn't go to MSG OUT phase after having | 4369 | * The device didn't go to MSG OUT phase after having |
4346 | * been selected with ATN. We donnot want to handle | 4370 | * been selected with ATN. We do not want to handle that. |
4347 | * that. | ||
4348 | */ | 4371 | */ |
4349 | case SIR_SEL_ATN_NO_MSG_OUT: | 4372 | case SIR_SEL_ATN_NO_MSG_OUT: |
4350 | printf ("%s:%d: No MSG OUT phase after selection with ATN.\n", | 4373 | scmd_printk(KERN_WARNING, cp->cmd, |
4351 | sym_name (np), target); | 4374 | "No MSG OUT phase after selection with ATN\n"); |
4352 | goto out_stuck; | 4375 | goto out_stuck; |
4353 | /* | 4376 | /* |
4354 | * The device didn't switch to MSG IN phase after | 4377 | * The device didn't switch to MSG IN phase after |
4355 | * having reseleted the initiator. | 4378 | * having reselected the initiator. |
4356 | */ | 4379 | */ |
4357 | case SIR_RESEL_NO_MSG_IN: | 4380 | case SIR_RESEL_NO_MSG_IN: |
4358 | printf ("%s:%d: No MSG IN phase after reselection.\n", | 4381 | scmd_printk(KERN_WARNING, cp->cmd, |
4359 | sym_name (np), target); | 4382 | "No MSG IN phase after reselection\n"); |
4360 | goto out_stuck; | 4383 | goto out_stuck; |
4361 | /* | 4384 | /* |
4362 | * After reselection, the device sent a message that wasn't | 4385 | * After reselection, the device sent a message that wasn't |
4363 | * an IDENTIFY. | 4386 | * an IDENTIFY. |
4364 | */ | 4387 | */ |
4365 | case SIR_RESEL_NO_IDENTIFY: | 4388 | case SIR_RESEL_NO_IDENTIFY: |
4366 | printf ("%s:%d: No IDENTIFY after reselection.\n", | 4389 | scmd_printk(KERN_WARNING, cp->cmd, |
4367 | sym_name (np), target); | 4390 | "No IDENTIFY after reselection\n"); |
4368 | goto out_stuck; | 4391 | goto out_stuck; |
4369 | /* | 4392 | /* |
4370 | * The device reselected a LUN we donnot know about. | 4393 | * The device reselected a LUN we do not know about. |
4371 | */ | 4394 | */ |
4372 | case SIR_RESEL_BAD_LUN: | 4395 | case SIR_RESEL_BAD_LUN: |
4373 | np->msgout[0] = M_RESET; | 4396 | np->msgout[0] = M_RESET; |
@@ -4380,8 +4403,7 @@ static void sym_int_sir (struct sym_hcb *np) | |||
4380 | np->msgout[0] = M_ABORT; | 4403 | np->msgout[0] = M_ABORT; |
4381 | goto out; | 4404 | goto out; |
4382 | /* | 4405 | /* |
4383 | * The device reselected for a tagged nexus that we donnot | 4406 | * The device reselected for a tagged nexus that we do not have. |
4384 | * have. | ||
4385 | */ | 4407 | */ |
4386 | case SIR_RESEL_BAD_I_T_L_Q: | 4408 | case SIR_RESEL_BAD_I_T_L_Q: |
4387 | np->msgout[0] = M_ABORT_TAG; | 4409 | np->msgout[0] = M_ABORT_TAG; |
@@ -4393,8 +4415,8 @@ static void sym_int_sir (struct sym_hcb *np) | |||
4393 | case SIR_RESEL_ABORTED: | 4415 | case SIR_RESEL_ABORTED: |
4394 | np->lastmsg = np->msgout[0]; | 4416 | np->lastmsg = np->msgout[0]; |
4395 | np->msgout[0] = M_NOOP; | 4417 | np->msgout[0] = M_NOOP; |
4396 | printf ("%s:%d: message %x sent on bad reselection.\n", | 4418 | scmd_printk(KERN_WARNING, cp->cmd, |
4397 | sym_name (np), target, np->lastmsg); | 4419 | "message %x sent on bad reselection\n", np->lastmsg); |
4398 | goto out; | 4420 | goto out; |
4399 | /* | 4421 | /* |
4400 | * The SCRIPTS let us know that a message has been | 4422 | * The SCRIPTS let us know that a message has been |
@@ -5578,16 +5600,13 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram | |||
5578 | np->scriptz_ba = vtobus(np->scriptz0); | 5600 | np->scriptz_ba = vtobus(np->scriptz0); |
5579 | 5601 | ||
5580 | if (np->ram_ba) { | 5602 | if (np->ram_ba) { |
5581 | np->scripta_ba = np->ram_ba; | 5603 | np->scripta_ba = np->ram_ba; |
5582 | if (np->features & FE_RAM8K) { | 5604 | if (np->features & FE_RAM8K) { |
5583 | np->ram_ws = 8192; | ||
5584 | np->scriptb_ba = np->scripta_ba + 4096; | 5605 | np->scriptb_ba = np->scripta_ba + 4096; |
5585 | #if 0 /* May get useful for 64 BIT PCI addressing */ | 5606 | #if 0 /* May get useful for 64 BIT PCI addressing */ |
5586 | np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); | 5607 | np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); |
5587 | #endif | 5608 | #endif |
5588 | } | 5609 | } |
5589 | else | ||
5590 | np->ram_ws = 4096; | ||
5591 | } | 5610 | } |
5592 | 5611 | ||
5593 | /* | 5612 | /* |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h index 79ab6a177039..ad078805e62b 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.h +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h | |||
@@ -883,10 +883,7 @@ struct sym_hcb { | |||
883 | * Physical bus addresses of the chip. | 883 | * Physical bus addresses of the chip. |
884 | */ | 884 | */ |
885 | u32 mmio_ba; /* MMIO 32 bit BUS address */ | 885 | u32 mmio_ba; /* MMIO 32 bit BUS address */ |
886 | int mmio_ws; /* MMIO Window size */ | ||
887 | |||
888 | u32 ram_ba; /* RAM 32 bit BUS address */ | 886 | u32 ram_ba; /* RAM 32 bit BUS address */ |
889 | int ram_ws; /* RAM window size */ | ||
890 | 887 | ||
891 | /* | 888 | /* |
892 | * SCRIPTS virtual and physical bus addresses. | 889 | * SCRIPTS virtual and physical bus addresses. |
@@ -912,14 +909,12 @@ struct sym_hcb { | |||
912 | struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */ | 909 | struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */ |
913 | struct sym_fwz_ba fwz_bas; /* Useful SCRIPTZ bus addresses */ | 910 | struct sym_fwz_ba fwz_bas; /* Useful SCRIPTZ bus addresses */ |
914 | void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw); | 911 | void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw); |
915 | void (*fw_patch)(struct sym_hcb *np); | 912 | void (*fw_patch)(struct Scsi_Host *); |
916 | char *fw_name; | 913 | char *fw_name; |
917 | 914 | ||
918 | /* | 915 | /* |
919 | * General controller parameters and configuration. | 916 | * General controller parameters and configuration. |
920 | */ | 917 | */ |
921 | u_short device_id; /* PCI device id */ | ||
922 | u_char revision_id; /* PCI device revision id */ | ||
923 | u_int features; /* Chip features map */ | 918 | u_int features; /* Chip features map */ |
924 | u_char myaddr; /* SCSI id of the adapter */ | 919 | u_char myaddr; /* SCSI id of the adapter */ |
925 | u_char maxburst; /* log base 2 of dwords burst */ | 920 | u_char maxburst; /* log base 2 of dwords burst */ |
@@ -1031,6 +1026,14 @@ struct sym_hcb { | |||
1031 | #endif | 1026 | #endif |
1032 | }; | 1027 | }; |
1033 | 1028 | ||
1029 | #if SYM_CONF_DMA_ADDRESSING_MODE == 0 | ||
1030 | #define use_dac(np) 0 | ||
1031 | #define set_dac(np) do { } while (0) | ||
1032 | #else | ||
1033 | #define use_dac(np) (np)->use_dac | ||
1034 | #define set_dac(np) (np)->use_dac = 1 | ||
1035 | #endif | ||
1036 | |||
1034 | #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl)) | 1037 | #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl)) |
1035 | 1038 | ||
1036 | 1039 | ||
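With the use_dac()/set_dac() macros above, the DMA-addressing decision that used to be an #if/#elif ladder in sym_prepare_setting() becomes ordinary C: when SYM_CONF_DMA_ADDRESSING_MODE is 0 the macro is the constant 0 and the compiler folds the dual-address-cycle branches away. A tiny sketch of the pattern, with placeholder names and bit values:

	/* Sketch: a config macro folding to a constant prunes dead branches. */
	#define DEMO_DMA_MODE	0		/* placeholder for SYM_CONF_DMA_ADDRESSING_MODE */
	#define DEMO_DDAC	0x01		/* placeholder bit values */
	#define DEMO_EXTIBMV	0x02

	struct demo_chip { unsigned int ctrl, use_dac; };

	#if DEMO_DMA_MODE == 0
	#define demo_use_dac(np)	0
	#else
	#define demo_use_dac(np)	((np)->use_dac)
	#endif

	static void demo_setup(struct demo_chip *np)
	{
		if (!demo_use_dac(np))		/* always taken when DEMO_DMA_MODE == 0 */
			np->ctrl |= DEMO_DDAC;
		else
			np->ctrl |= DEMO_EXTIBMV;
	}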
@@ -1052,8 +1055,8 @@ void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn); | |||
1052 | #else | 1055 | #else |
1053 | void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp); | 1056 | void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp); |
1054 | #endif | 1057 | #endif |
1055 | void sym_start_up(struct sym_hcb *np, int reason); | 1058 | void sym_start_up(struct Scsi_Host *, int reason); |
1056 | void sym_interrupt(struct sym_hcb *np); | 1059 | irqreturn_t sym_interrupt(struct Scsi_Host *); |
1057 | int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task); | 1060 | int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task); |
1058 | struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); | 1061 | struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); |
1059 | void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); | 1062 | void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); |
@@ -1073,18 +1076,21 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram | |||
1073 | */ | 1076 | */ |
1074 | 1077 | ||
1075 | #if SYM_CONF_DMA_ADDRESSING_MODE == 0 | 1078 | #if SYM_CONF_DMA_ADDRESSING_MODE == 0 |
1079 | #define DMA_DAC_MASK DMA_32BIT_MASK | ||
1076 | #define sym_build_sge(np, data, badd, len) \ | 1080 | #define sym_build_sge(np, data, badd, len) \ |
1077 | do { \ | 1081 | do { \ |
1078 | (data)->addr = cpu_to_scr(badd); \ | 1082 | (data)->addr = cpu_to_scr(badd); \ |
1079 | (data)->size = cpu_to_scr(len); \ | 1083 | (data)->size = cpu_to_scr(len); \ |
1080 | } while (0) | 1084 | } while (0) |
1081 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 1 | 1085 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 1 |
1086 | #define DMA_DAC_MASK DMA_40BIT_MASK | ||
1082 | #define sym_build_sge(np, data, badd, len) \ | 1087 | #define sym_build_sge(np, data, badd, len) \ |
1083 | do { \ | 1088 | do { \ |
1084 | (data)->addr = cpu_to_scr(badd); \ | 1089 | (data)->addr = cpu_to_scr(badd); \ |
1085 | (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \ | 1090 | (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \ |
1086 | } while (0) | 1091 | } while (0) |
1087 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 | 1092 | #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 |
1093 | #define DMA_DAC_MASK DMA_64BIT_MASK | ||
1088 | int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s); | 1094 | int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s); |
1089 | static __inline void | 1095 | static __inline void |
1090 | sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len) | 1096 | sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len) |
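DMA_DAC_MASK, defined above, pairs each addressing mode with a matching DMA mask (32, 40 or 64 bit). The glue code that consumes it is not part of this hunk, so the flow below is an assumption about the usual idiom of this era rather than the driver's actual code:

	/* Sketch (assumed usage): enable DAC only if the platform grants a wide mask. */
	static int demo_set_dma_mask(struct pci_dev *pdev, struct sym_hcb *np)
	{
		if (DMA_DAC_MASK != DMA_32BIT_MASK &&
		    !pci_set_dma_mask(pdev, DMA_DAC_MASK)) {
			set_dac(np);	/* remember that 64-bit SGEs may be built */
			return 0;
		}
		return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	}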
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c index 15d69298ab6e..5662fbb3ff60 100644 --- a/drivers/scsi/sym53c8xx_2/sym_nvram.c +++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c | |||
@@ -696,7 +696,7 @@ static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram) | |||
696 | u_short csum; | 696 | u_short csum; |
697 | int x; | 697 | int x; |
698 | 698 | ||
699 | switch (np->device_id) { | 699 | switch (np->pdev->device) { |
700 | case PCI_DEVICE_ID_NCR_53C885: | 700 | case PCI_DEVICE_ID_NCR_53C885: |
701 | case PCI_DEVICE_ID_NCR_53C895: | 701 | case PCI_DEVICE_ID_NCR_53C895: |
702 | case PCI_DEVICE_ID_NCR_53C896: | 702 | case PCI_DEVICE_ID_NCR_53C896: |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 37bddc1802de..81db48f07ca1 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -343,11 +343,12 @@ config IBMASR | |||
343 | module will be called ibmasr. | 343 | module will be called ibmasr. |
344 | 344 | ||
345 | config WAFER_WDT | 345 | config WAFER_WDT |
346 | tristate "ICP Wafer 5823 Single Board Computer Watchdog" | 346 | tristate "ICP Single Board Computer Watchdog Timer" |
347 | depends on X86 | 347 | depends on X86 |
348 | help | 348 | help |
349 | This is a driver for the hardware watchdog on the ICP Wafer 5823 | 349 | This is a driver for the hardware watchdog on the ICP Single |
350 | Single Board Computer (and probably other similar models). | 350 | Board Computer. This driver works on (at least) the following |
351 | ICP SBCs: Wafer 5823, Rocky 4783, Rocky 3703 and Rocky 3782. | ||
351 | 352 | ||
352 | To compile this driver as a module, choose M here: the | 353 | To compile this driver as a module, choose M here: the |
353 | module will be called wafer5823wdt. | 354 | module will be called wafer5823wdt. |
@@ -609,6 +610,12 @@ config WDT_RM9K_GPI | |||
609 | To compile this driver as a module, choose M here: the | 610 | To compile this driver as a module, choose M here: the |
610 | module will be called rm9k_wdt. | 611 | module will be called rm9k_wdt. |
611 | 612 | ||
613 | config AR7_WDT | ||
614 | tristate "TI AR7 Watchdog Timer" | ||
615 | depends on AR7 | ||
616 | help | ||
617 | Hardware driver for the TI AR7 Watchdog Timer. | ||
618 | |||
612 | # PARISC Architecture | 619 | # PARISC Architecture |
613 | 620 | ||
614 | # POWERPC Architecture | 621 | # POWERPC Architecture |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 389f8b14ccc4..7d9e5734f8bb 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -90,6 +90,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o | |||
90 | obj-$(CONFIG_INDYDOG) += indydog.o | 90 | obj-$(CONFIG_INDYDOG) += indydog.o |
91 | obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o | 91 | obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o |
92 | obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o | 92 | obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o |
93 | obj-$(CONFIG_AR7_WDT) += ar7_wdt.o | ||
93 | 94 | ||
94 | # PARISC Architecture | 95 | # PARISC Architecture |
95 | 96 | ||
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c new file mode 100644 index 000000000000..cdaab8c3d3d0 --- /dev/null +++ b/drivers/watchdog/ar7_wdt.c | |||
@@ -0,0 +1,349 @@ | |||
1 | /* | ||
2 | * drivers/watchdog/ar7_wdt.c | ||
3 | * | ||
4 | * Copyright (C) 2007 Nicolas Thill <nico@openwrt.org> | ||
5 | * Copyright (c) 2005 Enrik Berkhan <Enrik.Berkhan@akk.org> | ||
6 | * | ||
7 | * Some code taken from: | ||
8 | * National Semiconductor SCx200 Watchdog support | ||
9 | * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/moduleparam.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/miscdevice.h> | ||
31 | #include <linux/watchdog.h> | ||
32 | #include <linux/notifier.h> | ||
33 | #include <linux/reboot.h> | ||
34 | #include <linux/fs.h> | ||
35 | #include <linux/ioport.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/uaccess.h> | ||
38 | |||
39 | #include <asm/addrspace.h> | ||
40 | #include <asm/ar7/ar7.h> | ||
41 | |||
42 | #define DRVNAME "ar7_wdt" | ||
43 | #define LONGNAME "TI AR7 Watchdog Timer" | ||
44 | |||
45 | MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>"); | ||
46 | MODULE_DESCRIPTION(LONGNAME); | ||
47 | MODULE_LICENSE("GPL"); | ||
48 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
49 | |||
50 | static int margin = 60; | ||
51 | module_param(margin, int, 0); | ||
52 | MODULE_PARM_DESC(margin, "Watchdog margin in seconds"); | ||
53 | |||
54 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
55 | module_param(nowayout, int, 0); | ||
56 | MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); | ||
57 | |||
58 | #define READ_REG(x) readl((void __iomem *)&(x)) | ||
59 | #define WRITE_REG(x, v) writel((v), (void __iomem *)&(x)) | ||
60 | |||
61 | struct ar7_wdt { | ||
62 | u32 kick_lock; | ||
63 | u32 kick; | ||
64 | u32 change_lock; | ||
65 | u32 change; | ||
66 | u32 disable_lock; | ||
67 | u32 disable; | ||
68 | u32 prescale_lock; | ||
69 | u32 prescale; | ||
70 | }; | ||
71 | |||
72 | static struct semaphore open_semaphore; | ||
73 | static unsigned expect_close; | ||
74 | |||
75 | /* XXX currently fixed, allows max margin ~68.72 secs */ | ||
76 | #define prescale_value 0xffff | ||
77 | |||
78 | /* Offset of the WDT registers */ | ||
79 | static unsigned long ar7_regs_wdt; | ||
80 | /* Pointer to the remapped WDT IO space */ | ||
81 | static struct ar7_wdt *ar7_wdt; | ||
82 | static void ar7_wdt_get_regs(void) | ||
83 | { | ||
84 | u16 chip_id = ar7_chip_id(); | ||
85 | switch (chip_id) { | ||
86 | case AR7_CHIP_7100: | ||
87 | case AR7_CHIP_7200: | ||
88 | ar7_regs_wdt = AR7_REGS_WDT; | ||
89 | break; | ||
90 | default: | ||
91 | ar7_regs_wdt = UR8_REGS_WDT; | ||
92 | break; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | |||
97 | static void ar7_wdt_kick(u32 value) | ||
98 | { | ||
99 | WRITE_REG(ar7_wdt->kick_lock, 0x5555); | ||
100 | if ((READ_REG(ar7_wdt->kick_lock) & 3) == 1) { | ||
101 | WRITE_REG(ar7_wdt->kick_lock, 0xaaaa); | ||
102 | if ((READ_REG(ar7_wdt->kick_lock) & 3) == 3) { | ||
103 | WRITE_REG(ar7_wdt->kick, value); | ||
104 | return; | ||
105 | } | ||
106 | } | ||
107 | printk(KERN_ERR DRVNAME ": failed to unlock WDT kick reg\n"); | ||
108 | } | ||
109 | |||
110 | static void ar7_wdt_prescale(u32 value) | ||
111 | { | ||
112 | WRITE_REG(ar7_wdt->prescale_lock, 0x5a5a); | ||
113 | if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 1) { | ||
114 | WRITE_REG(ar7_wdt->prescale_lock, 0xa5a5); | ||
115 | if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 3) { | ||
116 | WRITE_REG(ar7_wdt->prescale, value); | ||
117 | return; | ||
118 | } | ||
119 | } | ||
120 | printk(KERN_ERR DRVNAME ": failed to unlock WDT prescale reg\n"); | ||
121 | } | ||
122 | |||
123 | static void ar7_wdt_change(u32 value) | ||
124 | { | ||
125 | WRITE_REG(ar7_wdt->change_lock, 0x6666); | ||
126 | if ((READ_REG(ar7_wdt->change_lock) & 3) == 1) { | ||
127 | WRITE_REG(ar7_wdt->change_lock, 0xbbbb); | ||
128 | if ((READ_REG(ar7_wdt->change_lock) & 3) == 3) { | ||
129 | WRITE_REG(ar7_wdt->change, value); | ||
130 | return; | ||
131 | } | ||
132 | } | ||
133 | printk(KERN_ERR DRVNAME ": failed to unlock WDT change reg\n"); | ||
134 | } | ||
135 | |||
136 | static void ar7_wdt_disable(u32 value) | ||
137 | { | ||
138 | WRITE_REG(ar7_wdt->disable_lock, 0x7777); | ||
139 | if ((READ_REG(ar7_wdt->disable_lock) & 3) == 1) { | ||
140 | WRITE_REG(ar7_wdt->disable_lock, 0xcccc); | ||
141 | if ((READ_REG(ar7_wdt->disable_lock) & 3) == 2) { | ||
142 | WRITE_REG(ar7_wdt->disable_lock, 0xdddd); | ||
143 | if ((READ_REG(ar7_wdt->disable_lock) & 3) == 3) { | ||
144 | WRITE_REG(ar7_wdt->disable, value); | ||
145 | return; | ||
146 | } | ||
147 | } | ||
148 | } | ||
149 | printk(KERN_ERR DRVNAME ": failed to unlock WDT disable reg\n"); | ||
150 | } | ||
151 | |||
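/*
 * Note on the register protocol used above: every watchdog register has
 * a companion *_lock register implementing a small unlock state machine.
 * Writing the first magic value moves the low two bits to 1, the second
 * to 3 (the disable register needs an extra intermediate write to reach
 * state 2 first), and only in state 3 does a write to the data register
 * take effect.  The magic constants are exactly the ones in the helpers
 * above; they are not documented anywhere else in this patch.
 */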
152 | static void ar7_wdt_update_margin(int new_margin) | ||
153 | { | ||
154 | u32 change; | ||
155 | |||
156 | change = new_margin * (ar7_vbus_freq() / prescale_value); | ||
157 | if (change < 1) change = 1; | ||
158 | if (change > 0xffff) change = 0xffff; | ||
159 | ar7_wdt_change(change); | ||
160 | margin = change * prescale_value / ar7_vbus_freq(); | ||
161 | printk(KERN_INFO DRVNAME | ||
162 | ": timer margin %d seconds (prescale %d, change %d, freq %d)\n", | ||
163 | margin, prescale_value, change, ar7_vbus_freq()); | ||
164 | } | ||
165 | |||
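/*
 * Worked example (the 62.5 MHz VBUS clock used here is an assumed figure
 * for illustration; ar7_wdt_update_margin() uses whatever ar7_vbus_freq()
 * reports at run time): with the prescaler fixed at 0xffff, one tick of
 * the 'change' register lasts 0xffff / 62500000 s, about 1.05 ms, so the
 * 16-bit register caps the margin at roughly 0xffff * 0xffff / 62500000
 * ~= 68.7 s -- the "max margin ~68.72 secs" noted above.  Requesting the
 * default 60 s margin gives change = 60 * (62500000 / 0xffff) = 57180,
 * which integer rounding reports back as a margin of 59 s.
 */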
166 | static void ar7_wdt_enable_wdt(void) | ||
167 | { | ||
168 | printk(KERN_DEBUG DRVNAME ": enabling watchdog timer\n"); | ||
169 | ar7_wdt_disable(1); | ||
170 | ar7_wdt_kick(1); | ||
171 | } | ||
172 | |||
173 | static void ar7_wdt_disable_wdt(void) | ||
174 | { | ||
175 | printk(KERN_DEBUG DRVNAME ": disabling watchdog timer\n"); | ||
176 | ar7_wdt_disable(0); | ||
177 | } | ||
178 | |||
179 | static int ar7_wdt_open(struct inode *inode, struct file *file) | ||
180 | { | ||
181 | /* only allow one at a time */ | ||
182 | if (down_trylock(&open_semaphore)) | ||
183 | return -EBUSY; | ||
184 | ar7_wdt_enable_wdt(); | ||
185 | expect_close = 0; | ||
186 | |||
187 | return nonseekable_open(inode, file); | ||
188 | } | ||
189 | |||
190 | static int ar7_wdt_release(struct inode *inode, struct file *file) | ||
191 | { | ||
192 | if (!expect_close) | ||
193 | printk(KERN_WARNING DRVNAME | ||
194 | ": watchdog device closed unexpectedly, " | ||
195 | "will not disable the watchdog timer\n"); | ||
196 | else if (!nowayout) | ||
197 | ar7_wdt_disable_wdt(); | ||
198 | |||
199 | up(&open_semaphore); | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int ar7_wdt_notify_sys(struct notifier_block *this, | ||
205 | unsigned long code, void *unused) | ||
206 | { | ||
207 | if (code == SYS_HALT || code == SYS_POWER_OFF) | ||
208 | if (!nowayout) | ||
209 | ar7_wdt_disable_wdt(); | ||
210 | |||
211 | return NOTIFY_DONE; | ||
212 | } | ||
213 | |||
214 | static struct notifier_block ar7_wdt_notifier = { | ||
215 | .notifier_call = ar7_wdt_notify_sys | ||
216 | }; | ||
217 | |||
218 | static ssize_t ar7_wdt_write(struct file *file, const char __user *data, | ||
219 | size_t len, loff_t *ppos) | ||
220 | { | ||
221 | /* check for a magic close character */ | ||
222 | if (len) { | ||
223 | size_t i; | ||
224 | |||
225 | ar7_wdt_kick(1); | ||
226 | |||
227 | expect_close = 0; | ||
228 | for (i = 0; i < len; ++i) { | ||
229 | char c; | ||
230 | if (get_user(c, data+i)) | ||
231 | return -EFAULT; | ||
232 | if (c == 'V') | ||
233 | expect_close = 1; | ||
234 | } | ||
235 | |||
236 | } | ||
237 | return len; | ||
238 | } | ||
239 | |||
240 | static int ar7_wdt_ioctl(struct inode *inode, struct file *file, | ||
241 | unsigned int cmd, unsigned long arg) | ||
242 | { | ||
243 | static struct watchdog_info ident = { | ||
244 | .identity = LONGNAME, | ||
245 | .firmware_version = 1, | ||
246 | .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING), | ||
247 | }; | ||
248 | int new_margin; | ||
249 | |||
250 | switch (cmd) { | ||
251 | default: | ||
252 | return -ENOTTY; | ||
253 | case WDIOC_GETSUPPORT: | ||
254 | if (copy_to_user((struct watchdog_info *)arg, &ident, | ||
255 | sizeof(ident))) | ||
256 | return -EFAULT; | ||
257 | return 0; | ||
258 | case WDIOC_GETSTATUS: | ||
259 | case WDIOC_GETBOOTSTATUS: | ||
260 | if (put_user(0, (int *)arg)) | ||
261 | return -EFAULT; | ||
262 | return 0; | ||
263 | case WDIOC_KEEPALIVE: | ||
264 | ar7_wdt_kick(1); | ||
265 | return 0; | ||
266 | case WDIOC_SETTIMEOUT: | ||
267 | if (get_user(new_margin, (int *)arg)) | ||
268 | return -EFAULT; | ||
269 | if (new_margin < 1) | ||
270 | return -EINVAL; | ||
271 | |||
272 | ar7_wdt_update_margin(new_margin); | ||
273 | ar7_wdt_kick(1); | ||
274 | |||
275 | case WDIOC_GETTIMEOUT: | ||
276 | if (put_user(margin, (int *)arg)) | ||
277 | return -EFAULT; | ||
278 | return 0; | ||
279 | } | ||
280 | } | ||
281 | |||
282 | static struct file_operations ar7_wdt_fops = { | ||
283 | .owner = THIS_MODULE, | ||
284 | .write = ar7_wdt_write, | ||
285 | .ioctl = ar7_wdt_ioctl, | ||
286 | .open = ar7_wdt_open, | ||
287 | .release = ar7_wdt_release, | ||
288 | }; | ||
289 | |||
290 | static struct miscdevice ar7_wdt_miscdev = { | ||
291 | .minor = WATCHDOG_MINOR, | ||
292 | .name = "watchdog", | ||
293 | .fops = &ar7_wdt_fops, | ||
294 | }; | ||
295 | |||
296 | static int __init ar7_wdt_init(void) | ||
297 | { | ||
298 | int rc; | ||
299 | |||
300 | ar7_wdt_get_regs(); | ||
301 | |||
302 | if (!request_mem_region(ar7_regs_wdt, sizeof(struct ar7_wdt), | ||
303 | LONGNAME)) { | ||
304 | printk(KERN_WARNING DRVNAME ": watchdog I/O region busy\n"); | ||
305 | return -EBUSY; | ||
306 | } | ||
307 | |||
308 | ar7_wdt = (struct ar7_wdt *) | ||
309 | ioremap(ar7_regs_wdt, sizeof(struct ar7_wdt)); | ||
310 | |||
311 | ar7_wdt_disable_wdt(); | ||
312 | ar7_wdt_prescale(prescale_value); | ||
313 | ar7_wdt_update_margin(margin); | ||
314 | |||
315 | sema_init(&open_semaphore, 1); | ||
316 | |||
317 | rc = register_reboot_notifier(&ar7_wdt_notifier); | ||
318 | if (rc) { | ||
319 | printk(KERN_ERR DRVNAME | ||
320 | ": unable to register reboot notifier\n"); | ||
321 | goto out_alloc; | ||
322 | } | ||
323 | |||
324 | rc = misc_register(&ar7_wdt_miscdev); | ||
325 | if (rc) { | ||
326 | printk(KERN_ERR DRVNAME ": unable to register misc device\n"); | ||
327 | goto out_register; | ||
328 | } | ||
329 | goto out; | ||
330 | |||
331 | out_register: | ||
332 | unregister_reboot_notifier(&ar7_wdt_notifier); | ||
333 | out_alloc: | ||
334 | iounmap(ar7_wdt); | ||
335 | release_mem_region(ar7_regs_wdt, sizeof(struct ar7_wdt)); | ||
336 | out: | ||
337 | return rc; | ||
338 | } | ||
339 | |||
340 | static void __exit ar7_wdt_cleanup(void) | ||
341 | { | ||
342 | misc_deregister(&ar7_wdt_miscdev); | ||
343 | unregister_reboot_notifier(&ar7_wdt_notifier); | ||
344 | iounmap(ar7_wdt); | ||
345 | release_mem_region(ar7_regs_wdt, sizeof(struct ar7_wdt)); | ||
346 | } | ||
347 | |||
348 | module_init(ar7_wdt_init); | ||
349 | module_exit(ar7_wdt_cleanup); | ||
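The driver registers the standard /dev/watchdog misc device, so any generic watchdog client can drive it. A minimal userspace sketch of the usual open/set-timeout/ping/magic-close sequence follows; it is a standalone example, not part of the patch:

	/* Sketch: minimal userspace watchdog client for /dev/watchdog. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 60;
		int i;
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0) {
			perror("open /dev/watchdog");
			return 1;
		}
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* request a 60 s margin */
		for (i = 0; i < 10; i++) {
			ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping the timer */
			sleep(5);
		}
		write(fd, "V", 1);	/* magic close: release() may disable the timer */
		close(fd);
		return 0;
	}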