Diffstat (limited to 'drivers')
82 files changed, 6184 insertions, 1525 deletions
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 11e4eb9f304e..06f212ff2b4f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -99,6 +99,7 @@ enum {
 	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
 	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
+	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 
@@ -113,11 +114,11 @@ enum {
 	PORT_TFDATA		= 0x20, /* taskfile data */
 	PORT_SIG		= 0x24, /* device TF signature */
 	PORT_CMD_ISSUE		= 0x38, /* command issue */
-	PORT_SCR		= 0x28, /* SATA phy register block */
 	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
 	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
 	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
 	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
+	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
 
 	/* PORT_IRQ_{STAT,MASK} bits */
 	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
@@ -216,8 +217,8 @@ struct ahci_port_priv {
 	unsigned int		ncq_saw_sdb:1;
 };
 
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static void ahci_irq_clear(struct ata_port *ap);
@@ -417,7 +418,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
 	/* ATI */
 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
-	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 */
+	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 IDE */
+	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700 AHCI */
+	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700 nraid5 */
+	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700 raid5 */
 
 	/* VIA */
 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -545,13 +549,19 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
 	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
 	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
 
-	/* some chips lie about 64bit support */
+	/* some chips have errata preventing 64bit use */
 	if ((cap & HOST_CAP_64) && (pi->flags & AHCI_FLAG_32BIT_ONLY)) {
 		dev_printk(KERN_INFO, &pdev->dev,
 			   "controller can't do 64bit DMA, forcing 32bit\n");
 		cap &= ~HOST_CAP_64;
 	}
 
+	if ((cap & HOST_CAP_NCQ) && (pi->flags & AHCI_FLAG_NO_NCQ)) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "controller can't do NCQ, turning off CAP_NCQ\n");
+		cap &= ~HOST_CAP_NCQ;
+	}
+
 	/* fixup zero port_map */
 	if (!port_map) {
 		port_map = (1 << ahci_nr_ports(cap)) - 1;
@@ -625,38 +635,45 @@ static void ahci_restore_initial_config(struct ata_host *host)
 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
 }
 
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
 {
-	unsigned int sc_reg;
-
-	switch (sc_reg_in) {
-	case SCR_STATUS:	sc_reg = 0; break;
-	case SCR_CONTROL:	sc_reg = 1; break;
-	case SCR_ERROR:		sc_reg = 2; break;
-	case SCR_ACTIVE:	sc_reg = 3; break;
-	default:
-		return 0xffffffffU;
-	}
+	static const int offset[] = {
+		[SCR_STATUS]		= PORT_SCR_STAT,
+		[SCR_CONTROL]		= PORT_SCR_CTL,
+		[SCR_ERROR]		= PORT_SCR_ERR,
+		[SCR_ACTIVE]		= PORT_SCR_ACT,
+		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
+	};
+	struct ahci_host_priv *hpriv = ap->host->private_data;
 
-	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+	if (sc_reg < ARRAY_SIZE(offset) &&
+	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+		return offset[sc_reg];
+	return 0;
 }
 
-
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
-			    u32 val)
+static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
-	unsigned int sc_reg;
-
-	switch (sc_reg_in) {
-	case SCR_STATUS:	sc_reg = 0; break;
-	case SCR_CONTROL:	sc_reg = 1; break;
-	case SCR_ERROR:		sc_reg = 2; break;
-	case SCR_ACTIVE:	sc_reg = 3; break;
-	default:
-		return;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	int offset = ahci_scr_offset(ap, sc_reg);
+
+	if (offset) {
+		*val = readl(port_mmio + offset);
+		return 0;
 	}
+	return -EINVAL;
+}
 
-	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	int offset = ahci_scr_offset(ap, sc_reg);
+
+	if (offset) {
+		writel(val, port_mmio + offset);
+		return 0;
+	}
+	return -EINVAL;
 }
 
 static void ahci_start_engine(struct ata_port *ap)
@@ -948,37 +965,87 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
-static int ahci_clo(struct ata_port *ap)
+static int ahci_kick_engine(struct ata_port *ap, int force_restart)
 {
 	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
 	struct ahci_host_priv *hpriv = ap->host->private_data;
 	u32 tmp;
+	int busy, rc;
+
+	/* do we need to kick the port? */
+	busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
+	if (!busy && !force_restart)
+		return 0;
+
+	/* stop engine */
+	rc = ahci_stop_engine(ap);
+	if (rc)
+		goto out_restart;
 
-	if (!(hpriv->cap & HOST_CAP_CLO))
-		return -EOPNOTSUPP;
+	/* need to do CLO? */
+	if (!busy) {
+		rc = 0;
+		goto out_restart;
+	}
 
+	if (!(hpriv->cap & HOST_CAP_CLO)) {
+		rc = -EOPNOTSUPP;
+		goto out_restart;
+	}
+
+	/* perform CLO */
 	tmp = readl(port_mmio + PORT_CMD);
 	tmp |= PORT_CMD_CLO;
 	writel(tmp, port_mmio + PORT_CMD);
 
+	rc = 0;
 	tmp = ata_wait_register(port_mmio + PORT_CMD,
 				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
 	if (tmp & PORT_CMD_CLO)
-		return -EIO;
+		rc = -EIO;
 
-	return 0;
+	/* restart engine */
+ out_restart:
+	ahci_start_engine(ap);
+	return rc;
 }
 
-static int ahci_softreset(struct ata_port *ap, unsigned int *class,
-			  unsigned long deadline)
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+				struct ata_taskfile *tf, int is_cmd, u16 flags,
+				unsigned long timeout_msec)
 {
+	const u32 cmd_fis_len = 5; /* five dwords */
 	struct ahci_port_priv *pp = ap->private_data;
 	void __iomem *port_mmio = ahci_port_base(ap);
-	const u32 cmd_fis_len = 5; /* five dwords */
+	u8 *fis = pp->cmd_tbl;
+	u32 tmp;
+
+	/* prep the command */
+	ata_tf_to_fis(tf, pmp, is_cmd, fis);
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+	/* issue & wait */
+	writel(1, port_mmio + PORT_CMD_ISSUE);
+
+	if (timeout_msec) {
+		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+					1, timeout_msec);
+		if (tmp & 0x1) {
+			ahci_kick_engine(ap, 1);
+			return -EBUSY;
+		}
+	} else
+		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
+
+	return 0;
+}
+
+static int ahci_do_softreset(struct ata_port *ap, unsigned int *class,
+			     int pmp, unsigned long deadline)
+{
 	const char *reason = NULL;
+	unsigned long now, msecs;
 	struct ata_taskfile tf;
-	u32 tmp;
-	u8 *fis;
 	int rc;
 
 	DPRINTK("ENTER\n");
@@ -990,43 +1057,22 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
 	}
 
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
-	rc = ahci_stop_engine(ap);
-	if (rc) {
-		reason = "failed to stop engine";
-		goto fail_restart;
-	}
-
-	/* check BUSY/DRQ, perform Command List Override if necessary */
-	if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
-		rc = ahci_clo(ap);
-
-		if (rc == -EOPNOTSUPP) {
-			reason = "port busy but CLO unavailable";
-			goto fail_restart;
-		} else if (rc) {
-			reason = "port busy but CLO failed";
-			goto fail_restart;
-		}
-	}
-
-	/* restart engine */
-	ahci_start_engine(ap);
+	rc = ahci_kick_engine(ap, 1);
+	if (rc)
+		ata_port_printk(ap, KERN_WARNING,
+				"failed to reset engine (errno=%d)", rc);
 
 	ata_tf_init(ap->device, &tf);
-	fis = pp->cmd_tbl;
 
 	/* issue the first D2H Register FIS */
-	ahci_fill_cmd_slot(pp, 0,
-			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+	msecs = 0;
+	now = jiffies;
+	if (time_after(now, deadline))
+		msecs = jiffies_to_msecs(deadline - now);
 
 	tf.ctl |= ATA_SRST;
-	ata_tf_to_fis(&tf, fis, 0);
-	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */
-
-	writel(1, port_mmio + PORT_CMD_ISSUE);
-
-	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
-	if (tmp & 0x1) {
+	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
 		rc = -EIO;
 		reason = "1st FIS failed";
 		goto fail;
@@ -1036,14 +1082,8 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
 	msleep(1);
 
 	/* issue the second D2H Register FIS */
-	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
-
 	tf.ctl &= ~ATA_SRST;
-	ata_tf_to_fis(&tf, fis, 0);
-	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */
-
-	writel(1, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
+	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
 
 	/* spec mandates ">= 2ms" before checking status.
 	 * We wait 150ms, because that was the magic delay used for
@@ -1066,13 +1106,17 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
 	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
 
- fail_restart:
-	ahci_start_engine(ap);
  fail:
 	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
+static int ahci_softreset(struct ata_port *ap, unsigned int *class,
+			  unsigned long deadline)
+{
+	return ahci_do_softreset(ap, class, 0, deadline);
+}
+
 static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
 			  unsigned long deadline)
 {
@@ -1088,7 +1132,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
 	/* clear D2H reception area to properly wait for D2H FIS */
 	ata_tf_init(ap->device, &tf);
 	tf.command = 0x80;
-	ata_tf_to_fis(&tf, d2h_fis, 0);
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 
 	rc = sata_std_hardreset(ap, class, deadline);
 
@@ -1106,6 +1150,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
 static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
 				 unsigned long deadline)
 {
+	u32 serror;
 	int rc;
 
 	DPRINTK("ENTER\n");
@@ -1116,7 +1161,8 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
 				  deadline);
 
 	/* vt8251 needs SError cleared for the port to operate */
-	ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
+	ahci_scr_read(ap, SCR_ERROR, &serror);
+	ahci_scr_write(ap, SCR_ERROR, serror);
 
 	ahci_start_engine(ap);
 
@@ -1205,7 +1251,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	 */
 	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
 
-	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
+	ata_tf_to_fis(&qc->tf, 0, 1, cmd_tbl);
 	if (is_atapi) {
 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
@@ -1238,7 +1284,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 	ata_ehi_clear_desc(ehi);
 
 	/* AHCI needs SError cleared; otherwise, it might lock up */
-	serror = ahci_scr_read(ap, SCR_ERROR);
+	ahci_scr_read(ap, SCR_ERROR, &serror);
 	ahci_scr_write(ap, SCR_ERROR, serror);
 
 	/* analyze @irq_stat */
@@ -1262,12 +1308,12 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 	if (irq_stat & PORT_IRQ_IF_ERR) {
 		err_mask |= AC_ERR_ATA_BUS;
 		action |= ATA_EH_SOFTRESET;
-		ata_ehi_push_desc(ehi, ", interface fatal error");
+		ata_ehi_push_desc(ehi, "interface fatal error");
 	}
 
 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
 		ata_ehi_hotplugged(ehi);
-		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+		ata_ehi_push_desc(ehi, "%s", irq_stat & PORT_IRQ_CONNECT ?
 			"connection status changed" : "PHY RDY changed");
 	}
 
@@ -1276,7 +1322,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 
 		err_mask |= AC_ERR_HSM;
 		action |= ATA_EH_SOFTRESET;
-		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+		ata_ehi_push_desc(ehi, "unknown FIS %08x %08x %08x %08x",
 				  unk[0], unk[1], unk[2], unk[3]);
 	}
 
@@ -1512,11 +1558,17 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
-	if (qc->flags & ATA_QCFLAG_FAILED) {
-		/* make DMA engine forget about the failed command */
-		ahci_stop_engine(ap);
-		ahci_start_engine(ap);
-	}
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		ahci_kick_engine(ap, 1);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+	ahci_power_up(ap);
+	ahci_start_port(ap);
+
+	return 0;
 }
 
 #ifdef CONFIG_PM
@@ -1536,14 +1588,6 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 	return rc;
 }
 
-static int ahci_port_resume(struct ata_port *ap)
-{
-	ahci_power_up(ap);
-	ahci_start_port(ap);
-
-	return 0;
-}
-
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
@@ -1734,12 +1778,13 @@ static void ahci_print_info(struct ata_host *host)
 
 	dev_printk(KERN_INFO, &pdev->dev,
 		"flags: "
-		"%s%s%s%s%s%s"
+		"%s%s%s%s%s%s%s"
 		"%s%s%s%s%s%s%s\n"
 		,
 
 		cap & (1 << 31) ? "64bit " : "",
 		cap & (1 << 30) ? "ncq " : "",
+		cap & (1 << 29) ? "sntf " : "",
 		cap & (1 << 28) ? "ilck " : "",
 		cap & (1 << 27) ? "stag " : "",
 		cap & (1 << 26) ? "pm " : "",
@@ -1794,7 +1839,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ahci_save_initial_config(pdev, &pi, hpriv);
 
 	/* prepare host */
-	if (!(pi.flags & AHCI_FLAG_NO_NCQ) && (hpriv->cap & HOST_CAP_NCQ))
+	if (hpriv->cap & HOST_CAP_NCQ)
 		pi.flags |= ATA_FLAG_NCQ;
 
 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map));
@@ -1808,10 +1853,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		void __iomem *port_mmio = ahci_port_base(ap);
 
 		/* standard SATA port setup */
-		if (hpriv->port_map & (1 << i)) {
+		if (hpriv->port_map & (1 << i))
 			ap->ioaddr.cmd_addr = port_mmio;
-			ap->ioaddr.scr_addr = port_mmio + PORT_SCR;
-		}
 
 		/* disabled/not-implemented port */
 		else
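
Note: the ahci.c hunks above convert ahci_scr_read()/ahci_scr_write() from a value-returning interface (u32, with 0xffffffff as the error sentinel) into error-code-returning helpers, and expose SCR_NOTIFICATION only when HOST_CAP_SNTF is set. The following is a minimal caller-side sketch of the new convention, illustration only and not part of the patch; example_clear_serror() is a hypothetical helper mirroring the pattern used by ahci_vt8251_hardreset() and ahci_error_intr() after this change.

	/* Illustration only -- not part of the patch. */
	static void example_clear_serror(struct ata_port *ap)
	{
		u32 serror;

		/* returns 0 on success, -EINVAL for an unsupported register
		 * (e.g. SCR_NOTIFICATION without HOST_CAP_SNTF) */
		if (ahci_scr_read(ap, SCR_ERROR, &serror) == 0)
			ahci_scr_write(ap, SCR_ERROR, serror);	/* write back to clear */
	}
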
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 88e2dd0983b5..6001aae0b884 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -111,8 +111,9 @@ MODULE_VERSION(DRV_VERSION);
 /**
  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
  *	@tf: Taskfile to convert
- *	@fis: Buffer into which data will output
  *	@pmp: Port multiplier port
+ *	@is_cmd: This FIS is for command
+ *	@fis: Buffer into which data will output
  *
  *	Converts a standard ATA taskfile to a Serial ATA
  *	FIS structure (Register - Host to Device).
@@ -120,12 +121,13 @@ MODULE_VERSION(DRV_VERSION);
  *	LOCKING:
  *	Inherited from caller.
  */
-
-void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
 {
 	fis[0] = 0x27;			/* Register - Host to Device FIS */
-	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
-					    bit 7 indicates Command FIS */
+	fis[1] = pmp & 0xf;		/* Port multiplier number*/
+	if (is_cmd)
+		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
+
 	fis[2] = tf->command;
 	fis[3] = tf->feature;
 
@@ -2387,21 +2389,35 @@ int sata_down_spd_limit(struct ata_port *ap)
 	u32 sstatus, spd, mask;
 	int rc, highbit;
 
+	if (!sata_scr_valid(ap))
+		return -EOPNOTSUPP;
+
+	/* If SCR can be read, use it to determine the current SPD.
+	 * If not, use cached value in ap->sata_spd.
+	 */
 	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
-	if (rc)
-		return rc;
+	if (rc == 0)
+		spd = (sstatus >> 4) & 0xf;
+	else
+		spd = ap->sata_spd;
 
 	mask = ap->sata_spd_limit;
 	if (mask <= 1)
 		return -EINVAL;
+
+	/* unconditionally mask off the highest bit */
 	highbit = fls(mask) - 1;
 	mask &= ~(1 << highbit);
 
-	spd = (sstatus >> 4) & 0xf;
-	if (spd <= 1)
-		return -EINVAL;
-	spd--;
-	mask &= (1 << spd) - 1;
+	/* Mask off all speeds higher than or equal to the current
+	 * one.  Force 1.5Gbps if current SPD is not available.
+	 */
+	if (spd > 1)
+		mask &= (1 << (spd - 1)) - 1;
+	else
+		mask &= 1;
+
+	/* were we already at the bottom? */
 	if (!mask)
 		return -EINVAL;
 
@@ -3251,9 +3267,11 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
 		last = cur;
 		last_jiffies = jiffies;
 
-		/* check deadline */
+		/* Check deadline.  If debouncing failed, return
+		 * -EPIPE to tell upper layer to lower link speed.
+		 */
 		if (time_after(jiffies, deadline))
-			return -EBUSY;
+			return -EPIPE;
 	}
 }
 
@@ -3769,6 +3787,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
 	{ "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
 	{ "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
+	{ "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
 
 	/* Devices with NCQ limits */
 
@@ -5729,10 +5748,8 @@ int sata_scr_valid(struct ata_port *ap)
  */
 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
 {
-	if (sata_scr_valid(ap)) {
-		*val = ap->ops->scr_read(ap, reg);
-		return 0;
-	}
+	if (sata_scr_valid(ap))
+		return ap->ops->scr_read(ap, reg, val);
 	return -EOPNOTSUPP;
 }
 
@@ -5754,10 +5771,8 @@ int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
  */
 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
 {
-	if (sata_scr_valid(ap)) {
-		ap->ops->scr_write(ap, reg, val);
-		return 0;
-	}
+	if (sata_scr_valid(ap))
+		return ap->ops->scr_write(ap, reg, val);
 	return -EOPNOTSUPP;
 }
 
@@ -5778,10 +5793,13 @@ int sata_scr_write(struct ata_port *ap, int reg, u32 val)
  */
 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
 {
+	int rc;
+
 	if (sata_scr_valid(ap)) {
-		ap->ops->scr_write(ap, reg, val);
-		ap->ops->scr_read(ap, reg);
-		return 0;
+		rc = ap->ops->scr_write(ap, reg, val);
+		if (rc == 0)
+			rc = ap->ops->scr_read(ap, reg, &val);
+		return rc;
 	}
 	return -EOPNOTSUPP;
 }
@@ -5993,6 +6011,7 @@ void ata_dev_init(struct ata_device *dev)
 
 	/* SATA spd limit is bound to the first device */
 	ap->sata_spd_limit = ap->hw_sata_spd_limit;
+	ap->sata_spd = 0;
 
 	/* High bits of dev->flags are used to record warm plug
 	 * requests which occur asynchronously.  Synchronize using
@@ -6058,6 +6077,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
+	init_timer_deferrable(&ap->fastdrain_timer);
+	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
+	ap->fastdrain_timer.data = (unsigned long)ap;
 
 	ap->cbl = ATA_CBL_NONE;
 
@@ -6434,7 +6456,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
 
-		ata_scsi_scan_host(ap);
+		ata_scsi_scan_host(ap, 1);
 	}
 
 	return 0;
@@ -6942,6 +6964,9 @@ EXPORT_SYMBOL_GPL(ata_pci_default_filter);
 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
 #endif /* CONFIG_PCI */
 
+EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
 EXPORT_SYMBOL_GPL(ata_eng_timeout);
 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
 EXPORT_SYMBOL_GPL(ata_port_abort);
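
Note: the sata_down_spd_limit() hunk above reworks the speed-mask arithmetic. A worked example of the new masking, illustration only and not part of the patch; example_down_spd_mask() is a hypothetical helper that mirrors the new logic for sata_spd_limit = 0x3 (1.5 and 3.0 Gbps allowed) and a current SPD of 2 (3.0 Gbps) read from SStatus.

	/* Illustration only -- not part of the patch. */
	static u32 example_down_spd_mask(u32 mask, unsigned int spd)
	{
		mask &= ~(1 << (fls(mask) - 1));	/* drop highest bit: 0x3 -> 0x1 */

		if (spd > 1)				/* keep only speeds below current */
			mask &= (1 << (spd - 1)) - 1;	/* (1 << 1) - 1 = 0x1 */
		else
			mask &= 1;			/* SPD unknown: force 1.5 Gbps */

		return mask;				/* 0x1, i.e. limit to 1.5 Gbps */
	}
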
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9aa62a0754f6..ac6ceed4bb60 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -56,6 +56,7 @@ enum {
  */
 enum {
 	ATA_EH_PRERESET_TIMEOUT		= 10 * HZ,
+	ATA_EH_FASTDRAIN_INTERVAL	= 3 * HZ,
 };
 
 /* The following table determines how we sequence resets.  Each entry
@@ -85,6 +86,71 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 { }
 #endif /* CONFIG_PM */
 
+static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
+				 va_list args)
+{
+	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
+				    ATA_EH_DESC_LEN - ehi->desc_len,
+				    fmt, args);
+}
+
+/**
+ *	__ata_ehi_push_desc - push error description without adding separator
+ *	@ehi: target EHI
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to @ehi->desc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ *	ata_ehi_push_desc - push error description with separator
+ *	@ehi: target EHI
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to @ehi->desc.
+ *	If @ehi->desc is not empty, ", " is added in-between.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+	va_list args;
+
+	if (ehi->desc_len)
+		__ata_ehi_push_desc(ehi, ", ");
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ *	ata_ehi_clear_desc - clean error description
+ *	@ehi: target EHI
+ *
+ *	Clear @ehi->desc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_ehi_clear_desc(struct ata_eh_info *ehi)
+{
+	ehi->desc[0] = '\0';
+	ehi->desc_len = 0;
+}
+
 static void ata_ering_record(struct ata_ering *ering, int is_io,
 			     unsigned int err_mask)
 {
@@ -296,6 +362,9 @@ void ata_scsi_error(struct Scsi_Host *host)
  repeat:
 	/* invoke error handler */
 	if (ap->ops->error_handler) {
+		/* kill fast drain timer */
+		del_timer_sync(&ap->fastdrain_timer);
+
 		/* process port resume request */
 		ata_eh_handle_port_resume(ap);
 
@@ -511,6 +580,94 @@ void ata_eng_timeout(struct ata_port *ap)
 	DPRINTK("EXIT\n");
 }
 
+static int ata_eh_nr_in_flight(struct ata_port *ap)
+{
+	unsigned int tag;
+	int nr = 0;
+
+	/* count only non-internal commands */
+	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
+		if (ata_qc_from_tag(ap, tag))
+			nr++;
+
+	return nr;
+}
+
+void ata_eh_fastdrain_timerfn(unsigned long arg)
+{
+	struct ata_port *ap = (void *)arg;
+	unsigned long flags;
+	int cnt;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	cnt = ata_eh_nr_in_flight(ap);
+
+	/* are we done? */
+	if (!cnt)
+		goto out_unlock;
+
+	if (cnt == ap->fastdrain_cnt) {
+		unsigned int tag;
+
+		/* No progress during the last interval, tag all
+		 * in-flight qcs as timed out and freeze the port.
+		 */
+		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
+			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+			if (qc)
+				qc->err_mask |= AC_ERR_TIMEOUT;
+		}
+
+		ata_port_freeze(ap);
+	} else {
+		/* some qcs have finished, give it another chance */
+		ap->fastdrain_cnt = cnt;
+		ap->fastdrain_timer.expires =
+			jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+		add_timer(&ap->fastdrain_timer);
+	}
+
+ out_unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
+ *	@ap: target ATA port
+ *	@fastdrain: activate fast drain
+ *
+ *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
+ *	is non-zero and EH wasn't pending before.  Fast drain ensures
+ *	that EH kicks in in timely manner.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+{
+	int cnt;
+
+	/* already scheduled? */
+	if (ap->pflags & ATA_PFLAG_EH_PENDING)
+		return;
+
+	ap->pflags |= ATA_PFLAG_EH_PENDING;
+
+	if (!fastdrain)
+		return;
+
+	/* do we have in-flight qcs? */
+	cnt = ata_eh_nr_in_flight(ap);
+	if (!cnt)
+		return;
+
+	/* activate fast drain */
+	ap->fastdrain_cnt = cnt;
+	ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+	add_timer(&ap->fastdrain_timer);
+}
+
 /**
  *	ata_qc_schedule_eh - schedule qc for error handling
  *	@qc: command to schedule error handling for
@@ -528,7 +685,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	WARN_ON(!ap->ops->error_handler);
 
 	qc->flags |= ATA_QCFLAG_FAILED;
-	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
+	ata_eh_set_pending(ap, 1);
 
 	/* The following will fail if timeout has already expired.
 	 * ata_scsi_error() takes care of such scmds on EH entry.
@@ -555,7 +712,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
 		return;
 
-	ap->pflags |= ATA_PFLAG_EH_PENDING;
+	ata_eh_set_pending(ap, 1);
 	scsi_schedule_eh(ap->scsi_host);
 
 	DPRINTK("port EH scheduled\n");
@@ -579,6 +736,9 @@ int ata_port_abort(struct ata_port *ap)
 
 	WARN_ON(!ap->ops->error_handler);
 
+	/* we're gonna abort all commands, no need for fast drain */
+	ata_eh_set_pending(ap, 0);
+
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
 
@@ -1130,7 +1290,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
 	/* we've got the perpetrator, condemn it */
 	qc = __ata_qc_from_tag(ap, tag);
 	memcpy(&qc->result_tf, &tf, sizeof(tf));
-	qc->err_mask |= AC_ERR_DEV;
+	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
 	ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1413,8 +1573,12 @@ static void ata_eh_autopsy(struct ata_port *ap)
 	if (rc == 0) {
 		ehc->i.serror |= serror;
 		ata_eh_analyze_serror(ap);
-	} else if (rc != -EOPNOTSUPP)
+	} else if (rc != -EOPNOTSUPP) {
+		/* SError read failed, force hardreset and probing */
+		ata_ehi_schedule_probe(&ehc->i);
 		ehc->i.action |= ATA_EH_HARDRESET;
+		ehc->i.err_mask |= AC_ERR_OTHER;
+	}
 
 	/* analyze NCQ failure */
 	ata_eh_analyze_ncq_error(ap);
@@ -1524,14 +1688,14 @@ static void ata_eh_report(struct ata_port *ap)
 			       ehc->i.err_mask, ap->sactive, ehc->i.serror,
 			       ehc->i.action, frozen);
 		if (desc)
-			ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
+			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
 	} else {
 		ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
 				"SAct 0x%x SErr 0x%x action 0x%x%s\n",
 				ehc->i.err_mask, ap->sactive, ehc->i.serror,
 				ehc->i.action, frozen);
 		if (desc)
-			ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
+			ata_port_printk(ap, KERN_ERR, "%s\n", desc);
 	}
 
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
@@ -1551,7 +1715,7 @@ static void ata_eh_report(struct ata_port *ap)
 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
 			"tag %d cdb 0x%x data %u %s\n "
 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
-			"Emask 0x%x (%s)\n",
+			"Emask 0x%x (%s)%s\n",
 			cmd->command, cmd->feature, cmd->nsect,
 			cmd->lbal, cmd->lbam, cmd->lbah,
 			cmd->hob_feature, cmd->hob_nsect,
@@ -1562,7 +1726,8 @@ static void ata_eh_report(struct ata_port *ap)
 			res->lbal, res->lbam, res->lbah,
 			res->hob_feature, res->hob_nsect,
 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
-			res->device, qc->err_mask, ata_err_string(qc->err_mask));
+			res->device, qc->err_mask, ata_err_string(qc->err_mask),
+			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
 	}
 }
 
@@ -1648,7 +1813,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 		} else
 			ata_port_printk(ap, KERN_ERR,
 					"prereset failed (errno=%d)\n", rc);
-			return rc;
+			goto out;
 		}
 	}
 
@@ -1661,7 +1826,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 		/* prereset told us not to reset, bang classes and return */
 		for (i = 0; i < ATA_MAX_DEVICES; i++)
 			classes[i] = ATA_DEV_NONE;
-		return 0;
+		rc = 0;
+		goto out;
 	}
 
 	/* did prereset() screw up?  if so, fix up to avoid oopsing */
@@ -1697,7 +1863,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 			ata_port_printk(ap, KERN_ERR,
 					"follow-up softreset required "
 					"but no softreset avaliable\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto out;
 		}
 
 		ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
@@ -1707,7 +1874,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 		    classes[0] == ATA_DEV_UNKNOWN) {
 			ata_port_printk(ap, KERN_ERR,
 					"classification failed\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto out;
 		}
 	}
 
@@ -1724,7 +1892,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 			schedule_timeout_uninterruptible(delta);
 		}
 
-		if (reset == hardreset &&
+		if (rc == -EPIPE ||
 		    try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1)
 			sata_down_spd_limit(ap);
 		if (hardreset)
@@ -1733,12 +1901,18 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 	}
 
 	if (rc == 0) {
+		u32 sstatus;
+
 		/* After the reset, the device state is PIO 0 and the
 		 * controller state is undefined.  Record the mode.
 		 */
 		for (i = 0; i < ATA_MAX_DEVICES; i++)
 			ap->device[i].pio_mode = XFER_PIO_0;
 
+		/* record current link speed */
+		if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
+			ap->sata_spd = (sstatus >> 4) & 0xf;
+
 		if (postreset)
 			postreset(ap, classes);
 
@@ -1746,7 +1920,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 		ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
 		ehc->i.action |= ATA_EH_REVALIDATE;
 	}
-
+ out:
+	/* clear hotplug flag */
+	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
 	return rc;
 }
 
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cfde22da07ac..12ac0b511f79 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2947,17 +2947,22 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
 	return rc;
 }
 
-void ata_scsi_scan_host(struct ata_port *ap)
+void ata_scsi_scan_host(struct ata_port *ap, int sync)
 {
+	int tries = 5;
+	struct ata_device *last_failed_dev = NULL;
+	struct ata_device *dev;
 	unsigned int i;
 
 	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
+ repeat:
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
 		struct scsi_device *sdev;
 
+		dev = &ap->device[i];
+
 		if (!ata_dev_enabled(dev) || dev->sdev)
 			continue;
 
@@ -2967,6 +2972,45 @@ void ata_scsi_scan_host(struct ata_port *ap)
 			scsi_device_put(sdev);
 		}
 	}
+
+	/* If we scanned while EH was in progress or allocation
+	 * failure occurred, scan would have failed silently.  Check
+	 * whether all devices are attached.
+	 */
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+		if (ata_dev_enabled(dev) && !dev->sdev)
+			break;
+	}
+	if (i == ATA_MAX_DEVICES)
+		return;
+
+	/* we're missing some SCSI devices */
+	if (sync) {
+		/* If caller requested synchrnous scan && we've made
+		 * any progress, sleep briefly and repeat.
+		 */
+		if (dev != last_failed_dev) {
+			msleep(100);
+			last_failed_dev = dev;
+			goto repeat;
+		}
+
+		/* We might be failing to detect boot device, give it
+		 * a few more chances.
+		 */
+		if (--tries) {
+			msleep(100);
+			goto repeat;
+		}
+
+		ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
+				"failed without making any progress,\n"
+				"                  switching to async\n");
+	}
+
+	queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+			   round_jiffies_relative(HZ));
 }
 
 /**
@@ -3093,20 +3137,7 @@ void ata_scsi_hotplug(struct work_struct *work)
 	}
 
 	/* scan for new ones */
-	ata_scsi_scan_host(ap);
-
-	/* If we scanned while EH was in progress, scan would have
-	 * failed silently.  Requeue if there are enabled but
-	 * unattached devices.
-	 */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-		if (ata_dev_enabled(dev) && !dev->sdev) {
-			queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
-					   round_jiffies_relative(HZ));
-			break;
-		}
-	}
+	ata_scsi_scan_host(ap, 0);
 
 	DPRINTK("EXIT\n");
 }
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index ca7d2245d684..6c289c7b1322 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1,5 +1,5 @@
 /*
- *  libata-bmdma.c - helper library for PCI IDE BMDMA
+ *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
@@ -211,6 +211,8 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
 		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
 		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+		iowrite8(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
 	}
 }
 
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ba17fc5f2e99..564cd234c805 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -112,7 +112,7 @@ static inline int ata_acpi_on_devcfg(struct ata_device *adev) { return 0; }
 /* libata-scsi.c */
 extern int ata_scsi_add_hosts(struct ata_host *host,
 			      struct scsi_host_template *sht);
-extern void ata_scsi_scan_host(struct ata_port *ap);
+extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -151,6 +151,7 @@ extern int ata_bus_probe(struct ata_port *ap);
 extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
 extern void ata_port_wait_eh(struct ata_port *ap);
+extern void ata_eh_fastdrain_timerfn(unsigned long arg);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
 
 /* libata-sff.c */
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 79f841bca593..a909f793ffc1 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -213,8 +213,9 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
 	pata_platform_setup_port(&ap->ioaddr, pp_info);
 
 	/* activate */
-	return ata_host_activate(host, platform_get_irq(pdev, 0), ata_interrupt,
-				 pp_info->irq_flags, &pata_platform_sht);
+	return ata_host_activate(host, platform_get_irq(pdev, 0),
+				 ata_interrupt, pp_info ? pp_info->irq_flags
+				 : 0, &pata_platform_sht);
 }
 
 /**
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index c55667e0eb65..36cdbd2b0bd5 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -238,12 +238,6 @@ static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	else
 		offset = 0;	/* 100MHz */
 
-	/* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
-	if (adev->class == ATA_DEV_ATAPI && speed > XFER_UDMA_4) {
-		printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
-		speed = XFER_UDMA_4;
-	}
-
 	if (speed >= XFER_UDMA_0)
 		idx = speed - XFER_UDMA_0;
 	else
@@ -264,6 +258,17 @@ static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 		   JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
 }
 
+unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+	/* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
+	if (adev->class == ATA_DEV_ATAPI &&
+	    (mask & (0xE0 << ATA_SHIFT_UDMA))) {
+		printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
+		mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+	}
+	return ata_pci_default_filter(adev, mask);
+}
+
 /**
  *	scc_tf_load - send taskfile registers to host controller
  *	@ap: Port to which output is sent
@@ -358,6 +363,8 @@ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
 		tf->hob_lbal = in_be32(ioaddr->lbal_addr);
 		tf->hob_lbam = in_be32(ioaddr->lbam_addr);
 		tf->hob_lbah = in_be32(ioaddr->lbah_addr);
+		out_be32(ioaddr->ctl_addr, tf->ctl);
+		ap->last_ctl = tf->ctl;
 	}
 }
 
@@ -741,7 +748,7 @@ static u8 scc_bmdma_status (struct ata_port *ap)
 		return host_stat;
 
 	/* errata A252,A308 workaround: Step4 */
-	if (ata_altstatus(ap) & ATA_ERR && int_status & INTSTS_INTRQ)
+	if ((ata_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
 		return (host_stat | ATA_DMA_INTR);
 
 	/* errata A308 workaround Step5 */
@@ -752,11 +759,11 @@ static u8 scc_bmdma_status (struct ata_port *ap)
 	if ((qc->tf.protocol == ATA_PROT_DMA &&
 	     qc->dev->xfer_mode > XFER_UDMA_4)) {
 		if (!(int_status & INTSTS_ACTEINT)) {
-			printk(KERN_WARNING "ata%u: data lost occurred. (ACTEINT==0, retry:%d)\n",
-			       ap->print_id, retry);
+			printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
+			       ap->print_id);
 			host_stat |= ATA_DMA_ERR;
 			if (retry++)
-				ap->udma_mask >>= 1;
+				ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
 		} else
 			retry = 0;
 	}
@@ -1016,7 +1023,7 @@ static const struct ata_port_operations scc_pata_ops = { | |||
1016 | .port_disable = ata_port_disable, | 1023 | .port_disable = ata_port_disable, |
1017 | .set_piomode = scc_set_piomode, | 1024 | .set_piomode = scc_set_piomode, |
1018 | .set_dmamode = scc_set_dmamode, | 1025 | .set_dmamode = scc_set_dmamode, |
1019 | .mode_filter = ata_pci_default_filter, | 1026 | .mode_filter = scc_mode_filter, |
1020 | 1027 | ||
1021 | .tf_load = scc_tf_load, | 1028 | .tf_load = scc_tf_load, |
1022 | .tf_read = scc_tf_read, | 1029 | .tf_read = scc_tf_read, |
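Editor's note on the pata_scc hunk above: scc_mode_filter clears the UDMA5–UDMA7 bits (the 0xE0 group) from the transfer-mode mask, so libata never offers those modes to ATAPI devices, instead of silently downgrading afterwards in scc_set_dmamode. The standalone C sketch below only illustrates that mask arithmetic; the shift value is a local placeholder for the demo, not taken from the kernel headers.

#include <stdio.h>

/* Placeholder shift for this standalone demo only; the real value
 * comes from <linux/ata.h>. */
#define DEMO_SHIFT_UDMA 8

int main(void)
{
	/* mask with UDMA0..UDMA6 enabled (bits 0..6 of the UDMA group) */
	unsigned long mask = 0x7ful << DEMO_SHIFT_UDMA;

	/* the filter clears UDMA5..UDMA7 (bit pattern 0xE0) for ATAPI */
	mask &= ~(0xE0ul << DEMO_SHIFT_UDMA);

	for (int i = 0; i < 8; i++)
		printf("UDMA%d %s\n", i,
		       (mask >> (DEMO_SHIFT_UDMA + i)) & 1 ? "allowed" : "filtered");
	return 0;
}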
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 3de183461c3c..a9c948d7604a 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c | |||
@@ -190,34 +190,34 @@ static void inic_reset_port(void __iomem *port_base) | |||
190 | writew(ctl, idma_ctl); | 190 | writew(ctl, idma_ctl); |
191 | } | 191 | } |
192 | 192 | ||
193 | static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg) | 193 | static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) |
194 | { | 194 | { |
195 | void __iomem *scr_addr = ap->ioaddr.scr_addr; | 195 | void __iomem *scr_addr = ap->ioaddr.scr_addr; |
196 | void __iomem *addr; | 196 | void __iomem *addr; |
197 | u32 val; | ||
198 | 197 | ||
199 | if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) | 198 | if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) |
200 | return 0xffffffffU; | 199 | return -EINVAL; |
201 | 200 | ||
202 | addr = scr_addr + scr_map[sc_reg] * 4; | 201 | addr = scr_addr + scr_map[sc_reg] * 4; |
203 | val = readl(scr_addr + scr_map[sc_reg] * 4); | 202 | *val = readl(scr_addr + scr_map[sc_reg] * 4); |
204 | 203 | ||
205 | /* this controller has stuck DIAG.N, ignore it */ | 204 | /* this controller has stuck DIAG.N, ignore it */ |
206 | if (sc_reg == SCR_ERROR) | 205 | if (sc_reg == SCR_ERROR) |
207 | val &= ~SERR_PHYRDY_CHG; | 206 | *val &= ~SERR_PHYRDY_CHG; |
208 | return val; | 207 | return 0; |
209 | } | 208 | } |
210 | 209 | ||
211 | static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) | 210 | static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) |
212 | { | 211 | { |
213 | void __iomem *scr_addr = ap->ioaddr.scr_addr; | 212 | void __iomem *scr_addr = ap->ioaddr.scr_addr; |
214 | void __iomem *addr; | 213 | void __iomem *addr; |
215 | 214 | ||
216 | if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) | 215 | if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) |
217 | return; | 216 | return -EINVAL; |
218 | 217 | ||
219 | addr = scr_addr + scr_map[sc_reg] * 4; | 218 | addr = scr_addr + scr_map[sc_reg] * 4; |
220 | writel(val, scr_addr + scr_map[sc_reg] * 4); | 219 | writel(val, scr_addr + scr_map[sc_reg] * 4); |
220 | return 0; | ||
221 | } | 221 | } |
222 | 222 | ||
223 | /* | 223 | /* |
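Editor's note: the sata_inic162x hunk is one instance of the tree-wide ->scr_read/->scr_write conversion visible throughout this patch. Instead of returning a magic 0xffffffff (which is also a legal register value), the callbacks now return 0 or a negative errno and hand the register contents back through a u32 pointer. A minimal user-space mock of the new calling convention is sketched below; the names are invented for the demo and are not libata symbols.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Mock of the new callback shape: 0 on success, -EINVAL for a bad
 * register index, value returned through *val. */
static int demo_scr_read(unsigned int sc_reg, uint32_t *val)
{
	static const uint32_t regs[4] = { 0x113, 0x0, 0x300, 0x0 };

	if (sc_reg >= 4)
		return -EINVAL;
	*val = regs[sc_reg];
	return 0;
}

int main(void)
{
	uint32_t sstatus;

	if (demo_scr_read(0, &sstatus) == 0)
		printf("SStatus = 0x%08x\n", sstatus);
	else
		printf("SCR read failed\n");
	return 0;
}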
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index fb8a749423ca..8ec520885b95 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -35,8 +35,6 @@ | |||
35 | 35 | ||
36 | 6) Add port multiplier support (intermediate) | 36 | 6) Add port multiplier support (intermediate) |
37 | 37 | ||
38 | 7) Test and verify 3.0 Gbps support | ||
39 | |||
40 | 8) Develop a low-power-consumption strategy, and implement it. | 38 | 8) Develop a low-power-consumption strategy, and implement it. |
41 | 39 | ||
42 | 9) [Experiment, low priority] See if ATAPI can be supported using | 40 | 9) [Experiment, low priority] See if ATAPI can be supported using |
@@ -227,26 +225,26 @@ enum { | |||
227 | 225 | ||
228 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, | 226 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, |
229 | EDMA_ERR_IRQ_MASK_OFS = 0xc, | 227 | EDMA_ERR_IRQ_MASK_OFS = 0xc, |
230 | EDMA_ERR_D_PAR = (1 << 0), | 228 | EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ |
231 | EDMA_ERR_PRD_PAR = (1 << 1), | 229 | EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ |
232 | EDMA_ERR_DEV = (1 << 2), | 230 | EDMA_ERR_DEV = (1 << 2), /* device error */ |
233 | EDMA_ERR_DEV_DCON = (1 << 3), | 231 | EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ |
234 | EDMA_ERR_DEV_CON = (1 << 4), | 232 | EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ |
235 | EDMA_ERR_SERR = (1 << 5), | 233 | EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ |
236 | EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ | 234 | EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ |
237 | EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ | 235 | EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ |
238 | EDMA_ERR_BIST_ASYNC = (1 << 8), | 236 | EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ |
239 | EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */ | 237 | EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */ |
240 | EDMA_ERR_CRBQ_PAR = (1 << 9), | 238 | EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ |
241 | EDMA_ERR_CRPB_PAR = (1 << 10), | 239 | EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ |
242 | EDMA_ERR_INTRL_PAR = (1 << 11), | 240 | EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ |
243 | EDMA_ERR_IORDY = (1 << 12), | 241 | EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ |
244 | EDMA_ERR_LNK_CTRL_RX = (0xf << 13), | 242 | EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ |
245 | EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), | 243 | EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), |
246 | EDMA_ERR_LNK_DATA_RX = (0xf << 17), | 244 | EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ |
247 | EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), | 245 | EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ |
248 | EDMA_ERR_LNK_DATA_TX = (0x1f << 26), | 246 | EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ |
249 | EDMA_ERR_TRANS_PROTO = (1 << 31), | 247 | EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ |
250 | EDMA_ERR_OVERRUN_5 = (1 << 5), | 248 | EDMA_ERR_OVERRUN_5 = (1 << 5), |
251 | EDMA_ERR_UNDERRUN_5 = (1 << 6), | 249 | EDMA_ERR_UNDERRUN_5 = (1 << 6), |
252 | EDMA_EH_FREEZE = EDMA_ERR_D_PAR | | 250 | EDMA_EH_FREEZE = EDMA_ERR_D_PAR | |
@@ -255,7 +253,7 @@ enum { | |||
255 | EDMA_ERR_DEV_CON | | 253 | EDMA_ERR_DEV_CON | |
256 | EDMA_ERR_SERR | | 254 | EDMA_ERR_SERR | |
257 | EDMA_ERR_SELF_DIS | | 255 | EDMA_ERR_SELF_DIS | |
258 | EDMA_ERR_CRBQ_PAR | | 256 | EDMA_ERR_CRQB_PAR | |
259 | EDMA_ERR_CRPB_PAR | | 257 | EDMA_ERR_CRPB_PAR | |
260 | EDMA_ERR_INTRL_PAR | | 258 | EDMA_ERR_INTRL_PAR | |
261 | EDMA_ERR_IORDY | | 259 | EDMA_ERR_IORDY | |
@@ -270,7 +268,7 @@ enum { | |||
270 | EDMA_ERR_OVERRUN_5 | | 268 | EDMA_ERR_OVERRUN_5 | |
271 | EDMA_ERR_UNDERRUN_5 | | 269 | EDMA_ERR_UNDERRUN_5 | |
272 | EDMA_ERR_SELF_DIS_5 | | 270 | EDMA_ERR_SELF_DIS_5 | |
273 | EDMA_ERR_CRBQ_PAR | | 271 | EDMA_ERR_CRQB_PAR | |
274 | EDMA_ERR_CRPB_PAR | | 272 | EDMA_ERR_CRPB_PAR | |
275 | EDMA_ERR_INTRL_PAR | | 273 | EDMA_ERR_INTRL_PAR | |
276 | EDMA_ERR_IORDY, | 274 | EDMA_ERR_IORDY, |
@@ -286,10 +284,10 @@ enum { | |||
286 | EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ | 284 | EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ |
287 | EDMA_RSP_Q_PTR_SHIFT = 3, | 285 | EDMA_RSP_Q_PTR_SHIFT = 3, |
288 | 286 | ||
289 | EDMA_CMD_OFS = 0x28, | 287 | EDMA_CMD_OFS = 0x28, /* EDMA command register */ |
290 | EDMA_EN = (1 << 0), | 288 | EDMA_EN = (1 << 0), /* enable EDMA */ |
291 | EDMA_DS = (1 << 1), | 289 | EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ |
292 | ATA_RST = (1 << 2), | 290 | ATA_RST = (1 << 2), /* reset trans/link/phy */ |
293 | 291 | ||
294 | EDMA_IORDY_TMOUT = 0x34, | 292 | EDMA_IORDY_TMOUT = 0x34, |
295 | EDMA_ARB_CFG = 0x38, | 293 | EDMA_ARB_CFG = 0x38, |
@@ -301,14 +299,13 @@ enum { | |||
301 | MV_HP_ERRATA_60X1B2 = (1 << 3), | 299 | MV_HP_ERRATA_60X1B2 = (1 << 3), |
302 | MV_HP_ERRATA_60X1C0 = (1 << 4), | 300 | MV_HP_ERRATA_60X1C0 = (1 << 4), |
303 | MV_HP_ERRATA_XX42A0 = (1 << 5), | 301 | MV_HP_ERRATA_XX42A0 = (1 << 5), |
304 | MV_HP_GEN_I = (1 << 6), | 302 | MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ |
305 | MV_HP_GEN_II = (1 << 7), | 303 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ |
306 | MV_HP_GEN_IIE = (1 << 8), | 304 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ |
307 | 305 | ||
308 | /* Port private flags (pp_flags) */ | 306 | /* Port private flags (pp_flags) */ |
309 | MV_PP_FLAG_EDMA_EN = (1 << 0), | 307 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
310 | MV_PP_FLAG_EDMA_DS_ACT = (1 << 1), | 308 | MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */ |
311 | MV_PP_FLAG_HAD_A_RESET = (1 << 2), | ||
312 | }; | 309 | }; |
313 | 310 | ||
314 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) | 311 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
@@ -318,8 +315,12 @@ enum { | |||
318 | enum { | 315 | enum { |
319 | MV_DMA_BOUNDARY = 0xffffffffU, | 316 | MV_DMA_BOUNDARY = 0xffffffffU, |
320 | 317 | ||
318 | /* mask of register bits containing lower 32 bits | ||
319 | * of EDMA request queue DMA address | ||
320 | */ | ||
321 | EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, | 321 | EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, |
322 | 322 | ||
323 | /* ditto, for response queue */ | ||
323 | EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, | 324 | EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, |
324 | }; | 325 | }; |
325 | 326 | ||
@@ -403,10 +404,10 @@ struct mv_host_priv { | |||
403 | }; | 404 | }; |
404 | 405 | ||
405 | static void mv_irq_clear(struct ata_port *ap); | 406 | static void mv_irq_clear(struct ata_port *ap); |
406 | static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); | 407 | static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); |
407 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 408 | static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
408 | static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in); | 409 | static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); |
409 | static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 410 | static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
410 | static int mv_port_start(struct ata_port *ap); | 411 | static int mv_port_start(struct ata_port *ap); |
411 | static void mv_port_stop(struct ata_port *ap); | 412 | static void mv_port_stop(struct ata_port *ap); |
412 | static void mv_qc_prep(struct ata_queued_cmd *qc); | 413 | static void mv_qc_prep(struct ata_queued_cmd *qc); |
@@ -823,7 +824,7 @@ static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, | |||
823 | } | 824 | } |
824 | 825 | ||
825 | /** | 826 | /** |
826 | * mv_stop_dma - Disable eDMA engine | 827 | * __mv_stop_dma - Disable eDMA engine |
827 | * @ap: ATA channel to manipulate | 828 | * @ap: ATA channel to manipulate |
828 | * | 829 | * |
829 | * Verify the local cache of the eDMA state is accurate with a | 830 | * Verify the local cache of the eDMA state is accurate with a |
@@ -832,7 +833,7 @@ static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, | |||
832 | * LOCKING: | 833 | * LOCKING: |
833 | * Inherited from caller. | 834 | * Inherited from caller. |
834 | */ | 835 | */ |
835 | static int mv_stop_dma(struct ata_port *ap) | 836 | static int __mv_stop_dma(struct ata_port *ap) |
836 | { | 837 | { |
837 | void __iomem *port_mmio = mv_ap_base(ap); | 838 | void __iomem *port_mmio = mv_ap_base(ap); |
838 | struct mv_port_priv *pp = ap->private_data; | 839 | struct mv_port_priv *pp = ap->private_data; |
@@ -865,6 +866,18 @@ static int mv_stop_dma(struct ata_port *ap) | |||
865 | return err; | 866 | return err; |
866 | } | 867 | } |
867 | 868 | ||
869 | static int mv_stop_dma(struct ata_port *ap) | ||
870 | { | ||
871 | unsigned long flags; | ||
872 | int rc; | ||
873 | |||
874 | spin_lock_irqsave(&ap->host->lock, flags); | ||
875 | rc = __mv_stop_dma(ap); | ||
876 | spin_unlock_irqrestore(&ap->host->lock, flags); | ||
877 | |||
878 | return rc; | ||
879 | } | ||
880 | |||
868 | #ifdef ATA_DEBUG | 881 | #ifdef ATA_DEBUG |
869 | static void mv_dump_mem(void __iomem *start, unsigned bytes) | 882 | static void mv_dump_mem(void __iomem *start, unsigned bytes) |
870 | { | 883 | { |
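Editor's note: the hunk above introduces the usual locked/unlocked pairing. __mv_stop_dma() assumes the caller already holds the host lock (as mv_qc_issue does, since qc_issue runs under that lock), while mv_stop_dma() takes the lock itself for callers such as mv_port_stop and mv_prereset. The pthread sketch below is only a user-space illustration of that idiom; none of the names are kernel symbols.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* __do_stop: caller must already hold host_lock */
static int __do_stop(void)
{
	printf("stopping engine (lock held by caller)\n");
	return 0;
}

/* do_stop: locked wrapper for callers that do not hold the lock */
static int do_stop(void)
{
	int rc;

	pthread_mutex_lock(&host_lock);
	rc = __do_stop();
	pthread_mutex_unlock(&host_lock);
	return rc;
}

int main(void)
{
	return do_stop();	/* build with: cc demo.c -lpthread */
}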
@@ -961,22 +974,26 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in) | |||
961 | return ofs; | 974 | return ofs; |
962 | } | 975 | } |
963 | 976 | ||
964 | static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in) | 977 | static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) |
965 | { | 978 | { |
966 | unsigned int ofs = mv_scr_offset(sc_reg_in); | 979 | unsigned int ofs = mv_scr_offset(sc_reg_in); |
967 | 980 | ||
968 | if (0xffffffffU != ofs) | 981 | if (ofs != 0xffffffffU) { |
969 | return readl(mv_ap_base(ap) + ofs); | 982 | *val = readl(mv_ap_base(ap) + ofs); |
970 | else | 983 | return 0; |
971 | return (u32) ofs; | 984 | } else |
985 | return -EINVAL; | ||
972 | } | 986 | } |
973 | 987 | ||
974 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) | 988 | static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) |
975 | { | 989 | { |
976 | unsigned int ofs = mv_scr_offset(sc_reg_in); | 990 | unsigned int ofs = mv_scr_offset(sc_reg_in); |
977 | 991 | ||
978 | if (0xffffffffU != ofs) | 992 | if (ofs != 0xffffffffU) { |
979 | writelfl(val, mv_ap_base(ap) + ofs); | 993 | writelfl(val, mv_ap_base(ap) + ofs); |
994 | return 0; | ||
995 | } else | ||
996 | return -EINVAL; | ||
980 | } | 997 | } |
981 | 998 | ||
982 | static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, | 999 | static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, |
@@ -1029,6 +1046,7 @@ static int mv_port_start(struct ata_port *ap) | |||
1029 | void __iomem *port_mmio = mv_ap_base(ap); | 1046 | void __iomem *port_mmio = mv_ap_base(ap); |
1030 | void *mem; | 1047 | void *mem; |
1031 | dma_addr_t mem_dma; | 1048 | dma_addr_t mem_dma; |
1049 | unsigned long flags; | ||
1032 | int rc; | 1050 | int rc; |
1033 | 1051 | ||
1034 | pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); | 1052 | pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); |
@@ -1067,10 +1085,14 @@ static int mv_port_start(struct ata_port *ap) | |||
1067 | pp->sg_tbl = mem; | 1085 | pp->sg_tbl = mem; |
1068 | pp->sg_tbl_dma = mem_dma; | 1086 | pp->sg_tbl_dma = mem_dma; |
1069 | 1087 | ||
1088 | spin_lock_irqsave(&ap->host->lock, flags); | ||
1089 | |||
1070 | mv_edma_cfg(ap, hpriv, port_mmio); | 1090 | mv_edma_cfg(ap, hpriv, port_mmio); |
1071 | 1091 | ||
1072 | mv_set_edma_ptrs(port_mmio, hpriv, pp); | 1092 | mv_set_edma_ptrs(port_mmio, hpriv, pp); |
1073 | 1093 | ||
1094 | spin_unlock_irqrestore(&ap->host->lock, flags); | ||
1095 | |||
1074 | /* Don't turn on EDMA here...do it before DMA commands only. Else | 1096 | /* Don't turn on EDMA here...do it before DMA commands only. Else |
1075 | * we'll be unable to send non-data, PIO, etc due to restricted access | 1097 | * we'll be unable to send non-data, PIO, etc due to restricted access |
1076 | * to shadow regs. | 1098 | * to shadow regs. |
@@ -1090,11 +1112,7 @@ static int mv_port_start(struct ata_port *ap) | |||
1090 | */ | 1112 | */ |
1091 | static void mv_port_stop(struct ata_port *ap) | 1113 | static void mv_port_stop(struct ata_port *ap) |
1092 | { | 1114 | { |
1093 | unsigned long flags; | ||
1094 | |||
1095 | spin_lock_irqsave(&ap->host->lock, flags); | ||
1096 | mv_stop_dma(ap); | 1115 | mv_stop_dma(ap); |
1097 | spin_unlock_irqrestore(&ap->host->lock, flags); | ||
1098 | } | 1116 | } |
1099 | 1117 | ||
1100 | /** | 1118 | /** |
@@ -1325,7 +1343,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1325 | * port. Turn off EDMA so there won't be problems accessing | 1343 | * port. Turn off EDMA so there won't be problems accessing |
1326 | * shadow block, etc registers. | 1344 | * shadow block, etc registers. |
1327 | */ | 1345 | */ |
1328 | mv_stop_dma(ap); | 1346 | __mv_stop_dma(ap); |
1329 | return ata_qc_issue_prot(qc); | 1347 | return ata_qc_issue_prot(qc); |
1330 | } | 1348 | } |
1331 | 1349 | ||
@@ -1393,16 +1411,16 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1393 | if (edma_err_cause & EDMA_ERR_DEV) | 1411 | if (edma_err_cause & EDMA_ERR_DEV) |
1394 | err_mask |= AC_ERR_DEV; | 1412 | err_mask |= AC_ERR_DEV; |
1395 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | | 1413 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | |
1396 | EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR | | 1414 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | |
1397 | EDMA_ERR_INTRL_PAR)) { | 1415 | EDMA_ERR_INTRL_PAR)) { |
1398 | err_mask |= AC_ERR_ATA_BUS; | 1416 | err_mask |= AC_ERR_ATA_BUS; |
1399 | action |= ATA_EH_HARDRESET; | 1417 | action |= ATA_EH_HARDRESET; |
1400 | ata_ehi_push_desc(ehi, ", parity error"); | 1418 | ata_ehi_push_desc(ehi, "parity error"); |
1401 | } | 1419 | } |
1402 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { | 1420 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { |
1403 | ata_ehi_hotplugged(ehi); | 1421 | ata_ehi_hotplugged(ehi); |
1404 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? | 1422 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? |
1405 | ", dev disconnect" : ", dev connect"); | 1423 | "dev disconnect" : "dev connect"); |
1406 | } | 1424 | } |
1407 | 1425 | ||
1408 | if (IS_GEN_I(hpriv)) { | 1426 | if (IS_GEN_I(hpriv)) { |
@@ -1411,7 +1429,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1411 | if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { | 1429 | if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { |
1412 | struct mv_port_priv *pp = ap->private_data; | 1430 | struct mv_port_priv *pp = ap->private_data; |
1413 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 1431 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
1414 | ata_ehi_push_desc(ehi, ", EDMA self-disable"); | 1432 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
1415 | } | 1433 | } |
1416 | } else { | 1434 | } else { |
1417 | eh_freeze_mask = EDMA_EH_FREEZE; | 1435 | eh_freeze_mask = EDMA_EH_FREEZE; |
@@ -1419,7 +1437,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1419 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { | 1437 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { |
1420 | struct mv_port_priv *pp = ap->private_data; | 1438 | struct mv_port_priv *pp = ap->private_data; |
1421 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 1439 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
1422 | ata_ehi_push_desc(ehi, ", EDMA self-disable"); | 1440 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
1423 | } | 1441 | } |
1424 | 1442 | ||
1425 | if (edma_err_cause & EDMA_ERR_SERR) { | 1443 | if (edma_err_cause & EDMA_ERR_SERR) { |
@@ -1489,33 +1507,30 @@ static void mv_intr_edma(struct ata_port *ap) | |||
1489 | 1507 | ||
1490 | while (1) { | 1508 | while (1) { |
1491 | u16 status; | 1509 | u16 status; |
1510 | unsigned int tag; | ||
1492 | 1511 | ||
1493 | /* get s/w response queue last-read pointer, and compare */ | 1512 | /* get s/w response queue last-read pointer, and compare */ |
1494 | out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK; | 1513 | out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK; |
1495 | if (in_index == out_index) | 1514 | if (in_index == out_index) |
1496 | break; | 1515 | break; |
1497 | 1516 | ||
1498 | |||
1499 | /* 50xx: get active ATA command */ | 1517 | /* 50xx: get active ATA command */ |
1500 | if (IS_GEN_I(hpriv)) | 1518 | if (IS_GEN_I(hpriv)) |
1501 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1519 | tag = ap->active_tag; |
1502 | 1520 | ||
1503 | /* 60xx: get active ATA command via tag, to enable support | 1521 | /* Gen II/IIE: get active ATA command via tag, to enable |
1504 | * for queueing. this works transparently for queued and | 1522 | * support for queueing. this works transparently for |
1505 | * non-queued modes. | 1523 | * queued and non-queued modes. |
1506 | */ | 1524 | */ |
1507 | else { | 1525 | else if (IS_GEN_II(hpriv)) |
1508 | unsigned int tag; | 1526 | tag = (le16_to_cpu(pp->crpb[out_index].id) |
1527 | >> CRPB_IOID_SHIFT_6) & 0x3f; | ||
1509 | 1528 | ||
1510 | if (IS_GEN_II(hpriv)) | 1529 | else /* IS_GEN_IIE */ |
1511 | tag = (le16_to_cpu(pp->crpb[out_index].id) | 1530 | tag = (le16_to_cpu(pp->crpb[out_index].id) |
1512 | >> CRPB_IOID_SHIFT_6) & 0x3f; | 1531 | >> CRPB_IOID_SHIFT_7) & 0x3f; |
1513 | else | ||
1514 | tag = (le16_to_cpu(pp->crpb[out_index].id) | ||
1515 | >> CRPB_IOID_SHIFT_7) & 0x3f; | ||
1516 | 1532 | ||
1517 | qc = ata_qc_from_tag(ap, tag); | 1533 | qc = ata_qc_from_tag(ap, tag); |
1518 | } | ||
1519 | 1534 | ||
1520 | /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS | 1535 | /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS |
1521 | * bits (WARNING: might not necessarily be associated | 1536 | * bits (WARNING: might not necessarily be associated |
@@ -1535,7 +1550,7 @@ static void mv_intr_edma(struct ata_port *ap) | |||
1535 | ata_qc_complete(qc); | 1550 | ata_qc_complete(qc); |
1536 | } | 1551 | } |
1537 | 1552 | ||
1538 | /* advance software response queue pointer, to | 1553 | /* advance software response queue pointer, to |
1539 | * indicate (after the loop completes) to hardware | 1554 | * indicate (after the loop completes) to hardware |
1540 | * that we have consumed a response queue entry. | 1555 | * that we have consumed a response queue entry. |
1541 | */ | 1556 | */ |
@@ -1741,26 +1756,30 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in) | |||
1741 | return ofs; | 1756 | return ofs; |
1742 | } | 1757 | } |
1743 | 1758 | ||
1744 | static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in) | 1759 | static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) |
1745 | { | 1760 | { |
1746 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | 1761 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; |
1747 | void __iomem *addr = mv5_phy_base(mmio, ap->port_no); | 1762 | void __iomem *addr = mv5_phy_base(mmio, ap->port_no); |
1748 | unsigned int ofs = mv5_scr_offset(sc_reg_in); | 1763 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
1749 | 1764 | ||
1750 | if (ofs != 0xffffffffU) | 1765 | if (ofs != 0xffffffffU) { |
1751 | return readl(addr + ofs); | 1766 | *val = readl(addr + ofs); |
1752 | else | 1767 | return 0; |
1753 | return (u32) ofs; | 1768 | } else |
1769 | return -EINVAL; | ||
1754 | } | 1770 | } |
1755 | 1771 | ||
1756 | static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) | 1772 | static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) |
1757 | { | 1773 | { |
1758 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | 1774 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; |
1759 | void __iomem *addr = mv5_phy_base(mmio, ap->port_no); | 1775 | void __iomem *addr = mv5_phy_base(mmio, ap->port_no); |
1760 | unsigned int ofs = mv5_scr_offset(sc_reg_in); | 1776 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
1761 | 1777 | ||
1762 | if (ofs != 0xffffffffU) | 1778 | if (ofs != 0xffffffffU) { |
1763 | writelfl(val, addr + ofs); | 1779 | writelfl(val, addr + ofs); |
1780 | return 0; | ||
1781 | } else | ||
1782 | return -EINVAL; | ||
1764 | } | 1783 | } |
1765 | 1784 | ||
1766 | static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) | 1785 | static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) |
@@ -2138,9 +2157,17 @@ static void mv_phy_reset(struct ata_port *ap, unsigned int *class, | |||
2138 | 2157 | ||
2139 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); | 2158 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); |
2140 | 2159 | ||
2141 | DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " | 2160 | #ifdef DEBUG |
2142 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | 2161 | { |
2143 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | 2162 | u32 sstatus, serror, scontrol; |
2163 | |||
2164 | mv_scr_read(ap, SCR_STATUS, &sstatus); | ||
2165 | mv_scr_read(ap, SCR_ERROR, &serror); | ||
2166 | mv_scr_read(ap, SCR_CONTROL, &scontrol); | ||
2167 | DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " | ||
2168 | "SCtrl 0x%08x\n", status, serror, scontrol); | ||
2169 | } | ||
2170 | #endif | ||
2144 | 2171 | ||
2145 | /* Issue COMRESET via SControl */ | 2172 | /* Issue COMRESET via SControl */ |
2146 | comreset_retry: | 2173 | comreset_retry: |
@@ -2164,9 +2191,17 @@ comreset_retry: | |||
2164 | (retry-- > 0)) | 2191 | (retry-- > 0)) |
2165 | goto comreset_retry; | 2192 | goto comreset_retry; |
2166 | 2193 | ||
2167 | DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " | 2194 | #ifdef DEBUG |
2168 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | 2195 | { |
2169 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | 2196 | u32 sstatus, serror, scontrol; |
2197 | |||
2198 | mv_scr_read(ap, SCR_STATUS, &sstatus); | ||
2199 | mv_scr_read(ap, SCR_ERROR, &serror); | ||
2200 | mv_scr_read(ap, SCR_CONTROL, &scontrol); | ||
2201 | DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " | ||
2202 | "SCtrl 0x%08x\n", sstatus, serror, scontrol); | ||
2203 | } | ||
2204 | #endif | ||
2170 | 2205 | ||
2171 | if (ata_port_offline(ap)) { | 2206 | if (ata_port_offline(ap)) { |
2172 | *class = ATA_DEV_NONE; | 2207 | *class = ATA_DEV_NONE; |
@@ -2209,7 +2244,7 @@ static int mv_prereset(struct ata_port *ap, unsigned long deadline) | |||
2209 | struct mv_port_priv *pp = ap->private_data; | 2244 | struct mv_port_priv *pp = ap->private_data; |
2210 | struct ata_eh_context *ehc = &ap->eh_context; | 2245 | struct ata_eh_context *ehc = &ap->eh_context; |
2211 | int rc; | 2246 | int rc; |
2212 | 2247 | ||
2213 | rc = mv_stop_dma(ap); | 2248 | rc = mv_stop_dma(ap); |
2214 | if (rc) | 2249 | if (rc) |
2215 | ehc->i.action |= ATA_EH_HARDRESET; | 2250 | ehc->i.action |= ATA_EH_HARDRESET; |
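Editor's note on the mv_intr_edma rework earlier in this file: the active command tag is pulled out of the little-endian CRPB ID word with a generation-dependent shift (CRPB_IOID_SHIFT_6 for Gen II, CRPB_IOID_SHIFT_7 for Gen IIE) and a 6-bit mask. The snippet below just demonstrates that bit extraction; the shift values used here are illustrative placeholders, not the driver's constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholders; the driver defines its own CRPB_IOID_* shifts. */
#define DEMO_IOID_SHIFT_GEN2	6
#define DEMO_IOID_SHIFT_GEN2E	7

static unsigned int demo_tag(uint16_t id, int gen2e)
{
	int shift = gen2e ? DEMO_IOID_SHIFT_GEN2E : DEMO_IOID_SHIFT_GEN2;

	/* assume id is already in CPU byte order for this demo */
	return (id >> shift) & 0x3f;
}

int main(void)
{
	uint16_t id = 0x0d80;	/* arbitrary response-queue ID word */

	printf("Gen II  tag = %u\n", demo_tag(id, 0));
	printf("Gen IIE tag = %u\n", demo_tag(id, 1));
	return 0;
}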
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index db81e3efa5ec..0b58c4df6fd2 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -236,8 +236,8 @@ static void nv_ck804_host_stop(struct ata_host *host); | |||
236 | static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); | 236 | static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); |
237 | static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); | 237 | static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); |
238 | static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); | 238 | static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); |
239 | static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg); | 239 | static int nv_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); |
240 | static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 240 | static int nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); |
241 | 241 | ||
242 | static void nv_nf2_freeze(struct ata_port *ap); | 242 | static void nv_nf2_freeze(struct ata_port *ap); |
243 | static void nv_nf2_thaw(struct ata_port *ap); | 243 | static void nv_nf2_thaw(struct ata_port *ap); |
@@ -715,19 +715,20 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
715 | int freeze = 0; | 715 | int freeze = 0; |
716 | 716 | ||
717 | ata_ehi_clear_desc(ehi); | 717 | ata_ehi_clear_desc(ehi); |
718 | ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags ); | 718 | __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags ); |
719 | if (flags & NV_CPB_RESP_ATA_ERR) { | 719 | if (flags & NV_CPB_RESP_ATA_ERR) { |
720 | ata_ehi_push_desc(ehi, ": ATA error"); | 720 | ata_ehi_push_desc(ehi, "ATA error"); |
721 | ehi->err_mask |= AC_ERR_DEV; | 721 | ehi->err_mask |= AC_ERR_DEV; |
722 | } else if (flags & NV_CPB_RESP_CMD_ERR) { | 722 | } else if (flags & NV_CPB_RESP_CMD_ERR) { |
723 | ata_ehi_push_desc(ehi, ": CMD error"); | 723 | ata_ehi_push_desc(ehi, "CMD error"); |
724 | ehi->err_mask |= AC_ERR_DEV; | 724 | ehi->err_mask |= AC_ERR_DEV; |
725 | } else if (flags & NV_CPB_RESP_CPB_ERR) { | 725 | } else if (flags & NV_CPB_RESP_CPB_ERR) { |
726 | ata_ehi_push_desc(ehi, ": CPB error"); | 726 | ata_ehi_push_desc(ehi, "CPB error"); |
727 | ehi->err_mask |= AC_ERR_SYSTEM; | 727 | ehi->err_mask |= AC_ERR_SYSTEM; |
728 | freeze = 1; | 728 | freeze = 1; |
729 | } else { | 729 | } else { |
730 | /* notifier error, but no error in CPB flags? */ | 730 | /* notifier error, but no error in CPB flags? */ |
731 | ata_ehi_push_desc(ehi, "unknown"); | ||
731 | ehi->err_mask |= AC_ERR_OTHER; | 732 | ehi->err_mask |= AC_ERR_OTHER; |
732 | freeze = 1; | 733 | freeze = 1; |
733 | } | 734 | } |
@@ -854,20 +855,21 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
854 | struct ata_eh_info *ehi = &ap->eh_info; | 855 | struct ata_eh_info *ehi = &ap->eh_info; |
855 | 856 | ||
856 | ata_ehi_clear_desc(ehi); | 857 | ata_ehi_clear_desc(ehi); |
857 | ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status ); | 858 | __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); |
858 | if (status & NV_ADMA_STAT_TIMEOUT) { | 859 | if (status & NV_ADMA_STAT_TIMEOUT) { |
859 | ehi->err_mask |= AC_ERR_SYSTEM; | 860 | ehi->err_mask |= AC_ERR_SYSTEM; |
860 | ata_ehi_push_desc(ehi, ": timeout"); | 861 | ata_ehi_push_desc(ehi, "timeout"); |
861 | } else if (status & NV_ADMA_STAT_HOTPLUG) { | 862 | } else if (status & NV_ADMA_STAT_HOTPLUG) { |
862 | ata_ehi_hotplugged(ehi); | 863 | ata_ehi_hotplugged(ehi); |
863 | ata_ehi_push_desc(ehi, ": hotplug"); | 864 | ata_ehi_push_desc(ehi, "hotplug"); |
864 | } else if (status & NV_ADMA_STAT_HOTUNPLUG) { | 865 | } else if (status & NV_ADMA_STAT_HOTUNPLUG) { |
865 | ata_ehi_hotplugged(ehi); | 866 | ata_ehi_hotplugged(ehi); |
866 | ata_ehi_push_desc(ehi, ": hot unplug"); | 867 | ata_ehi_push_desc(ehi, "hot unplug"); |
867 | } else if (status & NV_ADMA_STAT_SERROR) { | 868 | } else if (status & NV_ADMA_STAT_SERROR) { |
868 | /* let libata analyze SError and figure out the cause */ | 869 | /* let libata analyze SError and figure out the cause */ |
869 | ata_ehi_push_desc(ehi, ": SError"); | 870 | ata_ehi_push_desc(ehi, "SError"); |
870 | } | 871 | } else |
872 | ata_ehi_push_desc(ehi, "unknown"); | ||
871 | ata_port_freeze(ap); | 873 | ata_port_freeze(ap); |
872 | continue; | 874 | continue; |
873 | } | 875 | } |
@@ -1391,20 +1393,22 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance) | |||
1391 | return ret; | 1393 | return ret; |
1392 | } | 1394 | } |
1393 | 1395 | ||
1394 | static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) | 1396 | static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
1395 | { | 1397 | { |
1396 | if (sc_reg > SCR_CONTROL) | 1398 | if (sc_reg > SCR_CONTROL) |
1397 | return 0xffffffffU; | 1399 | return -EINVAL; |
1398 | 1400 | ||
1399 | return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); | 1401 | *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); |
1402 | return 0; | ||
1400 | } | 1403 | } |
1401 | 1404 | ||
1402 | static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 1405 | static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
1403 | { | 1406 | { |
1404 | if (sc_reg > SCR_CONTROL) | 1407 | if (sc_reg > SCR_CONTROL) |
1405 | return; | 1408 | return -EINVAL; |
1406 | 1409 | ||
1407 | iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); | 1410 | iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); |
1411 | return 0; | ||
1408 | } | 1412 | } |
1409 | 1413 | ||
1410 | static void nv_nf2_freeze(struct ata_port *ap) | 1414 | static void nv_nf2_freeze(struct ata_port *ap) |
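Editor's note: the sata_nv error-description changes follow the revised libata EH-description convention used throughout this patch. Drivers drop the leading ", " from each message, which suggests the core ata_ehi_push_desc() now inserts the separator itself, while the raw __ata_ehi_push_desc() variant is used for prefixes such as the "ADMA status 0x...: " string. The user-space mock below is only an analogy for that separator behaviour, not the libata implementation.

#include <stdio.h>
#include <string.h>

static char desc[128];

/* append with no separator (analogue of __ata_ehi_push_desc, used for
 * prefixes) */
static void demo_push_raw(const char *s)
{
	strncat(desc, s, sizeof(desc) - strlen(desc) - 1);
}

/* append, inserting ", " first when something is already queued
 * (analogue of the new ata_ehi_push_desc behaviour) */
static void demo_push(const char *s)
{
	if (desc[0])
		demo_push_raw(", ");
	demo_push_raw(s);
}

int main(void)
{
	demo_push("parity error");		/* first entry: no separator added */
	demo_push("EDMA self-disable");	/* later entries get ", " prepended */
	printf("%s\n", desc);		/* parity error, EDMA self-disable */
	return 0;
}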
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index d2fcb9a6bec2..d39ebc23c4a9 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c | |||
@@ -128,8 +128,8 @@ struct pdc_port_priv { | |||
128 | dma_addr_t pkt_dma; | 128 | dma_addr_t pkt_dma; |
129 | }; | 129 | }; |
130 | 130 | ||
131 | static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); | 131 | static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
132 | static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 132 | static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
133 | static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 133 | static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
134 | static int pdc_common_port_start(struct ata_port *ap); | 134 | static int pdc_common_port_start(struct ata_port *ap); |
135 | static int pdc_sata_port_start(struct ata_port *ap); | 135 | static int pdc_sata_port_start(struct ata_port *ap); |
@@ -427,19 +427,20 @@ static int pdc_sata_cable_detect(struct ata_port *ap) | |||
427 | return ATA_CBL_SATA; | 427 | return ATA_CBL_SATA; |
428 | } | 428 | } |
429 | 429 | ||
430 | static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) | 430 | static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
431 | { | 431 | { |
432 | if (sc_reg > SCR_CONTROL) | 432 | if (sc_reg > SCR_CONTROL) |
433 | return 0xffffffffU; | 433 | return -EINVAL; |
434 | return readl(ap->ioaddr.scr_addr + (sc_reg * 4)); | 434 | *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); |
435 | return 0; | ||
435 | } | 436 | } |
436 | 437 | ||
437 | static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, | 438 | static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
438 | u32 val) | ||
439 | { | 439 | { |
440 | if (sc_reg > SCR_CONTROL) | 440 | if (sc_reg > SCR_CONTROL) |
441 | return; | 441 | return -EINVAL; |
442 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); | 442 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); |
443 | return 0; | ||
443 | } | 444 | } |
444 | 445 | ||
445 | static void pdc_atapi_pkt(struct ata_queued_cmd *qc) | 446 | static void pdc_atapi_pkt(struct ata_queued_cmd *qc) |
@@ -642,8 +643,12 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
642 | | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR)) | 643 | | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR)) |
643 | ac_err_mask |= AC_ERR_HOST_BUS; | 644 | ac_err_mask |= AC_ERR_HOST_BUS; |
644 | 645 | ||
645 | if (sata_scr_valid(ap)) | 646 | if (sata_scr_valid(ap)) { |
646 | ehi->serror |= pdc_sata_scr_read(ap, SCR_ERROR); | 647 | u32 serror; |
648 | |||
649 | pdc_sata_scr_read(ap, SCR_ERROR, &serror); | ||
650 | ehi->serror |= serror; | ||
651 | } | ||
647 | 652 | ||
648 | qc->err_mask |= ac_err_mask; | 653 | qc->err_mask |= ac_err_mask; |
649 | 654 | ||
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index 9ab554da89bf..c8f9242e7f44 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c | |||
@@ -111,8 +111,8 @@ struct qs_port_priv { | |||
111 | qs_state_t state; | 111 | qs_state_t state; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg); | 114 | static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
115 | static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 115 | static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
116 | static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 116 | static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
117 | static int qs_port_start(struct ata_port *ap); | 117 | static int qs_port_start(struct ata_port *ap); |
118 | static void qs_host_stop(struct ata_host *host); | 118 | static void qs_host_stop(struct ata_host *host); |
@@ -255,18 +255,20 @@ static void qs_eng_timeout(struct ata_port *ap) | |||
255 | ata_eng_timeout(ap); | 255 | ata_eng_timeout(ap); |
256 | } | 256 | } |
257 | 257 | ||
258 | static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg) | 258 | static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
259 | { | 259 | { |
260 | if (sc_reg > SCR_CONTROL) | 260 | if (sc_reg > SCR_CONTROL) |
261 | return ~0U; | 261 | return -EINVAL; |
262 | return readl(ap->ioaddr.scr_addr + (sc_reg * 8)); | 262 | *val = readl(ap->ioaddr.scr_addr + (sc_reg * 8)); |
263 | return 0; | ||
263 | } | 264 | } |
264 | 265 | ||
265 | static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 266 | static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
266 | { | 267 | { |
267 | if (sc_reg > SCR_CONTROL) | 268 | if (sc_reg > SCR_CONTROL) |
268 | return; | 269 | return -EINVAL; |
269 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 8)); | 270 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 8)); |
271 | return 0; | ||
270 | } | 272 | } |
271 | 273 | ||
272 | static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) | 274 | static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) |
@@ -337,7 +339,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) | |||
337 | buf[28] = dflags; | 339 | buf[28] = dflags; |
338 | 340 | ||
339 | /* frame information structure (FIS) */ | 341 | /* frame information structure (FIS) */ |
340 | ata_tf_to_fis(&qc->tf, &buf[32], 0); | 342 | ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); |
341 | } | 343 | } |
342 | 344 | ||
343 | static inline void qs_packet_start(struct ata_queued_cmd *qc) | 345 | static inline void qs_packet_start(struct ata_queued_cmd *qc) |
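Editor's note: both sata_qstor (above) and sata_sil24 (below) switch to the new ata_tf_to_fis(tf, pmp, is_cmd, fis) signature, where the added arguments select the port-multiplier port field and whether the C (command) bit is set in the Register Host-to-Device FIS. The fragment below builds only the first dword of such a FIS to show what those two parameters control; it illustrates the FIS layout and is not the libata helper.

#include <stdint.h>
#include <stdio.h>

/* First four bytes of a Register Host-to-Device FIS (SATA spec):
 * byte 0: FIS type (0x27)
 * byte 1: bit 7 = C (command vs. device control), bits 3:0 = PM port
 * byte 2: command, byte 3: features
 */
static void demo_fis_header(uint8_t *fis, uint8_t pmp, int is_cmd,
			    uint8_t command, uint8_t feature)
{
	fis[0] = 0x27;
	fis[1] = (is_cmd ? 0x80 : 0x00) | (pmp & 0x0f);
	fis[2] = command;
	fis[3] = feature;
}

int main(void)
{
	uint8_t fis[4];

	demo_fis_header(fis, 0, 1, 0xec, 0);	/* IDENTIFY DEVICE, PM port 0 */
	printf("%02x %02x %02x %02x\n", fis[0], fis[1], fis[2], fis[3]);
	return 0;
}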
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 2a86dc4598d0..db6763758952 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -115,8 +115,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | |||
115 | static int sil_pci_device_resume(struct pci_dev *pdev); | 115 | static int sil_pci_device_resume(struct pci_dev *pdev); |
116 | #endif | 116 | #endif |
117 | static void sil_dev_config(struct ata_device *dev); | 117 | static void sil_dev_config(struct ata_device *dev); |
118 | static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); | 118 | static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
119 | static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 119 | static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
120 | static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed); | 120 | static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed); |
121 | static void sil_freeze(struct ata_port *ap); | 121 | static void sil_freeze(struct ata_port *ap); |
122 | static void sil_thaw(struct ata_port *ap); | 122 | static void sil_thaw(struct ata_port *ap); |
@@ -350,19 +350,26 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_re | |||
350 | return NULL; | 350 | return NULL; |
351 | } | 351 | } |
352 | 352 | ||
353 | static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) | 353 | static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
354 | { | 354 | { |
355 | void __iomem *mmio = sil_scr_addr(ap, sc_reg); | 355 | void __iomem *mmio = sil_scr_addr(ap, sc_reg); |
356 | if (mmio) | 356 | |
357 | return readl(mmio); | 357 | if (mmio) { |
358 | return 0xffffffffU; | 358 | *val = readl(mmio); |
359 | return 0; | ||
360 | } | ||
361 | return -EINVAL; | ||
359 | } | 362 | } |
360 | 363 | ||
361 | static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 364 | static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
362 | { | 365 | { |
363 | void __iomem *mmio = sil_scr_addr(ap, sc_reg); | 366 | void __iomem *mmio = sil_scr_addr(ap, sc_reg); |
364 | if (mmio) | 367 | |
368 | if (mmio) { | ||
365 | writel(val, mmio); | 369 | writel(val, mmio); |
370 | return 0; | ||
371 | } | ||
372 | return -EINVAL; | ||
366 | } | 373 | } |
367 | 374 | ||
368 | static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | 375 | static void sil_host_intr(struct ata_port *ap, u32 bmdma2) |
@@ -378,7 +385,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | |||
378 | * controllers continue to assert IRQ as long as | 385 | * controllers continue to assert IRQ as long as |
379 | * SError bits are pending. Clear SError immediately. | 386 | * SError bits are pending. Clear SError immediately. |
380 | */ | 387 | */ |
381 | serror = sil_scr_read(ap, SCR_ERROR); | 388 | sil_scr_read(ap, SCR_ERROR, &serror); |
382 | sil_scr_write(ap, SCR_ERROR, serror); | 389 | sil_scr_write(ap, SCR_ERROR, serror); |
383 | 390 | ||
384 | /* Trigger hotplug and accumulate SError only if the | 391 | /* Trigger hotplug and accumulate SError only if the |
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index ac43a30ebe29..46fbbe7f121c 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -326,8 +326,8 @@ struct sil24_port_priv { | |||
326 | 326 | ||
327 | static void sil24_dev_config(struct ata_device *dev); | 327 | static void sil24_dev_config(struct ata_device *dev); |
328 | static u8 sil24_check_status(struct ata_port *ap); | 328 | static u8 sil24_check_status(struct ata_port *ap); |
329 | static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); | 329 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); |
330 | static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); | 330 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); |
331 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | 331 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); |
332 | static void sil24_qc_prep(struct ata_queued_cmd *qc); | 332 | static void sil24_qc_prep(struct ata_queued_cmd *qc); |
333 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); | 333 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); |
@@ -464,15 +464,15 @@ static void sil24_dev_config(struct ata_device *dev) | |||
464 | writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); | 464 | writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); |
465 | } | 465 | } |
466 | 466 | ||
467 | static inline void sil24_update_tf(struct ata_port *ap) | 467 | static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) |
468 | { | 468 | { |
469 | struct sil24_port_priv *pp = ap->private_data; | ||
470 | void __iomem *port = ap->ioaddr.cmd_addr; | 469 | void __iomem *port = ap->ioaddr.cmd_addr; |
471 | struct sil24_prb __iomem *prb = port; | 470 | struct sil24_prb __iomem *prb; |
472 | u8 fis[6 * 4]; | 471 | u8 fis[6 * 4]; |
473 | 472 | ||
474 | memcpy_fromio(fis, prb->fis, 6 * 4); | 473 | prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ; |
475 | ata_tf_from_fis(fis, &pp->tf); | 474 | memcpy_fromio(fis, prb->fis, sizeof(fis)); |
475 | ata_tf_from_fis(fis, tf); | ||
476 | } | 476 | } |
477 | 477 | ||
478 | static u8 sil24_check_status(struct ata_port *ap) | 478 | static u8 sil24_check_status(struct ata_port *ap) |
@@ -488,25 +488,30 @@ static int sil24_scr_map[] = { | |||
488 | [SCR_ACTIVE] = 3, | 488 | [SCR_ACTIVE] = 3, |
489 | }; | 489 | }; |
490 | 490 | ||
491 | static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg) | 491 | static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) |
492 | { | 492 | { |
493 | void __iomem *scr_addr = ap->ioaddr.scr_addr; | 493 | void __iomem *scr_addr = ap->ioaddr.scr_addr; |
494 | |||
494 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { | 495 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { |
495 | void __iomem *addr; | 496 | void __iomem *addr; |
496 | addr = scr_addr + sil24_scr_map[sc_reg] * 4; | 497 | addr = scr_addr + sil24_scr_map[sc_reg] * 4; |
497 | return readl(scr_addr + sil24_scr_map[sc_reg] * 4); | 498 | *val = readl(scr_addr + sil24_scr_map[sc_reg] * 4); |
499 | return 0; | ||
498 | } | 500 | } |
499 | return 0xffffffffU; | 501 | return -EINVAL; |
500 | } | 502 | } |
501 | 503 | ||
502 | static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) | 504 | static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) |
503 | { | 505 | { |
504 | void __iomem *scr_addr = ap->ioaddr.scr_addr; | 506 | void __iomem *scr_addr = ap->ioaddr.scr_addr; |
507 | |||
505 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { | 508 | if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { |
506 | void __iomem *addr; | 509 | void __iomem *addr; |
507 | addr = scr_addr + sil24_scr_map[sc_reg] * 4; | 510 | addr = scr_addr + sil24_scr_map[sc_reg] * 4; |
508 | writel(val, scr_addr + sil24_scr_map[sc_reg] * 4); | 511 | writel(val, scr_addr + sil24_scr_map[sc_reg] * 4); |
512 | return 0; | ||
509 | } | 513 | } |
514 | return -EINVAL; | ||
510 | } | 515 | } |
511 | 516 | ||
512 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | 517 | static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
@@ -531,15 +536,60 @@ static int sil24_init_port(struct ata_port *ap) | |||
531 | return 0; | 536 | return 0; |
532 | } | 537 | } |
533 | 538 | ||
534 | static int sil24_softreset(struct ata_port *ap, unsigned int *class, | 539 | static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, |
535 | unsigned long deadline) | 540 | const struct ata_taskfile *tf, |
541 | int is_cmd, u32 ctrl, | ||
542 | unsigned long timeout_msec) | ||
536 | { | 543 | { |
537 | void __iomem *port = ap->ioaddr.cmd_addr; | 544 | void __iomem *port = ap->ioaddr.cmd_addr; |
538 | struct sil24_port_priv *pp = ap->private_data; | 545 | struct sil24_port_priv *pp = ap->private_data; |
539 | struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; | 546 | struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; |
540 | dma_addr_t paddr = pp->cmd_block_dma; | 547 | dma_addr_t paddr = pp->cmd_block_dma; |
541 | u32 mask, irq_stat; | 548 | u32 irq_enabled, irq_mask, irq_stat; |
549 | int rc; | ||
550 | |||
551 | prb->ctrl = cpu_to_le16(ctrl); | ||
552 | ata_tf_to_fis(tf, pmp, is_cmd, prb->fis); | ||
553 | |||
554 | /* temporarily plug completion and error interrupts */ | ||
555 | irq_enabled = readl(port + PORT_IRQ_ENABLE_SET); | ||
556 | writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR); | ||
557 | |||
558 | writel((u32)paddr, port + PORT_CMD_ACTIVATE); | ||
559 | writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); | ||
560 | |||
561 | irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; | ||
562 | irq_stat = ata_wait_register(port + PORT_IRQ_STAT, irq_mask, 0x0, | ||
563 | 10, timeout_msec); | ||
564 | |||
565 | writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */ | ||
566 | irq_stat >>= PORT_IRQ_RAW_SHIFT; | ||
567 | |||
568 | if (irq_stat & PORT_IRQ_COMPLETE) | ||
569 | rc = 0; | ||
570 | else { | ||
571 | /* force port into known state */ | ||
572 | sil24_init_port(ap); | ||
573 | |||
574 | if (irq_stat & PORT_IRQ_ERROR) | ||
575 | rc = -EIO; | ||
576 | else | ||
577 | rc = -EBUSY; | ||
578 | } | ||
579 | |||
580 | /* restore IRQ enabled */ | ||
581 | writel(irq_enabled, port + PORT_IRQ_ENABLE_SET); | ||
582 | |||
583 | return rc; | ||
584 | } | ||
585 | |||
586 | static int sil24_do_softreset(struct ata_port *ap, unsigned int *class, | ||
587 | int pmp, unsigned long deadline) | ||
588 | { | ||
589 | unsigned long timeout_msec = 0; | ||
590 | struct ata_taskfile tf; | ||
542 | const char *reason; | 591 | const char *reason; |
592 | int rc; | ||
543 | 593 | ||
544 | DPRINTK("ENTER\n"); | 594 | DPRINTK("ENTER\n"); |
545 | 595 | ||
@@ -556,29 +606,22 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class, | |||
556 | } | 606 | } |
557 | 607 | ||
558 | /* do SRST */ | 608 | /* do SRST */ |
559 | prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); | 609 | if (time_after(deadline, jiffies)) |
560 | prb->fis[1] = 0; /* no PMP yet */ | 610 | timeout_msec = jiffies_to_msecs(deadline - jiffies); |
561 | 611 | ||
562 | writel((u32)paddr, port + PORT_CMD_ACTIVATE); | 612 | ata_tf_init(ap->device, &tf); /* doesn't really matter */ |
563 | writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); | 613 | rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, |
564 | 614 | timeout_msec); | |
565 | mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; | 615 | if (rc == -EBUSY) { |
566 | irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0, | 616 | reason = "timeout"; |
567 | 100, jiffies_to_msecs(deadline - jiffies)); | 617 | goto err; |
568 | 618 | } else if (rc) { | |
569 | writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */ | 619 | reason = "SRST command error"; |
570 | irq_stat >>= PORT_IRQ_RAW_SHIFT; | ||
571 | |||
572 | if (!(irq_stat & PORT_IRQ_COMPLETE)) { | ||
573 | if (irq_stat & PORT_IRQ_ERROR) | ||
574 | reason = "SRST command error"; | ||
575 | else | ||
576 | reason = "timeout"; | ||
577 | goto err; | 620 | goto err; |
578 | } | 621 | } |
579 | 622 | ||
580 | sil24_update_tf(ap); | 623 | sil24_read_tf(ap, 0, &tf); |
581 | *class = ata_dev_classify(&pp->tf); | 624 | *class = ata_dev_classify(&tf); |
582 | 625 | ||
583 | if (*class == ATA_DEV_UNKNOWN) | 626 | if (*class == ATA_DEV_UNKNOWN) |
584 | *class = ATA_DEV_NONE; | 627 | *class = ATA_DEV_NONE; |
@@ -592,6 +635,12 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class, | |||
592 | return -EIO; | 635 | return -EIO; |
593 | } | 636 | } |
594 | 637 | ||
638 | static int sil24_softreset(struct ata_port *ap, unsigned int *class, | ||
639 | unsigned long deadline) | ||
640 | { | ||
641 | return sil24_do_softreset(ap, class, 0, deadline); | ||
642 | } | ||
643 | |||
595 | static int sil24_hardreset(struct ata_port *ap, unsigned int *class, | 644 | static int sil24_hardreset(struct ata_port *ap, unsigned int *class, |
596 | unsigned long deadline) | 645 | unsigned long deadline) |
597 | { | 646 | { |
@@ -699,7 +748,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) | |||
699 | } | 748 | } |
700 | 749 | ||
701 | prb->ctrl = cpu_to_le16(ctrl); | 750 | prb->ctrl = cpu_to_le16(ctrl); |
702 | ata_tf_to_fis(&qc->tf, prb->fis, 0); | 751 | ata_tf_to_fis(&qc->tf, 0, 1, prb->fis); |
703 | 752 | ||
704 | if (qc->flags & ATA_QCFLAG_DMAMAP) | 753 | if (qc->flags & ATA_QCFLAG_DMAMAP) |
705 | sil24_fill_sg(qc, sge); | 754 | sil24_fill_sg(qc, sge); |
@@ -754,6 +803,7 @@ static void sil24_thaw(struct ata_port *ap) | |||
754 | static void sil24_error_intr(struct ata_port *ap) | 803 | static void sil24_error_intr(struct ata_port *ap) |
755 | { | 804 | { |
756 | void __iomem *port = ap->ioaddr.cmd_addr; | 805 | void __iomem *port = ap->ioaddr.cmd_addr; |
806 | struct sil24_port_priv *pp = ap->private_data; | ||
757 | struct ata_eh_info *ehi = &ap->eh_info; | 807 | struct ata_eh_info *ehi = &ap->eh_info; |
758 | int freeze = 0; | 808 | int freeze = 0; |
759 | u32 irq_stat; | 809 | u32 irq_stat; |
@@ -769,16 +819,16 @@ static void sil24_error_intr(struct ata_port *ap) | |||
769 | 819 | ||
770 | if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { | 820 | if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { |
771 | ata_ehi_hotplugged(ehi); | 821 | ata_ehi_hotplugged(ehi); |
772 | ata_ehi_push_desc(ehi, ", %s", | 822 | ata_ehi_push_desc(ehi, "%s", |
773 | irq_stat & PORT_IRQ_PHYRDY_CHG ? | 823 | irq_stat & PORT_IRQ_PHYRDY_CHG ? |
774 | "PHY RDY changed" : "device exchanged"); | 824 | "PHY RDY changed" : "device exchanged"); |
775 | freeze = 1; | 825 | freeze = 1; |
776 | } | 826 | } |
777 | 827 | ||
778 | if (irq_stat & PORT_IRQ_UNK_FIS) { | 828 | if (irq_stat & PORT_IRQ_UNK_FIS) { |
779 | ehi->err_mask |= AC_ERR_HSM; | 829 | ehi->err_mask |= AC_ERR_HSM; |
780 | ehi->action |= ATA_EH_SOFTRESET; | 830 | ehi->action |= ATA_EH_SOFTRESET; |
781 | ata_ehi_push_desc(ehi , ", unknown FIS"); | 831 | ata_ehi_push_desc(ehi, "unknown FIS"); |
782 | freeze = 1; | 832 | freeze = 1; |
783 | } | 833 | } |
784 | 834 | ||
@@ -797,18 +847,18 @@ static void sil24_error_intr(struct ata_port *ap) | |||
797 | if (ci && ci->desc) { | 847 | if (ci && ci->desc) { |
798 | err_mask |= ci->err_mask; | 848 | err_mask |= ci->err_mask; |
799 | action |= ci->action; | 849 | action |= ci->action; |
800 | ata_ehi_push_desc(ehi, ", %s", ci->desc); | 850 | ata_ehi_push_desc(ehi, "%s", ci->desc); |
801 | } else { | 851 | } else { |
802 | err_mask |= AC_ERR_OTHER; | 852 | err_mask |= AC_ERR_OTHER; |
803 | action |= ATA_EH_SOFTRESET; | 853 | action |= ATA_EH_SOFTRESET; |
804 | ata_ehi_push_desc(ehi, ", unknown command error %d", | 854 | ata_ehi_push_desc(ehi, "unknown command error %d", |
805 | cerr); | 855 | cerr); |
806 | } | 856 | } |
807 | 857 | ||
808 | /* record error info */ | 858 | /* record error info */ |
809 | qc = ata_qc_from_tag(ap, ap->active_tag); | 859 | qc = ata_qc_from_tag(ap, ap->active_tag); |
810 | if (qc) { | 860 | if (qc) { |
811 | sil24_update_tf(ap); | 861 | sil24_read_tf(ap, qc->tag, &pp->tf); |
812 | qc->err_mask |= err_mask; | 862 | qc->err_mask |= err_mask; |
813 | } else | 863 | } else |
814 | ehi->err_mask |= err_mask; | 864 | ehi->err_mask |= err_mask; |
@@ -825,8 +875,11 @@ static void sil24_error_intr(struct ata_port *ap) | |||
825 | 875 | ||
826 | static void sil24_finish_qc(struct ata_queued_cmd *qc) | 876 | static void sil24_finish_qc(struct ata_queued_cmd *qc) |
827 | { | 877 | { |
878 | struct ata_port *ap = qc->ap; | ||
879 | struct sil24_port_priv *pp = ap->private_data; | ||
880 | |||
828 | if (qc->flags & ATA_QCFLAG_RESULT_TF) | 881 | if (qc->flags & ATA_QCFLAG_RESULT_TF) |
829 | sil24_update_tf(qc->ap); | 882 | sil24_read_tf(ap, qc->tag, &pp->tf); |
830 | } | 883 | } |
831 | 884 | ||
832 | static inline void sil24_host_intr(struct ata_port *ap) | 885 | static inline void sil24_host_intr(struct ata_port *ap) |
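
The sata_sil24 hunks above track three interface changes. ata_tf_to_fis() now takes two extra scalars ahead of the output buffer (by all appearances the port-multiplier port and a command-vs-control flag, so the assumed prototype below is ata_tf_to_fis(tf, pmp, is_cmd, fis)); ata_ehi_push_desc() callers drop the leading ", ", apparently because the EH core now inserts the separator itself; and result taskfiles are fetched per command tag via sil24_read_tf() instead of a single sil24_update_tf(). A minimal, hedged sketch of the new FIS-building call; the helper name and buffer handling are illustrative only:

#include <linux/libata.h>

/* Build a Register - Host to Device FIS for a queued command. */
static void example_build_cmd_fis(struct ata_queued_cmd *qc, u8 *fis)
{
    /* Assumed argument meaning: pmp = 0 (no port multiplier),
     * is_cmd = 1 (set the C bit: command FIS, not device control). */
    ata_tf_to_fis(&qc->tf, 0, 1, fis);
}
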
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index 33716b00c6b7..31a2f55aae66 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
@@ -64,8 +64,8 @@ enum { | |||
64 | }; | 64 | }; |
65 | 65 | ||
66 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 66 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
67 | static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg); | 67 | static int sis_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); |
68 | static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 68 | static int sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); |
69 | 69 | ||
70 | static const struct pci_device_id sis_pci_tbl[] = { | 70 | static const struct pci_device_id sis_pci_tbl[] = { |
71 | { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ | 71 | { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ |
@@ -207,36 +207,37 @@ static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val | |||
207 | pci_write_config_dword(pdev, cfg_addr+0x10, val); | 207 | pci_write_config_dword(pdev, cfg_addr+0x10, val); |
208 | } | 208 | } |
209 | 209 | ||
210 | static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg) | 210 | static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
211 | { | 211 | { |
212 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 212 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
213 | u32 val, val2 = 0; | ||
214 | u8 pmr; | 213 | u8 pmr; |
215 | 214 | ||
216 | if (sc_reg > SCR_CONTROL) | 215 | if (sc_reg > SCR_CONTROL) |
217 | return 0xffffffffU; | 216 | return -EINVAL; |
218 | 217 | ||
219 | if (ap->flags & SIS_FLAG_CFGSCR) | 218 | if (ap->flags & SIS_FLAG_CFGSCR) |
220 | return sis_scr_cfg_read(ap, sc_reg); | 219 | return sis_scr_cfg_read(ap, sc_reg); |
221 | 220 | ||
222 | pci_read_config_byte(pdev, SIS_PMR, &pmr); | 221 | pci_read_config_byte(pdev, SIS_PMR, &pmr); |
223 | 222 | ||
224 | val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); | 223 | *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); |
225 | 224 | ||
226 | if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || | 225 | if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || |
227 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) | 226 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) |
228 | val2 = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); | 227 | *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); |
228 | |||
229 | *val &= 0xfffffffb; | ||
229 | 230 | ||
230 | return (val | val2) & 0xfffffffb; | 231 | return 0; |
231 | } | 232 | } |
232 | 233 | ||
233 | static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 234 | static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
234 | { | 235 | { |
235 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 236 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
236 | u8 pmr; | 237 | u8 pmr; |
237 | 238 | ||
238 | if (sc_reg > SCR_CONTROL) | 239 | if (sc_reg > SCR_CONTROL) |
239 | return; | 240 | return -EINVAL; |
240 | 241 | ||
241 | pci_read_config_byte(pdev, SIS_PMR, &pmr); | 242 | pci_read_config_byte(pdev, SIS_PMR, &pmr); |
242 | 243 | ||
@@ -248,6 +249,7 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | |||
248 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) | 249 | (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) |
249 | iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10); | 250 | iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10); |
250 | } | 251 | } |
252 | return 0; | ||
251 | } | 253 | } |
252 | 254 | ||
253 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 255 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) |
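
The sata_sis conversion is the template for the sata_svw, sata_uli, sata_via and sata_vsc hunks that follow: the SCR accessors stop returning 0xffffffff as an in-band error marker and instead return 0 or -EINVAL, handing the register value back through a u32 * argument. Callers can then distinguish "unsupported SCR register" from a legitimate all-ones read. A hedged caller sketch (the link test is illustrative, not code from the patch):

static int example_link_online(struct ata_port *ap)
{
    u32 sstatus;
    int rc;

    rc = sis_scr_read(ap, SCR_STATUS, &sstatus);
    if (rc)
        return rc;                      /* e.g. -EINVAL for an unknown register */

    return (sstatus & 0xf) == 0x3;      /* DET = 3: device present, PHY up */
}
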
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 63fe99afd59f..92e877075037 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -103,20 +103,21 @@ static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) | |||
103 | return 0; | 103 | return 0; |
104 | } | 104 | } |
105 | 105 | ||
106 | static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) | 106 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
107 | { | 107 | { |
108 | if (sc_reg > SCR_CONTROL) | 108 | if (sc_reg > SCR_CONTROL) |
109 | return 0xffffffffU; | 109 | return -EINVAL; |
110 | return readl(ap->ioaddr.scr_addr + (sc_reg * 4)); | 110 | *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); |
111 | return 0; | ||
111 | } | 112 | } |
112 | 113 | ||
113 | 114 | ||
114 | static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, | 115 | static int k2_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
115 | u32 val) | ||
116 | { | 116 | { |
117 | if (sc_reg > SCR_CONTROL) | 117 | if (sc_reg > SCR_CONTROL) |
118 | return; | 118 | return -EINVAL; |
119 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); | 119 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); |
120 | return 0; | ||
120 | } | 121 | } |
121 | 122 | ||
122 | 123 | ||
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index b52f83ab056a..78c28512f01c 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c | |||
@@ -57,8 +57,8 @@ struct uli_priv { | |||
57 | }; | 57 | }; |
58 | 58 | ||
59 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 59 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
60 | static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg); | 60 | static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); |
61 | static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 61 | static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); |
62 | 62 | ||
63 | static const struct pci_device_id uli_pci_tbl[] = { | 63 | static const struct pci_device_id uli_pci_tbl[] = { |
64 | { PCI_VDEVICE(AL, 0x5289), uli_5289 }, | 64 | { PCI_VDEVICE(AL, 0x5289), uli_5289 }, |
@@ -164,20 +164,22 @@ static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) | |||
164 | pci_write_config_dword(pdev, cfg_addr, val); | 164 | pci_write_config_dword(pdev, cfg_addr, val); |
165 | } | 165 | } |
166 | 166 | ||
167 | static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg) | 167 | static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val) |
168 | { | 168 | { |
169 | if (sc_reg > SCR_CONTROL) | 169 | if (sc_reg > SCR_CONTROL) |
170 | return 0xffffffffU; | 170 | return -EINVAL; |
171 | 171 | ||
172 | return uli_scr_cfg_read(ap, sc_reg); | 172 | *val = uli_scr_cfg_read(ap, sc_reg); |
173 | return 0; | ||
173 | } | 174 | } |
174 | 175 | ||
175 | static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 176 | static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) |
176 | { | 177 | { |
177 | if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 | 178 | if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 |
178 | return; | 179 | return -EINVAL; |
179 | 180 | ||
180 | uli_scr_cfg_write(ap, sc_reg, val); | 181 | uli_scr_cfg_write(ap, sc_reg, val); |
182 | return 0; | ||
181 | } | 183 | } |
182 | 184 | ||
183 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 185 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) |
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index c4124475f754..86b7bfc17324 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -72,8 +72,8 @@ enum { | |||
72 | }; | 72 | }; |
73 | 73 | ||
74 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 74 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
75 | static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); | 75 | static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
76 | static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 76 | static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
77 | static void svia_noop_freeze(struct ata_port *ap); | 77 | static void svia_noop_freeze(struct ata_port *ap); |
78 | static void vt6420_error_handler(struct ata_port *ap); | 78 | static void vt6420_error_handler(struct ata_port *ap); |
79 | static int vt6421_pata_cable_detect(struct ata_port *ap); | 79 | static int vt6421_pata_cable_detect(struct ata_port *ap); |
@@ -249,18 +249,20 @@ MODULE_LICENSE("GPL"); | |||
249 | MODULE_DEVICE_TABLE(pci, svia_pci_tbl); | 249 | MODULE_DEVICE_TABLE(pci, svia_pci_tbl); |
250 | MODULE_VERSION(DRV_VERSION); | 250 | MODULE_VERSION(DRV_VERSION); |
251 | 251 | ||
252 | static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg) | 252 | static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
253 | { | 253 | { |
254 | if (sc_reg > SCR_CONTROL) | 254 | if (sc_reg > SCR_CONTROL) |
255 | return 0xffffffffU; | 255 | return -EINVAL; |
256 | return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg)); | 256 | *val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg)); |
257 | return 0; | ||
257 | } | 258 | } |
258 | 259 | ||
259 | static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 260 | static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
260 | { | 261 | { |
261 | if (sc_reg > SCR_CONTROL) | 262 | if (sc_reg > SCR_CONTROL) |
262 | return; | 263 | return -EINVAL; |
263 | iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg)); | 264 | iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg)); |
265 | return 0; | ||
264 | } | 266 | } |
265 | 267 | ||
266 | static void svia_noop_freeze(struct ata_port *ap) | 268 | static void svia_noop_freeze(struct ata_port *ap) |
@@ -305,18 +307,19 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline) | |||
305 | 307 | ||
306 | /* Resume phy. This is the old SATA resume sequence */ | 308 | /* Resume phy. This is the old SATA resume sequence */ |
307 | svia_scr_write(ap, SCR_CONTROL, 0x300); | 309 | svia_scr_write(ap, SCR_CONTROL, 0x300); |
308 | svia_scr_read(ap, SCR_CONTROL); /* flush */ | 310 | svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */ |
309 | 311 | ||
310 | /* wait for phy to become ready, if necessary */ | 312 | /* wait for phy to become ready, if necessary */ |
311 | do { | 313 | do { |
312 | msleep(200); | 314 | msleep(200); |
313 | if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) | 315 | svia_scr_read(ap, SCR_STATUS, &sstatus); |
316 | if ((sstatus & 0xf) != 1) | ||
314 | break; | 317 | break; |
315 | } while (time_before(jiffies, timeout)); | 318 | } while (time_before(jiffies, timeout)); |
316 | 319 | ||
317 | /* open code sata_print_link_status() */ | 320 | /* open code sata_print_link_status() */ |
318 | sstatus = svia_scr_read(ap, SCR_STATUS); | 321 | svia_scr_read(ap, SCR_STATUS, &sstatus); |
319 | scontrol = svia_scr_read(ap, SCR_CONTROL); | 322 | svia_scr_read(ap, SCR_CONTROL, &scontrol); |
320 | 323 | ||
321 | online = (sstatus & 0xf) == 0x3; | 324 | online = (sstatus & 0xf) == 0x3; |
322 | 325 | ||
@@ -325,7 +328,7 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline) | |||
325 | online ? "up" : "down", sstatus, scontrol); | 328 | online ? "up" : "down", sstatus, scontrol); |
326 | 329 | ||
327 | /* SStatus is read one more time */ | 330 | /* SStatus is read one more time */ |
328 | svia_scr_read(ap, SCR_STATUS); | 331 | svia_scr_read(ap, SCR_STATUS, &sstatus); |
329 | 332 | ||
330 | if (!online) { | 333 | if (!online) { |
331 | /* tell EH to bail */ | 334 | /* tell EH to bail */ |
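
On the consumer side, vt6420_prereset() shows the knock-on effect of the new accessor shape: a read issued only to flush a posted write, or one previously used inline in a condition, now needs an explicit u32 to receive the value. A condensed, hedged sketch of the resume-and-poll sequence under the new API (declarations and error checking trimmed):

u32 sstatus, scontrol;

svia_scr_write(ap, SCR_CONTROL, 0x300);     /* resume the phy */
svia_scr_read(ap, SCR_CONTROL, &scontrol);  /* value unused: read back only to flush */

do {
    msleep(200);
    svia_scr_read(ap, SCR_STATUS, &sstatus);
    if ((sstatus & 0xf) != 1)   /* leave the loop once DET is no longer 1 */
        break;
} while (time_before(jiffies, timeout));
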
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 1b5d81faa102..24344d0d0575 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c | |||
@@ -98,20 +98,21 @@ enum { | |||
98 | VSC_SATA_INT_PHY_CHANGE), | 98 | VSC_SATA_INT_PHY_CHANGE), |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) | 101 | static int vsc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
102 | { | 102 | { |
103 | if (sc_reg > SCR_CONTROL) | 103 | if (sc_reg > SCR_CONTROL) |
104 | return 0xffffffffU; | 104 | return -EINVAL; |
105 | return readl(ap->ioaddr.scr_addr + (sc_reg * 4)); | 105 | *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); |
106 | return 0; | ||
106 | } | 107 | } |
107 | 108 | ||
108 | 109 | ||
109 | static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, | 110 | static int vsc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
110 | u32 val) | ||
111 | { | 111 | { |
112 | if (sc_reg > SCR_CONTROL) | 112 | if (sc_reg > SCR_CONTROL) |
113 | return; | 113 | return -EINVAL; |
114 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); | 114 | writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); |
115 | return 0; | ||
115 | } | 116 | } |
116 | 117 | ||
117 | 118 | ||
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 2288b55d916f..d50b82381155 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c | |||
@@ -64,7 +64,6 @@ struct vdc_port { | |||
64 | u64 operations; | 64 | u64 operations; |
65 | u32 vdisk_size; | 65 | u32 vdisk_size; |
66 | u8 vdisk_type; | 66 | u8 vdisk_type; |
67 | u8 dev_no; | ||
68 | 67 | ||
69 | char disk_name[32]; | 68 | char disk_name[32]; |
70 | 69 | ||
@@ -703,7 +702,7 @@ static int probe_disk(struct vdc_port *port) | |||
703 | blk_queue_max_phys_segments(q, port->ring_cookies); | 702 | blk_queue_max_phys_segments(q, port->ring_cookies); |
704 | blk_queue_max_sectors(q, port->max_xfer_size); | 703 | blk_queue_max_sectors(q, port->max_xfer_size); |
705 | g->major = vdc_major; | 704 | g->major = vdc_major; |
706 | g->first_minor = port->dev_no << PARTITION_SHIFT; | 705 | g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT; |
707 | strcpy(g->disk_name, port->disk_name); | 706 | strcpy(g->disk_name, port->disk_name); |
708 | 707 | ||
709 | g->fops = &vdc_fops; | 708 | g->fops = &vdc_fops; |
@@ -747,21 +746,16 @@ static int __devinit vdc_port_probe(struct vio_dev *vdev, | |||
747 | { | 746 | { |
748 | struct mdesc_handle *hp; | 747 | struct mdesc_handle *hp; |
749 | struct vdc_port *port; | 748 | struct vdc_port *port; |
750 | const u64 *port_id; | ||
751 | int err; | 749 | int err; |
752 | 750 | ||
753 | print_version(); | 751 | print_version(); |
754 | 752 | ||
755 | hp = mdesc_grab(); | 753 | hp = mdesc_grab(); |
756 | 754 | ||
757 | port_id = mdesc_get_property(hp, vdev->mp, "id", NULL); | ||
758 | err = -ENODEV; | 755 | err = -ENODEV; |
759 | if (!port_id) { | 756 | if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { |
760 | printk(KERN_ERR PFX "Port lacks id property.\n"); | 757 | printk(KERN_ERR PFX "Port id [%lu] too large.\n", |
761 | goto err_out_release_mdesc; | 758 | vdev->dev_no); |
762 | } | ||
763 | if ((*port_id << PARTITION_SHIFT) & ~(u64)MINORMASK) { | ||
764 | printk(KERN_ERR PFX "Port id [%lu] too large.\n", *port_id); | ||
765 | goto err_out_release_mdesc; | 759 | goto err_out_release_mdesc; |
766 | } | 760 | } |
767 | 761 | ||
@@ -772,16 +766,14 @@ static int __devinit vdc_port_probe(struct vio_dev *vdev, | |||
772 | goto err_out_release_mdesc; | 766 | goto err_out_release_mdesc; |
773 | } | 767 | } |
774 | 768 | ||
775 | port->dev_no = *port_id; | 769 | if (vdev->dev_no >= 26) |
776 | |||
777 | if (port->dev_no >= 26) | ||
778 | snprintf(port->disk_name, sizeof(port->disk_name), | 770 | snprintf(port->disk_name, sizeof(port->disk_name), |
779 | VDCBLK_NAME "%c%c", | 771 | VDCBLK_NAME "%c%c", |
780 | 'a' + (port->dev_no / 26) - 1, | 772 | 'a' + ((int)vdev->dev_no / 26) - 1, |
781 | 'a' + (port->dev_no % 26)); | 773 | 'a' + ((int)vdev->dev_no % 26)); |
782 | else | 774 | else |
783 | snprintf(port->disk_name, sizeof(port->disk_name), | 775 | snprintf(port->disk_name, sizeof(port->disk_name), |
784 | VDCBLK_NAME "%c", 'a' + (port->dev_no % 26)); | 776 | VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26)); |
785 | 777 | ||
786 | err = vio_driver_init(&port->vio, vdev, VDEV_DISK, | 778 | err = vio_driver_init(&port->vio, vdev, VDEV_DISK, |
787 | vdc_versions, ARRAY_SIZE(vdc_versions), | 779 | vdc_versions, ARRAY_SIZE(vdc_versions), |
@@ -849,7 +841,7 @@ static struct vio_device_id vdc_port_match[] = { | |||
849 | }, | 841 | }, |
850 | {}, | 842 | {}, |
851 | }; | 843 | }; |
852 | MODULE_DEVICE_TABLE(vio, vdc_match); | 844 | MODULE_DEVICE_TABLE(vio, vdc_port_match); |
853 | 845 | ||
854 | static struct vio_driver vdc_port_driver = { | 846 | static struct vio_driver vdc_port_driver = { |
855 | .id_table = vdc_port_match, | 847 | .id_table = vdc_port_match, |
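
Two independent fixes sit in the sunvdc hunks. First, the per-port disk number now comes from vdev->dev_no, carried on the vio_dev itself, instead of re-reading the "id" machine-description property into a private copy; the base-26 naming scheme is unchanged. Second, MODULE_DEVICE_TABLE() must name the actual table symbol (vdc_port_match, not vdc_match) for the module alias information to be generated; the sunvnet hunk further down gets the same correction. An illustrative helper re-deriving the naming scheme, assuming VDCBLK_NAME expands to "vdisk": dev_no 0 maps to "vdiska", 25 to "vdiskz", 26 to "vdiskaa", 27 to "vdiskab".

#include <linux/kernel.h>

static void example_vdc_name(char *buf, size_t len, unsigned long dev_no)
{
    if (dev_no >= 26)
        snprintf(buf, len, "vdisk%c%c",
                 'a' + (int)(dev_no / 26) - 1,
                 'a' + (int)(dev_no % 26));
    else
        snprintf(buf, len, "vdisk%c",
                 'a' + (int)(dev_no % 26));
}
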
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 9e8f21410d2d..4373d7cdc5d2 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -185,7 +185,7 @@ config ESPSERIAL | |||
185 | 185 | ||
186 | config MOXA_INTELLIO | 186 | config MOXA_INTELLIO |
187 | tristate "Moxa Intellio support" | 187 | tristate "Moxa Intellio support" |
188 | depends on SERIAL_NONSTANDARD | 188 | depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI) |
189 | help | 189 | help |
190 | Say Y here if you have a Moxa Intellio multiport serial card. | 190 | Say Y here if you have a Moxa Intellio multiport serial card. |
191 | 191 | ||
@@ -241,7 +241,7 @@ config SYNCLINK | |||
241 | 241 | ||
242 | config SYNCLINKMP | 242 | config SYNCLINKMP |
243 | tristate "SyncLink Multiport support" | 243 | tristate "SyncLink Multiport support" |
244 | depends on SERIAL_NONSTANDARD | 244 | depends on SERIAL_NONSTANDARD && PCI |
245 | help | 245 | help |
246 | Enable support for the SyncLink Multiport (2 or 4 ports) | 246 | Enable support for the SyncLink Multiport (2 or 4 ports) |
247 | serial adapter, running asynchronous and HDLC communications up | 247 | serial adapter, running asynchronous and HDLC communications up |
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index c585b4738f86..f1497cecffd8 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
@@ -2573,16 +2573,10 @@ static struct tty_driver *serial167_console_device(struct console *c, | |||
2573 | return cy_serial_driver; | 2573 | return cy_serial_driver; |
2574 | } | 2574 | } |
2575 | 2575 | ||
2576 | static int __init serial167_console_setup(struct console *co, char *options) | ||
2577 | { | ||
2578 | return 0; | ||
2579 | } | ||
2580 | |||
2581 | static struct console sercons = { | 2576 | static struct console sercons = { |
2582 | .name = "ttyS", | 2577 | .name = "ttyS", |
2583 | .write = serial167_console_write, | 2578 | .write = serial167_console_write, |
2584 | .device = serial167_console_device, | 2579 | .device = serial167_console_device, |
2585 | .setup = serial167_console_setup, | ||
2586 | .flags = CON_PRINTBUFFER, | 2580 | .flags = CON_PRINTBUFFER, |
2587 | .index = -1, | 2581 | .index = -1, |
2588 | }; | 2582 | }; |
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c index 4eba32b23b29..4b26ce48189b 100644 --- a/drivers/char/tpm/tpm_bios.c +++ b/drivers/char/tpm/tpm_bios.c | |||
@@ -427,7 +427,7 @@ static int tpm_ascii_bios_measurements_open(struct inode *inode, | |||
427 | return -ENOMEM; | 427 | return -ENOMEM; |
428 | 428 | ||
429 | if ((err = read_log(log))) | 429 | if ((err = read_log(log))) |
430 | return err; | 430 | goto out_free; |
431 | 431 | ||
432 | /* now register seq file */ | 432 | /* now register seq file */ |
433 | err = seq_open(file, &tpm_ascii_b_measurments_seqops); | 433 | err = seq_open(file, &tpm_ascii_b_measurments_seqops); |
@@ -435,10 +435,15 @@ static int tpm_ascii_bios_measurements_open(struct inode *inode, | |||
435 | seq = file->private_data; | 435 | seq = file->private_data; |
436 | seq->private = log; | 436 | seq->private = log; |
437 | } else { | 437 | } else { |
438 | kfree(log->bios_event_log); | 438 | goto out_free; |
439 | kfree(log); | ||
440 | } | 439 | } |
440 | |||
441 | out: | ||
441 | return err; | 442 | return err; |
443 | out_free: | ||
444 | kfree(log->bios_event_log); | ||
445 | kfree(log); | ||
446 | goto out; | ||
442 | } | 447 | } |
443 | 448 | ||
444 | const struct file_operations tpm_ascii_bios_measurements_ops = { | 449 | const struct file_operations tpm_ascii_bios_measurements_ops = { |
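
The tpm_bios change plugs a leak: when read_log() failed, the open routine returned without freeing the log structure it had just allocated, while the seq_open() failure path freed it inline. Both failures now funnel through one cleanup label. A simplified single-exit sketch of the resulting shape; the allocation call and type names are inferred from context rather than quoted from the file:

static int example_measurements_open(struct inode *inode, struct file *file)
{
    struct tpm_bios_log *log;
    int err;

    log = kzalloc(sizeof(*log), GFP_KERNEL);
    if (!log)
        return -ENOMEM;

    err = read_log(log);
    if (err)
        goto out_free;

    err = seq_open(file, &tpm_ascii_b_measurments_seqops);
    if (err)
        goto out_free;

    ((struct seq_file *)file->private_data)->private = log;
    return 0;

out_free:
    kfree(log->bios_event_log);   /* may have been allocated by read_log() */
    kfree(log);
    return err;
}
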
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c index bef6d886d4fb..e122a0e87bb0 100644 --- a/drivers/char/vme_scc.c +++ b/drivers/char/vme_scc.c | |||
@@ -1013,18 +1013,10 @@ static struct tty_driver *scc_console_device(struct console *c, int *index) | |||
1013 | return scc_driver; | 1013 | return scc_driver; |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | |||
1017 | static int __init scc_console_setup(struct console *co, char *options) | ||
1018 | { | ||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | |||
1023 | static struct console sercons = { | 1016 | static struct console sercons = { |
1024 | .name = "ttyS", | 1017 | .name = "ttyS", |
1025 | .write = scc_console_write, | 1018 | .write = scc_console_write, |
1026 | .device = scc_console_device, | 1019 | .device = scc_console_device, |
1027 | .setup = scc_console_setup, | ||
1028 | .flags = CON_PRINTBUFFER, | 1020 | .flags = CON_PRINTBUFFER, |
1029 | .index = -1, | 1021 | .index = -1, |
1030 | }; | 1022 | }; |
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index e1e9d9d6893f..f0829b83e970 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * more details. | 8 | * more details. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | 12 | #include <linux/types.h> |
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
13 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
@@ -54,6 +55,7 @@ static int falconide_offsets[IDE_NR_PORTS] __initdata = { | |||
54 | */ | 55 | */ |
55 | 56 | ||
56 | int falconide_intr_lock; | 57 | int falconide_intr_lock; |
58 | EXPORT_SYMBOL(falconide_intr_lock); | ||
57 | 59 | ||
58 | 60 | ||
59 | /* | 61 | /* |
diff --git a/drivers/lguest/io.c b/drivers/lguest/io.c index 06bdba2337ef..c8eb79266991 100644 --- a/drivers/lguest/io.c +++ b/drivers/lguest/io.c | |||
@@ -187,7 +187,7 @@ static u32 copy_data(struct lguest *srclg, | |||
187 | /* FIXME: This is not completely portable, since | 187 | /* FIXME: This is not completely portable, since |
188 | archs do different things for copy_to_user_page. */ | 188 | archs do different things for copy_to_user_page. */ |
189 | if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE, | 189 | if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE, |
190 | (void *__user)src->addr[si], len) != 0) { | 190 | (void __user *)src->addr[si], len) != 0) { |
191 | kill_guest(srclg, "bad address in sending DMA"); | 191 | kill_guest(srclg, "bad address in sending DMA"); |
192 | totlen = 0; | 192 | totlen = 0; |
193 | break; | 193 | break; |
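
The lguest change is purely a sparse-annotation fix: __user qualifies the address space of the data being pointed at, so it belongs inside the pointer type, not after the *. A hedged illustration (the helper and its argument are hypothetical):

#include <linux/compiler.h>

static const void __user *example_user_ptr(unsigned long guest_addr)
{
    /* Correct: "pointer to user-space const void". */
    return (const void __user *)guest_addr;
    /* "(const void *__user)guest_addr" would qualify the pointer variable
     * itself instead, which is what made sparse complain about the
     * copy_from_user() call. */
}
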
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c8dfdb302916..d90ee145effe 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -493,12 +493,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, | |||
493 | if (frombio) | 493 | if (frombio) |
494 | tx = async_memcpy(page, bio_page, page_offset, | 494 | tx = async_memcpy(page, bio_page, page_offset, |
495 | b_offset, clen, | 495 | b_offset, clen, |
496 | ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_SRC, | 496 | ASYNC_TX_DEP_ACK, |
497 | tx, NULL, NULL); | 497 | tx, NULL, NULL); |
498 | else | 498 | else |
499 | tx = async_memcpy(bio_page, page, b_offset, | 499 | tx = async_memcpy(bio_page, page, b_offset, |
500 | page_offset, clen, | 500 | page_offset, clen, |
501 | ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_DST, | 501 | ASYNC_TX_DEP_ACK, |
502 | tx, NULL, NULL); | 502 | tx, NULL, NULL); |
503 | } | 503 | } |
504 | if (clen < len) /* hit end of page */ | 504 | if (clen < len) /* hit end of page */ |
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 26a3b45a4a34..62c1c6262feb 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c | |||
@@ -608,7 +608,7 @@ module_param(debug, int, 0); | |||
608 | MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)"); | 608 | MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)"); |
609 | MODULE_LICENSE("GPL"); | 609 | MODULE_LICENSE("GPL"); |
610 | 610 | ||
611 | int | 611 | int __init |
612 | init_module(void) | 612 | init_module(void) |
613 | { | 613 | { |
614 | net_debug = debug; | 614 | net_debug = debug; |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index a2f32151559e..13f08a390e1f 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -692,6 +692,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
692 | { | 692 | { |
693 | struct sky2_port *sky2 = netdev_priv(hw->dev[port]); | 693 | struct sky2_port *sky2 = netdev_priv(hw->dev[port]); |
694 | u16 reg; | 694 | u16 reg; |
695 | u32 rx_reg; | ||
695 | int i; | 696 | int i; |
696 | const u8 *addr = hw->dev[port]->dev_addr; | 697 | const u8 *addr = hw->dev[port]->dev_addr; |
697 | 698 | ||
@@ -768,11 +769,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
768 | 769 | ||
769 | /* Configure Rx MAC FIFO */ | 770 | /* Configure Rx MAC FIFO */ |
770 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); | 771 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); |
771 | reg = GMF_OPER_ON | GMF_RX_F_FL_ON; | 772 | rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; |
772 | if (hw->chip_id == CHIP_ID_YUKON_EX) | 773 | if (hw->chip_id == CHIP_ID_YUKON_EX) |
773 | reg |= GMF_RX_OVER_ON; | 774 | rx_reg |= GMF_RX_OVER_ON; |
774 | 775 | ||
775 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), reg); | 776 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); |
776 | 777 | ||
777 | /* Flush Rx MAC FIFO on any flow control or error */ | 778 | /* Flush Rx MAC FIFO on any flow control or error */ |
778 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); | 779 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); |
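
The sky2 fix widens the temporary used for the Rx MAC FIFO control word: the value is written with sky2_write32(), and on Yukon-EX it also gains GMF_RX_OVER_ON, which (as the switch to a 32-bit local implies) does not fit in the low 16 bits, so accumulating it in the old u16 reg silently dropped the flag. The hazard in miniature, with a made-up bit position standing in for the real one:

#include <linux/types.h>

static void example_truncation(void)
{
    u32 flag_hi = 1u << 20;           /* stand-in for a control bit above bit 15 */
    u16 narrow  = 0x0008 | flag_hi;   /* truncated: narrow == 0x0008, flag lost */
    u32 wide    = 0x0008 | flag_hi;   /* preserved: wide == 0x00100008 */

    (void)narrow;
    (void)wide;
}
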
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index b801e3b3a11a..ef0066bab2cf 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c | |||
@@ -1136,7 +1136,7 @@ static struct vio_device_id vnet_port_match[] = { | |||
1136 | }, | 1136 | }, |
1137 | {}, | 1137 | {}, |
1138 | }; | 1138 | }; |
1139 | MODULE_DEVICE_TABLE(vio, vnet_match); | 1139 | MODULE_DEVICE_TABLE(vio, vnet_port_match); |
1140 | 1140 | ||
1141 | static struct vio_driver vnet_port_driver = { | 1141 | static struct vio_driver vnet_port_driver = { |
1142 | .id_table = vnet_port_match, | 1142 | .id_table = vnet_port_match, |
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 09c93ff932b1..d449b150930e 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig | |||
@@ -35,7 +35,7 @@ if PARPORT | |||
35 | 35 | ||
36 | config PARPORT_PC | 36 | config PARPORT_PC |
37 | tristate "PC-style hardware" | 37 | tristate "PC-style hardware" |
38 | depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV | 38 | depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && (!M68K || ISA) |
39 | ---help--- | 39 | ---help--- |
40 | You should say Y here if you have a PC-style parallel port. All | 40 | You should say Y here if you have a PC-style parallel port. All |
41 | IBM PC compatible computers and some Alphas have PC-style | 41 | IBM PC compatible computers and some Alphas have PC-style |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 372723161c97..a947257b8964 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -483,7 +483,7 @@ source "drivers/scsi/aic94xx/Kconfig" | |||
483 | # All the I2O code and drivers do not seem to be 64bit safe. | 483 | # All the I2O code and drivers do not seem to be 64bit safe. |
484 | config SCSI_DPT_I2O | 484 | config SCSI_DPT_I2O |
485 | tristate "Adaptec I2O RAID support " | 485 | tristate "Adaptec I2O RAID support " |
486 | depends on !64BIT && SCSI && PCI | 486 | depends on !64BIT && SCSI && PCI && VIRT_TO_BUS |
487 | help | 487 | help |
488 | This driver supports all of Adaptec's I2O based RAID controllers as | 488 | This driver supports all of Adaptec's I2O based RAID controllers as |
489 | well as the DPT SmartRaid V cards. This is an Adaptec maintained | 489 | well as the DPT SmartRaid V cards. This is an Adaptec maintained |
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c index 773d11dd9953..79b4df158140 100644 --- a/drivers/scsi/NCR53C9x.c +++ b/drivers/scsi/NCR53C9x.c | |||
@@ -95,6 +95,8 @@ enum { | |||
95 | /* The master ring of all esp hosts we are managing in this driver. */ | 95 | /* The master ring of all esp hosts we are managing in this driver. */ |
96 | static struct NCR_ESP *espchain; | 96 | static struct NCR_ESP *espchain; |
97 | int nesps = 0, esps_in_use = 0, esps_running = 0; | 97 | int nesps = 0, esps_in_use = 0, esps_running = 0; |
98 | EXPORT_SYMBOL(nesps); | ||
99 | EXPORT_SYMBOL(esps_running); | ||
98 | 100 | ||
99 | irqreturn_t esp_intr(int irq, void *dev_id); | 101 | irqreturn_t esp_intr(int irq, void *dev_id); |
100 | 102 | ||
@@ -524,6 +526,7 @@ void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs) | |||
524 | /* Eat any bitrot in the chip and we are done... */ | 526 | /* Eat any bitrot in the chip and we are done... */ |
525 | trash = esp_read(eregs->esp_intrpt); | 527 | trash = esp_read(eregs->esp_intrpt); |
526 | } | 528 | } |
529 | EXPORT_SYMBOL(esp_bootup_reset); | ||
527 | 530 | ||
528 | /* Allocate structure and insert basic data such as SCSI chip frequency | 531 | /* Allocate structure and insert basic data such as SCSI chip frequency |
529 | * data and a pointer to the device | 532 | * data and a pointer to the device |
@@ -772,6 +775,7 @@ const char *esp_info(struct Scsi_Host *host) | |||
772 | panic("Bogon ESP revision"); | 775 | panic("Bogon ESP revision"); |
773 | }; | 776 | }; |
774 | } | 777 | } |
778 | EXPORT_SYMBOL(esp_info); | ||
775 | 779 | ||
776 | /* From Wolfgang Stanglmeier's NCR scsi driver. */ | 780 | /* From Wolfgang Stanglmeier's NCR scsi driver. */ |
777 | struct info_str | 781 | struct info_str |
@@ -902,6 +906,7 @@ int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t off | |||
902 | *start = buffer; | 906 | *start = buffer; |
903 | return esp_host_info(esp, buffer, offset, length); | 907 | return esp_host_info(esp, buffer, offset, length); |
904 | } | 908 | } |
909 | EXPORT_SYMBOL(esp_proc_info); | ||
905 | 910 | ||
906 | static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp) | 911 | static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp) |
907 | { | 912 | { |
@@ -3535,6 +3540,7 @@ state_machine: | |||
3535 | if(esp->dma_irq_exit) | 3540 | if(esp->dma_irq_exit) |
3536 | esp->dma_irq_exit(esp); | 3541 | esp->dma_irq_exit(esp); |
3537 | } | 3542 | } |
3543 | EXPORT_SYMBOL(esp_handle); | ||
3538 | 3544 | ||
3539 | #ifndef CONFIG_SMP | 3545 | #ifndef CONFIG_SMP |
3540 | irqreturn_t esp_intr(int irq, void *dev_id) | 3546 | irqreturn_t esp_intr(int irq, void *dev_id) |
@@ -3631,6 +3637,7 @@ void esp_release(void) | |||
3631 | esps_in_use--; | 3637 | esps_in_use--; |
3632 | esps_running = esps_in_use; | 3638 | esps_running = esps_in_use; |
3633 | } | 3639 | } |
3640 | EXPORT_SYMBOL(esp_release); | ||
3634 | #endif | 3641 | #endif |
3635 | 3642 | ||
3636 | EXPORT_SYMBOL(esp_abort); | 3643 | EXPORT_SYMBOL(esp_abort); |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index aebcd5fcdc55..7829ab1e2fb4 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -1885,7 +1885,7 @@ static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, | |||
1885 | struct sockaddr_in *sin; | 1885 | struct sockaddr_in *sin; |
1886 | int rc = 0, len; | 1886 | int rc = 0, len; |
1887 | 1887 | ||
1888 | addr = kmalloc(GFP_KERNEL, sizeof(*addr)); | 1888 | addr = kmalloc(sizeof(*addr), GFP_KERNEL); |
1889 | if (!addr) | 1889 | if (!addr) |
1890 | return -ENOMEM; | 1890 | return -ENOMEM; |
1891 | 1891 | ||
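
The iscsi_tcp one-liner fixes swapped kmalloc() arguments. The signature is kmalloc(size, gfp_flags), so the old call requested a buffer of GFP_KERNEL bytes (a small bitmask constant) and passed the structure size as allocation flags, which may even appear to work by accident. The corrected shape, as in the hunk:

addr = kmalloc(sizeof(*addr), GFP_KERNEL);   /* size first, flags second */
if (!addr)
    return -ENOMEM;

kzalloc() takes the same (size, flags) ordering, so the idiom carries over unchanged.
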
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 1bc884051e0f..02c52f8d5dbf 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
@@ -456,7 +456,7 @@ static int cxacru_start_wait_urb(struct urb *urb, struct completion *done, | |||
456 | int* actual_length) | 456 | int* actual_length) |
457 | { | 457 | { |
458 | struct timer_list timer; | 458 | struct timer_list timer; |
459 | int status; | 459 | int status = urb->status; |
460 | 460 | ||
461 | init_timer(&timer); | 461 | init_timer(&timer); |
462 | timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT); | 462 | timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT); |
@@ -464,7 +464,6 @@ static int cxacru_start_wait_urb(struct urb *urb, struct completion *done, | |||
464 | timer.function = cxacru_timeout_kill; | 464 | timer.function = cxacru_timeout_kill; |
465 | add_timer(&timer); | 465 | add_timer(&timer); |
466 | wait_for_completion(done); | 466 | wait_for_completion(done); |
467 | status = urb->status; | ||
468 | del_timer_sync(&timer); | 467 | del_timer_sync(&timer); |
469 | 468 | ||
470 | if (actual_length) | 469 | if (actual_length) |
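
This cxacru hunk opens a theme continued in the speedtch, ueagle-atm, usbatm, cdc-acm and usblp hunks below: URB handlers read urb->status once into a local and use only that copy afterwards, the usual motivation being that a recycled or resubmitted URB cannot then change the value under the handler's feet, and a separate variable (retval in cdc-acm) holds the usb_submit_urb() return code so the two meanings stop sharing one name. A generic, hypothetical completion handler showing the pattern:

#include <linux/usb.h>

static void example_complete(struct urb *urb)
{
    int status = urb->status;   /* snapshot once, use only the local below */

    if (status) {
        dev_dbg(&urb->dev->dev, "urb failed: %d\n", status);
        return;
    }

    /* ... process urb->transfer_buffer / urb->actual_length ... */
}
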
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c index 638b8009b3bc..eb0615abff68 100644 --- a/drivers/usb/atm/speedtch.c +++ b/drivers/usb/atm/speedtch.c | |||
@@ -612,7 +612,8 @@ static void speedtch_handle_int(struct urb *int_urb) | |||
612 | struct speedtch_instance_data *instance = int_urb->context; | 612 | struct speedtch_instance_data *instance = int_urb->context; |
613 | struct usbatm_data *usbatm = instance->usbatm; | 613 | struct usbatm_data *usbatm = instance->usbatm; |
614 | unsigned int count = int_urb->actual_length; | 614 | unsigned int count = int_urb->actual_length; |
615 | int ret = int_urb->status; | 615 | int status = int_urb->status; |
616 | int ret; | ||
616 | 617 | ||
617 | /* The magic interrupt for "up state" */ | 618 | /* The magic interrupt for "up state" */ |
618 | static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 }; | 619 | static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 }; |
@@ -621,8 +622,8 @@ static void speedtch_handle_int(struct urb *int_urb) | |||
621 | 622 | ||
622 | atm_dbg(usbatm, "%s entered\n", __func__); | 623 | atm_dbg(usbatm, "%s entered\n", __func__); |
623 | 624 | ||
624 | if (ret < 0) { | 625 | if (status < 0) { |
625 | atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, ret); | 626 | atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status); |
626 | goto fail; | 627 | goto fail; |
627 | } | 628 | } |
628 | 629 | ||
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 8f046659b4e9..a1a1c9d467e0 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -1308,11 +1308,13 @@ static void uea_intr(struct urb *urb) | |||
1308 | { | 1308 | { |
1309 | struct uea_softc *sc = urb->context; | 1309 | struct uea_softc *sc = urb->context; |
1310 | struct intr_pkt *intr = urb->transfer_buffer; | 1310 | struct intr_pkt *intr = urb->transfer_buffer; |
1311 | int status = urb->status; | ||
1312 | |||
1311 | uea_enters(INS_TO_USBDEV(sc)); | 1313 | uea_enters(INS_TO_USBDEV(sc)); |
1312 | 1314 | ||
1313 | if (unlikely(urb->status < 0)) { | 1315 | if (unlikely(status < 0)) { |
1314 | uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n", | 1316 | uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n", |
1315 | urb->status); | 1317 | status); |
1316 | return; | 1318 | return; |
1317 | } | 1319 | } |
1318 | 1320 | ||
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 11e9b15ca45a..e717f5b1caee 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c | |||
@@ -257,9 +257,10 @@ static void usbatm_complete(struct urb *urb) | |||
257 | { | 257 | { |
258 | struct usbatm_channel *channel = urb->context; | 258 | struct usbatm_channel *channel = urb->context; |
259 | unsigned long flags; | 259 | unsigned long flags; |
260 | int status = urb->status; | ||
260 | 261 | ||
261 | vdbg("%s: urb 0x%p, status %d, actual_length %d", | 262 | vdbg("%s: urb 0x%p, status %d, actual_length %d", |
262 | __func__, urb, urb->status, urb->actual_length); | 263 | __func__, urb, status, urb->actual_length); |
263 | 264 | ||
264 | /* usually in_interrupt(), but not always */ | 265 | /* usually in_interrupt(), but not always */ |
265 | spin_lock_irqsave(&channel->lock, flags); | 266 | spin_lock_irqsave(&channel->lock, flags); |
@@ -269,16 +270,16 @@ static void usbatm_complete(struct urb *urb) | |||
269 | 270 | ||
270 | spin_unlock_irqrestore(&channel->lock, flags); | 271 | spin_unlock_irqrestore(&channel->lock, flags); |
271 | 272 | ||
272 | if (unlikely(urb->status) && | 273 | if (unlikely(status) && |
273 | (!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) || | 274 | (!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) || |
274 | urb->status != -EILSEQ )) | 275 | status != -EILSEQ )) |
275 | { | 276 | { |
276 | if (urb->status == -ESHUTDOWN) | 277 | if (status == -ESHUTDOWN) |
277 | return; | 278 | return; |
278 | 279 | ||
279 | if (printk_ratelimit()) | 280 | if (printk_ratelimit()) |
280 | atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n", | 281 | atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n", |
281 | __func__, urb, urb->status); | 282 | __func__, urb, status); |
282 | /* throttle processing in case of an error */ | 283 | /* throttle processing in case of an error */ |
283 | mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS)); | 284 | mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS)); |
284 | } else | 285 | } else |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index cd51520c7e72..fe940e0536e0 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -257,9 +257,10 @@ static void acm_ctrl_irq(struct urb *urb) | |||
257 | struct usb_cdc_notification *dr = urb->transfer_buffer; | 257 | struct usb_cdc_notification *dr = urb->transfer_buffer; |
258 | unsigned char *data; | 258 | unsigned char *data; |
259 | int newctrl; | 259 | int newctrl; |
260 | int status; | 260 | int retval; |
261 | int status = urb->status; | ||
261 | 262 | ||
262 | switch (urb->status) { | 263 | switch (status) { |
263 | case 0: | 264 | case 0: |
264 | /* success */ | 265 | /* success */ |
265 | break; | 266 | break; |
@@ -267,10 +268,10 @@ static void acm_ctrl_irq(struct urb *urb) | |||
267 | case -ENOENT: | 268 | case -ENOENT: |
268 | case -ESHUTDOWN: | 269 | case -ESHUTDOWN: |
269 | /* this urb is terminated, clean up */ | 270 | /* this urb is terminated, clean up */ |
270 | dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); | 271 | dbg("%s - urb shutting down with status: %d", __FUNCTION__, status); |
271 | return; | 272 | return; |
272 | default: | 273 | default: |
273 | dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); | 274 | dbg("%s - nonzero urb status received: %d", __FUNCTION__, status); |
274 | goto exit; | 275 | goto exit; |
275 | } | 276 | } |
276 | 277 | ||
@@ -311,10 +312,10 @@ static void acm_ctrl_irq(struct urb *urb) | |||
311 | break; | 312 | break; |
312 | } | 313 | } |
313 | exit: | 314 | exit: |
314 | status = usb_submit_urb (urb, GFP_ATOMIC); | 315 | retval = usb_submit_urb (urb, GFP_ATOMIC); |
315 | if (status) | 316 | if (retval) |
316 | err ("%s - usb_submit_urb failed with result %d", | 317 | err ("%s - usb_submit_urb failed with result %d", |
317 | __FUNCTION__, status); | 318 | __FUNCTION__, retval); |
318 | } | 319 | } |
319 | 320 | ||
320 | /* data interface returns incoming bytes, or we got unthrottled */ | 321 | /* data interface returns incoming bytes, or we got unthrottled */ |
@@ -324,7 +325,8 @@ static void acm_read_bulk(struct urb *urb) | |||
324 | struct acm_ru *rcv = urb->context; | 325 | struct acm_ru *rcv = urb->context; |
325 | struct acm *acm = rcv->instance; | 326 | struct acm *acm = rcv->instance; |
326 | int status = urb->status; | 327 | int status = urb->status; |
327 | dbg("Entering acm_read_bulk with status %d", urb->status); | 328 | |
329 | dbg("Entering acm_read_bulk with status %d", status); | ||
328 | 330 | ||
329 | if (!ACM_READY(acm)) | 331 | if (!ACM_READY(acm)) |
330 | return; | 332 | return; |
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 9a1478972bf5..5192cd9356de 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c | |||
@@ -289,16 +289,17 @@ static int proto_bias = -1; | |||
289 | static void usblp_bulk_read(struct urb *urb) | 289 | static void usblp_bulk_read(struct urb *urb) |
290 | { | 290 | { |
291 | struct usblp *usblp = urb->context; | 291 | struct usblp *usblp = urb->context; |
292 | int status = urb->status; | ||
292 | 293 | ||
293 | if (usblp->present && usblp->used) { | 294 | if (usblp->present && usblp->used) { |
294 | if (urb->status) | 295 | if (status) |
295 | printk(KERN_WARNING "usblp%d: " | 296 | printk(KERN_WARNING "usblp%d: " |
296 | "nonzero read bulk status received: %d\n", | 297 | "nonzero read bulk status received: %d\n", |
297 | usblp->minor, urb->status); | 298 | usblp->minor, status); |
298 | } | 299 | } |
299 | spin_lock(&usblp->lock); | 300 | spin_lock(&usblp->lock); |
300 | if (urb->status < 0) | 301 | if (status < 0) |
301 | usblp->rstatus = urb->status; | 302 | usblp->rstatus = status; |
302 | else | 303 | else |
303 | usblp->rstatus = urb->actual_length; | 304 | usblp->rstatus = urb->actual_length; |
304 | usblp->rcomplete = 1; | 305 | usblp->rcomplete = 1; |
@@ -311,16 +312,17 @@ static void usblp_bulk_read(struct urb *urb) | |||
311 | static void usblp_bulk_write(struct urb *urb) | 312 | static void usblp_bulk_write(struct urb *urb) |
312 | { | 313 | { |
313 | struct usblp *usblp = urb->context; | 314 | struct usblp *usblp = urb->context; |
315 | int status = urb->status; | ||
314 | 316 | ||
315 | if (usblp->present && usblp->used) { | 317 | if (usblp->present && usblp->used) { |
316 | if (urb->status) | 318 | if (status) |
317 | printk(KERN_WARNING "usblp%d: " | 319 | printk(KERN_WARNING "usblp%d: " |
318 | "nonzero write bulk status received: %d\n", | 320 | "nonzero write bulk status received: %d\n", |
319 | usblp->minor, urb->status); | 321 | usblp->minor, status); |
320 | } | 322 | } |
321 | spin_lock(&usblp->lock); | 323 | spin_lock(&usblp->lock); |
322 | if (urb->status < 0) | 324 | if (status < 0) |
323 | usblp->wstatus = urb->status; | 325 | usblp->wstatus = status; |
324 | else | 326 | else |
325 | usblp->wstatus = urb->actual_length; | 327 | usblp->wstatus = urb->actual_length; |
326 | usblp->wcomplete = 1; | 328 | usblp->wcomplete = 1; |
@@ -741,10 +743,11 @@ static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t | |||
741 | */ | 743 | */ |
742 | rv = usblp_wwait(usblp, !!(file->f_flags&O_NONBLOCK)); | 744 | rv = usblp_wwait(usblp, !!(file->f_flags&O_NONBLOCK)); |
743 | if (rv < 0) { | 745 | if (rv < 0) { |
744 | /* | 746 | if (rv == -EAGAIN) { |
745 | * If interrupted, we simply leave the URB to dangle, | 747 | /* Presume that it's going to complete well. */ |
746 | * so the ->release will call usb_kill_urb(). | 748 | writecount += transfer_length; |
747 | */ | 749 | } |
750 | /* Leave URB dangling, to be cleaned on close. */ | ||
748 | goto collect_error; | 751 | goto collect_error; |
749 | } | 752 | } |
750 | 753 | ||
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 963520fbef90..42ef1d5f6c8a 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -99,12 +99,17 @@ EXPORT_SYMBOL_GPL (usb_bus_list_lock); | |||
99 | /* used for controlling access to virtual root hubs */ | 99 | /* used for controlling access to virtual root hubs */ |
100 | static DEFINE_SPINLOCK(hcd_root_hub_lock); | 100 | static DEFINE_SPINLOCK(hcd_root_hub_lock); |
101 | 101 | ||
102 | /* used when updating hcd data */ | 102 | /* used when updating an endpoint's URB list */ |
103 | static DEFINE_SPINLOCK(hcd_data_lock); | 103 | static DEFINE_SPINLOCK(hcd_urb_list_lock); |
104 | 104 | ||
105 | /* wait queue for synchronous unlinks */ | 105 | /* wait queue for synchronous unlinks */ |
106 | DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue); | 106 | DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue); |
107 | 107 | ||
108 | static inline int is_root_hub(struct usb_device *udev) | ||
109 | { | ||
110 | return (udev->parent == NULL); | ||
111 | } | ||
112 | |||
108 | /*-------------------------------------------------------------------------*/ | 113 | /*-------------------------------------------------------------------------*/ |
109 | 114 | ||
110 | /* | 115 | /* |
@@ -906,14 +911,13 @@ EXPORT_SYMBOL (usb_calc_bus_time); | |||
906 | static void urb_unlink(struct usb_hcd *hcd, struct urb *urb) | 911 | static void urb_unlink(struct usb_hcd *hcd, struct urb *urb) |
907 | { | 912 | { |
908 | unsigned long flags; | 913 | unsigned long flags; |
909 | int at_root_hub = (urb->dev == hcd->self.root_hub); | ||
910 | 914 | ||
911 | /* clear all state linking urb to this dev (and hcd) */ | 915 | /* clear all state linking urb to this dev (and hcd) */ |
912 | spin_lock_irqsave (&hcd_data_lock, flags); | 916 | spin_lock_irqsave(&hcd_urb_list_lock, flags); |
913 | list_del_init (&urb->urb_list); | 917 | list_del_init (&urb->urb_list); |
914 | spin_unlock_irqrestore (&hcd_data_lock, flags); | 918 | spin_unlock_irqrestore(&hcd_urb_list_lock, flags); |
915 | 919 | ||
916 | if (hcd->self.uses_dma && !at_root_hub) { | 920 | if (hcd->self.uses_dma && !is_root_hub(urb->dev)) { |
917 | if (usb_pipecontrol (urb->pipe) | 921 | if (usb_pipecontrol (urb->pipe) |
918 | && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) | 922 | && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) |
919 | dma_unmap_single (hcd->self.controller, urb->setup_dma, | 923 | dma_unmap_single (hcd->self.controller, urb->setup_dma, |
@@ -955,7 +959,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) | |||
955 | 959 | ||
956 | // FIXME: verify that quiescing hc works right (RH cleans up) | 960 | // FIXME: verify that quiescing hc works right (RH cleans up) |
957 | 961 | ||
958 | spin_lock_irqsave (&hcd_data_lock, flags); | 962 | spin_lock_irqsave(&hcd_urb_list_lock, flags); |
959 | ep = (usb_pipein(urb->pipe) ? urb->dev->ep_in : urb->dev->ep_out) | 963 | ep = (usb_pipein(urb->pipe) ? urb->dev->ep_in : urb->dev->ep_out) |
960 | [usb_pipeendpoint(urb->pipe)]; | 964 | [usb_pipeendpoint(urb->pipe)]; |
961 | if (unlikely (!ep)) | 965 | if (unlikely (!ep)) |
@@ -972,7 +976,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) | |||
972 | status = -ESHUTDOWN; | 976 | status = -ESHUTDOWN; |
973 | break; | 977 | break; |
974 | } | 978 | } |
975 | spin_unlock_irqrestore (&hcd_data_lock, flags); | 979 | spin_unlock_irqrestore(&hcd_urb_list_lock, flags); |
976 | if (status) { | 980 | if (status) { |
977 | INIT_LIST_HEAD (&urb->urb_list); | 981 | INIT_LIST_HEAD (&urb->urb_list); |
978 | usbmon_urb_submit_error(&hcd->self, urb, status); | 982 | usbmon_urb_submit_error(&hcd->self, urb, status); |
@@ -986,7 +990,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) | |||
986 | urb = usb_get_urb (urb); | 990 | urb = usb_get_urb (urb); |
987 | atomic_inc (&urb->use_count); | 991 | atomic_inc (&urb->use_count); |
988 | 992 | ||
989 | if (urb->dev == hcd->self.root_hub) { | 993 | if (is_root_hub(urb->dev)) { |
990 | /* NOTE: requirement on hub callers (usbfs and the hub | 994 | /* NOTE: requirement on hub callers (usbfs and the hub |
991 | * driver, for now) that URBs' urb->transfer_buffer be | 995 | * driver, for now) that URBs' urb->transfer_buffer be |
992 | * valid and usb_buffer_{sync,unmap}() not be needed, since | 996 | * valid and usb_buffer_{sync,unmap}() not be needed, since |
@@ -1033,18 +1037,6 @@ done: | |||
1033 | 1037 | ||
1034 | /*-------------------------------------------------------------------------*/ | 1038 | /*-------------------------------------------------------------------------*/ |
1035 | 1039 | ||
1036 | /* called in any context */ | ||
1037 | int usb_hcd_get_frame_number (struct usb_device *udev) | ||
1038 | { | ||
1039 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | ||
1040 | |||
1041 | if (!HC_IS_RUNNING (hcd->state)) | ||
1042 | return -ESHUTDOWN; | ||
1043 | return hcd->driver->get_frame_number (hcd); | ||
1044 | } | ||
1045 | |||
1046 | /*-------------------------------------------------------------------------*/ | ||
1047 | |||
1048 | /* this makes the hcd giveback() the urb more quickly, by kicking it | 1040 | /* this makes the hcd giveback() the urb more quickly, by kicking it |
1049 | * off hardware queues (which may take a while) and returning it as | 1041 | * off hardware queues (which may take a while) and returning it as |
1050 | * soon as practical. we've already set up the urb's return status, | 1042 | * soon as practical. we've already set up the urb's return status, |
@@ -1055,7 +1047,7 @@ unlink1 (struct usb_hcd *hcd, struct urb *urb) | |||
1055 | { | 1047 | { |
1056 | int value; | 1048 | int value; |
1057 | 1049 | ||
1058 | if (urb->dev == hcd->self.root_hub) | 1050 | if (is_root_hub(urb->dev)) |
1059 | value = usb_rh_urb_dequeue (hcd, urb); | 1051 | value = usb_rh_urb_dequeue (hcd, urb); |
1060 | else { | 1052 | else { |
1061 | 1053 | ||
@@ -1103,11 +1095,11 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) | |||
1103 | * that it was submitted. But as a rule it can't know whether or | 1095 | * that it was submitted. But as a rule it can't know whether or |
1104 | * not it's already been unlinked ... so we respect the reversed | 1096 | * not it's already been unlinked ... so we respect the reversed |
1105 | * lock sequence needed for the usb_hcd_giveback_urb() code paths | 1097 | * lock sequence needed for the usb_hcd_giveback_urb() code paths |
1106 | * (urb lock, then hcd_data_lock) in case some other CPU is now | 1098 | * (urb lock, then hcd_urb_list_lock) in case some other CPU is now |
1107 | * unlinking it. | 1099 | * unlinking it. |
1108 | */ | 1100 | */ |
1109 | spin_lock_irqsave (&urb->lock, flags); | 1101 | spin_lock_irqsave (&urb->lock, flags); |
1110 | spin_lock (&hcd_data_lock); | 1102 | spin_lock(&hcd_urb_list_lock); |
1111 | 1103 | ||
1112 | sys = &urb->dev->dev; | 1104 | sys = &urb->dev->dev; |
1113 | hcd = bus_to_hcd(urb->dev->bus); | 1105 | hcd = bus_to_hcd(urb->dev->bus); |
@@ -1139,17 +1131,16 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) | |||
1139 | * finish unlinking the initial failed usb_set_address() | 1131 | * finish unlinking the initial failed usb_set_address() |
1140 | * or device descriptor fetch. | 1132 | * or device descriptor fetch. |
1141 | */ | 1133 | */ |
1142 | if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) | 1134 | if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) && |
1143 | && hcd->self.root_hub != urb->dev) { | 1135 | !is_root_hub(urb->dev)) { |
1144 | dev_warn (hcd->self.controller, "Unlink after no-IRQ? " | 1136 | dev_warn (hcd->self.controller, "Unlink after no-IRQ? " |
1145 | "Controller is probably using the wrong IRQ." | 1137 | "Controller is probably using the wrong IRQ.\n"); |
1146 | "\n"); | ||
1147 | set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); | 1138 | set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); |
1148 | } | 1139 | } |
1149 | 1140 | ||
1150 | urb->status = status; | 1141 | urb->status = status; |
1151 | 1142 | ||
1152 | spin_unlock (&hcd_data_lock); | 1143 | spin_unlock(&hcd_urb_list_lock); |
1153 | spin_unlock_irqrestore (&urb->lock, flags); | 1144 | spin_unlock_irqrestore (&urb->lock, flags); |
1154 | 1145 | ||
1155 | retval = unlink1 (hcd, urb); | 1146 | retval = unlink1 (hcd, urb); |
@@ -1158,7 +1149,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) | |||
1158 | return retval; | 1149 | return retval; |
1159 | 1150 | ||
1160 | done: | 1151 | done: |
1161 | spin_unlock (&hcd_data_lock); | 1152 | spin_unlock(&hcd_urb_list_lock); |
1162 | spin_unlock_irqrestore (&urb->lock, flags); | 1153 | spin_unlock_irqrestore (&urb->lock, flags); |
1163 | if (retval != -EIDRM && sys && sys->driver) | 1154 | if (retval != -EIDRM && sys && sys->driver) |
1164 | dev_dbg (sys, "hcd_unlink_urb %p fail %d\n", urb, retval); | 1155 | dev_dbg (sys, "hcd_unlink_urb %p fail %d\n", urb, retval); |
@@ -1167,6 +1158,35 @@ done: | |||
1167 | 1158 | ||
1168 | /*-------------------------------------------------------------------------*/ | 1159 | /*-------------------------------------------------------------------------*/ |
1169 | 1160 | ||
1161 | /** | ||
1162 | * usb_hcd_giveback_urb - return URB from HCD to device driver | ||
1163 | * @hcd: host controller returning the URB | ||
1164 | * @urb: urb being returned to the USB device driver. | ||
1165 | * Context: in_interrupt() | ||
1166 | * | ||
1167 | * This hands the URB from HCD to its USB device driver, using its | ||
1168 | * completion function. The HCD has freed all per-urb resources | ||
1169 | * (and is done using urb->hcpriv). It also released all HCD locks; | ||
1170 | * the device driver won't cause problems if it frees, modifies, | ||
1171 | * or resubmits this URB. | ||
1172 | */ | ||
1173 | void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb) | ||
1174 | { | ||
1175 | urb_unlink(hcd, urb); | ||
1176 | usbmon_urb_complete (&hcd->self, urb); | ||
1177 | usb_unanchor_urb(urb); | ||
1178 | |||
1179 | /* pass ownership to the completion handler */ | ||
1180 | urb->complete (urb); | ||
1181 | atomic_dec (&urb->use_count); | ||
1182 | if (unlikely (urb->reject)) | ||
1183 | wake_up (&usb_kill_urb_queue); | ||
1184 | usb_put_urb (urb); | ||
1185 | } | ||
1186 | EXPORT_SYMBOL (usb_hcd_giveback_urb); | ||
1187 | |||
1188 | /*-------------------------------------------------------------------------*/ | ||
1189 | |||
1170 | /* disables the endpoint: cancels any pending urbs, then synchronizes with | 1190 | /* disables the endpoint: cancels any pending urbs, then synchronizes with |
1171 | * the hcd to make sure all endpoint state is gone from hardware, and then | 1191 | * the hcd to make sure all endpoint state is gone from hardware, and then |
1172 | * waits until the endpoint's queue is completely drained. use for | 1192 | * waits until the endpoint's queue is completely drained. use for |
@@ -1186,7 +1206,7 @@ void usb_hcd_endpoint_disable (struct usb_device *udev, | |||
1186 | 1206 | ||
1187 | /* ep is already gone from udev->ep_{in,out}[]; no more submits */ | 1207 | /* ep is already gone from udev->ep_{in,out}[]; no more submits */ |
1188 | rescan: | 1208 | rescan: |
1189 | spin_lock (&hcd_data_lock); | 1209 | spin_lock(&hcd_urb_list_lock); |
1190 | list_for_each_entry (urb, &ep->urb_list, urb_list) { | 1210 | list_for_each_entry (urb, &ep->urb_list, urb_list) { |
1191 | int tmp; | 1211 | int tmp; |
1192 | 1212 | ||
@@ -1194,7 +1214,7 @@ rescan: | |||
1194 | if (urb->status != -EINPROGRESS) | 1214 | if (urb->status != -EINPROGRESS) |
1195 | continue; | 1215 | continue; |
1196 | usb_get_urb (urb); | 1216 | usb_get_urb (urb); |
1197 | spin_unlock (&hcd_data_lock); | 1217 | spin_unlock(&hcd_urb_list_lock); |
1198 | 1218 | ||
1199 | spin_lock (&urb->lock); | 1219 | spin_lock (&urb->lock); |
1200 | tmp = urb->status; | 1220 | tmp = urb->status; |
@@ -1223,7 +1243,7 @@ rescan: | |||
1223 | /* list contents may have changed */ | 1243 | /* list contents may have changed */ |
1224 | goto rescan; | 1244 | goto rescan; |
1225 | } | 1245 | } |
1226 | spin_unlock (&hcd_data_lock); | 1246 | spin_unlock(&hcd_urb_list_lock); |
1227 | local_irq_enable (); | 1247 | local_irq_enable (); |
1228 | 1248 | ||
1229 | /* synchronize with the hardware, so old configuration state | 1249 | /* synchronize with the hardware, so old configuration state |
@@ -1240,7 +1260,7 @@ rescan: | |||
1240 | * endpoint_disable methods. | 1260 | * endpoint_disable methods. |
1241 | */ | 1261 | */ |
1242 | while (!list_empty (&ep->urb_list)) { | 1262 | while (!list_empty (&ep->urb_list)) { |
1243 | spin_lock_irq (&hcd_data_lock); | 1263 | spin_lock_irq(&hcd_urb_list_lock); |
1244 | 1264 | ||
1245 | /* The list may have changed while we acquired the spinlock */ | 1265 | /* The list may have changed while we acquired the spinlock */ |
1246 | urb = NULL; | 1266 | urb = NULL; |
@@ -1249,7 +1269,7 @@ rescan: | |||
1249 | urb_list); | 1269 | urb_list); |
1250 | usb_get_urb (urb); | 1270 | usb_get_urb (urb); |
1251 | } | 1271 | } |
1252 | spin_unlock_irq (&hcd_data_lock); | 1272 | spin_unlock_irq(&hcd_urb_list_lock); |
1253 | 1273 | ||
1254 | if (urb) { | 1274 | if (urb) { |
1255 | usb_kill_urb (urb); | 1275 | usb_kill_urb (urb); |
@@ -1260,6 +1280,18 @@ rescan: | |||
1260 | 1280 | ||
1261 | /*-------------------------------------------------------------------------*/ | 1281 | /*-------------------------------------------------------------------------*/ |
1262 | 1282 | ||
1283 | /* called in any context */ | ||
1284 | int usb_hcd_get_frame_number (struct usb_device *udev) | ||
1285 | { | ||
1286 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | ||
1287 | |||
1288 | if (!HC_IS_RUNNING (hcd->state)) | ||
1289 | return -ESHUTDOWN; | ||
1290 | return hcd->driver->get_frame_number (hcd); | ||
1291 | } | ||
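For reference, a driver would normally reach this through the exported usb_get_current_frame_number() wrapper; a minimal sketch follows, where the helper name and the 10-frame margin are illustrative assumptions.

/* assumes <linux/usb.h> is included */
static int example_pick_start_frame(struct usb_device *udev)
{
	int frame = usb_get_current_frame_number(udev);

	if (frame < 0)			/* e.g. -ESHUTDOWN: HC not running */
		return frame;
	return (frame + 10) & 0x7ff;	/* schedule roughly 10 frames ahead */
}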
1292 | |||
1293 | /*-------------------------------------------------------------------------*/ | ||
1294 | |||
1263 | #ifdef CONFIG_PM | 1295 | #ifdef CONFIG_PM |
1264 | 1296 | ||
1265 | int hcd_bus_suspend(struct usb_device *rhdev) | 1297 | int hcd_bus_suspend(struct usb_device *rhdev) |
@@ -1395,35 +1427,6 @@ EXPORT_SYMBOL (usb_bus_start_enum); | |||
1395 | /*-------------------------------------------------------------------------*/ | 1427 | /*-------------------------------------------------------------------------*/ |
1396 | 1428 | ||
1397 | /** | 1429 | /** |
1398 | * usb_hcd_giveback_urb - return URB from HCD to device driver | ||
1399 | * @hcd: host controller returning the URB | ||
1400 | * @urb: urb being returned to the USB device driver. | ||
1401 | * Context: in_interrupt() | ||
1402 | * | ||
1403 | * This hands the URB from HCD to its USB device driver, using its | ||
1404 | * completion function. The HCD has freed all per-urb resources | ||
1405 | * (and is done using urb->hcpriv). It also released all HCD locks; | ||
1406 | * the device driver won't cause problems if it frees, modifies, | ||
1407 | * or resubmits this URB. | ||
1408 | */ | ||
1409 | void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb) | ||
1410 | { | ||
1411 | urb_unlink(hcd, urb); | ||
1412 | usbmon_urb_complete (&hcd->self, urb); | ||
1413 | usb_unanchor_urb(urb); | ||
1414 | |||
1415 | /* pass ownership to the completion handler */ | ||
1416 | urb->complete (urb); | ||
1417 | atomic_dec (&urb->use_count); | ||
1418 | if (unlikely (urb->reject)) | ||
1419 | wake_up (&usb_kill_urb_queue); | ||
1420 | usb_put_urb (urb); | ||
1421 | } | ||
1422 | EXPORT_SYMBOL (usb_hcd_giveback_urb); | ||
1423 | |||
1424 | /*-------------------------------------------------------------------------*/ | ||
1425 | |||
1426 | /** | ||
1427 | * usb_hcd_irq - hook IRQs to HCD framework (bus glue) | 1430 | * usb_hcd_irq - hook IRQs to HCD framework (bus glue) |
1428 | * @irq: the IRQ being raised | 1431 | * @irq: the IRQ being raised |
1429 | * @__hcd: pointer to the HCD whose IRQ is being signaled | 1432 | * @__hcd: pointer to the HCD whose IRQ is being signaled |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index fd74c50b1804..e341a1da517f 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -1335,6 +1335,10 @@ int usb_new_device(struct usb_device *udev) | |||
1335 | udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, | 1335 | udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, |
1336 | (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); | 1336 | (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); |
1337 | 1337 | ||
1338 | /* Increment the parent's count of unsuspended children */ | ||
1339 | if (udev->parent) | ||
1340 | usb_autoresume_device(udev->parent); | ||
1341 | |||
1338 | /* Register the device. The device driver is responsible | 1342 | /* Register the device. The device driver is responsible |
1339 | * for adding the device files to sysfs and for configuring | 1343 | * for adding the device files to sysfs and for configuring |
1340 | * the device. | 1344 | * the device. |
@@ -1342,13 +1346,11 @@ int usb_new_device(struct usb_device *udev) | |||
1342 | err = device_add(&udev->dev); | 1346 | err = device_add(&udev->dev); |
1343 | if (err) { | 1347 | if (err) { |
1344 | dev_err(&udev->dev, "can't device_add, error %d\n", err); | 1348 | dev_err(&udev->dev, "can't device_add, error %d\n", err); |
1349 | if (udev->parent) | ||
1350 | usb_autosuspend_device(udev->parent); | ||
1345 | goto fail; | 1351 | goto fail; |
1346 | } | 1352 | } |
1347 | 1353 | ||
1348 | /* Increment the parent's count of unsuspended children */ | ||
1349 | if (udev->parent) | ||
1350 | usb_autoresume_device(udev->parent); | ||
1351 | |||
1352 | exit: | 1354 | exit: |
1353 | return err; | 1355 | return err; |
1354 | 1356 | ||
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 530e854961ce..25f63f1096b4 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -34,13 +34,14 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) | |||
34 | { | 34 | { |
35 | struct completion done; | 35 | struct completion done; |
36 | unsigned long expire; | 36 | unsigned long expire; |
37 | int status; | 37 | int retval; |
38 | int status = urb->status; | ||
38 | 39 | ||
39 | init_completion(&done); | 40 | init_completion(&done); |
40 | urb->context = &done; | 41 | urb->context = &done; |
41 | urb->actual_length = 0; | 42 | urb->actual_length = 0; |
42 | status = usb_submit_urb(urb, GFP_NOIO); | 43 | retval = usb_submit_urb(urb, GFP_NOIO); |
43 | if (unlikely(status)) | 44 | if (unlikely(retval)) |
44 | goto out; | 45 | goto out; |
45 | 46 | ||
46 | expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; | 47 | expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; |
@@ -55,15 +56,15 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) | |||
55 | urb->transfer_buffer_length); | 56 | urb->transfer_buffer_length); |
56 | 57 | ||
57 | usb_kill_urb(urb); | 58 | usb_kill_urb(urb); |
58 | status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status; | 59 | retval = status == -ENOENT ? -ETIMEDOUT : status; |
59 | } else | 60 | } else |
60 | status = urb->status; | 61 | retval = status; |
61 | out: | 62 | out: |
62 | if (actual_length) | 63 | if (actual_length) |
63 | *actual_length = urb->actual_length; | 64 | *actual_length = urb->actual_length; |
64 | 65 | ||
65 | usb_free_urb(urb); | 66 | usb_free_urb(urb); |
66 | return status; | 67 | return retval; |
67 | } | 68 | } |
68 | 69 | ||
69 | /*-------------------------------------------------------------------*/ | 70 | /*-------------------------------------------------------------------*/ |
@@ -250,6 +251,7 @@ static void sg_clean (struct usb_sg_request *io) | |||
250 | static void sg_complete (struct urb *urb) | 251 | static void sg_complete (struct urb *urb) |
251 | { | 252 | { |
252 | struct usb_sg_request *io = urb->context; | 253 | struct usb_sg_request *io = urb->context; |
254 | int status = urb->status; | ||
253 | 255 | ||
254 | spin_lock (&io->lock); | 256 | spin_lock (&io->lock); |
255 | 257 | ||
@@ -265,21 +267,21 @@ static void sg_complete (struct urb *urb) | |||
265 | */ | 267 | */ |
266 | if (io->status | 268 | if (io->status |
267 | && (io->status != -ECONNRESET | 269 | && (io->status != -ECONNRESET |
268 | || urb->status != -ECONNRESET) | 270 | || status != -ECONNRESET) |
269 | && urb->actual_length) { | 271 | && urb->actual_length) { |
270 | dev_err (io->dev->bus->controller, | 272 | dev_err (io->dev->bus->controller, |
271 | "dev %s ep%d%s scatterlist error %d/%d\n", | 273 | "dev %s ep%d%s scatterlist error %d/%d\n", |
272 | io->dev->devpath, | 274 | io->dev->devpath, |
273 | usb_pipeendpoint (urb->pipe), | 275 | usb_pipeendpoint (urb->pipe), |
274 | usb_pipein (urb->pipe) ? "in" : "out", | 276 | usb_pipein (urb->pipe) ? "in" : "out", |
275 | urb->status, io->status); | 277 | status, io->status); |
276 | // BUG (); | 278 | // BUG (); |
277 | } | 279 | } |
278 | 280 | ||
279 | if (io->status == 0 && urb->status && urb->status != -ECONNRESET) { | 281 | if (io->status == 0 && status && status != -ECONNRESET) { |
280 | int i, found, status; | 282 | int i, found, retval; |
281 | 283 | ||
282 | io->status = urb->status; | 284 | io->status = status; |
283 | 285 | ||
284 | /* the previous urbs, and this one, completed already. | 286 | /* the previous urbs, and this one, completed already. |
285 | * unlink pending urbs so they won't rx/tx bad data. | 287 | * unlink pending urbs so they won't rx/tx bad data. |
@@ -290,13 +292,13 @@ static void sg_complete (struct urb *urb) | |||
290 | if (!io->urbs [i] || !io->urbs [i]->dev) | 292 | if (!io->urbs [i] || !io->urbs [i]->dev) |
291 | continue; | 293 | continue; |
292 | if (found) { | 294 | if (found) { |
293 | status = usb_unlink_urb (io->urbs [i]); | 295 | retval = usb_unlink_urb (io->urbs [i]); |
294 | if (status != -EINPROGRESS | 296 | if (retval != -EINPROGRESS && |
295 | && status != -ENODEV | 297 | retval != -ENODEV && |
296 | && status != -EBUSY) | 298 | retval != -EBUSY) |
297 | dev_err (&io->dev->dev, | 299 | dev_err (&io->dev->dev, |
298 | "%s, unlink --> %d\n", | 300 | "%s, unlink --> %d\n", |
299 | __FUNCTION__, status); | 301 | __FUNCTION__, retval); |
300 | } else if (urb == io->urbs [i]) | 302 | } else if (urb == io->urbs [i]) |
301 | found = 1; | 303 | found = 1; |
302 | } | 304 | } |
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index d47ae89154a7..2ab222be8fd1 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c | |||
@@ -441,6 +441,54 @@ static struct attribute_group dev_attr_grp = { | |||
441 | .attrs = dev_attrs, | 441 | .attrs = dev_attrs, |
442 | }; | 442 | }; |
443 | 443 | ||
444 | /* Binary descriptors */ | ||
445 | |||
446 | static ssize_t | ||
447 | read_descriptors(struct kobject *kobj, struct bin_attribute *attr, | ||
448 | char *buf, loff_t off, size_t count) | ||
449 | { | ||
450 | struct usb_device *udev = to_usb_device( | ||
451 | container_of(kobj, struct device, kobj)); | ||
452 | size_t nleft = count; | ||
453 | size_t srclen, n; | ||
454 | |||
455 | usb_lock_device(udev); | ||
456 | |||
457 | /* The binary attribute begins with the device descriptor */ | ||
458 | srclen = sizeof(struct usb_device_descriptor); | ||
459 | if (off < srclen) { | ||
460 | n = min_t(size_t, nleft, srclen - off); | ||
461 | memcpy(buf, off + (char *) &udev->descriptor, n); | ||
462 | nleft -= n; | ||
463 | buf += n; | ||
464 | off = 0; | ||
465 | } else { | ||
466 | off -= srclen; | ||
467 | } | ||
468 | |||
469 | /* Then follows the raw descriptor entry for the current | ||
470 | * configuration (config plus subsidiary descriptors). | ||
471 | */ | ||
472 | if (udev->actconfig) { | ||
473 | int cfgno = udev->actconfig - udev->config; | ||
474 | |||
475 | srclen = __le16_to_cpu(udev->actconfig->desc.wTotalLength); | ||
476 | if (off < srclen) { | ||
477 | n = min_t(size_t, nleft, srclen - off); | ||
478 | memcpy(buf, off + udev->rawdescriptors[cfgno], n); | ||
479 | nleft -= n; | ||
480 | } | ||
481 | } | ||
482 | usb_unlock_device(udev); | ||
483 | return count - nleft; | ||
484 | } | ||
485 | |||
486 | static struct bin_attribute dev_bin_attr_descriptors = { | ||
487 | .attr = {.name = "descriptors", .mode = 0444}, | ||
488 | .read = read_descriptors, | ||
489 | .size = 18 + 65535, /* dev descr + max-size raw descriptor */ | ||
490 | }; | ||
491 | |||
444 | int usb_create_sysfs_dev_files(struct usb_device *udev) | 492 | int usb_create_sysfs_dev_files(struct usb_device *udev) |
445 | { | 493 | { |
446 | struct device *dev = &udev->dev; | 494 | struct device *dev = &udev->dev; |
@@ -450,6 +498,10 @@ int usb_create_sysfs_dev_files(struct usb_device *udev) | |||
450 | if (retval) | 498 | if (retval) |
451 | return retval; | 499 | return retval; |
452 | 500 | ||
501 | retval = device_create_bin_file(dev, &dev_bin_attr_descriptors); | ||
502 | if (retval) | ||
503 | goto error; | ||
504 | |||
453 | retval = add_persist_attributes(dev); | 505 | retval = add_persist_attributes(dev); |
454 | if (retval) | 506 | if (retval) |
455 | goto error; | 507 | goto error; |
@@ -492,6 +544,7 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev) | |||
492 | device_remove_file(dev, &dev_attr_serial); | 544 | device_remove_file(dev, &dev_attr_serial); |
493 | remove_power_attributes(dev); | 545 | remove_power_attributes(dev); |
494 | remove_persist_attributes(dev); | 546 | remove_persist_attributes(dev); |
547 | device_remove_bin_file(dev, &dev_bin_attr_descriptors); | ||
495 | sysfs_remove_group(&dev->kobj, &dev_attr_grp); | 548 | sysfs_remove_group(&dev->kobj, &dev_attr_grp); |
496 | } | 549 | } |
497 | 550 | ||
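A minimal userspace sketch of consuming the new "descriptors" attribute (the sysfs path below is an example device, not fixed by this patch): the file begins with the 18-byte device descriptor, followed by the raw descriptors of the active configuration.

#include <stdio.h>

int main(void)
{
	unsigned char d[18];
	FILE *f = fopen("/sys/bus/usb/devices/usb1/descriptors", "rb");

	if (!f)
		return 1;
	if (fread(d, 1, sizeof(d), f) == sizeof(d))
		printf("bLength=%u bDescriptorType=%u bcdUSB=%x.%02x\n",
		       d[0], d[1], d[3], d[2]);
	fclose(f);
	return 0;
}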
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 52ec44b828f3..be630228461c 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c | |||
@@ -440,55 +440,57 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) | |||
440 | * @urb: pointer to urb describing a previously submitted request, | 440 | * @urb: pointer to urb describing a previously submitted request, |
441 | * may be NULL | 441 | * may be NULL |
442 | * | 442 | * |
443 | * This routine cancels an in-progress request. URBs complete only | 443 | * This routine cancels an in-progress request. URBs complete only once |
444 | * once per submission, and may be canceled only once per submission. | 444 | * per submission, and may be canceled only once per submission. |
445 | * Successful cancellation means the requests's completion handler will | 445 | * Successful cancellation means termination of @urb will be expedited |
446 | * be called with a status code indicating that the request has been | 446 | * and the completion handler will be called with a status code |
447 | * canceled (rather than any other code) and will quickly be removed | 447 | * indicating that the request has been canceled (rather than any other |
448 | * from host controller data structures. | 448 | * code). |
449 | * | 449 | * |
450 | * This request is always asynchronous. | 450 | * This request is always asynchronous. Success is indicated by |
451 | * Success is indicated by returning -EINPROGRESS, | 451 | * returning -EINPROGRESS, at which time the URB will probably not yet |
452 | * at which time the URB will normally have been unlinked but not yet | 452 | * have been given back to the device driver. When it is eventually |
453 | * given back to the device driver. When it is called, the completion | 453 | * called, the completion function will see @urb->status == -ECONNRESET. |
454 | * function will see urb->status == -ECONNRESET. Failure is indicated | 454 | * Failure is indicated by usb_unlink_urb() returning any other value. |
455 | * by any other return value. Unlinking will fail when the URB is not | 455 | * Unlinking will fail when @urb is not currently "linked" (i.e., it was |
456 | * currently "linked" (i.e., it was never submitted, or it was unlinked | 456 | * never submitted, or it was unlinked before, or the hardware is already |
457 | * before, or the hardware is already finished with it), even if the | 457 | * finished with it), even if the completion handler has not yet run. |
458 | * completion handler has not yet run. | ||
459 | * | 458 | * |
460 | * Unlinking and Endpoint Queues: | 459 | * Unlinking and Endpoint Queues: |
461 | * | 460 | * |
461 | * [The behaviors and guarantees described below do not apply to virtual | ||
462 | * root hubs but only to endpoint queues for physical USB devices.] | ||
463 | * | ||
462 | * Host Controller Drivers (HCDs) place all the URBs for a particular | 464 | * Host Controller Drivers (HCDs) place all the URBs for a particular |
463 | * endpoint in a queue. Normally the queue advances as the controller | 465 | * endpoint in a queue. Normally the queue advances as the controller |
464 | * hardware processes each request. But when an URB terminates with an | 466 | * hardware processes each request. But when an URB terminates with an |
465 | * error its queue stops, at least until that URB's completion routine | 467 | * error its queue generally stops (see below), at least until that URB's |
466 | * returns. It is guaranteed that the queue will not restart until all | 468 | * completion routine returns. It is guaranteed that a stopped queue |
467 | * its unlinked URBs have been fully retired, with their completion | 469 | * will not restart until all its unlinked URBs have been fully retired, |
468 | * routines run, even if that's not until some time after the original | 470 | * with their completion routines run, even if that's not until some time |
469 | * completion handler returns. Normally the same behavior and guarantees | 471 | * after the original completion handler returns. The same behavior and |
470 | * apply when an URB terminates because it was unlinked; however if an | 472 | * guarantee apply when an URB terminates because it was unlinked. |
471 | * URB is unlinked before the hardware has started to execute it, then | 473 | * |
472 | * its queue is not guaranteed to stop until all the preceding URBs have | 474 | * Bulk and interrupt endpoint queues are guaranteed to stop whenever an |
473 | * completed. | 475 | * URB terminates with any sort of error, including -ECONNRESET, -ENOENT, |
474 | * | 476 | * and -EREMOTEIO. Control endpoint queues behave the same way except |
475 | * This means that USB device drivers can safely build deep queues for | 477 | * that they are not guaranteed to stop for -EREMOTEIO errors. Queues |
476 | * large or complex transfers, and clean them up reliably after any sort | 478 | * for isochronous endpoints are treated differently, because they must |
477 | * of aborted transfer by unlinking all pending URBs at the first fault. | 479 | * advance at fixed rates. Such queues do not stop when an URB |
478 | * | 480 | * encounters an error or is unlinked. An unlinked isochronous URB may |
479 | * Note that an URB terminating early because a short packet was received | 481 | * leave a gap in the stream of packets; it is undefined whether such |
480 | * will count as an error if and only if the URB_SHORT_NOT_OK flag is set. | 482 | * gaps can be filled in. |
481 | * Also, that all unlinks performed in any URB completion handler must | 483 | * |
482 | * be asynchronous. | 484 | * Note that early termination of an URB because a short packet was |
483 | * | 485 | * received will generate a -EREMOTEIO error if and only if the |
484 | * Queues for isochronous endpoints are treated differently, because they | 486 | * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device |
485 | * advance at fixed rates. Such queues do not stop when an URB is unlinked. | 487 | * drivers can build deep queues for large or complex bulk transfers |
486 | * An unlinked URB may leave a gap in the stream of packets. It is undefined | 488 | * and clean them up reliably after any sort of aborted transfer by |
487 | * whether such gaps can be filled in. | 489 | * unlinking all pending URBs at the first fault. |
488 | * | 490 | * |
489 | * When a control URB terminates with an error, it is likely that the | 491 | * When a control URB terminates with an error other than -EREMOTEIO, it |
490 | * status stage of the transfer will not take place, even if it is merely | 492 | * is quite likely that the status stage of the transfer will not take |
491 | * a soft error resulting from a short-packet with URB_SHORT_NOT_OK set. | 493 | * place. |
492 | */ | 494 | */ |
493 | int usb_unlink_urb(struct urb *urb) | 495 | int usb_unlink_urb(struct urb *urb) |
494 | { | 496 | { |
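A minimal sketch of the "unlink everything at the first fault" pattern the rewritten comment describes; the structure, array size, and field names are hypothetical.

#define EXAMPLE_NURBS 8

struct example_dev {
	struct urb *urbs[EXAMPLE_NURBS];	/* a deep bulk queue */
};

/* called from a completion handler, so the unlinks must be asynchronous */
static void example_abort_queue(struct example_dev *dev)
{
	int i;

	for (i = 0; i < EXAMPLE_NURBS; i++)
		usb_unlink_urb(dev->urbs[i]);	/* -EINPROGRESS on success */
}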
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 45e01e289455..767aed5b4bea 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -82,6 +82,27 @@ choice | |||
82 | Many controller drivers are platform-specific; these | 82 | Many controller drivers are platform-specific; these |
83 | often need board-specific hooks. | 83 | often need board-specific hooks. |
84 | 84 | ||
85 | config USB_GADGET_AMD5536UDC | ||
86 | boolean "AMD5536 UDC" | ||
87 | depends on PCI | ||
88 | select USB_GADGET_DUALSPEED | ||
89 | help | ||
90 | The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge. | ||
91 | It is a high-speed, DMA-capable USB device controller. Besides ep0 | ||
92 | it provides 4 IN and 4 OUT endpoints (bulk or interrupt type). | ||
93 | The UDC port supports OTG operation, and may be used as a host port | ||
94 | if it's not being used to implement peripheral or OTG roles. | ||
95 | |||
96 | Say "y" to link the driver statically, or "m" to build a | ||
97 | dynamically linked module called "amd5536udc" and force all | ||
98 | gadget drivers to also be dynamically linked. | ||
99 | |||
100 | config USB_AMD5536UDC | ||
101 | tristate | ||
102 | depends on USB_GADGET_AMD5536UDC | ||
103 | default USB_GADGET | ||
104 | select USB_GADGET_SELECTED | ||
105 | |||
85 | config USB_GADGET_FSL_USB2 | 106 | config USB_GADGET_FSL_USB2 |
86 | boolean "Freescale Highspeed USB DR Peripheral Controller" | 107 | boolean "Freescale Highspeed USB DR Peripheral Controller" |
87 | depends on MPC834x || PPC_MPC831x | 108 | depends on MPC834x || PPC_MPC831x |
@@ -156,6 +177,24 @@ config USB_PXA2XX_SMALL | |||
156 | default y if USB_ETH | 177 | default y if USB_ETH |
157 | default y if USB_G_SERIAL | 178 | default y if USB_G_SERIAL |
158 | 179 | ||
180 | config USB_GADGET_M66592 | ||
181 | boolean "Renesas M66592 USB Peripheral Controller" | ||
182 | select USB_GADGET_DUALSPEED | ||
183 | help | ||
184 | M66592 is a discrete USB peripheral controller chip that | ||
185 | supports both full and high speed USB 2.0 data transfers. | ||
186 | It has seven configurable endpoints, and endpoint zero. | ||
187 | |||
188 | Say "y" to link the driver statically, or "m" to build a | ||
189 | dynamically linked module called "m66592_udc" and force all | ||
190 | gadget drivers to also be dynamically linked. | ||
191 | |||
192 | config USB_M66592 | ||
193 | tristate | ||
194 | depends on USB_GADGET_M66592 | ||
195 | default USB_GADGET | ||
196 | select USB_GADGET_SELECTED | ||
197 | |||
159 | config USB_GADGET_GOKU | 198 | config USB_GADGET_GOKU |
160 | boolean "Toshiba TC86C001 'Goku-S'" | 199 | boolean "Toshiba TC86C001 'Goku-S'" |
161 | depends on PCI | 200 | depends on PCI |
@@ -261,24 +300,6 @@ config USB_AT91 | |||
261 | depends on USB_GADGET_AT91 | 300 | depends on USB_GADGET_AT91 |
262 | default USB_GADGET | 301 | default USB_GADGET |
263 | 302 | ||
264 | config USB_GADGET_M66592 | ||
265 | boolean "M66592 driver" | ||
266 | select USB_GADGET_DUALSPEED | ||
267 | help | ||
268 | M66592 is a USB 2.0 peripheral controller. | ||
269 | |||
270 | It has seven configurable endpoints, and endpoint zero. | ||
271 | |||
272 | Say "y" to link the driver statically, or "m" to build a | ||
273 | dynamically linked module called "m66592_udc" and force all | ||
274 | gadget drivers to also be dynamically linked. | ||
275 | |||
276 | config USB_M66592 | ||
277 | tristate | ||
278 | depends on USB_GADGET_M66592 | ||
279 | default USB_GADGET | ||
280 | select USB_GADGET_SELECTED | ||
281 | |||
282 | config USB_GADGET_DUMMY_HCD | 303 | config USB_GADGET_DUMMY_HCD |
283 | boolean "Dummy HCD (DEVELOPMENT)" | 304 | boolean "Dummy HCD (DEVELOPMENT)" |
284 | depends on (USB=y || (USB=m && USB_GADGET=m)) && EXPERIMENTAL | 305 | depends on (USB=y || (USB=m && USB_GADGET=m)) && EXPERIMENTAL |
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile index 8ae76f738635..1bc0f03550ce 100644 --- a/drivers/usb/gadget/Makefile +++ b/drivers/usb/gadget/Makefile | |||
@@ -7,6 +7,7 @@ endif | |||
7 | 7 | ||
8 | obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o | 8 | obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o |
9 | obj-$(CONFIG_USB_NET2280) += net2280.o | 9 | obj-$(CONFIG_USB_NET2280) += net2280.o |
10 | obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o | ||
10 | obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o | 11 | obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o |
11 | obj-$(CONFIG_USB_GOKU) += goku_udc.o | 12 | obj-$(CONFIG_USB_GOKU) += goku_udc.o |
12 | obj-$(CONFIG_USB_OMAP) += omap_udc.o | 13 | obj-$(CONFIG_USB_OMAP) += omap_udc.o |
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c new file mode 100644 index 000000000000..714156ca8fe4 --- /dev/null +++ b/drivers/usb/gadget/amd5536udc.c | |||
@@ -0,0 +1,3454 @@ | |||
1 | /* | ||
2 | * amd5536udc.c -- AMD 5536 UDC high/full speed USB device controller | ||
3 | * | ||
4 | * Copyright (C) 2005-2007 AMD (http://www.amd.com) | ||
5 | * Author: Thomas Dahlmann | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | /* | ||
23 | * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536. | ||
24 | * It is a high-speed, DMA-capable USB device controller. Besides ep0 it | ||
25 | * provides 4 IN and 4 OUT endpoints (bulk or interrupt type). | ||
26 | * | ||
27 | * Make sure that UDC is assigned to port 4 by BIOS settings (port can also | ||
28 | * be used as host port) and UOC bits PAD_EN and APU are set (should be done | ||
29 | * by BIOS init). | ||
30 | * | ||
31 | * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does not | ||
32 | * work without updating NET_IP_ALIGN. Alternatively, PIO mode (module param | ||
33 | * "use_dma=0") can be used with gadget ether. | ||
34 | */ | ||
35 | |||
36 | /* debug control */ | ||
37 | /* #define UDC_VERBOSE */ | ||
38 | |||
39 | /* Driver strings */ | ||
40 | #define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller" | ||
41 | #define UDC_DRIVER_VERSION_STRING "01.00.0206 - $Revision: #3 $" | ||
42 | |||
43 | /* system */ | ||
44 | #include <linux/module.h> | ||
45 | #include <linux/pci.h> | ||
46 | #include <linux/kernel.h> | ||
47 | #include <linux/version.h> | ||
48 | #include <linux/delay.h> | ||
49 | #include <linux/ioport.h> | ||
50 | #include <linux/sched.h> | ||
51 | #include <linux/slab.h> | ||
52 | #include <linux/smp_lock.h> | ||
53 | #include <linux/errno.h> | ||
54 | #include <linux/init.h> | ||
55 | #include <linux/timer.h> | ||
56 | #include <linux/list.h> | ||
57 | #include <linux/interrupt.h> | ||
58 | #include <linux/ioctl.h> | ||
59 | #include <linux/fs.h> | ||
60 | #include <linux/dmapool.h> | ||
61 | #include <linux/moduleparam.h> | ||
62 | #include <linux/device.h> | ||
63 | #include <linux/io.h> | ||
64 | #include <linux/irq.h> | ||
65 | |||
66 | #include <asm/byteorder.h> | ||
67 | #include <asm/system.h> | ||
68 | #include <asm/unaligned.h> | ||
69 | |||
70 | /* gadget stack */ | ||
71 | #include <linux/usb/ch9.h> | ||
72 | #include <linux/usb_gadget.h> | ||
73 | |||
74 | /* udc specific */ | ||
75 | #include "amd5536udc.h" | ||
76 | |||
77 | |||
78 | static void udc_tasklet_disconnect(unsigned long); | ||
79 | static void empty_req_queue(struct udc_ep *); | ||
80 | static int udc_probe(struct udc *dev); | ||
81 | static void udc_basic_init(struct udc *dev); | ||
82 | static void udc_setup_endpoints(struct udc *dev); | ||
83 | static void udc_soft_reset(struct udc *dev); | ||
84 | static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep); | ||
85 | static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq); | ||
86 | static int udc_free_dma_chain(struct udc *dev, struct udc_request *req); | ||
87 | static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req, | ||
88 | unsigned long buf_len, gfp_t gfp_flags); | ||
89 | static int udc_remote_wakeup(struct udc *dev); | ||
90 | static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); | ||
91 | static void udc_pci_remove(struct pci_dev *pdev); | ||
92 | |||
93 | /* description */ | ||
94 | static const char mod_desc[] = UDC_MOD_DESCRIPTION; | ||
95 | static const char name[] = "amd5536udc"; | ||
96 | |||
97 | /* structure to hold endpoint function pointers */ | ||
98 | static const struct usb_ep_ops udc_ep_ops; | ||
99 | |||
100 | /* received setup data */ | ||
101 | static union udc_setup_data setup_data; | ||
102 | |||
103 | /* pointer to device object */ | ||
104 | static struct udc *udc; | ||
105 | |||
106 | /* irq spin lock for soft reset */ | ||
107 | static DEFINE_SPINLOCK(udc_irq_spinlock); | ||
108 | /* stall spin lock */ | ||
109 | static DEFINE_SPINLOCK(udc_stall_spinlock); | ||
110 | |||
111 | /* | ||
112 | * slave mode: pending bytes in rx fifo after nyet, | ||
113 | * used if EPIN irq came but no req was available | ||
114 | */ | ||
115 | static unsigned int udc_rxfifo_pending; | ||
116 | |||
117 | /* count soft resets after suspend to avoid loop */ | ||
118 | static int soft_reset_occured; | ||
119 | static int soft_reset_after_usbreset_occured; | ||
120 | |||
121 | /* timer */ | ||
122 | static struct timer_list udc_timer; | ||
123 | static int stop_timer; | ||
124 | |||
125 | /* set_rde -- Is used to control enabling of RX DMA. Problem is | ||
126 | * that UDC has only one bit (RDE) to enable/disable RX DMA for | ||
127 | * all OUT endpoints. So we have to handle race conditions like | ||
128 | * when OUT data reaches the fifo but no request was queued yet. | ||
129 | * This cannot be solved by leaving RX DMA disabled until a | ||
130 | * request gets queued because there may be other OUT packets | ||
131 | * in the FIFO (important for not blocking control traffic). | ||
132 | * The value of set_rde controls the corresponding timer. | ||
133 | * | ||
134 | * set_rde -1 == not used, means it is allowed to be set to 0 or 1 | ||
135 | * set_rde 0 == do not touch RDE, do not start the RDE timer | ||
136 | * set_rde 1 == timer function will check whether the FIFO has data | ||
137 | * set_rde 2 == set by timer function to enable RX DMA on next call | ||
138 | */ | ||
139 | static int set_rde = -1; | ||
140 | |||
141 | static DECLARE_COMPLETION(on_exit); | ||
142 | static struct timer_list udc_pollstall_timer; | ||
143 | static int stop_pollstall_timer; | ||
144 | static DECLARE_COMPLETION(on_pollstall_exit); | ||
145 | |||
146 | /* tasklet for usb disconnect */ | ||
147 | static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect, | ||
148 | (unsigned long) &udc); | ||
149 | |||
150 | |||
151 | /* endpoint names used for print */ | ||
152 | static const char ep0_string[] = "ep0in"; | ||
153 | static const char *ep_string[] = { | ||
154 | ep0_string, | ||
155 | "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk", | ||
156 | "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk", | ||
157 | "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk", | ||
158 | "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk", | ||
159 | "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk", | ||
160 | "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk", | ||
161 | "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk" | ||
162 | }; | ||
163 | |||
164 | /* DMA usage flag */ | ||
165 | static int use_dma = 1; | ||
166 | /* packet per buffer dma */ | ||
167 | static int use_dma_ppb = 1; | ||
168 | /* with per descr. update */ | ||
169 | static int use_dma_ppb_du; | ||
170 | /* buffer fill mode */ | ||
171 | static int use_dma_bufferfill_mode; | ||
172 | /* full speed only mode */ | ||
173 | static int use_fullspeed; | ||
174 | /* tx buffer size for high speed */ | ||
175 | static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE; | ||
176 | |||
177 | /* module parameters */ | ||
178 | module_param(use_dma, bool, S_IRUGO); | ||
179 | MODULE_PARM_DESC(use_dma, "true for DMA"); | ||
180 | module_param(use_dma_ppb, bool, S_IRUGO); | ||
181 | MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode"); | ||
182 | module_param(use_dma_ppb_du, bool, S_IRUGO); | ||
183 | MODULE_PARM_DESC(use_dma_ppb_du, | ||
184 | "true for DMA in packet per buffer mode with descriptor update"); | ||
185 | module_param(use_fullspeed, bool, S_IRUGO); | ||
186 | MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only"); | ||
187 | |||
188 | /*---------------------------------------------------------------------------*/ | ||
189 | /* Prints UDC device registers and endpoint irq registers */ | ||
190 | static void print_regs(struct udc *dev) | ||
191 | { | ||
192 | DBG(dev, "------- Device registers -------\n"); | ||
193 | DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg)); | ||
194 | DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl)); | ||
195 | DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts)); | ||
196 | DBG(dev, "\n"); | ||
197 | DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts)); | ||
198 | DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk)); | ||
199 | DBG(dev, "\n"); | ||
200 | DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts)); | ||
201 | DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk)); | ||
202 | DBG(dev, "\n"); | ||
203 | DBG(dev, "USE DMA = %d\n", use_dma); | ||
204 | if (use_dma && use_dma_ppb && !use_dma_ppb_du) { | ||
205 | DBG(dev, "DMA mode = PPBNDU (packet per buffer " | ||
206 | "WITHOUT desc. update)\n"); | ||
207 | dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU"); | ||
208 | } else if (use_dma && use_dma_ppb && use_dma_ppb_du) { | ||
209 | DBG(dev, "DMA mode = PPBDU (packet per buffer " | ||
210 | "WITH desc. update)\n"); | ||
211 | dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU"); | ||
212 | } | ||
213 | if (use_dma && use_dma_bufferfill_mode) { | ||
214 | DBG(dev, "DMA mode = BF (buffer fill mode)\n"); | ||
215 | dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF"); | ||
216 | } | ||
217 | if (!use_dma) { | ||
218 | dev_info(&dev->pdev->dev, "FIFO mode\n"); | ||
219 | } | ||
220 | DBG(dev, "-------------------------------------------------------\n"); | ||
221 | } | ||
222 | |||
223 | /* Masks unused interrupts */ | ||
224 | static int udc_mask_unused_interrupts(struct udc *dev) | ||
225 | { | ||
226 | u32 tmp; | ||
227 | |||
228 | /* mask all dev interrupts */ | ||
229 | tmp = AMD_BIT(UDC_DEVINT_SVC) | | ||
230 | AMD_BIT(UDC_DEVINT_ENUM) | | ||
231 | AMD_BIT(UDC_DEVINT_US) | | ||
232 | AMD_BIT(UDC_DEVINT_UR) | | ||
233 | AMD_BIT(UDC_DEVINT_ES) | | ||
234 | AMD_BIT(UDC_DEVINT_SI) | | ||
235 | AMD_BIT(UDC_DEVINT_SOF)| | ||
236 | AMD_BIT(UDC_DEVINT_SC); | ||
237 | writel(tmp, &dev->regs->irqmsk); | ||
238 | |||
239 | /* mask all ep interrupts */ | ||
240 | writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk); | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | /* Enables endpoint 0 interrupts */ | ||
246 | static int udc_enable_ep0_interrupts(struct udc *dev) | ||
247 | { | ||
248 | u32 tmp; | ||
249 | |||
250 | DBG(dev, "udc_enable_ep0_interrupts()\n"); | ||
251 | |||
252 | /* read irq mask */ | ||
253 | tmp = readl(&dev->regs->ep_irqmsk); | ||
254 | /* enable ep0 irq's */ | ||
255 | tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0) | ||
256 | & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0); | ||
257 | writel(tmp, &dev->regs->ep_irqmsk); | ||
258 | |||
259 | return 0; | ||
260 | } | ||
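The masking idiom above relies on bit helpers presumably defined in amd5536udc.h; their assumed semantics, shown here with renamed macros since the real definitions are not part of this hunk, are a single-bit mask and its complement.

#define EXAMPLE_BIT(bit)	(1 << (bit))		/* mask with exactly one bit set */
#define EXAMPLE_UNMASK_BIT(bit)	(~EXAMPLE_BIT(bit))	/* ANDing clears that one bit */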
261 | |||
262 | /* Enables device interrupts for SET_INTF and SET_CONFIG */ | ||
263 | static int udc_enable_dev_setup_interrupts(struct udc *dev) | ||
264 | { | ||
265 | u32 tmp; | ||
266 | |||
267 | DBG(dev, "enable device interrupts for setup data\n"); | ||
268 | |||
269 | /* read irq mask */ | ||
270 | tmp = readl(&dev->regs->irqmsk); | ||
271 | |||
272 | /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */ | ||
273 | tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI) | ||
274 | & AMD_UNMASK_BIT(UDC_DEVINT_SC) | ||
275 | & AMD_UNMASK_BIT(UDC_DEVINT_UR) | ||
276 | & AMD_UNMASK_BIT(UDC_DEVINT_SVC) | ||
277 | & AMD_UNMASK_BIT(UDC_DEVINT_ENUM); | ||
278 | writel(tmp, &dev->regs->irqmsk); | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | /* Calculates fifo start of endpoint based on preceding endpoints */ | ||
284 | static int udc_set_txfifo_addr(struct udc_ep *ep) | ||
285 | { | ||
286 | struct udc *dev; | ||
287 | u32 tmp; | ||
288 | int i; | ||
289 | |||
290 | if (!ep || !(ep->in)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | dev = ep->dev; | ||
294 | ep->txfifo = dev->txfifo; | ||
295 | |||
296 | /* traverse ep's */ | ||
297 | for (i = 0; i < ep->num; i++) { | ||
298 | if (dev->ep[i].regs) { | ||
299 | /* read fifo size */ | ||
300 | tmp = readl(&dev->ep[i].regs->bufin_framenum); | ||
301 | tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE); | ||
302 | ep->txfifo += tmp; | ||
303 | } | ||
304 | } | ||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */ | ||
309 | static u32 cnak_pending; | ||
310 | |||
311 | static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num) | ||
312 | { | ||
313 | if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) { | ||
314 | DBG(ep->dev, "NAK could not be cleared for ep%d\n", num); | ||
315 | cnak_pending |= 1 << (num); | ||
316 | ep->naking = 1; | ||
317 | } else | ||
318 | cnak_pending = cnak_pending & (~(1 << (num))); | ||
319 | } | ||
320 | |||
321 | |||
322 | /* Enables endpoint, is called by gadget driver */ | ||
323 | static int | ||
324 | udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc) | ||
325 | { | ||
326 | struct udc_ep *ep; | ||
327 | struct udc *dev; | ||
328 | u32 tmp; | ||
329 | unsigned long iflags; | ||
330 | u8 udc_csr_epix; | ||
331 | |||
332 | if (!usbep | ||
333 | || usbep->name == ep0_string | ||
334 | || !desc | ||
335 | || desc->bDescriptorType != USB_DT_ENDPOINT) | ||
336 | return -EINVAL; | ||
337 | |||
338 | ep = container_of(usbep, struct udc_ep, ep); | ||
339 | dev = ep->dev; | ||
340 | |||
341 | DBG(dev, "udc_ep_enable() ep %d\n", ep->num); | ||
342 | |||
343 | if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) | ||
344 | return -ESHUTDOWN; | ||
345 | |||
346 | spin_lock_irqsave(&dev->lock, iflags); | ||
347 | ep->desc = desc; | ||
348 | |||
349 | ep->halted = 0; | ||
350 | |||
351 | /* set traffic type */ | ||
352 | tmp = readl(&dev->ep[ep->num].regs->ctl); | ||
353 | tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET); | ||
354 | writel(tmp, &dev->ep[ep->num].regs->ctl); | ||
355 | |||
356 | /* set max packet size */ | ||
357 | tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt); | ||
358 | tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_EP_MAX_PKT_SIZE); | ||
359 | ep->ep.maxpacket = desc->wMaxPacketSize; | ||
360 | writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt); | ||
361 | |||
362 | /* IN ep */ | ||
363 | if (ep->in) { | ||
364 | |||
365 | /* ep ix in UDC CSR register space */ | ||
366 | udc_csr_epix = ep->num; | ||
367 | |||
368 | /* set buffer size (tx fifo entries) */ | ||
369 | tmp = readl(&dev->ep[ep->num].regs->bufin_framenum); | ||
370 | /* double buffering: fifo size = 2 x max packet size */ | ||
371 | tmp = AMD_ADDBITS( | ||
372 | tmp, | ||
373 | desc->wMaxPacketSize * UDC_EPIN_BUFF_SIZE_MULT | ||
374 | / UDC_DWORD_BYTES, | ||
375 | UDC_EPIN_BUFF_SIZE); | ||
376 | writel(tmp, &dev->ep[ep->num].regs->bufin_framenum); | ||
377 | |||
378 | /* calc. tx fifo base addr */ | ||
379 | udc_set_txfifo_addr(ep); | ||
380 | |||
381 | /* flush fifo */ | ||
382 | tmp = readl(&ep->regs->ctl); | ||
383 | tmp |= AMD_BIT(UDC_EPCTL_F); | ||
384 | writel(tmp, &ep->regs->ctl); | ||
385 | |||
386 | /* OUT ep */ | ||
387 | } else { | ||
388 | /* ep ix in UDC CSR register space */ | ||
389 | udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; | ||
390 | |||
391 | /* set max packet size UDC CSR */ | ||
392 | tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); | ||
393 | tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, | ||
394 | UDC_CSR_NE_MAX_PKT); | ||
395 | writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); | ||
396 | |||
397 | if (use_dma && !ep->in) { | ||
398 | /* alloc and init BNA dummy request */ | ||
399 | ep->bna_dummy_req = udc_alloc_bna_dummy(ep); | ||
400 | ep->bna_occurred = 0; | ||
401 | } | ||
402 | |||
403 | if (ep->num != UDC_EP0OUT_IX) | ||
404 | dev->data_ep_enabled = 1; | ||
405 | } | ||
406 | |||
407 | /* set ep values */ | ||
408 | tmp = readl(&dev->csr->ne[udc_csr_epix]); | ||
409 | /* max packet */ | ||
410 | tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_CSR_NE_MAX_PKT); | ||
411 | /* ep number */ | ||
412 | tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM); | ||
413 | /* ep direction */ | ||
414 | tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR); | ||
415 | /* ep type */ | ||
416 | tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE); | ||
417 | /* ep config */ | ||
418 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG); | ||
419 | /* ep interface */ | ||
420 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF); | ||
421 | /* ep alt */ | ||
422 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT); | ||
423 | /* write reg */ | ||
424 | writel(tmp, &dev->csr->ne[udc_csr_epix]); | ||
425 | |||
426 | /* enable ep irq */ | ||
427 | tmp = readl(&dev->regs->ep_irqmsk); | ||
428 | tmp &= AMD_UNMASK_BIT(ep->num); | ||
429 | writel(tmp, &dev->regs->ep_irqmsk); | ||
430 | |||
431 | /* | ||
432 | * clear NAK by writing CNAK | ||
433 | * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written | ||
434 | */ | ||
435 | if (!use_dma || ep->in) { | ||
436 | tmp = readl(&ep->regs->ctl); | ||
437 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
438 | writel(tmp, &ep->regs->ctl); | ||
439 | ep->naking = 0; | ||
440 | UDC_QUEUE_CNAK(ep, ep->num); | ||
441 | } | ||
442 | tmp = desc->bEndpointAddress; | ||
443 | DBG(dev, "%s enabled\n", usbep->name); | ||
444 | |||
445 | spin_unlock_irqrestore(&dev->lock, iflags); | ||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | /* Resets endpoint */ | ||
450 | static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep) | ||
451 | { | ||
452 | u32 tmp; | ||
453 | |||
454 | VDBG(ep->dev, "ep-%d reset\n", ep->num); | ||
455 | ep->desc = NULL; | ||
456 | ep->ep.ops = &udc_ep_ops; | ||
457 | INIT_LIST_HEAD(&ep->queue); | ||
458 | |||
459 | ep->ep.maxpacket = (u16) ~0; | ||
460 | /* set NAK */ | ||
461 | tmp = readl(&ep->regs->ctl); | ||
462 | tmp |= AMD_BIT(UDC_EPCTL_SNAK); | ||
463 | writel(tmp, &ep->regs->ctl); | ||
464 | ep->naking = 1; | ||
465 | |||
466 | /* disable interrupt */ | ||
467 | tmp = readl(®s->ep_irqmsk); | ||
468 | tmp |= AMD_BIT(ep->num); | ||
469 | writel(tmp, ®s->ep_irqmsk); | ||
470 | |||
471 | if (ep->in) { | ||
472 | /* unset P and IN bit of potential former DMA */ | ||
473 | tmp = readl(&ep->regs->ctl); | ||
474 | tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P); | ||
475 | writel(tmp, &ep->regs->ctl); | ||
476 | |||
477 | tmp = readl(&ep->regs->sts); | ||
478 | tmp |= AMD_BIT(UDC_EPSTS_IN); | ||
479 | writel(tmp, &ep->regs->sts); | ||
480 | |||
481 | /* flush the fifo */ | ||
482 | tmp = readl(&ep->regs->ctl); | ||
483 | tmp |= AMD_BIT(UDC_EPCTL_F); | ||
484 | writel(tmp, &ep->regs->ctl); | ||
485 | |||
486 | } | ||
487 | /* reset desc pointer */ | ||
488 | writel(0, &ep->regs->desptr); | ||
489 | } | ||
490 | |||
491 | /* Disables endpoint, is called by gadget driver */ | ||
492 | static int udc_ep_disable(struct usb_ep *usbep) | ||
493 | { | ||
494 | struct udc_ep *ep = NULL; | ||
495 | unsigned long iflags; | ||
496 | |||
497 | if (!usbep) | ||
498 | return -EINVAL; | ||
499 | |||
500 | ep = container_of(usbep, struct udc_ep, ep); | ||
501 | if (usbep->name == ep0_string || !ep->desc) | ||
502 | return -EINVAL; | ||
503 | |||
504 | DBG(ep->dev, "Disable ep-%d\n", ep->num); | ||
505 | |||
506 | spin_lock_irqsave(&ep->dev->lock, iflags); | ||
507 | udc_free_request(&ep->ep, &ep->bna_dummy_req->req); | ||
508 | empty_req_queue(ep); | ||
509 | ep_init(ep->dev->regs, ep); | ||
510 | spin_unlock_irqrestore(&ep->dev->lock, iflags); | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | /* Allocates request packet, called by gadget driver */ | ||
516 | static struct usb_request * | ||
517 | udc_alloc_request(struct usb_ep *usbep, gfp_t gfp) | ||
518 | { | ||
519 | struct udc_request *req; | ||
520 | struct udc_data_dma *dma_desc; | ||
521 | struct udc_ep *ep; | ||
522 | |||
523 | if (!usbep) | ||
524 | return NULL; | ||
525 | |||
526 | ep = container_of(usbep, struct udc_ep, ep); | ||
527 | |||
528 | VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num); | ||
529 | req = kzalloc(sizeof(struct udc_request), gfp); | ||
530 | if (!req) | ||
531 | return NULL; | ||
532 | |||
533 | req->req.dma = DMA_DONT_USE; | ||
534 | INIT_LIST_HEAD(&req->queue); | ||
535 | |||
536 | if (ep->dma) { | ||
537 | /* ep0 in requests are allocated from data pool here */ | ||
538 | dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp, | ||
539 | &req->td_phys); | ||
540 | if (!dma_desc) { | ||
541 | kfree(req); | ||
542 | return NULL; | ||
543 | } | ||
544 | |||
545 | VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, " | ||
546 | "td_phys = %lx\n", | ||
547 | req, dma_desc, | ||
548 | (unsigned long)req->td_phys); | ||
549 | /* prevent UDC from using desc. - set HOST BUSY */ | ||
550 | dma_desc->status = AMD_ADDBITS(dma_desc->status, | ||
551 | UDC_DMA_STP_STS_BS_HOST_BUSY, | ||
552 | UDC_DMA_STP_STS_BS); | ||
553 | dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE); | ||
554 | req->td_data = dma_desc; | ||
555 | req->td_data_last = NULL; | ||
556 | req->chain_len = 1; | ||
557 | } | ||
558 | |||
559 | return &req->req; | ||
560 | } | ||
561 | |||
562 | /* Frees request packet, called by gadget driver */ | ||
563 | static void | ||
564 | udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq) | ||
565 | { | ||
566 | struct udc_ep *ep; | ||
567 | struct udc_request *req; | ||
568 | |||
569 | if (!usbep || !usbreq) | ||
570 | return; | ||
571 | |||
572 | ep = container_of(usbep, struct udc_ep, ep); | ||
573 | req = container_of(usbreq, struct udc_request, req); | ||
574 | VDBG(ep->dev, "free_req req=%p\n", req); | ||
575 | BUG_ON(!list_empty(&req->queue)); | ||
576 | if (req->td_data) { | ||
577 | VDBG(ep->dev, "req->td_data=%p\n", req->td_data); | ||
578 | |||
579 | /* free dma chain if created */ | ||
580 | if (req->chain_len > 1) { | ||
581 | udc_free_dma_chain(ep->dev, req); | ||
582 | } | ||
583 | |||
584 | pci_pool_free(ep->dev->data_requests, req->td_data, | ||
585 | req->td_phys); | ||
586 | } | ||
587 | kfree(req); | ||
588 | } | ||
589 | |||
590 | /* Init BNA dummy descriptor for HOST BUSY and pointing to itself */ | ||
591 | static void udc_init_bna_dummy(struct udc_request *req) | ||
592 | { | ||
593 | if (req) { | ||
594 | /* set last bit */ | ||
595 | req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); | ||
596 | /* set next pointer to itself */ | ||
597 | req->td_data->next = req->td_phys; | ||
598 | /* set HOST BUSY */ | ||
599 | req->td_data->status | ||
600 | = AMD_ADDBITS(req->td_data->status, | ||
601 | UDC_DMA_STP_STS_BS_DMA_DONE, | ||
602 | UDC_DMA_STP_STS_BS); | ||
603 | #ifdef UDC_VERBOSE | ||
604 | pr_debug("bna desc = %p, sts = %08x\n", | ||
605 | req->td_data, req->td_data->status); | ||
606 | #endif | ||
607 | } | ||
608 | } | ||
609 | |||
610 | /* Allocate BNA dummy descriptor */ | ||
611 | static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep) | ||
612 | { | ||
613 | struct udc_request *req = NULL; | ||
614 | struct usb_request *_req = NULL; | ||
615 | |||
616 | /* alloc the dummy request */ | ||
617 | _req = udc_alloc_request(&ep->ep, GFP_ATOMIC); | ||
618 | if (_req) { | ||
619 | req = container_of(_req, struct udc_request, req); | ||
620 | ep->bna_dummy_req = req; | ||
621 | udc_init_bna_dummy(req); | ||
622 | } | ||
623 | return req; | ||
624 | } | ||
625 | |||
626 | /* Write data to TX fifo for IN packets */ | ||
627 | static void | ||
628 | udc_txfifo_write(struct udc_ep *ep, struct usb_request *req) | ||
629 | { | ||
630 | u8 *req_buf; | ||
631 | u32 *buf; | ||
632 | int i, j; | ||
633 | unsigned bytes = 0; | ||
634 | unsigned remaining = 0; | ||
635 | |||
636 | if (!req || !ep) | ||
637 | return; | ||
638 | |||
639 | req_buf = req->buf + req->actual; | ||
640 | prefetch(req_buf); | ||
641 | remaining = req->length - req->actual; | ||
642 | |||
643 | buf = (u32 *) req_buf; | ||
644 | |||
645 | bytes = ep->ep.maxpacket; | ||
646 | if (bytes > remaining) | ||
647 | bytes = remaining; | ||
648 | |||
649 | /* dwords first */ | ||
650 | for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) { | ||
651 | writel(*(buf + i), ep->txfifo); | ||
652 | } | ||
653 | |||
654 | /* remaining bytes must be written by byte access */ | ||
655 | for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) { | ||
656 | writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)), | ||
657 | ep->txfifo); | ||
658 | } | ||
659 | |||
660 | /* dummy write confirm */ | ||
661 | writel(0, &ep->regs->confirm); | ||
662 | } | ||
663 | |||
664 | /* Read dwords from RX fifo for OUT transfers */ | ||
665 | static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords) | ||
666 | { | ||
667 | int i; | ||
668 | |||
669 | VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords); | ||
670 | |||
671 | for (i = 0; i < dwords; i++) { | ||
672 | *(buf + i) = readl(dev->rxfifo); | ||
673 | } | ||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | /* Read bytes from RX fifo for OUT transfers */ | ||
678 | static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes) | ||
679 | { | ||
680 | int i, j; | ||
681 | u32 tmp; | ||
682 | |||
683 | VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes); | ||
684 | |||
685 | /* dwords first */ | ||
686 | for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) { | ||
687 | *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo); | ||
688 | } | ||
689 | |||
690 | /* remaining bytes must be read by byte access */ | ||
691 | if (bytes % UDC_DWORD_BYTES) { | ||
692 | tmp = readl(dev->rxfifo); | ||
693 | for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) { | ||
694 | *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK); | ||
695 | tmp = tmp >> UDC_BITS_PER_BYTE; | ||
696 | } | ||
697 | } | ||
698 | |||
699 | return 0; | ||
700 | } | ||
701 | |||
702 | /* Read data from RX fifo for OUT transfers */ | ||
703 | static int | ||
704 | udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req) | ||
705 | { | ||
706 | u8 *buf; | ||
707 | unsigned buf_space; | ||
708 | unsigned bytes = 0; | ||
709 | unsigned finished = 0; | ||
710 | |||
711 | /* received number of bytes */ | ||
712 | bytes = readl(&ep->regs->sts); | ||
713 | bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE); | ||
714 | |||
715 | buf_space = req->req.length - req->req.actual; | ||
716 | buf = req->req.buf + req->req.actual; | ||
717 | if (bytes > buf_space) { | ||
718 | if ((buf_space % ep->ep.maxpacket) != 0) { | ||
719 | DBG(ep->dev, | ||
720 | "%s: rx %d bytes, rx-buf space = %d bytesn\n", | ||
721 | ep->ep.name, bytes, buf_space); | ||
722 | req->req.status = -EOVERFLOW; | ||
723 | } | ||
724 | bytes = buf_space; | ||
725 | } | ||
726 | req->req.actual += bytes; | ||
727 | |||
728 | /* last packet ? */ | ||
729 | if (((bytes % ep->ep.maxpacket) != 0) || (!bytes) | ||
730 | || ((req->req.actual == req->req.length) && !req->req.zero)) | ||
731 | finished = 1; | ||
732 | |||
733 | /* read rx fifo bytes */ | ||
734 | VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes); | ||
735 | udc_rxfifo_read_bytes(ep->dev, buf, bytes); | ||
736 | |||
737 | return finished; | ||
738 | } | ||
739 | |||
740 | /* create/re-init a DMA descriptor or a DMA descriptor chain */ | ||
741 | static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp) | ||
742 | { | ||
743 | int retval = 0; | ||
744 | u32 tmp; | ||
745 | |||
746 | VDBG(ep->dev, "prep_dma\n"); | ||
747 | VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n", | ||
748 | ep->num, req->td_data); | ||
749 | |||
750 | /* set buffer pointer */ | ||
751 | req->td_data->bufptr = req->req.dma; | ||
752 | |||
753 | /* set last bit */ | ||
754 | req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); | ||
755 | |||
756 | /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */ | ||
757 | if (use_dma_ppb) { | ||
758 | |||
759 | retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); | ||
760 | if (retval != 0) { | ||
761 | if (retval == -ENOMEM) | ||
762 | DBG(ep->dev, "Out of DMA memory\n"); | ||
763 | return retval; | ||
764 | } | ||
765 | if (ep->in) { | ||
766 | if (req->req.length == ep->ep.maxpacket) { | ||
767 | /* write tx bytes */ | ||
768 | req->td_data->status = | ||
769 | AMD_ADDBITS(req->td_data->status, | ||
770 | ep->ep.maxpacket, | ||
771 | UDC_DMA_IN_STS_TXBYTES); | ||
772 | |||
773 | } | ||
774 | } | ||
775 | |||
776 | } | ||
777 | |||
778 | if (ep->in) { | ||
779 | VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d " | ||
780 | "maxpacket=%d ep%d\n", | ||
781 | use_dma_ppb, req->req.length, | ||
782 | ep->ep.maxpacket, ep->num); | ||
783 | /* | ||
784 | * if bytes < max packet then tx bytes must | ||
785 | * be written in packet per buffer mode | ||
786 | */ | ||
787 | if (!use_dma_ppb || req->req.length < ep->ep.maxpacket | ||
788 | || ep->num == UDC_EP0OUT_IX | ||
789 | || ep->num == UDC_EP0IN_IX) { | ||
790 | /* write tx bytes */ | ||
791 | req->td_data->status = | ||
792 | AMD_ADDBITS(req->td_data->status, | ||
793 | req->req.length, | ||
794 | UDC_DMA_IN_STS_TXBYTES); | ||
795 | /* reset frame num */ | ||
796 | req->td_data->status = | ||
797 | AMD_ADDBITS(req->td_data->status, | ||
798 | 0, | ||
799 | UDC_DMA_IN_STS_FRAMENUM); | ||
800 | } | ||
801 | /* set HOST BUSY */ | ||
802 | req->td_data->status = | ||
803 | AMD_ADDBITS(req->td_data->status, | ||
804 | UDC_DMA_STP_STS_BS_HOST_BUSY, | ||
805 | UDC_DMA_STP_STS_BS); | ||
806 | } else { | ||
807 | VDBG(ep->dev, "OUT set host ready\n"); | ||
808 | /* set HOST READY */ | ||
809 | req->td_data->status = | ||
810 | AMD_ADDBITS(req->td_data->status, | ||
811 | UDC_DMA_STP_STS_BS_HOST_READY, | ||
812 | UDC_DMA_STP_STS_BS); | ||
813 | |||
814 | |||
815 | /* clear NAK by writing CNAK */ | ||
816 | if (ep->naking) { | ||
817 | tmp = readl(&ep->regs->ctl); | ||
818 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
819 | writel(tmp, &ep->regs->ctl); | ||
820 | ep->naking = 0; | ||
821 | UDC_QUEUE_CNAK(ep, ep->num); | ||
822 | } | ||
823 | |||
824 | } | ||
825 | |||
826 | return retval; | ||
827 | } | ||
828 | |||
829 | /* Completes request packet ... caller MUST hold lock */ | ||
830 | static void | ||
831 | complete_req(struct udc_ep *ep, struct udc_request *req, int sts) | ||
832 | __releases(ep->dev->lock) | ||
833 | __acquires(ep->dev->lock) | ||
834 | { | ||
835 | struct udc *dev; | ||
836 | unsigned halted; | ||
837 | |||
838 | VDBG(ep->dev, "complete_req(): ep%d\n", ep->num); | ||
839 | |||
840 | dev = ep->dev; | ||
841 | /* unmap DMA */ | ||
842 | if (req->dma_mapping) { | ||
843 | if (ep->in) | ||
844 | pci_unmap_single(dev->pdev, | ||
845 | req->req.dma, | ||
846 | req->req.length, | ||
847 | PCI_DMA_TODEVICE); | ||
848 | else | ||
849 | pci_unmap_single(dev->pdev, | ||
850 | req->req.dma, | ||
851 | req->req.length, | ||
852 | PCI_DMA_FROMDEVICE); | ||
853 | req->dma_mapping = 0; | ||
854 | req->req.dma = DMA_DONT_USE; | ||
855 | } | ||
856 | |||
857 | halted = ep->halted; | ||
858 | ep->halted = 1; | ||
859 | |||
860 | /* set new status if pending */ | ||
861 | if (req->req.status == -EINPROGRESS) | ||
862 | req->req.status = sts; | ||
863 | |||
864 | /* remove from ep queue */ | ||
865 | list_del_init(&req->queue); | ||
866 | |||
867 | VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n", | ||
868 | &req->req, req->req.length, ep->ep.name, sts); | ||
869 | |||
870 | spin_unlock(&dev->lock); | ||
871 | req->req.complete(&ep->ep, &req->req); | ||
872 | spin_lock(&dev->lock); | ||
873 | ep->halted = halted; | ||
874 | } | ||
875 | |||
876 | /* frees pci pool descriptors of a DMA chain */ | ||
877 | static int udc_free_dma_chain(struct udc *dev, struct udc_request *req) | ||
878 | { | ||
879 | |||
880 | int ret_val = 0; | ||
881 | struct udc_data_dma *td; | ||
882 | struct udc_data_dma *td_last = NULL; | ||
883 | unsigned int i; | ||
884 | |||
885 | DBG(dev, "free chain req = %p\n", req); | ||
886 | |||
887 | /* do not free the first desc.; it is freed together with the request */ | ||
888 | td_last = req->td_data; | ||
889 | td = phys_to_virt(td_last->next); | ||
890 | |||
891 | for (i = 1; i < req->chain_len; i++) { | ||
892 | |||
893 | pci_pool_free(dev->data_requests, td, | ||
894 | (dma_addr_t) td_last->next); | ||
895 | td_last = td; | ||
896 | td = phys_to_virt(td_last->next); | ||
897 | } | ||
898 | |||
899 | return ret_val; | ||
900 | } | ||
901 | |||
902 | /* Iterates to the end of a DMA chain and returns last descriptor */ | ||
903 | static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req) | ||
904 | { | ||
905 | struct udc_data_dma *td; | ||
906 | |||
907 | td = req->td_data; | ||
908 | while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) { | ||
909 | td = phys_to_virt(td->next); | ||
910 | } | ||
911 | |||
912 | return td; | ||
913 | |||
914 | } | ||
915 | |||
916 | /* Iterates to the end of a DMA chain and counts bytes received */ | ||
917 | static u32 udc_get_ppbdu_rxbytes(struct udc_request *req) | ||
918 | { | ||
919 | struct udc_data_dma *td; | ||
920 | u32 count; | ||
921 | |||
922 | td = req->td_data; | ||
923 | /* number of received bytes */ | ||
924 | count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES); | ||
925 | |||
926 | while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) { | ||
927 | td = phys_to_virt(td->next); | ||
928 | /* number of received bytes */ | ||
929 | if (td) { | ||
930 | count += AMD_GETBITS(td->status, | ||
931 | UDC_DMA_OUT_STS_RXBYTES); | ||
932 | } | ||
933 | } | ||
934 | |||
935 | return count; | ||
936 | |||
937 | } | ||
938 | |||
939 | /* Creates or re-inits a DMA chain */ | ||
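| /* | ||
| * Note on the chain built below: the first descriptor always belongs | ||
| * to the request (req->td_data); additional descriptors come from the | ||
| * pci pool, each covering at most buf_len bytes of the request buffer, | ||
| * and the last descriptor gets the L bit set. An already allocated | ||
| * chain of sufficient length is reused instead of reallocated. | ||
| */ | ||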
940 | static int udc_create_dma_chain( | ||
941 | struct udc_ep *ep, | ||
942 | struct udc_request *req, | ||
943 | unsigned long buf_len, gfp_t gfp_flags | ||
944 | ) | ||
945 | { | ||
946 | unsigned long bytes = req->req.length; | ||
947 | unsigned int i; | ||
948 | dma_addr_t dma_addr; | ||
949 | struct udc_data_dma *td = NULL; | ||
950 | struct udc_data_dma *last = NULL; | ||
951 | unsigned long txbytes; | ||
952 | unsigned create_new_chain = 0; | ||
953 | unsigned len; | ||
954 | |||
955 | VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n", | ||
956 | bytes, buf_len); | ||
957 | dma_addr = DMA_DONT_USE; | ||
958 | |||
959 | /* unset L bit in first desc for OUT */ | ||
960 | if (!ep->in) { | ||
961 | req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L); | ||
962 | } | ||
963 | |||
964 | /* alloc only new desc's if not already available */ | ||
965 | len = req->req.length / ep->ep.maxpacket; | ||
966 | if (req->req.length % ep->ep.maxpacket) { | ||
967 | len++; | ||
968 | } | ||
969 | |||
970 | if (len > req->chain_len) { | ||
971 | /* a shorter chain was allocated before - free it */ | ||
972 | if (req->chain_len > 1) { | ||
973 | udc_free_dma_chain(ep->dev, req); | ||
974 | } | ||
975 | req->chain_len = len; | ||
976 | create_new_chain = 1; | ||
977 | } | ||
978 | |||
979 | td = req->td_data; | ||
980 | /* gen. required number of descriptors and buffers */ | ||
981 | for (i = buf_len; i < bytes; i += buf_len) { | ||
982 | /* create or determine next desc. */ | ||
983 | if (create_new_chain) { | ||
984 | |||
985 | td = pci_pool_alloc(ep->dev->data_requests, | ||
986 | gfp_flags, &dma_addr); | ||
987 | if (!td) | ||
988 | return -ENOMEM; | ||
989 | |||
990 | td->status = 0; | ||
991 | } else if (i == buf_len) { | ||
992 | /* first td */ | ||
993 | td = (struct udc_data_dma *) phys_to_virt( | ||
994 | req->td_data->next); | ||
995 | td->status = 0; | ||
996 | } else { | ||
997 | td = (struct udc_data_dma *) phys_to_virt(last->next); | ||
998 | td->status = 0; | ||
999 | } | ||
1000 | |||
1001 | |||
1002 | if (td) | ||
1003 | td->bufptr = req->req.dma + i; /* assign buffer */ | ||
1004 | else | ||
1005 | break; | ||
1006 | |||
1007 | /* short packet ? */ | ||
1008 | if ((bytes - i) >= buf_len) { | ||
1009 | txbytes = buf_len; | ||
1010 | } else { | ||
1011 | /* short packet */ | ||
1012 | txbytes = bytes - i; | ||
1013 | } | ||
1014 | |||
1015 | /* link td and assign tx bytes */ | ||
1016 | if (i == buf_len) { | ||
1017 | if (create_new_chain) { | ||
1018 | req->td_data->next = dma_addr; | ||
1019 | } else { | ||
1020 | /* req->td_data->next = virt_to_phys(td); */ | ||
1021 | } | ||
1022 | /* write tx bytes */ | ||
1023 | if (ep->in) { | ||
1024 | /* first desc */ | ||
1025 | req->td_data->status = | ||
1026 | AMD_ADDBITS(req->td_data->status, | ||
1027 | ep->ep.maxpacket, | ||
1028 | UDC_DMA_IN_STS_TXBYTES); | ||
1029 | /* second desc */ | ||
1030 | td->status = AMD_ADDBITS(td->status, | ||
1031 | txbytes, | ||
1032 | UDC_DMA_IN_STS_TXBYTES); | ||
1033 | } | ||
1034 | } else { | ||
1035 | if (create_new_chain) { | ||
1036 | last->next = dma_addr; | ||
1037 | } else { | ||
1038 | /* last->next = virt_to_phys(td); */ | ||
1039 | } | ||
1040 | if (ep->in) { | ||
1041 | /* write tx bytes */ | ||
1042 | td->status = AMD_ADDBITS(td->status, | ||
1043 | txbytes, | ||
1044 | UDC_DMA_IN_STS_TXBYTES); | ||
1045 | } | ||
1046 | } | ||
1047 | last = td; | ||
1048 | } | ||
1049 | /* set last bit */ | ||
1050 | if (td) { | ||
1051 | td->status |= AMD_BIT(UDC_DMA_IN_STS_L); | ||
1052 | /* last desc. points to itself */ | ||
1053 | req->td_data_last = td; | ||
1054 | } | ||
1055 | |||
1056 | return 0; | ||
1057 | } | ||
1058 | |||
1059 | /* Enabling RX DMA */ | ||
1060 | static void udc_set_rde(struct udc *dev) | ||
1061 | { | ||
1062 | u32 tmp; | ||
1063 | |||
1064 | VDBG(dev, "udc_set_rde()\n"); | ||
1065 | /* stop RDE timer */ | ||
1066 | if (timer_pending(&udc_timer)) { | ||
1067 | set_rde = 0; | ||
1068 | mod_timer(&udc_timer, jiffies - 1); | ||
1069 | } | ||
1070 | /* set RDE */ | ||
1071 | tmp = readl(&dev->regs->ctl); | ||
1072 | tmp |= AMD_BIT(UDC_DEVCTL_RDE); | ||
1073 | writel(tmp, &dev->regs->ctl); | ||
1074 | } | ||
1075 | |||
1076 | /* Queues a request packet, called by gadget driver */ | ||
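| /* | ||
| * Roughly: on an empty queue the transfer is started right here (zero | ||
| * length packets are completed immediately, DMA is armed by writing | ||
| * the descriptor pointer and clearing NAK); otherwise the request is | ||
| * only prepared and appended, and is picked up later from the ISRs. | ||
| */ | ||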
1077 | static int | ||
1078 | udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp) | ||
1079 | { | ||
1080 | int retval = 0; | ||
1081 | u8 open_rxfifo = 0; | ||
1082 | unsigned long iflags; | ||
1083 | struct udc_ep *ep; | ||
1084 | struct udc_request *req; | ||
1085 | struct udc *dev; | ||
1086 | u32 tmp; | ||
1087 | |||
1088 | /* check the inputs */ | ||
1089 | req = container_of(usbreq, struct udc_request, req); | ||
1090 | |||
1091 | if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf | ||
1092 | || !list_empty(&req->queue)) | ||
1093 | return -EINVAL; | ||
1094 | |||
1095 | ep = container_of(usbep, struct udc_ep, ep); | ||
1096 | if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX)) | ||
1097 | return -EINVAL; | ||
1098 | |||
1099 | VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in); | ||
1100 | dev = ep->dev; | ||
1101 | |||
1102 | if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) | ||
1103 | return -ESHUTDOWN; | ||
1104 | |||
1105 | /* map dma (usually done before) */ | ||
1106 | if (ep->dma && usbreq->length != 0 | ||
1107 | && (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) { | ||
1108 | VDBG(dev, "DMA map req %p\n", req); | ||
1109 | if (ep->in) | ||
1110 | usbreq->dma = pci_map_single(dev->pdev, | ||
1111 | usbreq->buf, | ||
1112 | usbreq->length, | ||
1113 | PCI_DMA_TODEVICE); | ||
1114 | else | ||
1115 | usbreq->dma = pci_map_single(dev->pdev, | ||
1116 | usbreq->buf, | ||
1117 | usbreq->length, | ||
1118 | PCI_DMA_FROMDEVICE); | ||
1119 | req->dma_mapping = 1; | ||
1120 | } | ||
1121 | |||
1122 | VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n", | ||
1123 | usbep->name, usbreq, usbreq->length, | ||
1124 | req->td_data, usbreq->buf); | ||
1125 | |||
1126 | spin_lock_irqsave(&dev->lock, iflags); | ||
1127 | usbreq->actual = 0; | ||
1128 | usbreq->status = -EINPROGRESS; | ||
1129 | req->dma_done = 0; | ||
1130 | |||
1131 | /* on empty queue just do first transfer */ | ||
1132 | if (list_empty(&ep->queue)) { | ||
1133 | /* zlp */ | ||
1134 | if (usbreq->length == 0) { | ||
1135 | /* IN zlp's are handled by hardware */ | ||
1136 | complete_req(ep, req, 0); | ||
1137 | VDBG(dev, "%s: zlp\n", ep->ep.name); | ||
1138 | /* | ||
1139 | * if set_config or set_intf is waiting for ack by zlp | ||
1140 | * then set CSR_DONE | ||
1141 | */ | ||
1142 | if (dev->set_cfg_not_acked) { | ||
1143 | tmp = readl(&dev->regs->ctl); | ||
1144 | tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE); | ||
1145 | writel(tmp, &dev->regs->ctl); | ||
1146 | dev->set_cfg_not_acked = 0; | ||
1147 | } | ||
1148 | /* setup command is ACK'ed now by zlp */ | ||
1149 | if (dev->waiting_zlp_ack_ep0in) { | ||
1150 | /* clear NAK by writing CNAK in EP0_IN */ | ||
1151 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
1152 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
1153 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
1154 | dev->ep[UDC_EP0IN_IX].naking = 0; | ||
1155 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], | ||
1156 | UDC_EP0IN_IX); | ||
1157 | dev->waiting_zlp_ack_ep0in = 0; | ||
1158 | } | ||
1159 | goto finished; | ||
1160 | } | ||
1161 | if (ep->dma) { | ||
1162 | retval = prep_dma(ep, req, gfp); | ||
1163 | if (retval != 0) | ||
1164 | goto finished; | ||
1165 | /* write desc pointer to enable DMA */ | ||
1166 | if (ep->in) { | ||
1167 | /* set HOST READY */ | ||
1168 | req->td_data->status = | ||
1169 | AMD_ADDBITS(req->td_data->status, | ||
1170 | UDC_DMA_IN_STS_BS_HOST_READY, | ||
1171 | UDC_DMA_IN_STS_BS); | ||
1172 | } | ||
1173 | |||
1174 | /* disable RX DMA while the descriptor is updated */ | ||
1175 | if (!ep->in) { | ||
1176 | /* stop RDE timer */ | ||
1177 | if (timer_pending(&udc_timer)) { | ||
1178 | set_rde = 0; | ||
1179 | mod_timer(&udc_timer, jiffies - 1); | ||
1180 | } | ||
1181 | /* clear RDE */ | ||
1182 | tmp = readl(&dev->regs->ctl); | ||
1183 | tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE); | ||
1184 | writel(tmp, &dev->regs->ctl); | ||
1185 | open_rxfifo = 1; | ||
1186 | |||
1187 | /* | ||
1188 | * if BNA occurred then let BNA dummy desc. | ||
1189 | * point to current desc. | ||
1190 | */ | ||
1191 | if (ep->bna_occurred) { | ||
1192 | VDBG(dev, "copy to BNA dummy desc.\n"); | ||
1193 | memcpy(ep->bna_dummy_req->td_data, | ||
1194 | req->td_data, | ||
1195 | sizeof(struct udc_data_dma)); | ||
1196 | } | ||
1197 | } | ||
1198 | /* write desc pointer */ | ||
1199 | writel(req->td_phys, &ep->regs->desptr); | ||
1200 | |||
1201 | /* clear NAK by writing CNAK */ | ||
1202 | if (ep->naking) { | ||
1203 | tmp = readl(&ep->regs->ctl); | ||
1204 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
1205 | writel(tmp, &ep->regs->ctl); | ||
1206 | ep->naking = 0; | ||
1207 | UDC_QUEUE_CNAK(ep, ep->num); | ||
1208 | } | ||
1209 | |||
1210 | if (ep->in) { | ||
1211 | /* enable ep irq */ | ||
1212 | tmp = readl(&dev->regs->ep_irqmsk); | ||
1213 | tmp &= AMD_UNMASK_BIT(ep->num); | ||
1214 | writel(tmp, &dev->regs->ep_irqmsk); | ||
1215 | } | ||
1216 | } | ||
1217 | |||
1218 | } else if (ep->dma) { | ||
1219 | |||
1220 | /* | ||
1221 | * prep_dma is not used for OUT eps here; it is not possible | ||
1222 | * in PPB modes because of how the DMA chain is created | ||
1223 | */ | ||
1224 | if (ep->in) { | ||
1225 | retval = prep_dma(ep, req, gfp); | ||
1226 | if (retval != 0) | ||
1227 | goto finished; | ||
1228 | } | ||
1229 | } | ||
1230 | VDBG(dev, "list_add\n"); | ||
1231 | /* add request to ep queue */ | ||
1232 | if (req) { | ||
1233 | |||
1234 | list_add_tail(&req->queue, &ep->queue); | ||
1235 | |||
1236 | /* open rxfifo if out data queued */ | ||
1237 | if (open_rxfifo) { | ||
1238 | /* enable DMA */ | ||
1239 | req->dma_going = 1; | ||
1240 | udc_set_rde(dev); | ||
1241 | if (ep->num != UDC_EP0OUT_IX) | ||
1242 | dev->data_ep_queued = 1; | ||
1243 | } | ||
1244 | /* stop OUT naking */ | ||
1245 | if (!ep->in) { | ||
1246 | if (!use_dma && udc_rxfifo_pending) { | ||
1247 | DBG(dev, "udc_queue(): pending bytes in " | ||
1248 | "rxfifo after nyet\n"); | ||
1249 | /* | ||
1250 | * read pending bytes after nyet, | ||
1251 | * as the isr does | ||
1252 | */ | ||
1253 | if (udc_rxfifo_read(ep, req)) { | ||
1254 | /* finish */ | ||
1255 | complete_req(ep, req, 0); | ||
1256 | } | ||
1257 | udc_rxfifo_pending = 0; | ||
1258 | |||
1259 | } | ||
1260 | } | ||
1261 | } | ||
1262 | |||
1263 | finished: | ||
1264 | spin_unlock_irqrestore(&dev->lock, iflags); | ||
1265 | return retval; | ||
1266 | } | ||
1267 | |||
1268 | /* Empty request queue of an endpoint; caller holds spinlock */ | ||
1269 | static void empty_req_queue(struct udc_ep *ep) | ||
1270 | { | ||
1271 | struct udc_request *req; | ||
1272 | |||
1273 | ep->halted = 1; | ||
1274 | while (!list_empty(&ep->queue)) { | ||
1275 | req = list_entry(ep->queue.next, | ||
1276 | struct udc_request, | ||
1277 | queue); | ||
1278 | complete_req(ep, req, -ESHUTDOWN); | ||
1279 | } | ||
1280 | } | ||
1281 | |||
1282 | /* Dequeues a request packet, called by gadget driver */ | ||
1283 | static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq) | ||
1284 | { | ||
1285 | struct udc_ep *ep; | ||
1286 | struct udc_request *req; | ||
1287 | unsigned halted; | ||
1288 | unsigned long iflags; | ||
1289 | |||
1290 | ep = container_of(usbep, struct udc_ep, ep); | ||
1291 | if (!usbep || !usbreq || (!ep->desc && (ep->num != 0 | ||
1292 | && ep->num != UDC_EP0OUT_IX))) | ||
1293 | return -EINVAL; | ||
1294 | |||
1295 | req = container_of(usbreq, struct udc_request, req); | ||
1296 | |||
1297 | spin_lock_irqsave(&ep->dev->lock, iflags); | ||
1298 | halted = ep->halted; | ||
1299 | ep->halted = 1; | ||
1300 | /* request in processing or next one */ | ||
1301 | if (ep->queue.next == &req->queue) { | ||
1302 | if (ep->dma && req->dma_going) { | ||
1303 | if (ep->in) | ||
1304 | ep->cancel_transfer = 1; | ||
1305 | else { | ||
1306 | u32 tmp; | ||
1307 | u32 dma_sts; | ||
1308 | /* stop potential receive DMA */ | ||
1309 | tmp = readl(&udc->regs->ctl); | ||
1310 | writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE), | ||
1311 | &udc->regs->ctl); | ||
1312 | /* | ||
1313 | * Cancel transfer later in ISR | ||
1314 | * if descriptor was touched. | ||
1315 | */ | ||
1316 | dma_sts = AMD_GETBITS(req->td_data->status, | ||
1317 | UDC_DMA_OUT_STS_BS); | ||
1318 | if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY) | ||
1319 | ep->cancel_transfer = 1; | ||
1320 | else { | ||
1321 | udc_init_bna_dummy(ep->req); | ||
1322 | writel(ep->bna_dummy_req->td_phys, | ||
1323 | &ep->regs->desptr); | ||
1324 | } | ||
1325 | writel(tmp, &udc->regs->ctl); | ||
1326 | } | ||
1327 | } | ||
1328 | } | ||
1329 | complete_req(ep, req, -ECONNRESET); | ||
1330 | ep->halted = halted; | ||
1331 | |||
1332 | spin_unlock_irqrestore(&ep->dev->lock, iflags); | ||
1333 | return 0; | ||
1334 | } | ||
1335 | |||
1336 | /* Halt or clear halt of endpoint */ | ||
1337 | static int | ||
1338 | udc_set_halt(struct usb_ep *usbep, int halt) | ||
1339 | { | ||
1340 | struct udc_ep *ep; | ||
1341 | u32 tmp; | ||
1342 | unsigned long iflags; | ||
1343 | int retval = 0; | ||
1344 | |||
1345 | if (!usbep) | ||
1346 | return -EINVAL; | ||
1347 | |||
1348 | pr_debug("set_halt %s: halt=%d\n", usbep->name, halt); | ||
1349 | |||
1350 | ep = container_of(usbep, struct udc_ep, ep); | ||
1351 | if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX)) | ||
1352 | return -EINVAL; | ||
1353 | if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) | ||
1354 | return -ESHUTDOWN; | ||
1355 | |||
1356 | spin_lock_irqsave(&udc_stall_spinlock, iflags); | ||
1357 | /* halt or clear halt */ | ||
1358 | if (halt) { | ||
1359 | if (ep->num == 0) | ||
1360 | ep->dev->stall_ep0in = 1; | ||
1361 | else { | ||
1362 | /* | ||
1363 | * set STALL | ||
1364 | * rxfifo empty is not taken into account | ||
1365 | */ | ||
1366 | tmp = readl(&ep->regs->ctl); | ||
1367 | tmp |= AMD_BIT(UDC_EPCTL_S); | ||
1368 | writel(tmp, &ep->regs->ctl); | ||
1369 | ep->halted = 1; | ||
1370 | |||
1371 | /* setup poll timer */ | ||
1372 | if (!timer_pending(&udc_pollstall_timer)) { | ||
1373 | udc_pollstall_timer.expires = jiffies + | ||
1374 | HZ * UDC_POLLSTALL_TIMER_USECONDS | ||
1375 | / (1000 * 1000); | ||
1376 | if (!stop_pollstall_timer) { | ||
1377 | DBG(ep->dev, "start polltimer\n"); | ||
1378 | add_timer(&udc_pollstall_timer); | ||
1379 | } | ||
1380 | } | ||
1381 | } | ||
1382 | } else { | ||
1383 | /* ep was halted by a previous set_halt() call */ | ||
1384 | if (ep->halted) { | ||
1385 | tmp = readl(&ep->regs->ctl); | ||
1386 | /* clear stall bit */ | ||
1387 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | ||
1388 | /* clear NAK by writing CNAK */ | ||
1389 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
1390 | writel(tmp, &ep->regs->ctl); | ||
1391 | ep->halted = 0; | ||
1392 | UDC_QUEUE_CNAK(ep, ep->num); | ||
1393 | } | ||
1394 | } | ||
1395 | spin_unlock_irqrestore(&udc_stall_spinlock, iflags); | ||
1396 | return retval; | ||
1397 | } | ||
1398 | |||
1399 | /* gadget interface */ | ||
1400 | static const struct usb_ep_ops udc_ep_ops = { | ||
1401 | .enable = udc_ep_enable, | ||
1402 | .disable = udc_ep_disable, | ||
1403 | |||
1404 | .alloc_request = udc_alloc_request, | ||
1405 | .free_request = udc_free_request, | ||
1406 | |||
1407 | .queue = udc_queue, | ||
1408 | .dequeue = udc_dequeue, | ||
1409 | |||
1410 | .set_halt = udc_set_halt, | ||
1411 | /* fifo ops not implemented */ | ||
1412 | }; | ||
1413 | |||
1414 | /*-------------------------------------------------------------------------*/ | ||
1415 | |||
1416 | /* Get frame counter (not implemented) */ | ||
1417 | static int udc_get_frame(struct usb_gadget *gadget) | ||
1418 | { | ||
1419 | return -EOPNOTSUPP; | ||
1420 | } | ||
1421 | |||
1422 | /* Remote wakeup gadget interface */ | ||
1423 | static int udc_wakeup(struct usb_gadget *gadget) | ||
1424 | { | ||
1425 | struct udc *dev; | ||
1426 | |||
1427 | if (!gadget) | ||
1428 | return -EINVAL; | ||
1429 | dev = container_of(gadget, struct udc, gadget); | ||
1430 | udc_remote_wakeup(dev); | ||
1431 | |||
1432 | return 0; | ||
1433 | } | ||
1434 | |||
1435 | /* gadget operations */ | ||
1436 | static const struct usb_gadget_ops udc_ops = { | ||
1437 | .wakeup = udc_wakeup, | ||
1438 | .get_frame = udc_get_frame, | ||
1439 | }; | ||
1440 | |||
1441 | /* Sets up endpoint parameters, adds endpoints to linked list */ | ||
1442 | static void make_ep_lists(struct udc *dev) | ||
1443 | { | ||
1444 | /* make gadget ep lists */ | ||
1445 | INIT_LIST_HEAD(&dev->gadget.ep_list); | ||
1446 | list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list, | ||
1447 | &dev->gadget.ep_list); | ||
1448 | list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list, | ||
1449 | &dev->gadget.ep_list); | ||
1450 | list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list, | ||
1451 | &dev->gadget.ep_list); | ||
1452 | |||
1453 | /* fifo config */ | ||
1454 | dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE; | ||
1455 | if (dev->gadget.speed == USB_SPEED_FULL) | ||
1456 | dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE; | ||
1457 | else if (dev->gadget.speed == USB_SPEED_HIGH) | ||
1458 | dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf; | ||
1459 | dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE; | ||
1460 | } | ||
1461 | |||
1462 | /* init registers at driver load time */ | ||
1463 | static int startup_registers(struct udc *dev) | ||
1464 | { | ||
1465 | u32 tmp; | ||
1466 | |||
1467 | /* init controller by soft reset */ | ||
1468 | udc_soft_reset(dev); | ||
1469 | |||
1470 | /* mask unneeded interrupts */ | ||
1471 | udc_mask_unused_interrupts(dev); | ||
1472 | |||
1473 | /* put into initial config */ | ||
1474 | udc_basic_init(dev); | ||
1475 | /* link up all endpoints */ | ||
1476 | udc_setup_endpoints(dev); | ||
1477 | |||
1478 | /* program speed */ | ||
1479 | tmp = readl(&dev->regs->cfg); | ||
1480 | if (use_fullspeed) { | ||
1481 | tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); | ||
1482 | } else { | ||
1483 | tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD); | ||
1484 | } | ||
1485 | writel(tmp, &dev->regs->cfg); | ||
1486 | |||
1487 | return 0; | ||
1488 | } | ||
1489 | |||
1490 | /* Inits UDC context */ | ||
1491 | static void udc_basic_init(struct udc *dev) | ||
1492 | { | ||
1493 | u32 tmp; | ||
1494 | |||
1495 | DBG(dev, "udc_basic_init()\n"); | ||
1496 | |||
1497 | dev->gadget.speed = USB_SPEED_UNKNOWN; | ||
1498 | |||
1499 | /* stop RDE timer */ | ||
1500 | if (timer_pending(&udc_timer)) { | ||
1501 | set_rde = 0; | ||
1502 | mod_timer(&udc_timer, jiffies - 1); | ||
1503 | } | ||
1504 | /* stop poll stall timer */ | ||
1505 | if (timer_pending(&udc_pollstall_timer)) { | ||
1506 | mod_timer(&udc_pollstall_timer, jiffies - 1); | ||
1507 | } | ||
1508 | /* disable DMA */ | ||
1509 | tmp = readl(&dev->regs->ctl); | ||
1510 | tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE); | ||
1511 | tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE); | ||
1512 | writel(tmp, &dev->regs->ctl); | ||
1513 | |||
1514 | /* enable dynamic CSR programming */ | ||
1515 | tmp = readl(&dev->regs->cfg); | ||
1516 | tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG); | ||
1517 | /* set self powered */ | ||
1518 | tmp |= AMD_BIT(UDC_DEVCFG_SP); | ||
1519 | /* set remote wakeupable */ | ||
1520 | tmp |= AMD_BIT(UDC_DEVCFG_RWKP); | ||
1521 | writel(tmp, &dev->regs->cfg); | ||
1522 | |||
1523 | make_ep_lists(dev); | ||
1524 | |||
1525 | dev->data_ep_enabled = 0; | ||
1526 | dev->data_ep_queued = 0; | ||
1527 | } | ||
1528 | |||
1529 | /* Sets initial endpoint parameters */ | ||
1530 | static void udc_setup_endpoints(struct udc *dev) | ||
1531 | { | ||
1532 | struct udc_ep *ep; | ||
1533 | u32 tmp; | ||
1534 | u32 reg; | ||
1535 | |||
1536 | DBG(dev, "udc_setup_endpoints()\n"); | ||
1537 | |||
1538 | /* read enum speed */ | ||
1539 | tmp = readl(&dev->regs->sts); | ||
1540 | tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED); | ||
1541 | if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) { | ||
1542 | dev->gadget.speed = USB_SPEED_HIGH; | ||
1543 | } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) { | ||
1544 | dev->gadget.speed = USB_SPEED_FULL; | ||
1545 | } | ||
1546 | |||
1547 | /* set basic ep parameters */ | ||
1548 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | ||
1549 | ep = &dev->ep[tmp]; | ||
1550 | ep->dev = dev; | ||
1551 | ep->ep.name = ep_string[tmp]; | ||
1552 | ep->num = tmp; | ||
1553 | /* txfifo size is calculated at enable time */ | ||
1554 | ep->txfifo = dev->txfifo; | ||
1555 | |||
1556 | /* fifo size */ | ||
1557 | if (tmp < UDC_EPIN_NUM) { | ||
1558 | ep->fifo_depth = UDC_TXFIFO_SIZE; | ||
1559 | ep->in = 1; | ||
1560 | } else { | ||
1561 | ep->fifo_depth = UDC_RXFIFO_SIZE; | ||
1562 | ep->in = 0; | ||
1563 | |||
1564 | } | ||
1565 | ep->regs = &dev->ep_regs[tmp]; | ||
1566 | /* | ||
1567 | * the ep is reset only if it was not enabled before, to avoid | ||
1568 | * disabling ep interrupts when an ENUM interrupt occurs but the | ||
1569 | * ep has not been enabled by the gadget driver | ||
1570 | */ | ||
1571 | if (!ep->desc) { | ||
1572 | ep_init(dev->regs, ep); | ||
1573 | } | ||
1574 | |||
1575 | if (use_dma) { | ||
1576 | /* | ||
1577 | * ep->dma is not really used, it only indicates that | ||
1578 | * DMA is active (candidate for removal); | ||
1579 | * dma regs = dev control regs | ||
1580 | */ | ||
1581 | ep->dma = &dev->regs->ctl; | ||
1582 | |||
1583 | /* nak OUT endpoints until enable - not for ep0 */ | ||
1584 | if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX | ||
1585 | && tmp > UDC_EPIN_NUM) { | ||
1586 | /* set NAK */ | ||
1587 | reg = readl(&dev->ep[tmp].regs->ctl); | ||
1588 | reg |= AMD_BIT(UDC_EPCTL_SNAK); | ||
1589 | writel(reg, &dev->ep[tmp].regs->ctl); | ||
1590 | dev->ep[tmp].naking = 1; | ||
1591 | |||
1592 | } | ||
1593 | } | ||
1594 | } | ||
1595 | /* EP0 max packet */ | ||
1596 | if (dev->gadget.speed == USB_SPEED_FULL) { | ||
1597 | dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE; | ||
1598 | dev->ep[UDC_EP0OUT_IX].ep.maxpacket = | ||
1599 | UDC_FS_EP0OUT_MAX_PKT_SIZE; | ||
1600 | } else if (dev->gadget.speed == USB_SPEED_HIGH) { | ||
1601 | dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE; | ||
1602 | dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE; | ||
1603 | } | ||
1604 | |||
1605 | /* | ||
1606 | * with suspend bug workaround, ep0 params for gadget driver | ||
1607 | * are set at the gadget driver's bind() call | ||
1608 | */ | ||
1609 | dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep; | ||
1610 | dev->ep[UDC_EP0IN_IX].halted = 0; | ||
1611 | INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); | ||
1612 | |||
1613 | /* init cfg/alt/int */ | ||
1614 | dev->cur_config = 0; | ||
1615 | dev->cur_intf = 0; | ||
1616 | dev->cur_alt = 0; | ||
1617 | } | ||
1618 | |||
1619 | /* Bring-up after a Connect event: initial setup to be ready for ep0 events */ | ||
1620 | static void usb_connect(struct udc *dev) | ||
1621 | { | ||
1622 | |||
1623 | dev_info(&dev->pdev->dev, "USB Connect\n"); | ||
1624 | |||
1625 | dev->connected = 1; | ||
1626 | |||
1627 | /* put into initial config */ | ||
1628 | udc_basic_init(dev); | ||
1629 | |||
1630 | /* enable device setup interrupts */ | ||
1631 | udc_enable_dev_setup_interrupts(dev); | ||
1632 | } | ||
1633 | |||
1634 | /* | ||
1635 | * Calls the gadget with a disconnect event, resets the UDC and does | ||
1636 | * the initial bring-up to be ready for ep0 events | ||
1637 | */ | ||
1638 | static void usb_disconnect(struct udc *dev) | ||
1639 | { | ||
1640 | |||
1641 | dev_info(&dev->pdev->dev, "USB Disconnect\n"); | ||
1642 | |||
1643 | dev->connected = 0; | ||
1644 | |||
1645 | /* mask interrupts */ | ||
1646 | udc_mask_unused_interrupts(dev); | ||
1647 | |||
1648 | /* REVISIT there doesn't seem to be a point to having this | ||
1649 | * talk to a tasklet ... do it directly, we already hold | ||
1650 | * the spinlock needed to process the disconnect. | ||
1651 | */ | ||
1652 | |||
1653 | tasklet_schedule(&disconnect_tasklet); | ||
1654 | } | ||
1655 | |||
1656 | /* Tasklet for disconnect to be outside of interrupt context */ | ||
1657 | static void udc_tasklet_disconnect(unsigned long par) | ||
1658 | { | ||
1659 | struct udc *dev = (struct udc *)(*((struct udc **) par)); | ||
1660 | u32 tmp; | ||
1661 | |||
1662 | DBG(dev, "Tasklet disconnect\n"); | ||
1663 | spin_lock_irq(&dev->lock); | ||
1664 | |||
1665 | if (dev->driver) { | ||
1666 | spin_unlock(&dev->lock); | ||
1667 | dev->driver->disconnect(&dev->gadget); | ||
1668 | spin_lock(&dev->lock); | ||
1669 | |||
1670 | /* empty queues */ | ||
1671 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | ||
1672 | empty_req_queue(&dev->ep[tmp]); | ||
1673 | } | ||
1674 | |||
1675 | } | ||
1676 | |||
1677 | /* disable ep0 */ | ||
1678 | ep_init(dev->regs, | ||
1679 | &dev->ep[UDC_EP0IN_IX]); | ||
1680 | |||
1681 | |||
1682 | if (!soft_reset_occured) { | ||
1683 | /* init controller by soft reset */ | ||
1684 | udc_soft_reset(dev); | ||
1685 | soft_reset_occured++; | ||
1686 | } | ||
1687 | |||
1688 | /* re-enable dev interrupts */ | ||
1689 | udc_enable_dev_setup_interrupts(dev); | ||
1690 | /* back to full speed ? */ | ||
1691 | if (use_fullspeed) { | ||
1692 | tmp = readl(&dev->regs->cfg); | ||
1693 | tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); | ||
1694 | writel(tmp, &dev->regs->cfg); | ||
1695 | } | ||
1696 | |||
1697 | spin_unlock_irq(&dev->lock); | ||
1698 | } | ||
1699 | |||
1700 | /* Reset the UDC core */ | ||
1701 | static void udc_soft_reset(struct udc *dev) | ||
1702 | { | ||
1703 | unsigned long flags; | ||
1704 | |||
1705 | DBG(dev, "Soft reset\n"); | ||
1706 | /* | ||
1707 | * clear possibly pending interrupts, because int. | ||
1708 | * status is lost after soft reset; | ||
1709 | * ep int. status reset | ||
1710 | */ | ||
1711 | writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts); | ||
1712 | /* device int. status reset */ | ||
1713 | writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts); | ||
1714 | |||
1715 | spin_lock_irqsave(&udc_irq_spinlock, flags); | ||
1716 | writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); | ||
1717 | readl(&dev->regs->cfg); | ||
1718 | spin_unlock_irqrestore(&udc_irq_spinlock, flags); | ||
1719 | |||
1720 | } | ||
1721 | |||
1722 | /* RDE timer callback to set RDE bit */ | ||
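| /* | ||
| * set_rde acts as a small state variable here: a value > 1 means the | ||
| * fifo may be opened on this run, 1 means keep polling (or bump to 2 | ||
| * once the fifo holds data), and <= 0 ends up as -1, i.e. RDE was | ||
| * already set by udc_queue(). | ||
| */ | ||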
1723 | static void udc_timer_function(unsigned long v) | ||
1724 | { | ||
1725 | u32 tmp; | ||
1726 | |||
1727 | spin_lock_irq(&udc_irq_spinlock); | ||
1728 | |||
1729 | if (set_rde > 0) { | ||
1730 | /* | ||
1731 | * conditionally open the fifo if it was already | ||
1732 | * filled on the last timer call | ||
1733 | */ | ||
1734 | if (set_rde > 1) { | ||
1735 | /* set RDE to receive setup data */ | ||
1736 | tmp = readl(&udc->regs->ctl); | ||
1737 | tmp |= AMD_BIT(UDC_DEVCTL_RDE); | ||
1738 | writel(tmp, &udc->regs->ctl); | ||
1739 | set_rde = -1; | ||
1740 | } else if (readl(&udc->regs->sts) | ||
1741 | & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | ||
1742 | /* | ||
1743 | * if the fifo is empty, set up polling instead | ||
1744 | * of just opening the fifo | ||
1745 | */ | ||
1746 | udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV; | ||
1747 | if (!stop_timer) { | ||
1748 | add_timer(&udc_timer); | ||
1749 | } | ||
1750 | } else { | ||
1751 | /* | ||
1752 | * the fifo contains data now; set up the timer to open | ||
1753 | * the fifo when it expires so that setup packets can be | ||
1754 | * received. When data packets get queued by the gadget | ||
1755 | * layer, the timer is forced to expire with set_rde=0 | ||
1756 | * (RDE is then set in udc_queue()) | ||
1757 | */ | ||
1758 | set_rde++; | ||
1759 | /* debug: lhadmot_timer_start = 221070 */ | ||
1760 | udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS; | ||
1761 | if (!stop_timer) { | ||
1762 | add_timer(&udc_timer); | ||
1763 | } | ||
1764 | } | ||
1765 | |||
1766 | } else | ||
1767 | set_rde = -1; /* RDE was set by udc_queue() */ | ||
1768 | spin_unlock_irq(&udc_irq_spinlock); | ||
1769 | if (stop_timer) | ||
1770 | complete(&on_exit); | ||
1771 | |||
1772 | } | ||
1773 | |||
1774 | /* Handle halt state, used in stall poll timer */ | ||
1775 | static void udc_handle_halt_state(struct udc_ep *ep) | ||
1776 | { | ||
1777 | u32 tmp; | ||
1778 | /* handle stall state as long as the ep is halted */ | ||
1779 | if (ep->halted == 1) { | ||
1780 | tmp = readl(&ep->regs->ctl); | ||
1781 | /* STALL cleared ? */ | ||
1782 | if (!(tmp & AMD_BIT(UDC_EPCTL_S))) { | ||
1783 | /* | ||
1784 | * FIXME: MSC spec requires that stall remains | ||
1785 | * even on receiving CLEAR_FEATURE HALT. So | ||
1786 | * we would set STALL again here to be compliant. | ||
1787 | * But with current mass storage drivers this does | ||
1788 | * not work (would produce endless host retries). | ||
1789 | * So we clear halt on CLEAR_FEATURE. | ||
1790 | * | ||
1791 | DBG(ep->dev, "ep %d: set STALL again\n", ep->num); | ||
1792 | tmp |= AMD_BIT(UDC_EPCTL_S); | ||
1793 | writel(tmp, &ep->regs->ctl);*/ | ||
1794 | |||
1795 | /* clear NAK by writing CNAK */ | ||
1796 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
1797 | writel(tmp, &ep->regs->ctl); | ||
1798 | ep->halted = 0; | ||
1799 | UDC_QUEUE_CNAK(ep, ep->num); | ||
1800 | } | ||
1801 | } | ||
1802 | } | ||
1803 | |||
1804 | /* Stall timer callback to poll S bit and set it again after */ | ||
1805 | static void udc_pollstall_timer_function(unsigned long v) | ||
1806 | { | ||
1807 | struct udc_ep *ep; | ||
1808 | int halted = 0; | ||
1809 | |||
1810 | spin_lock_irq(&udc_stall_spinlock); | ||
1811 | /* | ||
1812 | * only one IN and one OUT endpoint are handled | ||
1813 | * IN poll stall | ||
1814 | */ | ||
1815 | ep = &udc->ep[UDC_EPIN_IX]; | ||
1816 | udc_handle_halt_state(ep); | ||
1817 | if (ep->halted) | ||
1818 | halted = 1; | ||
1819 | /* OUT poll stall */ | ||
1820 | ep = &udc->ep[UDC_EPOUT_IX]; | ||
1821 | udc_handle_halt_state(ep); | ||
1822 | if (ep->halted) | ||
1823 | halted = 1; | ||
1824 | |||
1825 | /* setup timer again when still halted */ | ||
1826 | if (!stop_pollstall_timer && halted) { | ||
1827 | udc_pollstall_timer.expires = jiffies + | ||
1828 | HZ * UDC_POLLSTALL_TIMER_USECONDS | ||
1829 | / (1000 * 1000); | ||
1830 | add_timer(&udc_pollstall_timer); | ||
1831 | } | ||
1832 | spin_unlock_irq(&udc_stall_spinlock); | ||
1833 | |||
1834 | if (stop_pollstall_timer) | ||
1835 | complete(&on_pollstall_exit); | ||
1836 | } | ||
1837 | |||
1838 | /* Inits endpoint 0 so that SETUP packets are processed */ | ||
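| /* | ||
| * Flushes the ep0-in fifo, programs fifo and max packet sizes | ||
| * according to the enumerated speed, arms the ep0-out DMA descriptors | ||
| * when DMA is used, and finally clears NAK on both ep0 directions. | ||
| */ | ||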
1839 | static void activate_control_endpoints(struct udc *dev) | ||
1840 | { | ||
1841 | u32 tmp; | ||
1842 | |||
1843 | DBG(dev, "activate_control_endpoints\n"); | ||
1844 | |||
1845 | /* flush fifo */ | ||
1846 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
1847 | tmp |= AMD_BIT(UDC_EPCTL_F); | ||
1848 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
1849 | |||
1850 | /* set ep0 directions */ | ||
1851 | dev->ep[UDC_EP0IN_IX].in = 1; | ||
1852 | dev->ep[UDC_EP0OUT_IX].in = 0; | ||
1853 | |||
1854 | /* set buffer size (tx fifo entries) of EP0_IN */ | ||
1855 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); | ||
1856 | if (dev->gadget.speed == USB_SPEED_FULL) | ||
1857 | tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE, | ||
1858 | UDC_EPIN_BUFF_SIZE); | ||
1859 | else if (dev->gadget.speed == USB_SPEED_HIGH) | ||
1860 | tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE, | ||
1861 | UDC_EPIN_BUFF_SIZE); | ||
1862 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); | ||
1863 | |||
1864 | /* set max packet size of EP0_IN */ | ||
1865 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); | ||
1866 | if (dev->gadget.speed == USB_SPEED_FULL) | ||
1867 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE, | ||
1868 | UDC_EP_MAX_PKT_SIZE); | ||
1869 | else if (dev->gadget.speed == USB_SPEED_HIGH) | ||
1870 | tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE, | ||
1871 | UDC_EP_MAX_PKT_SIZE); | ||
1872 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); | ||
1873 | |||
1874 | /* set max packet size of EP0_OUT */ | ||
1875 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); | ||
1876 | if (dev->gadget.speed == USB_SPEED_FULL) | ||
1877 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE, | ||
1878 | UDC_EP_MAX_PKT_SIZE); | ||
1879 | else if (dev->gadget.speed == USB_SPEED_HIGH) | ||
1880 | tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE, | ||
1881 | UDC_EP_MAX_PKT_SIZE); | ||
1882 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); | ||
1883 | |||
1884 | /* set max packet size of EP0 in UDC CSR */ | ||
1885 | tmp = readl(&dev->csr->ne[0]); | ||
1886 | if (dev->gadget.speed == USB_SPEED_FULL) | ||
1887 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE, | ||
1888 | UDC_CSR_NE_MAX_PKT); | ||
1889 | else if (dev->gadget.speed == USB_SPEED_HIGH) | ||
1890 | tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE, | ||
1891 | UDC_CSR_NE_MAX_PKT); | ||
1892 | writel(tmp, &dev->csr->ne[0]); | ||
1893 | |||
1894 | if (use_dma) { | ||
1895 | dev->ep[UDC_EP0OUT_IX].td->status |= | ||
1896 | AMD_BIT(UDC_DMA_OUT_STS_L); | ||
1897 | /* write dma desc address */ | ||
1898 | writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma, | ||
1899 | &dev->ep[UDC_EP0OUT_IX].regs->subptr); | ||
1900 | writel(dev->ep[UDC_EP0OUT_IX].td_phys, | ||
1901 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | ||
1902 | /* stop RDE timer */ | ||
1903 | if (timer_pending(&udc_timer)) { | ||
1904 | set_rde = 0; | ||
1905 | mod_timer(&udc_timer, jiffies - 1); | ||
1906 | } | ||
1907 | /* stop pollstall timer */ | ||
1908 | if (timer_pending(&udc_pollstall_timer)) { | ||
1909 | mod_timer(&udc_pollstall_timer, jiffies - 1); | ||
1910 | } | ||
1911 | /* enable DMA */ | ||
1912 | tmp = readl(&dev->regs->ctl); | ||
1913 | tmp |= AMD_BIT(UDC_DEVCTL_MODE) | ||
1914 | | AMD_BIT(UDC_DEVCTL_RDE) | ||
1915 | | AMD_BIT(UDC_DEVCTL_TDE); | ||
1916 | if (use_dma_bufferfill_mode) { | ||
1917 | tmp |= AMD_BIT(UDC_DEVCTL_BF); | ||
1918 | } else if (use_dma_ppb_du) { | ||
1919 | tmp |= AMD_BIT(UDC_DEVCTL_DU); | ||
1920 | } | ||
1921 | writel(tmp, &dev->regs->ctl); | ||
1922 | } | ||
1923 | |||
1924 | /* clear NAK by writing CNAK for EP0IN */ | ||
1925 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
1926 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
1927 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
1928 | dev->ep[UDC_EP0IN_IX].naking = 0; | ||
1929 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); | ||
1930 | |||
1931 | /* clear NAK by writing CNAK for EP0OUT */ | ||
1932 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | ||
1933 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
1934 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | ||
1935 | dev->ep[UDC_EP0OUT_IX].naking = 0; | ||
1936 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX); | ||
1937 | } | ||
1938 | |||
1939 | /* Make endpoint 0 ready for control traffic */ | ||
1940 | static int setup_ep0(struct udc *dev) | ||
1941 | { | ||
1942 | activate_control_endpoints(dev); | ||
1943 | /* enable ep0 interrupts */ | ||
1944 | udc_enable_ep0_interrupts(dev); | ||
1945 | /* enable device setup interrupts */ | ||
1946 | udc_enable_dev_setup_interrupts(dev); | ||
1947 | |||
1948 | return 0; | ||
1949 | } | ||
1950 | |||
1951 | /* Called by gadget driver to register itself */ | ||
1952 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | ||
1953 | { | ||
1954 | struct udc *dev = udc; | ||
1955 | int retval; | ||
1956 | u32 tmp; | ||
1957 | |||
1958 | if (!driver || !driver->bind || !driver->setup | ||
1959 | || driver->speed != USB_SPEED_HIGH) | ||
1960 | return -EINVAL; | ||
1961 | if (!dev) | ||
1962 | return -ENODEV; | ||
1963 | if (dev->driver) | ||
1964 | return -EBUSY; | ||
1965 | |||
1966 | driver->driver.bus = NULL; | ||
1967 | dev->driver = driver; | ||
1968 | dev->gadget.dev.driver = &driver->driver; | ||
1969 | |||
1970 | retval = driver->bind(&dev->gadget); | ||
1971 | |||
1972 | /* Some gadget drivers use both ep0 directions. | ||
1973 | * NOTE: to gadget driver, ep0 is just one endpoint... | ||
1974 | */ | ||
1975 | dev->ep[UDC_EP0OUT_IX].ep.driver_data = | ||
1976 | dev->ep[UDC_EP0IN_IX].ep.driver_data; | ||
1977 | |||
1978 | if (retval) { | ||
1979 | DBG(dev, "binding to %s returning %d\n", | ||
1980 | driver->driver.name, retval); | ||
1981 | dev->driver = NULL; | ||
1982 | dev->gadget.dev.driver = NULL; | ||
1983 | return retval; | ||
1984 | } | ||
1985 | |||
1986 | /* get ready for ep0 traffic */ | ||
1987 | setup_ep0(dev); | ||
1988 | |||
1989 | /* clear SD */ | ||
1990 | tmp = readl(&dev->regs->ctl); | ||
1991 | tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD); | ||
1992 | writel(tmp, &dev->regs->ctl); | ||
1993 | |||
1994 | usb_connect(dev); | ||
1995 | |||
1996 | return 0; | ||
1997 | } | ||
1998 | EXPORT_SYMBOL(usb_gadget_register_driver); | ||
1999 | |||
2000 | /* shutdown requests and disconnect from gadget */ | ||
2001 | static void | ||
2002 | shutdown(struct udc *dev, struct usb_gadget_driver *driver) | ||
2003 | __releases(dev->lock) | ||
2004 | __acquires(dev->lock) | ||
2005 | { | ||
2006 | int tmp; | ||
2007 | |||
2008 | /* empty queues and init hardware */ | ||
2009 | udc_basic_init(dev); | ||
2010 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | ||
2011 | empty_req_queue(&dev->ep[tmp]); | ||
2012 | } | ||
2013 | |||
2014 | if (dev->gadget.speed != USB_SPEED_UNKNOWN) { | ||
2015 | spin_unlock(&dev->lock); | ||
2016 | driver->disconnect(&dev->gadget); | ||
2017 | spin_lock(&dev->lock); | ||
2018 | } | ||
2019 | /* init */ | ||
2020 | udc_setup_endpoints(dev); | ||
2021 | } | ||
2022 | |||
2023 | /* Called by gadget driver to unregister itself */ | ||
2024 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | ||
2025 | { | ||
2026 | struct udc *dev = udc; | ||
2027 | unsigned long flags; | ||
2028 | u32 tmp; | ||
2029 | |||
2030 | if (!dev) | ||
2031 | return -ENODEV; | ||
2032 | if (!driver || driver != dev->driver || !driver->unbind) | ||
2033 | return -EINVAL; | ||
2034 | |||
2035 | spin_lock_irqsave(&dev->lock, flags); | ||
2036 | udc_mask_unused_interrupts(dev); | ||
2037 | shutdown(dev, driver); | ||
2038 | spin_unlock_irqrestore(&dev->lock, flags); | ||
2039 | |||
2040 | driver->unbind(&dev->gadget); | ||
2041 | dev->driver = NULL; | ||
2042 | |||
2043 | /* set SD */ | ||
2044 | tmp = readl(&dev->regs->ctl); | ||
2045 | tmp |= AMD_BIT(UDC_DEVCTL_SD); | ||
2046 | writel(tmp, &dev->regs->ctl); | ||
2047 | |||
2048 | |||
2049 | DBG(dev, "%s: unregistered\n", driver->driver.name); | ||
2050 | |||
2051 | return 0; | ||
2052 | } | ||
2053 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | ||
2054 | |||
2055 | |||
2056 | /* Clear pending NAK bits */ | ||
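| /* | ||
| * Walks the cnak_pending bitmask (one bit per endpoint) and writes | ||
| * CNAK for every IN endpoint, and for ep0-out, whose NAK clear was | ||
| * deferred; the data OUT ISR below calls this only while the rxfifo | ||
| * is empty. | ||
| */ | ||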
2057 | static void udc_process_cnak_queue(struct udc *dev) | ||
2058 | { | ||
2059 | u32 tmp; | ||
2060 | u32 reg; | ||
2061 | |||
2062 | /* check epin's */ | ||
2063 | DBG(dev, "CNAK pending queue processing\n"); | ||
2064 | for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) { | ||
2065 | if (cnak_pending & (1 << tmp)) { | ||
2066 | DBG(dev, "CNAK pending for ep%d\n", tmp); | ||
2067 | /* clear NAK by writing CNAK */ | ||
2068 | reg = readl(&dev->ep[tmp].regs->ctl); | ||
2069 | reg |= AMD_BIT(UDC_EPCTL_CNAK); | ||
2070 | writel(reg, &dev->ep[tmp].regs->ctl); | ||
2071 | dev->ep[tmp].naking = 0; | ||
2072 | UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num); | ||
2073 | } | ||
2074 | } | ||
2075 | /* ... and ep0out */ | ||
2076 | if (cnak_pending & (1 << UDC_EP0OUT_IX)) { | ||
2077 | DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX); | ||
2078 | /* clear NAK by writing CNAK */ | ||
2079 | reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | ||
2080 | reg |= AMD_BIT(UDC_EPCTL_CNAK); | ||
2081 | writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | ||
2082 | dev->ep[UDC_EP0OUT_IX].naking = 0; | ||
2083 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], | ||
2084 | dev->ep[UDC_EP0OUT_IX].num); | ||
2085 | } | ||
2086 | } | ||
2087 | |||
2088 | /* Enabling RX DMA after setup packet */ | ||
2089 | static void udc_ep0_set_rde(struct udc *dev) | ||
2090 | { | ||
2091 | if (use_dma) { | ||
2092 | /* | ||
2093 | * only enable RXDMA when no data endpoint is enabled | ||
2094 | * or data is queued | ||
2095 | */ | ||
2096 | if (!dev->data_ep_enabled || dev->data_ep_queued) { | ||
2097 | udc_set_rde(dev); | ||
2098 | } else { | ||
2099 | /* | ||
2100 | * setup timer for enabling RDE (to not enable | ||
2101 | * RXFIFO DMA for data endpoints too early) | ||
2102 | */ | ||
2103 | if (set_rde != 0 && !timer_pending(&udc_timer)) { | ||
2104 | udc_timer.expires = | ||
2105 | jiffies + HZ/UDC_RDE_TIMER_DIV; | ||
2106 | set_rde = 1; | ||
2107 | if (!stop_timer) { | ||
2108 | add_timer(&udc_timer); | ||
2109 | } | ||
2110 | } | ||
2111 | } | ||
2112 | } | ||
2113 | } | ||
2114 | |||
2115 | |||
2116 | /* Interrupt handler for data OUT traffic */ | ||
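| /* | ||
| * In outline: handle BNA and HE error events first, then complete the | ||
| * current request either from the rxfifo (non-DMA) or once the DMA | ||
| * status shows DMA_DONE, arm the next descriptor or the BNA dummy | ||
| * descriptor, and re-enable RDE (directly or via the RDE timer) so | ||
| * further OUT data and ep0 packets can be received. | ||
| */ | ||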
2117 | static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix) | ||
2118 | { | ||
2119 | irqreturn_t ret_val = IRQ_NONE; | ||
2120 | u32 tmp; | ||
2121 | struct udc_ep *ep; | ||
2122 | struct udc_request *req; | ||
2123 | unsigned int count; | ||
2124 | struct udc_data_dma *td = NULL; | ||
2125 | unsigned dma_done; | ||
2126 | |||
2127 | VDBG(dev, "ep%d irq\n", ep_ix); | ||
2128 | ep = &dev->ep[ep_ix]; | ||
2129 | |||
2130 | tmp = readl(&ep->regs->sts); | ||
2131 | if (use_dma) { | ||
2132 | /* BNA event ? */ | ||
2133 | if (tmp & AMD_BIT(UDC_EPSTS_BNA)) { | ||
2134 | DBG(dev, "BNA ep%dout occured - DESPTR = %x \n", | ||
2135 | ep->num, readl(&ep->regs->desptr)); | ||
2136 | /* clear BNA */ | ||
2137 | writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts); | ||
2138 | if (!ep->cancel_transfer) | ||
2139 | ep->bna_occurred = 1; | ||
2140 | else | ||
2141 | ep->cancel_transfer = 0; | ||
2142 | ret_val = IRQ_HANDLED; | ||
2143 | goto finished; | ||
2144 | } | ||
2145 | } | ||
2146 | /* HE event ? */ | ||
2147 | if (tmp & AMD_BIT(UDC_EPSTS_HE)) { | ||
2148 | dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num); | ||
2149 | |||
2150 | /* clear HE */ | ||
2151 | writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); | ||
2152 | ret_val = IRQ_HANDLED; | ||
2153 | goto finished; | ||
2154 | } | ||
2155 | |||
2156 | if (!list_empty(&ep->queue)) { | ||
2157 | |||
2158 | /* next request */ | ||
2159 | req = list_entry(ep->queue.next, | ||
2160 | struct udc_request, queue); | ||
2161 | } else { | ||
2162 | req = NULL; | ||
2163 | udc_rxfifo_pending = 1; | ||
2164 | } | ||
2165 | VDBG(dev, "req = %p\n", req); | ||
2166 | /* fifo mode */ | ||
2167 | if (!use_dma) { | ||
2168 | |||
2169 | /* read fifo */ | ||
2170 | if (req && udc_rxfifo_read(ep, req)) { | ||
2171 | ret_val = IRQ_HANDLED; | ||
2172 | |||
2173 | /* finish */ | ||
2174 | complete_req(ep, req, 0); | ||
2175 | /* next request */ | ||
2176 | if (!list_empty(&ep->queue) && !ep->halted) { | ||
2177 | req = list_entry(ep->queue.next, | ||
2178 | struct udc_request, queue); | ||
2179 | } else | ||
2180 | req = NULL; | ||
2181 | } | ||
2182 | |||
2183 | /* DMA */ | ||
2184 | } else if (!ep->cancel_transfer && req != NULL) { | ||
2185 | ret_val = IRQ_HANDLED; | ||
2186 | |||
2187 | /* check for DMA done */ | ||
2188 | if (!use_dma_ppb) { | ||
2189 | dma_done = AMD_GETBITS(req->td_data->status, | ||
2190 | UDC_DMA_OUT_STS_BS); | ||
2191 | /* packet per buffer mode - rx bytes */ | ||
2192 | } else { | ||
2193 | /* | ||
2194 | * if BNA occurred then recover desc. from | ||
2195 | * BNA dummy desc. | ||
2196 | */ | ||
2197 | if (ep->bna_occurred) { | ||
2198 | VDBG(dev, "Recover desc. from BNA dummy\n"); | ||
2199 | memcpy(req->td_data, ep->bna_dummy_req->td_data, | ||
2200 | sizeof(struct udc_data_dma)); | ||
2201 | ep->bna_occurred = 0; | ||
2202 | udc_init_bna_dummy(ep->req); | ||
2203 | } | ||
2204 | td = udc_get_last_dma_desc(req); | ||
2205 | dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS); | ||
2206 | } | ||
2207 | if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) { | ||
2208 | /* buffer fill mode - rx bytes */ | ||
2209 | if (!use_dma_ppb) { | ||
2210 | /* number of received bytes */ | ||
2211 | count = AMD_GETBITS(req->td_data->status, | ||
2212 | UDC_DMA_OUT_STS_RXBYTES); | ||
2213 | VDBG(dev, "rx bytes=%u\n", count); | ||
2214 | /* packet per buffer mode - rx bytes */ | ||
2215 | } else { | ||
2216 | VDBG(dev, "req->td_data=%p\n", req->td_data); | ||
2217 | VDBG(dev, "last desc = %p\n", td); | ||
2218 | /* number of received bytes */ | ||
2219 | if (use_dma_ppb_du) { | ||
2220 | /* every desc. counts bytes */ | ||
2221 | count = udc_get_ppbdu_rxbytes(req); | ||
2222 | } else { | ||
2223 | /* last desc. counts bytes */ | ||
2224 | count = AMD_GETBITS(td->status, | ||
2225 | UDC_DMA_OUT_STS_RXBYTES); | ||
2226 | if (!count && req->req.length | ||
2227 | == UDC_DMA_MAXPACKET) { | ||
2228 | /* | ||
2229 | * on 64k packets the RXBYTES | ||
2230 | * field is zero | ||
2231 | */ | ||
2232 | count = UDC_DMA_MAXPACKET; | ||
2233 | } | ||
2234 | } | ||
2235 | VDBG(dev, "last desc rx bytes=%u\n", count); | ||
2236 | } | ||
2237 | |||
2238 | tmp = req->req.length - req->req.actual; | ||
2239 | if (count > tmp) { | ||
2240 | if ((tmp % ep->ep.maxpacket) != 0) { | ||
2241 | DBG(dev, "%s: rx %db, space=%db\n", | ||
2242 | ep->ep.name, count, tmp); | ||
2243 | req->req.status = -EOVERFLOW; | ||
2244 | } | ||
2245 | count = tmp; | ||
2246 | } | ||
2247 | req->req.actual += count; | ||
2248 | req->dma_going = 0; | ||
2249 | /* complete request */ | ||
2250 | complete_req(ep, req, 0); | ||
2251 | |||
2252 | /* next request */ | ||
2253 | if (!list_empty(&ep->queue) && !ep->halted) { | ||
2254 | req = list_entry(ep->queue.next, | ||
2255 | struct udc_request, | ||
2256 | queue); | ||
2257 | /* | ||
2258 | * DMA may already have been started by | ||
2259 | * udc_queue(), called from the gadget driver's | ||
2260 | * completion routine. This happens when the | ||
2261 | * queue holds only one request. | ||
2262 | */ | ||
2263 | if (req->dma_going == 0) { | ||
2264 | /* next dma */ | ||
2265 | if (prep_dma(ep, req, GFP_ATOMIC) != 0) | ||
2266 | goto finished; | ||
2267 | /* write desc pointer */ | ||
2268 | writel(req->td_phys, | ||
2269 | &ep->regs->desptr); | ||
2270 | req->dma_going = 1; | ||
2271 | /* enable DMA */ | ||
2272 | udc_set_rde(dev); | ||
2273 | } | ||
2274 | } else { | ||
2275 | /* | ||
2276 | * implant BNA dummy descriptor to allow | ||
2277 | * RXFIFO opening by RDE | ||
2278 | */ | ||
2279 | if (ep->bna_dummy_req) { | ||
2280 | /* write desc pointer */ | ||
2281 | writel(ep->bna_dummy_req->td_phys, | ||
2282 | &ep->regs->desptr); | ||
2283 | ep->bna_occurred = 0; | ||
2284 | } | ||
2285 | |||
2286 | /* | ||
2287 | * schedule timer for setting RDE if queue | ||
2288 | * remains empty, to allow ep0 packets to pass | ||
2289 | * through | ||
2290 | */ | ||
2291 | if (set_rde != 0 | ||
2292 | && !timer_pending(&udc_timer)) { | ||
2293 | udc_timer.expires = | ||
2294 | jiffies | ||
2295 | + HZ*UDC_RDE_TIMER_SECONDS; | ||
2296 | set_rde = 1; | ||
2297 | if (!stop_timer) { | ||
2298 | add_timer(&udc_timer); | ||
2299 | } | ||
2300 | } | ||
2301 | if (ep->num != UDC_EP0OUT_IX) | ||
2302 | dev->data_ep_queued = 0; | ||
2303 | } | ||
2304 | |||
2305 | } else { | ||
2306 | /* | ||
2307 | * RX DMA must be reenabled for each desc in PPBDU mode | ||
2308 | * and must be enabled for PPBNDU mode in case of BNA | ||
2309 | */ | ||
2310 | udc_set_rde(dev); | ||
2311 | } | ||
2312 | |||
2313 | } else if (ep->cancel_transfer) { | ||
2314 | ret_val = IRQ_HANDLED; | ||
2315 | ep->cancel_transfer = 0; | ||
2316 | } | ||
2317 | |||
2318 | /* check pending CNAKS */ | ||
2319 | if (cnak_pending) { | ||
2320 | /* CNAK processing only while the rxfifo is empty */ | ||
2321 | if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | ||
2322 | udc_process_cnak_queue(dev); | ||
2323 | } | ||
2324 | } | ||
2325 | |||
2326 | /* clear OUT bits in ep status */ | ||
2327 | writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts); | ||
2328 | finished: | ||
2329 | return ret_val; | ||
2330 | } | ||
2331 | |||
2332 | /* Interrupt handler for data IN traffic */ | ||
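| /* | ||
| * In outline: handle BNA and HE error events, treat TDC as DMA | ||
| * completion for the current request, and on the IN bit either write | ||
| * the next chunk into the txfifo (non-DMA) or set up the descriptor | ||
| * and the poll demand bit to start the next IN DMA. | ||
| */ | ||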
2333 | static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix) | ||
2334 | { | ||
2335 | irqreturn_t ret_val = IRQ_NONE; | ||
2336 | u32 tmp; | ||
2337 | u32 epsts; | ||
2338 | struct udc_ep *ep; | ||
2339 | struct udc_request *req; | ||
2340 | struct udc_data_dma *td; | ||
2341 | unsigned dma_done; | ||
2342 | unsigned len; | ||
2343 | |||
2344 | ep = &dev->ep[ep_ix]; | ||
2345 | |||
2346 | epsts = readl(&ep->regs->sts); | ||
2347 | if (use_dma) { | ||
2348 | /* BNA ? */ | ||
2349 | if (epsts & AMD_BIT(UDC_EPSTS_BNA)) { | ||
2350 | dev_err(&dev->pdev->dev, | ||
2351 | "BNA ep%din occured - DESPTR = %08lx \n", | ||
2352 | ep->num, | ||
2353 | (unsigned long) readl(&ep->regs->desptr)); | ||
2354 | |||
2355 | /* clear BNA */ | ||
2356 | writel(epsts, &ep->regs->sts); | ||
2357 | ret_val = IRQ_HANDLED; | ||
2358 | goto finished; | ||
2359 | } | ||
2360 | } | ||
2361 | /* HE event ? */ | ||
2362 | if (epsts & AMD_BIT(UDC_EPSTS_HE)) { | ||
2363 | dev_err(&dev->pdev->dev, | ||
2364 | "HE ep%dn occured - DESPTR = %08lx \n", | ||
2365 | ep->num, (unsigned long) readl(&ep->regs->desptr)); | ||
2366 | |||
2367 | /* clear HE */ | ||
2368 | writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); | ||
2369 | ret_val = IRQ_HANDLED; | ||
2370 | goto finished; | ||
2371 | } | ||
2372 | |||
2373 | /* DMA completion */ | ||
2374 | if (epsts & AMD_BIT(UDC_EPSTS_TDC)) { | ||
2375 | VDBG(dev, "TDC set- completion\n"); | ||
2376 | ret_val = IRQ_HANDLED; | ||
2377 | if (!ep->cancel_transfer && !list_empty(&ep->queue)) { | ||
2378 | req = list_entry(ep->queue.next, | ||
2379 | struct udc_request, queue); | ||
2380 | if (req) { | ||
2381 | /* | ||
2382 | * length bytes transferred | ||
2383 | * check dma done of last desc. in PPBDU mode | ||
2384 | */ | ||
2385 | if (use_dma_ppb_du) { | ||
2386 | td = udc_get_last_dma_desc(req); | ||
2387 | if (td) { | ||
2388 | dma_done = | ||
2389 | AMD_GETBITS(td->status, | ||
2390 | UDC_DMA_IN_STS_BS); | ||
2391 | /* don't care about the DMA done status */ | ||
2392 | req->req.actual = | ||
2393 | req->req.length; | ||
2394 | } | ||
2395 | } else { | ||
2396 | /* assume all bytes transferred */ | ||
2397 | req->req.actual = req->req.length; | ||
2398 | } | ||
2399 | |||
2400 | if (req->req.actual == req->req.length) { | ||
2401 | /* complete req */ | ||
2402 | complete_req(ep, req, 0); | ||
2403 | req->dma_going = 0; | ||
2404 | /* further request available ? */ | ||
2405 | if (list_empty(&ep->queue)) { | ||
2406 | /* disable interrupt */ | ||
2407 | tmp = readl( | ||
2408 | &dev->regs->ep_irqmsk); | ||
2409 | tmp |= AMD_BIT(ep->num); | ||
2410 | writel(tmp, | ||
2411 | &dev->regs->ep_irqmsk); | ||
2412 | } | ||
2413 | |||
2414 | } | ||
2415 | } | ||
2416 | } | ||
2417 | ep->cancel_transfer = 0; | ||
2418 | |||
2419 | } | ||
2420 | /* | ||
2421 | * status reg has IN bit set and TDC not set (if TDC was handled, | ||
2422 | * IN must not be handled - UDC defect?) | ||
2423 | */ | ||
2424 | if ((epsts & AMD_BIT(UDC_EPSTS_IN)) | ||
2425 | && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) { | ||
2426 | ret_val = IRQ_HANDLED; | ||
2427 | if (!list_empty(&ep->queue)) { | ||
2428 | /* next request */ | ||
2429 | req = list_entry(ep->queue.next, | ||
2430 | struct udc_request, queue); | ||
2431 | /* FIFO mode */ | ||
2432 | if (!use_dma) { | ||
2433 | /* write fifo */ | ||
2434 | udc_txfifo_write(ep, &req->req); | ||
2435 | len = req->req.length - req->req.actual; | ||
2436 | if (len > ep->ep.maxpacket) | ||
2437 | len = ep->ep.maxpacket; | ||
2438 | req->req.actual += len; | ||
2439 | if (req->req.actual == req->req.length | ||
2440 | || (len != ep->ep.maxpacket)) { | ||
2441 | /* complete req */ | ||
2442 | complete_req(ep, req, 0); | ||
2443 | } | ||
2444 | /* DMA */ | ||
2445 | } else if (req && !req->dma_going) { | ||
2446 | VDBG(dev, "IN DMA : req=%p req->td_data=%p\n", | ||
2447 | req, req->td_data); | ||
2448 | if (req->td_data) { | ||
2449 | |||
2450 | req->dma_going = 1; | ||
2451 | |||
2452 | /* | ||
2453 | * unset L bit of first desc. | ||
2454 | * for chain | ||
2455 | */ | ||
2456 | if (use_dma_ppb && req->req.length > | ||
2457 | ep->ep.maxpacket) { | ||
2458 | req->td_data->status &= | ||
2459 | AMD_CLEAR_BIT( | ||
2460 | UDC_DMA_IN_STS_L); | ||
2461 | } | ||
2462 | |||
2463 | /* write desc pointer */ | ||
2464 | writel(req->td_phys, &ep->regs->desptr); | ||
2465 | |||
2466 | /* set HOST READY */ | ||
2467 | req->td_data->status = | ||
2468 | AMD_ADDBITS( | ||
2469 | req->td_data->status, | ||
2470 | UDC_DMA_IN_STS_BS_HOST_READY, | ||
2471 | UDC_DMA_IN_STS_BS); | ||
2472 | |||
2473 | /* set poll demand bit */ | ||
2474 | tmp = readl(&ep->regs->ctl); | ||
2475 | tmp |= AMD_BIT(UDC_EPCTL_P); | ||
2476 | writel(tmp, &ep->regs->ctl); | ||
2477 | } | ||
2478 | } | ||
2479 | |||
2480 | } | ||
2481 | } | ||
2482 | /* clear status bits */ | ||
2483 | writel(epsts, &ep->regs->sts); | ||
2484 | |||
2485 | finished: | ||
2486 | return ret_val; | ||
2487 | |||
2488 | } | ||
2489 | |||
2490 | /* Interrupt handler for Control OUT traffic */ | ||
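| /* | ||
| * In outline: fetch the SETUP packet (from the setup DMA descriptor | ||
| * or the rxfifo), NAK ep0-in meanwhile, hand the request to the | ||
| * gadget driver's setup() callback with the lock dropped, and then | ||
| * CNAK or stall ep0 depending on the returned status. | ||
| */ | ||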
2491 | static irqreturn_t udc_control_out_isr(struct udc *dev) | ||
2492 | __releases(dev->lock) | ||
2493 | __acquires(dev->lock) | ||
2494 | { | ||
2495 | irqreturn_t ret_val = IRQ_NONE; | ||
2496 | u32 tmp; | ||
2497 | int setup_supported; | ||
2498 | u32 count; | ||
2499 | int set = 0; | ||
2500 | struct udc_ep *ep; | ||
2501 | struct udc_ep *ep_tmp; | ||
2502 | |||
2503 | ep = &dev->ep[UDC_EP0OUT_IX]; | ||
2504 | |||
2505 | /* clear irq */ | ||
2506 | writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts); | ||
2507 | |||
2508 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts); | ||
2509 | /* check BNA and clear if set */ | ||
2510 | if (tmp & AMD_BIT(UDC_EPSTS_BNA)) { | ||
2511 | VDBG(dev, "ep0: BNA set\n"); | ||
2512 | writel(AMD_BIT(UDC_EPSTS_BNA), | ||
2513 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | ||
2514 | ep->bna_occurred = 1; | ||
2515 | ret_val = IRQ_HANDLED; | ||
2516 | goto finished; | ||
2517 | } | ||
2518 | |||
2519 | /* type of data: SETUP or DATA 0 bytes */ | ||
2520 | tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT); | ||
2521 | VDBG(dev, "data_typ = %x\n", tmp); | ||
2522 | |||
2523 | /* setup data */ | ||
2524 | if (tmp == UDC_EPSTS_OUT_SETUP) { | ||
2525 | ret_val = IRQ_HANDLED; | ||
2526 | |||
2527 | ep->dev->stall_ep0in = 0; | ||
2528 | dev->waiting_zlp_ack_ep0in = 0; | ||
2529 | |||
2530 | /* set NAK for EP0_IN */ | ||
2531 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
2532 | tmp |= AMD_BIT(UDC_EPCTL_SNAK); | ||
2533 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
2534 | dev->ep[UDC_EP0IN_IX].naking = 1; | ||
2535 | /* get setup data */ | ||
2536 | if (use_dma) { | ||
2537 | |||
2538 | /* clear OUT bits in ep status */ | ||
2539 | writel(UDC_EPSTS_OUT_CLEAR, | ||
2540 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | ||
2541 | |||
2542 | setup_data.data[0] = | ||
2543 | dev->ep[UDC_EP0OUT_IX].td_stp->data12; | ||
2544 | setup_data.data[1] = | ||
2545 | dev->ep[UDC_EP0OUT_IX].td_stp->data34; | ||
2546 | /* set HOST READY */ | ||
2547 | dev->ep[UDC_EP0OUT_IX].td_stp->status = | ||
2548 | UDC_DMA_STP_STS_BS_HOST_READY; | ||
2549 | } else { | ||
2550 | /* read fifo */ | ||
2551 | udc_rxfifo_read_dwords(dev, setup_data.data, 2); | ||
2552 | } | ||
2553 | |||
2554 | /* determine direction of control data */ | ||
2555 | if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) { | ||
2556 | dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep; | ||
2557 | /* enable RDE */ | ||
2558 | udc_ep0_set_rde(dev); | ||
2559 | set = 0; | ||
2560 | } else { | ||
2561 | dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep; | ||
2562 | /* | ||
2563 | * implant BNA dummy descriptor to allow RXFIFO opening | ||
2564 | * by RDE | ||
2565 | */ | ||
2566 | if (ep->bna_dummy_req) { | ||
2567 | /* write desc pointer */ | ||
2568 | writel(ep->bna_dummy_req->td_phys, | ||
2569 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | ||
2570 | ep->bna_occurred = 0; | ||
2571 | } | ||
2572 | |||
2573 | set = 1; | ||
2574 | dev->ep[UDC_EP0OUT_IX].naking = 1; | ||
2575 | /* | ||
2576 | * setup timer for enabling RDE (to not enable | ||
2577 | * RXFIFO DMA for data too early) | ||
2578 | */ | ||
2579 | set_rde = 1; | ||
2580 | if (!timer_pending(&udc_timer)) { | ||
2581 | udc_timer.expires = jiffies + | ||
2582 | HZ/UDC_RDE_TIMER_DIV; | ||
2583 | if (!stop_timer) { | ||
2584 | add_timer(&udc_timer); | ||
2585 | } | ||
2586 | } | ||
2587 | } | ||
2588 | |||
2589 | /* | ||
2590 | * mass storage reset must be processed here because | ||
2591 | * next packet may be a CLEAR_FEATURE HALT which would not | ||
2592 | * clear the stall bit when no STALL handshake was received | ||
2593 | * before (autostall can cause this) | ||
2594 | */ | ||
2595 | if (setup_data.data[0] == UDC_MSCRES_DWORD0 | ||
2596 | && setup_data.data[1] == UDC_MSCRES_DWORD1) { | ||
2597 | DBG(dev, "MSC Reset\n"); | ||
2598 | /* | ||
2599 | * clear stall bits | ||
2600 | * only one IN and one OUT endpoint are handled | ||
2601 | */ | ||
2602 | ep_tmp = &udc->ep[UDC_EPIN_IX]; | ||
2603 | udc_set_halt(&ep_tmp->ep, 0); | ||
2604 | ep_tmp = &udc->ep[UDC_EPOUT_IX]; | ||
2605 | udc_set_halt(&ep_tmp->ep, 0); | ||
2606 | } | ||
2607 | |||
2608 | /* call gadget with setup data received */ | ||
2609 | spin_unlock(&dev->lock); | ||
2610 | setup_supported = dev->driver->setup(&dev->gadget, | ||
2611 | &setup_data.request); | ||
2612 | spin_lock(&dev->lock); | ||
2613 | |||
2614 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
2615 | /* ep0 in returns data (not zlp) on IN phase */ | ||
2616 | if (setup_supported >= 0 && setup_supported < | ||
2617 | UDC_EP0IN_MAXPACKET) { | ||
2618 | /* clear NAK by writing CNAK in EP0_IN */ | ||
2619 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
2620 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
2621 | dev->ep[UDC_EP0IN_IX].naking = 0; | ||
2622 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); | ||
2623 | |||
2624 | /* if unsupported request then stall */ | ||
2625 | } else if (setup_supported < 0) { | ||
2626 | tmp |= AMD_BIT(UDC_EPCTL_S); | ||
2627 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
2628 | } else | ||
2629 | dev->waiting_zlp_ack_ep0in = 1; | ||
2630 | |||
2631 | |||
2632 | /* clear NAK by writing CNAK in EP0_OUT */ | ||
2633 | if (!set) { | ||
2634 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | ||
2635 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | ||
2636 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | ||
2637 | dev->ep[UDC_EP0OUT_IX].naking = 0; | ||
2638 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX); | ||
2639 | } | ||
2640 | |||
2641 | if (!use_dma) { | ||
2642 | /* clear OUT bits in ep status */ | ||
2643 | writel(UDC_EPSTS_OUT_CLEAR, | ||
2644 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | ||
2645 | } | ||
2646 | |||
2647 | /* data packet 0 bytes */ | ||
2648 | } else if (tmp == UDC_EPSTS_OUT_DATA) { | ||
2649 | /* clear OUT bits in ep status */ | ||
2650 | writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts); | ||
2651 | |||
2652 | /* get setup data: only 0 packet */ | ||
2653 | if (use_dma) { | ||
2654 | /* no req if 0 packet, just reactivate */ | ||
2655 | if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) { | ||
2656 | VDBG(dev, "ZLP\n"); | ||
2657 | |||
2658 | /* set HOST READY */ | ||
2659 | dev->ep[UDC_EP0OUT_IX].td->status = | ||
2660 | AMD_ADDBITS( | ||
2661 | dev->ep[UDC_EP0OUT_IX].td->status, | ||
2662 | UDC_DMA_OUT_STS_BS_HOST_READY, | ||
2663 | UDC_DMA_OUT_STS_BS); | ||
2664 | /* enable RDE */ | ||
2665 | udc_ep0_set_rde(dev); | ||
2666 | ret_val = IRQ_HANDLED; | ||
2667 | |||
2668 | } else { | ||
2669 | /* control write */ | ||
2670 | ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX); | ||
2671 | /* re-program desc. pointer for possible ZLPs */ | ||
2672 | writel(dev->ep[UDC_EP0OUT_IX].td_phys, | ||
2673 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | ||
2674 | /* enable RDE */ | ||
2675 | udc_ep0_set_rde(dev); | ||
2676 | } | ||
2677 | } else { | ||
2678 | |||
2679 | /* number of received bytes */ | ||
2680 | count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts); | ||
2681 | count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE); | ||
2682 | /* out data for fifo mode not working */ | ||
2683 | count = 0; | ||
2684 | |||
2685 | /* 0 packet or real data ? */ | ||
2686 | if (count != 0) { | ||
2687 | ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX); | ||
2688 | } else { | ||
2689 | /* dummy read confirm */ | ||
2690 | readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm); | ||
2691 | ret_val = IRQ_HANDLED; | ||
2692 | } | ||
2693 | } | ||
2694 | } | ||
2695 | |||
2696 | /* check pending CNAKS */ | ||
2697 | if (cnak_pending) { | ||
2698 | /* CNAK processing only when rxfifo is empty */ | ||
2699 | if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | ||
2700 | udc_process_cnak_queue(dev); | ||
2701 | } | ||
2702 | } | ||
2703 | |||
2704 | finished: | ||
2705 | return ret_val; | ||
2706 | } | ||
2707 | |||
2708 | /* Interrupt handler for Control IN traffic */ | ||
2709 | static irqreturn_t udc_control_in_isr(struct udc *dev) | ||
2710 | { | ||
2711 | irqreturn_t ret_val = IRQ_NONE; | ||
2712 | u32 tmp; | ||
2713 | struct udc_ep *ep; | ||
2714 | struct udc_request *req; | ||
2715 | unsigned len; | ||
2716 | |||
2717 | ep = &dev->ep[UDC_EP0IN_IX]; | ||
2718 | |||
2719 | /* clear irq */ | ||
2720 | writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts); | ||
2721 | |||
2722 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts); | ||
2723 | /* DMA completion */ | ||
2724 | if (tmp & AMD_BIT(UDC_EPSTS_TDC)) { | ||
2725 | VDBG(dev, "isr: TDC clear \n"); | ||
2726 | ret_val = IRQ_HANDLED; | ||
2727 | |||
2728 | /* clear TDC bit */ | ||
2729 | writel(AMD_BIT(UDC_EPSTS_TDC), | ||
2730 | &dev->ep[UDC_EP0IN_IX].regs->sts); | ||
2731 | |||
2732 | /* status reg has IN bit set ? */ | ||
2733 | } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) { | ||
2734 | ret_val = IRQ_HANDLED; | ||
2735 | |||
2736 | if (ep->dma) { | ||
2737 | /* clear IN bit */ | ||
2738 | writel(AMD_BIT(UDC_EPSTS_IN), | ||
2739 | &dev->ep[UDC_EP0IN_IX].regs->sts); | ||
2740 | } | ||
2741 | if (dev->stall_ep0in) { | ||
2742 | DBG(dev, "stall ep0in\n"); | ||
2743 | /* halt ep0in */ | ||
2744 | tmp = readl(&ep->regs->ctl); | ||
2745 | tmp |= AMD_BIT(UDC_EPCTL_S); | ||
2746 | writel(tmp, &ep->regs->ctl); | ||
2747 | } else { | ||
2748 | if (!list_empty(&ep->queue)) { | ||
2749 | /* next request */ | ||
2750 | req = list_entry(ep->queue.next, | ||
2751 | struct udc_request, queue); | ||
2752 | |||
2753 | if (ep->dma) { | ||
2754 | /* write desc pointer */ | ||
2755 | writel(req->td_phys, &ep->regs->desptr); | ||
2756 | /* set HOST READY */ | ||
2757 | req->td_data->status = | ||
2758 | AMD_ADDBITS( | ||
2759 | req->td_data->status, | ||
2760 | UDC_DMA_STP_STS_BS_HOST_READY, | ||
2761 | UDC_DMA_STP_STS_BS); | ||
2762 | |||
2763 | /* set poll demand bit */ | ||
2764 | tmp = | ||
2765 | readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
2766 | tmp |= AMD_BIT(UDC_EPCTL_P); | ||
2767 | writel(tmp, | ||
2768 | &dev->ep[UDC_EP0IN_IX].regs->ctl); | ||
2769 | |||
2770 | /* all bytes will be transferred */ | ||
2771 | req->req.actual = req->req.length; | ||
2772 | |||
2773 | /* complete req */ | ||
2774 | complete_req(ep, req, 0); | ||
2775 | |||
2776 | } else { | ||
2777 | /* write fifo */ | ||
2778 | udc_txfifo_write(ep, &req->req); | ||
2779 | |||
2780 | /* length bytes transferred */ | ||
2781 | len = req->req.length - req->req.actual; | ||
2782 | if (len > ep->ep.maxpacket) | ||
2783 | len = ep->ep.maxpacket; | ||
2784 | |||
2785 | req->req.actual += len; | ||
2786 | if (req->req.actual == req->req.length | ||
2787 | || (len != ep->ep.maxpacket)) { | ||
2788 | /* complete req */ | ||
2789 | complete_req(ep, req, 0); | ||
2790 | } | ||
2791 | } | ||
2792 | |||
2793 | } | ||
2794 | } | ||
2795 | ep->halted = 0; | ||
2796 | dev->stall_ep0in = 0; | ||
2797 | if (!ep->dma) { | ||
2798 | /* clear IN bit */ | ||
2799 | writel(AMD_BIT(UDC_EPSTS_IN), | ||
2800 | &dev->ep[UDC_EP0IN_IX].regs->sts); | ||
2801 | } | ||
2802 | } | ||
2803 | |||
2804 | return ret_val; | ||
2805 | } | ||
2806 | |||
2807 | |||
2808 | /* Interrupt handler for global device events */ | ||
2809 | static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq) | ||
2810 | __releases(dev->lock) | ||
2811 | __acquires(dev->lock) | ||
2812 | { | ||
2813 | irqreturn_t ret_val = IRQ_NONE; | ||
2814 | u32 tmp; | ||
2815 | u32 cfg; | ||
2816 | struct udc_ep *ep; | ||
2817 | u16 i; | ||
2818 | u8 udc_csr_epix; | ||
2819 | |||
2820 | /* SET_CONFIG irq ? */ | ||
2821 | if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) { | ||
2822 | ret_val = IRQ_HANDLED; | ||
2823 | |||
2824 | /* read config value */ | ||
2825 | tmp = readl(&dev->regs->sts); | ||
2826 | cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG); | ||
2827 | DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg); | ||
2828 | dev->cur_config = cfg; | ||
2829 | dev->set_cfg_not_acked = 1; | ||
2830 | |||
2831 | /* make usb request for gadget driver */ | ||
2832 | memset(&setup_data, 0 , sizeof(union udc_setup_data)); | ||
2833 | setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION; | ||
2834 | setup_data.request.wValue = dev->cur_config; | ||
2835 | |||
2836 | /* program the NE registers */ | ||
2837 | for (i = 0; i < UDC_EP_NUM; i++) { | ||
2838 | ep = &dev->ep[i]; | ||
2839 | if (ep->in) { | ||
2840 | |||
2841 | /* ep ix in UDC CSR register space */ | ||
2842 | udc_csr_epix = ep->num; | ||
2843 | |||
2844 | |||
2845 | /* OUT ep */ | ||
2846 | } else { | ||
2847 | /* ep ix in UDC CSR register space */ | ||
2848 | udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; | ||
2849 | } | ||
2850 | |||
2851 | tmp = readl(&dev->csr->ne[udc_csr_epix]); | ||
2852 | /* ep cfg */ | ||
2853 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, | ||
2854 | UDC_CSR_NE_CFG); | ||
2855 | /* write reg */ | ||
2856 | writel(tmp, &dev->csr->ne[udc_csr_epix]); | ||
2857 | |||
2858 | /* clear stall bits */ | ||
2859 | ep->halted = 0; | ||
2860 | tmp = readl(&ep->regs->ctl); | ||
2861 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | ||
2862 | writel(tmp, &ep->regs->ctl); | ||
2863 | } | ||
2864 | /* call gadget zero with setup data received */ | ||
2865 | spin_unlock(&dev->lock); | ||
2866 | tmp = dev->driver->setup(&dev->gadget, &setup_data.request); | ||
2867 | spin_lock(&dev->lock); | ||
2868 | |||
2869 | } /* SET_INTERFACE ? */ | ||
2870 | if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) { | ||
2871 | ret_val = IRQ_HANDLED; | ||
2872 | |||
2873 | dev->set_cfg_not_acked = 1; | ||
2874 | /* read interface and alt setting values */ | ||
2875 | tmp = readl(&dev->regs->sts); | ||
2876 | dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT); | ||
2877 | dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF); | ||
2878 | |||
2879 | /* make usb request for gadget driver */ | ||
2880 | memset(&setup_data, 0 , sizeof(union udc_setup_data)); | ||
2881 | setup_data.request.bRequest = USB_REQ_SET_INTERFACE; | ||
2882 | setup_data.request.bRequestType = USB_RECIP_INTERFACE; | ||
2883 | setup_data.request.wValue = dev->cur_alt; | ||
2884 | setup_data.request.wIndex = dev->cur_intf; | ||
2885 | |||
2886 | DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n", | ||
2887 | dev->cur_alt, dev->cur_intf); | ||
2888 | |||
2889 | /* program the NE registers */ | ||
2890 | for (i = 0; i < UDC_EP_NUM; i++) { | ||
2891 | ep = &dev->ep[i]; | ||
2892 | if (ep->in) { | ||
2893 | |||
2894 | /* ep ix in UDC CSR register space */ | ||
2895 | udc_csr_epix = ep->num; | ||
2896 | |||
2897 | |||
2898 | /* OUT ep */ | ||
2899 | } else { | ||
2900 | /* ep ix in UDC CSR register space */ | ||
2901 | udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; | ||
2902 | } | ||
2903 | |||
2904 | /* UDC CSR reg */ | ||
2905 | /* set ep values */ | ||
2906 | tmp = readl(&dev->csr->ne[udc_csr_epix]); | ||
2907 | /* ep interface */ | ||
2908 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, | ||
2909 | UDC_CSR_NE_INTF); | ||
2910 | /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */ | ||
2911 | /* ep alt */ | ||
2912 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, | ||
2913 | UDC_CSR_NE_ALT); | ||
2914 | /* write reg */ | ||
2915 | writel(tmp, &dev->csr->ne[udc_csr_epix]); | ||
2916 | |||
2917 | /* clear stall bits */ | ||
2918 | ep->halted = 0; | ||
2919 | tmp = readl(&ep->regs->ctl); | ||
2920 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | ||
2921 | writel(tmp, &ep->regs->ctl); | ||
2922 | } | ||
2923 | |||
2924 | /* call gadget zero with setup data received */ | ||
2925 | spin_unlock(&dev->lock); | ||
2926 | tmp = dev->driver->setup(&dev->gadget, &setup_data.request); | ||
2927 | spin_lock(&dev->lock); | ||
2928 | |||
2929 | } /* USB reset */ | ||
2930 | if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) { | ||
2931 | DBG(dev, "USB Reset interrupt\n"); | ||
2932 | ret_val = IRQ_HANDLED; | ||
2933 | |||
2934 | /* allow soft reset when suspend occurs */ | ||
2935 | soft_reset_occured = 0; | ||
2936 | |||
2937 | dev->waiting_zlp_ack_ep0in = 0; | ||
2938 | dev->set_cfg_not_acked = 0; | ||
2939 | |||
2940 | /* mask unused interrupts */ | ||
2941 | udc_mask_unused_interrupts(dev); | ||
2942 | |||
2943 | /* call gadget to resume and reset configs etc. */ | ||
2944 | spin_unlock(&dev->lock); | ||
2945 | if (dev->sys_suspended && dev->driver->resume) { | ||
2946 | dev->driver->resume(&dev->gadget); | ||
2947 | dev->sys_suspended = 0; | ||
2948 | } | ||
2949 | dev->driver->disconnect(&dev->gadget); | ||
2950 | spin_lock(&dev->lock); | ||
2951 | |||
2952 | /* disable ep0 to empty req queue */ | ||
2953 | empty_req_queue(&dev->ep[UDC_EP0IN_IX]); | ||
2954 | ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]); | ||
2955 | |||
2956 | /* soft reset when rxfifo not empty */ | ||
2957 | tmp = readl(&dev->regs->sts); | ||
2958 | if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) | ||
2959 | && !soft_reset_after_usbreset_occured) { | ||
2960 | udc_soft_reset(dev); | ||
2961 | soft_reset_after_usbreset_occured++; | ||
2962 | } | ||
2963 | |||
2964 | /* | ||
2965 | * DMA reset to kill potential old DMA hw hang, | ||
2966 | * POLL bit is already reset by ep_init() through | ||
2967 | * disconnect() | ||
2968 | */ | ||
2969 | DBG(dev, "DMA machine reset\n"); | ||
2970 | tmp = readl(&dev->regs->cfg); | ||
2971 | writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg); | ||
2972 | writel(tmp, &dev->regs->cfg); | ||
2973 | |||
2974 | /* put into initial config */ | ||
2975 | udc_basic_init(dev); | ||
2976 | |||
2977 | /* enable device setup interrupts */ | ||
2978 | udc_enable_dev_setup_interrupts(dev); | ||
2979 | |||
2980 | /* enable suspend interrupt */ | ||
2981 | tmp = readl(&dev->regs->irqmsk); | ||
2982 | tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US); | ||
2983 | writel(tmp, &dev->regs->irqmsk); | ||
2984 | |||
2985 | } /* USB suspend */ | ||
2986 | if (dev_irq & AMD_BIT(UDC_DEVINT_US)) { | ||
2987 | DBG(dev, "USB Suspend interrupt\n"); | ||
2988 | ret_val = IRQ_HANDLED; | ||
2989 | if (dev->driver->suspend) { | ||
2990 | spin_unlock(&dev->lock); | ||
2991 | dev->sys_suspended = 1; | ||
2992 | dev->driver->suspend(&dev->gadget); | ||
2993 | spin_lock(&dev->lock); | ||
2994 | } | ||
2995 | } /* new speed ? */ | ||
2996 | if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) { | ||
2997 | DBG(dev, "ENUM interrupt\n"); | ||
2998 | ret_val = IRQ_HANDLED; | ||
2999 | soft_reset_after_usbreset_occured = 0; | ||
3000 | |||
3001 | /* disable ep0 to empty req queue */ | ||
3002 | empty_req_queue(&dev->ep[UDC_EP0IN_IX]); | ||
3003 | ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]); | ||
3004 | |||
3005 | /* link up all endpoints */ | ||
3006 | udc_setup_endpoints(dev); | ||
3007 | if (dev->gadget.speed == USB_SPEED_HIGH) { | ||
3008 | dev_info(&dev->pdev->dev, "Connect: speed = %s\n", | ||
3009 | "high"); | ||
3010 | } else if (dev->gadget.speed == USB_SPEED_FULL) { | ||
3011 | dev_info(&dev->pdev->dev, "Connect: speed = %s\n", | ||
3012 | "full"); | ||
3013 | } | ||
3014 | |||
3015 | /* init ep 0 */ | ||
3016 | activate_control_endpoints(dev); | ||
3017 | |||
3018 | /* enable ep0 interrupts */ | ||
3019 | udc_enable_ep0_interrupts(dev); | ||
3020 | } | ||
3021 | /* session valid change interrupt */ | ||
3022 | if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) { | ||
3023 | DBG(dev, "USB SVC interrupt\n"); | ||
3024 | ret_val = IRQ_HANDLED; | ||
3025 | |||
3026 | /* check that session is not valid to detect disconnect */ | ||
3027 | tmp = readl(&dev->regs->sts); | ||
3028 | if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) { | ||
3029 | /* disable suspend interrupt */ | ||
3030 | tmp = readl(&dev->regs->irqmsk); | ||
3031 | tmp |= AMD_BIT(UDC_DEVINT_US); | ||
3032 | writel(tmp, &dev->regs->irqmsk); | ||
3033 | DBG(dev, "USB Disconnect (session valid low)\n"); | ||
3034 | /* cleanup on disconnect */ | ||
3035 | usb_disconnect(udc); | ||
3036 | } | ||
3037 | |||
3038 | } | ||
3039 | |||
3040 | return ret_val; | ||
3041 | } | ||
3042 | |||
3043 | /* Interrupt Service Routine, see Linux Kernel Doc for parameters */ | ||
3044 | static irqreturn_t udc_irq(int irq, void *pdev) | ||
3045 | { | ||
3046 | struct udc *dev = pdev; | ||
3047 | u32 reg; | ||
3048 | u16 i; | ||
3049 | u32 ep_irq; | ||
3050 | irqreturn_t ret_val = IRQ_NONE; | ||
3051 | |||
3052 | spin_lock(&dev->lock); | ||
3053 | |||
3054 | /* check for ep irq */ | ||
3055 | reg = readl(&dev->regs->ep_irqsts); | ||
3056 | if (reg) { | ||
3057 | if (reg & AMD_BIT(UDC_EPINT_OUT_EP0)) | ||
3058 | ret_val |= udc_control_out_isr(dev); | ||
3059 | if (reg & AMD_BIT(UDC_EPINT_IN_EP0)) | ||
3060 | ret_val |= udc_control_in_isr(dev); | ||
3061 | |||
3062 | /* | ||
3063 | * data endpoint | ||
3064 | * iterate ep's | ||
3065 | */ | ||
3066 | for (i = 1; i < UDC_EP_NUM; i++) { | ||
3067 | ep_irq = 1 << i; | ||
3068 | if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0) | ||
3069 | continue; | ||
3070 | |||
3071 | /* clear irq status */ | ||
3072 | writel(ep_irq, &dev->regs->ep_irqsts); | ||
3073 | |||
3074 | /* irq for out ep ? */ | ||
3075 | if (i > UDC_EPIN_NUM) | ||
3076 | ret_val |= udc_data_out_isr(dev, i); | ||
3077 | else | ||
3078 | ret_val |= udc_data_in_isr(dev, i); | ||
3079 | } | ||
3080 | |||
3081 | } | ||
3082 | |||
3083 | |||
3084 | /* check for dev irq */ | ||
3085 | reg = readl(&dev->regs->irqsts); | ||
3086 | if (reg) { | ||
3087 | /* clear irq */ | ||
3088 | writel(reg, &dev->regs->irqsts); | ||
3089 | ret_val |= udc_dev_isr(dev, reg); | ||
3090 | } | ||
3091 | |||
3092 | |||
3093 | spin_unlock(&dev->lock); | ||
3094 | return ret_val; | ||
3095 | } | ||
3096 | |||
3097 | /* Tears down device */ | ||
3098 | static void gadget_release(struct device *pdev) | ||
3099 | { | ||
3100 | struct amd5536udc *dev = dev_get_drvdata(pdev); | ||
3101 | kfree(dev); | ||
3102 | } | ||
3103 | |||
3104 | /* Cleanup on device remove */ | ||
3105 | static void udc_remove(struct udc *dev) | ||
3106 | { | ||
3107 | /* remove timer */ | ||
3108 | stop_timer++; | ||
3109 | if (timer_pending(&udc_timer)) | ||
3110 | wait_for_completion(&on_exit); | ||
3111 | if (udc_timer.data) | ||
3112 | del_timer_sync(&udc_timer); | ||
3113 | /* remove pollstall timer */ | ||
3114 | stop_pollstall_timer++; | ||
3115 | if (timer_pending(&udc_pollstall_timer)) | ||
3116 | wait_for_completion(&on_pollstall_exit); | ||
3117 | if (udc_pollstall_timer.data) | ||
3118 | del_timer_sync(&udc_pollstall_timer); | ||
3119 | udc = NULL; | ||
3120 | } | ||
3121 | |||
3122 | /* Reset all pci context */ | ||
3123 | static void udc_pci_remove(struct pci_dev *pdev) | ||
3124 | { | ||
3125 | struct udc *dev; | ||
3126 | |||
3127 | dev = pci_get_drvdata(pdev); | ||
3128 | |||
3129 | /* gadget driver must not be registered */ | ||
3130 | BUG_ON(dev->driver != NULL); | ||
3131 | |||
3132 | /* dma pool cleanup */ | ||
3133 | if (dev->data_requests) | ||
3134 | pci_pool_destroy(dev->data_requests); | ||
3135 | |||
3136 | if (dev->stp_requests) { | ||
3137 | /* cleanup DMA desc's for ep0out */ | ||
3138 | pci_pool_free(dev->stp_requests, | ||
3139 | dev->ep[UDC_EP0OUT_IX].td_stp, | ||
3140 | dev->ep[UDC_EP0OUT_IX].td_stp_dma); | ||
3141 | pci_pool_free(dev->stp_requests, | ||
3142 | dev->ep[UDC_EP0OUT_IX].td, | ||
3143 | dev->ep[UDC_EP0OUT_IX].td_phys); | ||
3144 | |||
3145 | pci_pool_destroy(dev->stp_requests); | ||
3146 | } | ||
3147 | |||
3148 | /* reset controller */ | ||
3149 | writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); | ||
3150 | if (dev->irq_registered) | ||
3151 | free_irq(pdev->irq, dev); | ||
3152 | if (dev->regs) | ||
3153 | iounmap(dev->regs); | ||
3154 | if (dev->mem_region) | ||
3155 | release_mem_region(pci_resource_start(pdev, 0), | ||
3156 | pci_resource_len(pdev, 0)); | ||
3157 | if (dev->active) | ||
3158 | pci_disable_device(pdev); | ||
3159 | |||
3160 | device_unregister(&dev->gadget.dev); | ||
3161 | pci_set_drvdata(pdev, NULL); | ||
3162 | |||
3163 | udc_remove(dev); | ||
3164 | } | ||
3165 | |||
3166 | /* create dma pools on init */ | ||
3167 | static int init_dma_pools(struct udc *dev) | ||
3168 | { | ||
3169 | struct udc_stp_dma *td_stp; | ||
3170 | struct udc_data_dma *td_data; | ||
3171 | int retval; | ||
3172 | |||
3173 | /* consistent DMA mode setting ? */ | ||
3174 | if (use_dma_ppb) { | ||
3175 | use_dma_bufferfill_mode = 0; | ||
3176 | } else { | ||
3177 | use_dma_ppb_du = 0; | ||
3178 | use_dma_bufferfill_mode = 1; | ||
3179 | } | ||
3180 | |||
3181 | /* DMA setup */ | ||
3182 | dev->data_requests = dma_pool_create("data_requests", NULL, | ||
3183 | sizeof(struct udc_data_dma), 0, 0); | ||
3184 | if (!dev->data_requests) { | ||
3185 | DBG(dev, "can't get request data pool\n"); | ||
3186 | retval = -ENOMEM; | ||
3187 | goto finished; | ||
3188 | } | ||
3189 | |||
3190 | /* EP0 in dma regs = dev control regs */ | ||
3191 | dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl; | ||
3192 | |||
3193 | /* dma desc for setup data */ | ||
3194 | dev->stp_requests = dma_pool_create("setup requests", NULL, | ||
3195 | sizeof(struct udc_stp_dma), 0, 0); | ||
3196 | if (!dev->stp_requests) { | ||
3197 | DBG(dev, "can't get stp request pool\n"); | ||
3198 | retval = -ENOMEM; | ||
3199 | goto finished; | ||
3200 | } | ||
3201 | /* setup */ | ||
3202 | td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL, | ||
3203 | &dev->ep[UDC_EP0OUT_IX].td_stp_dma); | ||
3204 | if (td_stp == NULL) { | ||
3205 | retval = -ENOMEM; | ||
3206 | goto finished; | ||
3207 | } | ||
3208 | dev->ep[UDC_EP0OUT_IX].td_stp = td_stp; | ||
3209 | |||
3210 | /* data: 0 packets !? */ | ||
3211 | td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL, | ||
3212 | &dev->ep[UDC_EP0OUT_IX].td_phys); | ||
3213 | if (td_data == NULL) { | ||
3214 | retval = -ENOMEM; | ||
3215 | goto finished; | ||
3216 | } | ||
3217 | dev->ep[UDC_EP0OUT_IX].td = td_data; | ||
3218 | return 0; | ||
3219 | |||
3220 | finished: | ||
3221 | return retval; | ||
3222 | } | ||
3223 | |||
3224 | /* Called by pci bus driver to init pci context */ | ||
3225 | static int udc_pci_probe( | ||
3226 | struct pci_dev *pdev, | ||
3227 | const struct pci_device_id *id | ||
3228 | ) | ||
3229 | { | ||
3230 | struct udc *dev; | ||
3231 | unsigned long resource; | ||
3232 | unsigned long len; | ||
3233 | int retval = 0; | ||
3234 | |||
3235 | /* one udc only */ | ||
3236 | if (udc) { | ||
3237 | dev_dbg(&pdev->dev, "already probed\n"); | ||
3238 | return -EBUSY; | ||
3239 | } | ||
3240 | |||
3241 | /* init */ | ||
3242 | dev = kzalloc(sizeof(struct udc), GFP_KERNEL); | ||
3243 | if (!dev) { | ||
3244 | retval = -ENOMEM; | ||
3245 | goto finished; | ||
3246 | } | ||
3247 | memset(dev, 0, sizeof(struct udc)); | ||
3248 | |||
3249 | /* pci setup */ | ||
3250 | if (pci_enable_device(pdev) < 0) { | ||
3251 | retval = -ENODEV; | ||
3252 | goto finished; | ||
3253 | } | ||
3254 | dev->active = 1; | ||
3255 | |||
3256 | /* PCI resource allocation */ | ||
3257 | resource = pci_resource_start(pdev, 0); | ||
3258 | len = pci_resource_len(pdev, 0); | ||
3259 | |||
3260 | if (!request_mem_region(resource, len, name)) { | ||
3261 | dev_dbg(&pdev->dev, "pci device used already\n"); | ||
3262 | retval = -EBUSY; | ||
3263 | goto finished; | ||
3264 | } | ||
3265 | dev->mem_region = 1; | ||
3266 | |||
3267 | dev->virt_addr = ioremap_nocache(resource, len); | ||
3268 | if (dev->virt_addr == NULL) { | ||
3269 | dev_dbg(&pdev->dev, "start address cannot be mapped\n"); | ||
3270 | retval = -EFAULT; | ||
3271 | goto finished; | ||
3272 | } | ||
3273 | |||
3274 | if (!pdev->irq) { | ||
3275 | dev_err(&dev->pdev->dev, "irq not set\n"); | ||
3276 | retval = -ENODEV; | ||
3277 | goto finished; | ||
3278 | } | ||
3279 | |||
3280 | if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { | ||
3281 | dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq); | ||
3282 | retval = -EBUSY; | ||
3283 | goto finished; | ||
3284 | } | ||
3285 | dev->irq_registered = 1; | ||
3286 | |||
3287 | pci_set_drvdata(pdev, dev); | ||
3288 | |||
3289 | /* chip revision */ | ||
3290 | dev->chiprev = 0; | ||
3291 | |||
3292 | pci_set_master(pdev); | ||
3293 | pci_set_mwi(pdev); | ||
3294 | |||
3295 | /* chip rev for Hs AMD5536 */ | ||
3296 | pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) &dev->chiprev); | ||
3297 | /* init dma pools */ | ||
3298 | if (use_dma) { | ||
3299 | retval = init_dma_pools(dev); | ||
3300 | if (retval != 0) | ||
3301 | goto finished; | ||
3302 | } | ||
3303 | |||
3304 | dev->phys_addr = resource; | ||
3305 | dev->irq = pdev->irq; | ||
3306 | dev->pdev = pdev; | ||
3307 | dev->gadget.dev.parent = &pdev->dev; | ||
3308 | dev->gadget.dev.dma_mask = pdev->dev.dma_mask; | ||
3309 | |||
3310 | /* general probing */ | ||
3311 | if (udc_probe(dev) == 0) | ||
3312 | return 0; | ||
3313 | |||
3314 | finished: | ||
3315 | if (dev) | ||
3316 | udc_pci_remove(pdev); | ||
3317 | return retval; | ||
3318 | } | ||
3319 | |||
3320 | /* general probe */ | ||
3321 | static int udc_probe(struct udc *dev) | ||
3322 | { | ||
3323 | char tmp[128]; | ||
3324 | u32 reg; | ||
3325 | int retval; | ||
3326 | |||
3327 | /* mark timer as not initialized */ | ||
3328 | udc_timer.data = 0; | ||
3329 | udc_pollstall_timer.data = 0; | ||
3330 | |||
3331 | /* device struct setup */ | ||
3332 | spin_lock_init(&dev->lock); | ||
3333 | dev->gadget.ops = &udc_ops; | ||
3334 | |||
3335 | strcpy(dev->gadget.dev.bus_id, "gadget"); | ||
3336 | dev->gadget.dev.release = gadget_release; | ||
3337 | dev->gadget.name = name; | ||
3338 | dev->gadget.name = name; | ||
3339 | dev->gadget.is_dualspeed = 1; | ||
3340 | |||
3341 | /* udc csr registers base */ | ||
3342 | dev->csr = dev->virt_addr + UDC_CSR_ADDR; | ||
3343 | /* dev registers base */ | ||
3344 | dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR; | ||
3345 | /* ep registers base */ | ||
3346 | dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR; | ||
3347 | /* fifo's base */ | ||
3348 | dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR); | ||
3349 | dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR); | ||
3350 | |||
3351 | /* init registers, interrupts, ... */ | ||
3352 | startup_registers(dev); | ||
3353 | |||
3354 | dev_info(&dev->pdev->dev, "%s\n", mod_desc); | ||
3355 | |||
3356 | snprintf(tmp, sizeof tmp, "%d", dev->irq); | ||
3357 | dev_info(&dev->pdev->dev, | ||
3358 | "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n", | ||
3359 | tmp, dev->phys_addr, dev->chiprev, | ||
3360 | (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1"); | ||
3361 | strcpy(tmp, UDC_DRIVER_VERSION_STRING); | ||
3362 | if (dev->chiprev == UDC_HSA0_REV) { | ||
3363 | dev_err(&dev->pdev->dev, "chip revision is A0; too old\n"); | ||
3364 | retval = -ENODEV; | ||
3365 | goto finished; | ||
3366 | } | ||
3367 | dev_info(&dev->pdev->dev, | ||
3368 | "driver version: %s(for Geode5536 B1)\n", tmp); | ||
3369 | udc = dev; | ||
3370 | |||
3371 | retval = device_register(&dev->gadget.dev); | ||
3372 | if (retval) | ||
3373 | goto finished; | ||
3374 | |||
3375 | /* timer init */ | ||
3376 | init_timer(&udc_timer); | ||
3377 | udc_timer.function = udc_timer_function; | ||
3378 | udc_timer.data = 1; | ||
3379 | /* timer pollstall init */ | ||
3380 | init_timer(&udc_pollstall_timer); | ||
3381 | udc_pollstall_timer.function = udc_pollstall_timer_function; | ||
3382 | udc_pollstall_timer.data = 1; | ||
3383 | |||
3384 | /* set SD */ | ||
3385 | reg = readl(&dev->regs->ctl); | ||
3386 | reg |= AMD_BIT(UDC_DEVCTL_SD); | ||
3387 | writel(reg, &dev->regs->ctl); | ||
3388 | |||
3389 | /* print dev register info */ | ||
3390 | print_regs(dev); | ||
3391 | |||
3392 | return 0; | ||
3393 | |||
3394 | finished: | ||
3395 | return retval; | ||
3396 | } | ||
3397 | |||
3398 | /* Initiates a remote wakeup */ | ||
3399 | static int udc_remote_wakeup(struct udc *dev) | ||
3400 | { | ||
3401 | unsigned long flags; | ||
3402 | u32 tmp; | ||
3403 | |||
3404 | DBG(dev, "UDC initiates remote wakeup\n"); | ||
3405 | |||
3406 | spin_lock_irqsave(&dev->lock, flags); | ||
3407 | |||
3408 | tmp = readl(&dev->regs->ctl); | ||
3409 | tmp |= AMD_BIT(UDC_DEVCTL_RES); | ||
3410 | writel(tmp, &dev->regs->ctl); | ||
3411 | tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES); | ||
3412 | writel(tmp, &dev->regs->ctl); | ||
3413 | |||
3414 | spin_unlock_irqrestore(&dev->lock, flags); | ||
3415 | return 0; | ||
3416 | } | ||
3417 | |||
3418 | /* PCI device parameters */ | ||
3419 | static const struct pci_device_id pci_id[] = { | ||
3420 | { | ||
3421 | PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096), | ||
3422 | .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, | ||
3423 | .class_mask = 0xffffffff, | ||
3424 | }, | ||
3425 | {}, | ||
3426 | }; | ||
3427 | MODULE_DEVICE_TABLE(pci, pci_id); | ||
3428 | |||
3429 | /* PCI functions */ | ||
3430 | static struct pci_driver udc_pci_driver = { | ||
3431 | .name = (char *) name, | ||
3432 | .id_table = pci_id, | ||
3433 | .probe = udc_pci_probe, | ||
3434 | .remove = udc_pci_remove, | ||
3435 | }; | ||
3436 | |||
3437 | /* Inits driver */ | ||
3438 | static int __init init(void) | ||
3439 | { | ||
3440 | return pci_register_driver(&udc_pci_driver); | ||
3441 | } | ||
3442 | module_init(init); | ||
3443 | |||
3444 | /* Cleans driver */ | ||
3445 | static void __exit cleanup(void) | ||
3446 | { | ||
3447 | pci_unregister_driver(&udc_pci_driver); | ||
3448 | } | ||
3449 | module_exit(cleanup); | ||
3450 | |||
3451 | MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION); | ||
3452 | MODULE_AUTHOR("Thomas Dahlmann"); | ||
3453 | MODULE_LICENSE("GPL"); | ||
3454 | |||
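The endpoint interrupt dispatch in udc_irq() above depends on the ep_irqsts layout: IN endpoints occupy bits 0-15 and OUT endpoints bits 16-31, so the bit index alone selects the handler. A minimal standalone sketch of that dispatch pattern follows; the constants and the stub handlers standing in for udc_data_in_isr()/udc_data_out_isr() are illustrative only, not the driver's code.

#include <stdint.h>
#include <stdio.h>

#define EP_NUM          32      /* bits in the endpoint IRQ status register */
#define EPIN_NUM        16      /* IN endpoints occupy bits 0..15 */
#define EPINT_OUT_EP0   16      /* bit of the control OUT endpoint */

/* stub handlers standing in for udc_data_in_isr()/udc_data_out_isr() */
static void handle_in(unsigned int ep)  { printf("IN  ep%u\n", ep); }
static void handle_out(unsigned int ep) { printf("OUT ep%u\n", ep - EPIN_NUM); }

/* dispatch data endpoint interrupts; EP0 (bits 0 and 16) is handled separately */
static void dispatch_ep_irqs(uint32_t ep_irqsts)
{
	unsigned int i;

	for (i = 1; i < EP_NUM; i++) {
		uint32_t ep_irq = 1u << i;

		if (!(ep_irqsts & ep_irq) || i == EPINT_OUT_EP0)
			continue;
		if (i > EPIN_NUM)       /* upper half of the register: OUT endpoints */
			handle_out(i);
		else                    /* lower half: IN endpoints */
			handle_in(i);
	}
}

int main(void)
{
	/* pretend IN ep2 (bit 2) and OUT ep2 (bit 18) raised interrupts */
	dispatch_ep_irqs((1u << 2) | (1u << 18));
	return 0;
}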
diff --git a/drivers/usb/gadget/amd5536udc.h b/drivers/usb/gadget/amd5536udc.h new file mode 100644 index 000000000000..4bbabbbfc93f --- /dev/null +++ b/drivers/usb/gadget/amd5536udc.h | |||
@@ -0,0 +1,626 @@ | |||
1 | /* | ||
2 | * amd5536udc.h -- header for AMD 5536 UDC high/full speed USB device controller | ||
3 | * | ||
4 | * Copyright (C) 2007 AMD (http://www.amd.com) | ||
5 | * Author: Thomas Dahlmann | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #ifndef AMD5536UDC_H | ||
23 | #define AMD5536UDC_H | ||
24 | |||
25 | /* various constants */ | ||
26 | #define UDC_RDE_TIMER_SECONDS 1 | ||
27 | #define UDC_RDE_TIMER_DIV 10 | ||
28 | #define UDC_POLLSTALL_TIMER_USECONDS 500 | ||
29 | |||
30 | /* Hs AMD5536 chip rev. */ | ||
31 | #define UDC_HSA0_REV 1 | ||
32 | #define UDC_HSB1_REV 2 | ||
33 | |||
34 | /* | ||
35 | * SETUP usb commands | ||
36 | * needed because some SETUPs are handled in hw but must still be passed | ||
37 | * to the gadget driver above | ||
38 | * SET_CONFIG | ||
39 | */ | ||
40 | #define UDC_SETCONFIG_DWORD0 0x00000900 | ||
41 | #define UDC_SETCONFIG_DWORD0_VALUE_MASK 0xffff0000 | ||
42 | #define UDC_SETCONFIG_DWORD0_VALUE_OFS 16 | ||
43 | |||
44 | #define UDC_SETCONFIG_DWORD1 0x00000000 | ||
45 | |||
46 | /* SET_INTERFACE */ | ||
47 | #define UDC_SETINTF_DWORD0 0x00000b00 | ||
48 | #define UDC_SETINTF_DWORD0_ALT_MASK 0xffff0000 | ||
49 | #define UDC_SETINTF_DWORD0_ALT_OFS 16 | ||
50 | |||
51 | #define UDC_SETINTF_DWORD1 0x00000000 | ||
52 | #define UDC_SETINTF_DWORD1_INTF_MASK 0x0000ffff | ||
53 | #define UDC_SETINTF_DWORD1_INTF_OFS 0 | ||
54 | |||
55 | /* Mass storage reset */ | ||
56 | #define UDC_MSCRES_DWORD0 0x0000ff21 | ||
57 | #define UDC_MSCRES_DWORD1 0x00000000 | ||
58 | |||
59 | /* Global CSR's -------------------------------------------------------------*/ | ||
60 | #define UDC_CSR_ADDR 0x500 | ||
61 | |||
62 | /* EP NE bits */ | ||
63 | /* EP number */ | ||
64 | #define UDC_CSR_NE_NUM_MASK 0x0000000f | ||
65 | #define UDC_CSR_NE_NUM_OFS 0 | ||
66 | /* EP direction */ | ||
67 | #define UDC_CSR_NE_DIR_MASK 0x00000010 | ||
68 | #define UDC_CSR_NE_DIR_OFS 4 | ||
69 | /* EP type */ | ||
70 | #define UDC_CSR_NE_TYPE_MASK 0x00000060 | ||
71 | #define UDC_CSR_NE_TYPE_OFS 5 | ||
72 | /* EP config number */ | ||
73 | #define UDC_CSR_NE_CFG_MASK 0x00000780 | ||
74 | #define UDC_CSR_NE_CFG_OFS 7 | ||
75 | /* EP interface number */ | ||
76 | #define UDC_CSR_NE_INTF_MASK 0x00007800 | ||
77 | #define UDC_CSR_NE_INTF_OFS 11 | ||
78 | /* EP alt setting */ | ||
79 | #define UDC_CSR_NE_ALT_MASK 0x00078000 | ||
80 | #define UDC_CSR_NE_ALT_OFS 15 | ||
81 | |||
82 | /* max pkt */ | ||
83 | #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000 | ||
84 | #define UDC_CSR_NE_MAX_PKT_OFS 19 | ||
85 | |||
86 | /* Device Config Register ---------------------------------------------------*/ | ||
87 | #define UDC_DEVCFG_ADDR 0x400 | ||
88 | |||
89 | #define UDC_DEVCFG_SOFTRESET 31 | ||
90 | #define UDC_DEVCFG_HNPSFEN 30 | ||
91 | #define UDC_DEVCFG_DMARST 29 | ||
92 | #define UDC_DEVCFG_SET_DESC 18 | ||
93 | #define UDC_DEVCFG_CSR_PRG 17 | ||
94 | #define UDC_DEVCFG_STATUS 7 | ||
95 | #define UDC_DEVCFG_DIR 6 | ||
96 | #define UDC_DEVCFG_PI 5 | ||
97 | #define UDC_DEVCFG_SS 4 | ||
98 | #define UDC_DEVCFG_SP 3 | ||
99 | #define UDC_DEVCFG_RWKP 2 | ||
100 | |||
101 | #define UDC_DEVCFG_SPD_MASK 0x3 | ||
102 | #define UDC_DEVCFG_SPD_OFS 0 | ||
103 | #define UDC_DEVCFG_SPD_HS 0x0 | ||
104 | #define UDC_DEVCFG_SPD_FS 0x1 | ||
105 | #define UDC_DEVCFG_SPD_LS 0x2 | ||
106 | /*#define UDC_DEVCFG_SPD_FS 0x3*/ | ||
107 | |||
108 | |||
109 | /* Device Control Register --------------------------------------------------*/ | ||
110 | #define UDC_DEVCTL_ADDR 0x404 | ||
111 | |||
112 | #define UDC_DEVCTL_THLEN_MASK 0xff000000 | ||
113 | #define UDC_DEVCTL_THLEN_OFS 24 | ||
114 | |||
115 | #define UDC_DEVCTL_BRLEN_MASK 0x00ff0000 | ||
116 | #define UDC_DEVCTL_BRLEN_OFS 16 | ||
117 | |||
118 | #define UDC_DEVCTL_CSR_DONE 13 | ||
119 | #define UDC_DEVCTL_DEVNAK 12 | ||
120 | #define UDC_DEVCTL_SD 10 | ||
121 | #define UDC_DEVCTL_MODE 9 | ||
122 | #define UDC_DEVCTL_BREN 8 | ||
123 | #define UDC_DEVCTL_THE 7 | ||
124 | #define UDC_DEVCTL_BF 6 | ||
125 | #define UDC_DEVCTL_BE 5 | ||
126 | #define UDC_DEVCTL_DU 4 | ||
127 | #define UDC_DEVCTL_TDE 3 | ||
128 | #define UDC_DEVCTL_RDE 2 | ||
129 | #define UDC_DEVCTL_RES 0 | ||
130 | |||
131 | |||
132 | /* Device Status Register ---------------------------------------------------*/ | ||
133 | #define UDC_DEVSTS_ADDR 0x408 | ||
134 | |||
135 | #define UDC_DEVSTS_TS_MASK 0xfffc0000 | ||
136 | #define UDC_DEVSTS_TS_OFS 18 | ||
137 | |||
138 | #define UDC_DEVSTS_SESSVLD 17 | ||
139 | #define UDC_DEVSTS_PHY_ERROR 16 | ||
140 | #define UDC_DEVSTS_RXFIFO_EMPTY 15 | ||
141 | |||
142 | #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000 | ||
143 | #define UDC_DEVSTS_ENUM_SPEED_OFS 13 | ||
144 | #define UDC_DEVSTS_ENUM_SPEED_FULL 1 | ||
145 | #define UDC_DEVSTS_ENUM_SPEED_HIGH 0 | ||
146 | |||
147 | #define UDC_DEVSTS_SUSP 12 | ||
148 | |||
149 | #define UDC_DEVSTS_ALT_MASK 0x00000f00 | ||
150 | #define UDC_DEVSTS_ALT_OFS 8 | ||
151 | |||
152 | #define UDC_DEVSTS_INTF_MASK 0x000000f0 | ||
153 | #define UDC_DEVSTS_INTF_OFS 4 | ||
154 | |||
155 | #define UDC_DEVSTS_CFG_MASK 0x0000000f | ||
156 | #define UDC_DEVSTS_CFG_OFS 0 | ||
157 | |||
158 | |||
159 | /* Device Interrupt Register ------------------------------------------------*/ | ||
160 | #define UDC_DEVINT_ADDR 0x40c | ||
161 | |||
162 | #define UDC_DEVINT_SVC 7 | ||
163 | #define UDC_DEVINT_ENUM 6 | ||
164 | #define UDC_DEVINT_SOF 5 | ||
165 | #define UDC_DEVINT_US 4 | ||
166 | #define UDC_DEVINT_UR 3 | ||
167 | #define UDC_DEVINT_ES 2 | ||
168 | #define UDC_DEVINT_SI 1 | ||
169 | #define UDC_DEVINT_SC 0 | ||
170 | |||
171 | /* Device Interrupt Mask Register -------------------------------------------*/ | ||
172 | #define UDC_DEVINT_MSK_ADDR 0x410 | ||
173 | |||
174 | #define UDC_DEVINT_MSK 0x7f | ||
175 | |||
176 | /* Endpoint Interrupt Register ----------------------------------------------*/ | ||
177 | #define UDC_EPINT_ADDR 0x414 | ||
178 | |||
179 | #define UDC_EPINT_OUT_MASK 0xffff0000 | ||
180 | #define UDC_EPINT_OUT_OFS 16 | ||
181 | #define UDC_EPINT_IN_MASK 0x0000ffff | ||
182 | #define UDC_EPINT_IN_OFS 0 | ||
183 | |||
184 | #define UDC_EPINT_IN_EP0 0 | ||
185 | #define UDC_EPINT_IN_EP1 1 | ||
186 | #define UDC_EPINT_IN_EP2 2 | ||
187 | #define UDC_EPINT_IN_EP3 3 | ||
188 | #define UDC_EPINT_OUT_EP0 16 | ||
189 | #define UDC_EPINT_OUT_EP1 17 | ||
190 | #define UDC_EPINT_OUT_EP2 18 | ||
191 | #define UDC_EPINT_OUT_EP3 19 | ||
192 | |||
193 | #define UDC_EPINT_EP0_ENABLE_MSK 0x001e001e | ||
194 | |||
195 | /* Endpoint Interrupt Mask Register -----------------------------------------*/ | ||
196 | #define UDC_EPINT_MSK_ADDR 0x418 | ||
197 | |||
198 | #define UDC_EPINT_OUT_MSK_MASK 0xffff0000 | ||
199 | #define UDC_EPINT_OUT_MSK_OFS 16 | ||
200 | #define UDC_EPINT_IN_MSK_MASK 0x0000ffff | ||
201 | #define UDC_EPINT_IN_MSK_OFS 0 | ||
202 | |||
203 | #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff | ||
204 | /* mask non-EP0 endpoints */ | ||
205 | #define UDC_EPDATAINT_MSK_DISABLE 0xfffefffe | ||
206 | /* mask all dev interrupts */ | ||
207 | #define UDC_DEV_MSK_DISABLE 0x7f | ||
208 | |||
209 | /* Endpoint-specific CSR's --------------------------------------------------*/ | ||
210 | #define UDC_EPREGS_ADDR 0x0 | ||
211 | #define UDC_EPIN_REGS_ADDR 0x0 | ||
212 | #define UDC_EPOUT_REGS_ADDR 0x200 | ||
213 | |||
214 | #define UDC_EPCTL_ADDR 0x0 | ||
215 | |||
216 | #define UDC_EPCTL_RRDY 9 | ||
217 | #define UDC_EPCTL_CNAK 8 | ||
218 | #define UDC_EPCTL_SNAK 7 | ||
219 | #define UDC_EPCTL_NAK 6 | ||
220 | |||
221 | #define UDC_EPCTL_ET_MASK 0x00000030 | ||
222 | #define UDC_EPCTL_ET_OFS 4 | ||
223 | #define UDC_EPCTL_ET_CONTROL 0 | ||
224 | #define UDC_EPCTL_ET_ISO 1 | ||
225 | #define UDC_EPCTL_ET_BULK 2 | ||
226 | #define UDC_EPCTL_ET_INTERRUPT 3 | ||
227 | |||
228 | #define UDC_EPCTL_P 3 | ||
229 | #define UDC_EPCTL_SN 2 | ||
230 | #define UDC_EPCTL_F 1 | ||
231 | #define UDC_EPCTL_S 0 | ||
232 | |||
233 | /* Endpoint Status Registers ------------------------------------------------*/ | ||
234 | #define UDC_EPSTS_ADDR 0x4 | ||
235 | |||
236 | #define UDC_EPSTS_RX_PKT_SIZE_MASK 0x007ff800 | ||
237 | #define UDC_EPSTS_RX_PKT_SIZE_OFS 11 | ||
238 | |||
239 | #define UDC_EPSTS_TDC 10 | ||
240 | #define UDC_EPSTS_HE 9 | ||
241 | #define UDC_EPSTS_BNA 7 | ||
242 | #define UDC_EPSTS_IN 6 | ||
243 | |||
244 | #define UDC_EPSTS_OUT_MASK 0x00000030 | ||
245 | #define UDC_EPSTS_OUT_OFS 4 | ||
246 | #define UDC_EPSTS_OUT_DATA 1 | ||
247 | #define UDC_EPSTS_OUT_DATA_CLEAR 0x10 | ||
248 | #define UDC_EPSTS_OUT_SETUP 2 | ||
249 | #define UDC_EPSTS_OUT_SETUP_CLEAR 0x20 | ||
250 | #define UDC_EPSTS_OUT_CLEAR 0x30 | ||
251 | |||
252 | /* Endpoint Buffer Size IN/ Receive Packet Frame Number OUT Registers ------*/ | ||
253 | #define UDC_EPIN_BUFF_SIZE_ADDR 0x8 | ||
254 | #define UDC_EPOUT_FRAME_NUMBER_ADDR 0x8 | ||
255 | |||
256 | #define UDC_EPIN_BUFF_SIZE_MASK 0x0000ffff | ||
257 | #define UDC_EPIN_BUFF_SIZE_OFS 0 | ||
258 | /* EP0in txfifo = 128 bytes*/ | ||
259 | #define UDC_EPIN0_BUFF_SIZE 32 | ||
260 | /* EP0in fullspeed txfifo = 128 bytes*/ | ||
261 | #define UDC_FS_EPIN0_BUFF_SIZE 32 | ||
262 | |||
263 | /* fifo size mult = fifo size / max packet */ | ||
264 | #define UDC_EPIN_BUFF_SIZE_MULT 2 | ||
265 | |||
266 | /* EPin data fifo size = 1024 bytes DOUBLE BUFFERING */ | ||
267 | #define UDC_EPIN_BUFF_SIZE 256 | ||
268 | /* EPin small INT data fifo size = 128 bytes */ | ||
269 | #define UDC_EPIN_SMALLINT_BUFF_SIZE 32 | ||
270 | |||
271 | /* EPin fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */ | ||
272 | #define UDC_FS_EPIN_BUFF_SIZE 32 | ||
273 | |||
274 | #define UDC_EPOUT_FRAME_NUMBER_MASK 0x0000ffff | ||
275 | #define UDC_EPOUT_FRAME_NUMBER_OFS 0 | ||
276 | |||
277 | /* Endpoint Buffer Size OUT/Max Packet Size Registers -----------------------*/ | ||
278 | #define UDC_EPOUT_BUFF_SIZE_ADDR 0x0c | ||
279 | #define UDC_EP_MAX_PKT_SIZE_ADDR 0x0c | ||
280 | |||
281 | #define UDC_EPOUT_BUFF_SIZE_MASK 0xffff0000 | ||
282 | #define UDC_EPOUT_BUFF_SIZE_OFS 16 | ||
283 | #define UDC_EP_MAX_PKT_SIZE_MASK 0x0000ffff | ||
284 | #define UDC_EP_MAX_PKT_SIZE_OFS 0 | ||
285 | /* EP0in max packet size = 64 bytes */ | ||
286 | #define UDC_EP0IN_MAX_PKT_SIZE 64 | ||
287 | /* EP0out max packet size = 64 bytes */ | ||
288 | #define UDC_EP0OUT_MAX_PKT_SIZE 64 | ||
289 | /* EP0in fullspeed max packet size = 64 bytes */ | ||
290 | #define UDC_FS_EP0IN_MAX_PKT_SIZE 64 | ||
291 | /* EP0out fullspeed max packet size = 64 bytes */ | ||
292 | #define UDC_FS_EP0OUT_MAX_PKT_SIZE 64 | ||
293 | |||
294 | /* | ||
295 | * Endpoint dma descriptors ------------------------------------------------ | ||
296 | * | ||
297 | * Setup data, Status dword | ||
298 | */ | ||
299 | #define UDC_DMA_STP_STS_CFG_MASK 0x0fff0000 | ||
300 | #define UDC_DMA_STP_STS_CFG_OFS 16 | ||
301 | #define UDC_DMA_STP_STS_CFG_ALT_MASK 0x000f0000 | ||
302 | #define UDC_DMA_STP_STS_CFG_ALT_OFS 16 | ||
303 | #define UDC_DMA_STP_STS_CFG_INTF_MASK 0x00f00000 | ||
304 | #define UDC_DMA_STP_STS_CFG_INTF_OFS 20 | ||
305 | #define UDC_DMA_STP_STS_CFG_NUM_MASK 0x0f000000 | ||
306 | #define UDC_DMA_STP_STS_CFG_NUM_OFS 24 | ||
307 | #define UDC_DMA_STP_STS_RX_MASK 0x30000000 | ||
308 | #define UDC_DMA_STP_STS_RX_OFS 28 | ||
309 | #define UDC_DMA_STP_STS_BS_MASK 0xc0000000 | ||
310 | #define UDC_DMA_STP_STS_BS_OFS 30 | ||
311 | #define UDC_DMA_STP_STS_BS_HOST_READY 0 | ||
312 | #define UDC_DMA_STP_STS_BS_DMA_BUSY 1 | ||
313 | #define UDC_DMA_STP_STS_BS_DMA_DONE 2 | ||
314 | #define UDC_DMA_STP_STS_BS_HOST_BUSY 3 | ||
315 | /* IN data, Status dword */ | ||
316 | #define UDC_DMA_IN_STS_TXBYTES_MASK 0x0000ffff | ||
317 | #define UDC_DMA_IN_STS_TXBYTES_OFS 0 | ||
318 | #define UDC_DMA_IN_STS_FRAMENUM_MASK 0x07ff0000 | ||
319 | #define UDC_DMA_IN_STS_FRAMENUM_OFS 0 | ||
320 | #define UDC_DMA_IN_STS_L 27 | ||
321 | #define UDC_DMA_IN_STS_TX_MASK 0x30000000 | ||
322 | #define UDC_DMA_IN_STS_TX_OFS 28 | ||
323 | #define UDC_DMA_IN_STS_BS_MASK 0xc0000000 | ||
324 | #define UDC_DMA_IN_STS_BS_OFS 30 | ||
325 | #define UDC_DMA_IN_STS_BS_HOST_READY 0 | ||
326 | #define UDC_DMA_IN_STS_BS_DMA_BUSY 1 | ||
327 | #define UDC_DMA_IN_STS_BS_DMA_DONE 2 | ||
328 | #define UDC_DMA_IN_STS_BS_HOST_BUSY 3 | ||
329 | /* OUT data, Status dword */ | ||
330 | #define UDC_DMA_OUT_STS_RXBYTES_MASK 0x0000ffff | ||
331 | #define UDC_DMA_OUT_STS_RXBYTES_OFS 0 | ||
332 | #define UDC_DMA_OUT_STS_FRAMENUM_MASK 0x07ff0000 | ||
333 | #define UDC_DMA_OUT_STS_FRAMENUM_OFS 0 | ||
334 | #define UDC_DMA_OUT_STS_L 27 | ||
335 | #define UDC_DMA_OUT_STS_RX_MASK 0x30000000 | ||
336 | #define UDC_DMA_OUT_STS_RX_OFS 28 | ||
337 | #define UDC_DMA_OUT_STS_BS_MASK 0xc0000000 | ||
338 | #define UDC_DMA_OUT_STS_BS_OFS 30 | ||
339 | #define UDC_DMA_OUT_STS_BS_HOST_READY 0 | ||
340 | #define UDC_DMA_OUT_STS_BS_DMA_BUSY 1 | ||
341 | #define UDC_DMA_OUT_STS_BS_DMA_DONE 2 | ||
342 | #define UDC_DMA_OUT_STS_BS_HOST_BUSY 3 | ||
343 | /* max ep0in packet */ | ||
344 | #define UDC_EP0IN_MAXPACKET 1000 | ||
345 | /* max dma packet */ | ||
346 | #define UDC_DMA_MAXPACKET 65536 | ||
347 | |||
348 | /* un-usable DMA address */ | ||
349 | #define DMA_DONT_USE (~(dma_addr_t) 0 ) | ||
350 | |||
351 | /* other Endpoint register addresses and values-----------------------------*/ | ||
352 | #define UDC_EP_SUBPTR_ADDR 0x10 | ||
353 | #define UDC_EP_DESPTR_ADDR 0x14 | ||
354 | #define UDC_EP_WRITE_CONFIRM_ADDR 0x1c | ||
355 | |||
356 | /* EP number as laid out in AHB space */ | ||
357 | #define UDC_EP_NUM 32 | ||
358 | #define UDC_EPIN_NUM 16 | ||
359 | #define UDC_EPIN_NUM_USED 5 | ||
360 | #define UDC_EPOUT_NUM 16 | ||
361 | /* EP number of EP's really used = EP0 + 8 data EP's */ | ||
362 | #define UDC_USED_EP_NUM 9 | ||
363 | /* UDC CSR regs are aligned but AHB regs are not - offset for OUT EPs */ | ||
364 | #define UDC_CSR_EP_OUT_IX_OFS 12 | ||
365 | |||
366 | #define UDC_EP0OUT_IX 16 | ||
367 | #define UDC_EP0IN_IX 0 | ||
368 | |||
369 | /* Rx fifo address and size = 1k -------------------------------------------*/ | ||
370 | #define UDC_RXFIFO_ADDR 0x800 | ||
371 | #define UDC_RXFIFO_SIZE 0x400 | ||
372 | |||
373 | /* Tx fifo address and size = 1.5k -----------------------------------------*/ | ||
374 | #define UDC_TXFIFO_ADDR 0xc00 | ||
375 | #define UDC_TXFIFO_SIZE 0x600 | ||
376 | |||
377 | /* default data endpoints --------------------------------------------------*/ | ||
378 | #define UDC_EPIN_STATUS_IX 1 | ||
379 | #define UDC_EPIN_IX 2 | ||
380 | #define UDC_EPOUT_IX 18 | ||
381 | |||
382 | /* general constants -------------------------------------------------------*/ | ||
383 | #define UDC_DWORD_BYTES 4 | ||
384 | #define UDC_BITS_PER_BYTE_SHIFT 3 | ||
385 | #define UDC_BYTE_MASK 0xff | ||
386 | #define UDC_BITS_PER_BYTE 8 | ||
387 | |||
388 | /*---------------------------------------------------------------------------*/ | ||
389 | /* UDC CSR's */ | ||
390 | struct udc_csrs { | ||
391 | |||
392 | /* sca - setup command address */ | ||
393 | u32 sca; | ||
394 | |||
395 | /* ep ne's */ | ||
396 | u32 ne[UDC_USED_EP_NUM]; | ||
397 | } __attribute__ ((packed)); | ||
398 | |||
399 | /* AHB subsystem CSR registers */ | ||
400 | struct udc_regs { | ||
401 | |||
402 | /* device configuration */ | ||
403 | u32 cfg; | ||
404 | |||
405 | /* device control */ | ||
406 | u32 ctl; | ||
407 | |||
408 | /* device status */ | ||
409 | u32 sts; | ||
410 | |||
411 | /* device interrupt */ | ||
412 | u32 irqsts; | ||
413 | |||
414 | /* device interrupt mask */ | ||
415 | u32 irqmsk; | ||
416 | |||
417 | /* endpoint interrupt */ | ||
418 | u32 ep_irqsts; | ||
419 | |||
420 | /* endpoint interrupt mask */ | ||
421 | u32 ep_irqmsk; | ||
422 | } __attribute__ ((packed)); | ||
423 | |||
424 | /* endpoint specific registers */ | ||
425 | struct udc_ep_regs { | ||
426 | |||
427 | /* endpoint control */ | ||
428 | u32 ctl; | ||
429 | |||
430 | /* endpoint status */ | ||
431 | u32 sts; | ||
432 | |||
433 | /* endpoint buffer size in/ receive packet frame number out */ | ||
434 | u32 bufin_framenum; | ||
435 | |||
436 | /* endpoint buffer size out/max packet size */ | ||
437 | u32 bufout_maxpkt; | ||
438 | |||
439 | /* endpoint setup buffer pointer */ | ||
440 | u32 subptr; | ||
441 | |||
442 | /* endpoint data descriptor pointer */ | ||
443 | u32 desptr; | ||
444 | |||
445 | /* reserved */ | ||
446 | u32 reserved; | ||
447 | |||
448 | /* write/read confirmation */ | ||
449 | u32 confirm; | ||
450 | |||
451 | } __attribute__ ((packed)); | ||
452 | |||
453 | /* control data DMA desc */ | ||
454 | struct udc_stp_dma { | ||
455 | /* status quadlet */ | ||
456 | u32 status; | ||
457 | /* reserved */ | ||
458 | u32 _reserved; | ||
459 | /* first setup word */ | ||
460 | u32 data12; | ||
461 | /* second setup word */ | ||
462 | u32 data34; | ||
463 | } __attribute__ ((aligned (16))); | ||
464 | |||
465 | /* normal data DMA desc */ | ||
466 | struct udc_data_dma { | ||
467 | /* status quadlet */ | ||
468 | u32 status; | ||
469 | /* reserved */ | ||
470 | u32 _reserved; | ||
471 | /* buffer pointer */ | ||
472 | u32 bufptr; | ||
473 | /* next descriptor pointer */ | ||
474 | u32 next; | ||
475 | } __attribute__ ((aligned (16))); | ||
476 | |||
477 | /* request packet */ | ||
478 | struct udc_request { | ||
479 | /* embedded gadget ep */ | ||
480 | struct usb_request req; | ||
481 | |||
482 | /* flags */ | ||
483 | unsigned dma_going : 1, | ||
484 | dma_mapping : 1, | ||
485 | dma_done : 1; | ||
486 | /* phys. address */ | ||
487 | dma_addr_t td_phys; | ||
488 | /* first dma desc. of chain */ | ||
489 | struct udc_data_dma *td_data; | ||
490 | /* last dma desc. of chain */ | ||
491 | struct udc_data_dma *td_data_last; | ||
492 | struct list_head queue; | ||
493 | |||
494 | /* chain length */ | ||
495 | unsigned chain_len; | ||
496 | |||
497 | }; | ||
498 | |||
499 | /* UDC specific endpoint parameters */ | ||
500 | struct udc_ep { | ||
501 | struct usb_ep ep; | ||
502 | struct udc_ep_regs __iomem *regs; | ||
503 | u32 __iomem *txfifo; | ||
504 | u32 __iomem *dma; | ||
505 | dma_addr_t td_phys; | ||
506 | dma_addr_t td_stp_dma; | ||
507 | struct udc_stp_dma *td_stp; | ||
508 | struct udc_data_dma *td; | ||
509 | /* temp request */ | ||
510 | struct udc_request *req; | ||
511 | unsigned req_used; | ||
512 | unsigned req_completed; | ||
513 | /* dummy DMA desc for BNA dummy */ | ||
514 | struct udc_request *bna_dummy_req; | ||
515 | unsigned bna_occurred; | ||
516 | |||
517 | /* NAK state */ | ||
518 | unsigned naking; | ||
519 | |||
520 | struct udc *dev; | ||
521 | |||
522 | /* queue for requests */ | ||
523 | struct list_head queue; | ||
524 | const struct usb_endpoint_descriptor *desc; | ||
525 | unsigned halted; | ||
526 | unsigned cancel_transfer; | ||
527 | unsigned num : 5, | ||
528 | fifo_depth : 14, | ||
529 | in : 1; | ||
530 | }; | ||
531 | |||
532 | /* device struct */ | ||
533 | struct udc { | ||
534 | struct usb_gadget gadget; | ||
535 | spinlock_t lock; /* protects all state */ | ||
536 | /* all endpoints */ | ||
537 | struct udc_ep ep[UDC_EP_NUM]; | ||
538 | struct usb_gadget_driver *driver; | ||
539 | /* operational flags */ | ||
540 | unsigned active : 1, | ||
541 | stall_ep0in : 1, | ||
542 | waiting_zlp_ack_ep0in : 1, | ||
543 | set_cfg_not_acked : 1, | ||
544 | irq_registered : 1, | ||
545 | data_ep_enabled : 1, | ||
546 | data_ep_queued : 1, | ||
547 | mem_region : 1, | ||
548 | sys_suspended : 1, | ||
549 | connected; | ||
550 | |||
551 | u16 chiprev; | ||
552 | |||
553 | /* registers */ | ||
554 | struct pci_dev *pdev; | ||
555 | struct udc_csrs __iomem *csr; | ||
556 | struct udc_regs __iomem *regs; | ||
557 | struct udc_ep_regs __iomem *ep_regs; | ||
558 | u32 __iomem *rxfifo; | ||
559 | u32 __iomem *txfifo; | ||
560 | |||
561 | /* DMA desc pools */ | ||
562 | struct pci_pool *data_requests; | ||
563 | struct pci_pool *stp_requests; | ||
564 | |||
565 | /* device data */ | ||
566 | unsigned long phys_addr; | ||
567 | void __iomem *virt_addr; | ||
568 | unsigned irq; | ||
569 | |||
570 | /* states */ | ||
571 | u16 cur_config; | ||
572 | u16 cur_intf; | ||
573 | u16 cur_alt; | ||
574 | }; | ||
575 | |||
576 | /* setup request data */ | ||
577 | union udc_setup_data { | ||
578 | u32 data[2]; | ||
579 | struct usb_ctrlrequest request; | ||
580 | }; | ||
581 | |||
582 | /* | ||
583 | *--------------------------------------------------------------------------- | ||
584 | * SET and GET bitfields in u32 values | ||
585 | * via constants for mask/offset: | ||
586 | * <bit_field_stub_name> is the text between | ||
587 | * UDC_ and _MASK|_OFS of appropriate | ||
588 | * constant | ||
589 | * | ||
590 | * set bitfield value in u32 u32Val | ||
591 | */ | ||
592 | #define AMD_ADDBITS(u32Val, bitfield_val, bitfield_stub_name) \ | ||
593 | (((u32Val) & (((u32) ~((u32) bitfield_stub_name##_MASK)))) \ | ||
594 | | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \ | ||
595 | & ((u32) bitfield_stub_name##_MASK))) | ||
596 | |||
597 | /* | ||
598 | * set bitfield value in zero-initialized u32 u32Val | ||
599 | * => bitfield bits in u32Val are all zero | ||
600 | */ | ||
601 | #define AMD_INIT_SETBITS(u32Val, bitfield_val, bitfield_stub_name) \ | ||
602 | ((u32Val) \ | ||
603 | | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \ | ||
604 | & ((u32) bitfield_stub_name##_MASK))) | ||
605 | |||
606 | /* get bitfield value from u32 u32Val */ | ||
607 | #define AMD_GETBITS(u32Val, bitfield_stub_name) \ | ||
608 | ((u32Val & ((u32) bitfield_stub_name##_MASK)) \ | ||
609 | >> ((u32) bitfield_stub_name##_OFS)) | ||
610 | |||
611 | /* SET and GET bits in u32 values ------------------------------------------*/ | ||
612 | #define AMD_BIT(bit_stub_name) (1 << bit_stub_name) | ||
613 | #define AMD_UNMASK_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name)) | ||
614 | #define AMD_CLEAR_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name)) | ||
615 | |||
616 | /* debug macros ------------------------------------------------------------*/ | ||
617 | |||
618 | #define DBG(udc , args...) dev_dbg(&(udc)->pdev->dev, args) | ||
619 | |||
620 | #ifdef UDC_VERBOSE | ||
621 | #define VDBG DBG | ||
622 | #else | ||
623 | #define VDBG(udc , args...) do {} while (0) | ||
624 | #endif | ||
625 | |||
626 | #endif /* #ifdef AMD5536UDC_H */ | ||
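The AMD_ADDBITS and AMD_GETBITS helpers above build the mask and offset names by token-pasting the field stub with _MASK and _OFS. A small self-contained sketch of that pattern is shown below; it mirrors the driver's macros but uses a hypothetical DEMO_FIELD layout and shortened macro names rather than one of the real UDC register fields.

#include <stdint.h>
#include <stdio.h>

/* hypothetical 4-bit field located at bits 8..11 of a 32-bit register */
#define DEMO_FIELD_MASK 0x00000f00
#define DEMO_FIELD_OFS  8

/* same shape as the driver's helpers: the stub expands to <stub>_MASK/<stub>_OFS */
#define ADDBITS(u32Val, bitfield_val, stub) \
	(((u32Val) & ~((uint32_t)stub##_MASK)) \
	 | (((uint32_t)(bitfield_val) << stub##_OFS) & (uint32_t)stub##_MASK))

#define GETBITS(u32Val, stub) \
	(((u32Val) & (uint32_t)stub##_MASK) >> stub##_OFS)

int main(void)
{
	uint32_t reg = 0xdeadbe0fu;

	reg = ADDBITS(reg, 0x5, DEMO_FIELD);    /* write 5 into the field */
	printf("reg = 0x%08x, field = %u\n",
	       (unsigned int)reg, (unsigned int)GETBITS(reg, DEMO_FIELD));
	return 0;
}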
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index dbaf867436df..a3376739a81b 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c | |||
@@ -305,6 +305,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address"); | |||
305 | #define DEV_CONFIG_CDC | 305 | #define DEV_CONFIG_CDC |
306 | #endif | 306 | #endif |
307 | 307 | ||
308 | #ifdef CONFIG_USB_GADGET_AMD5536UDC | ||
309 | #define DEV_CONFIG_CDC | ||
310 | #endif | ||
311 | |||
308 | 312 | ||
309 | /*-------------------------------------------------------------------------*/ | 313 | /*-------------------------------------------------------------------------*/ |
310 | 314 | ||
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index 53e9139ba388..f7f159c1002b 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h | |||
@@ -17,6 +17,12 @@ | |||
17 | #define gadget_is_net2280(g) 0 | 17 | #define gadget_is_net2280(g) 0 |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #ifdef CONFIG_USB_GADGET_AMD5536UDC | ||
21 | #define gadget_is_amd5536udc(g) !strcmp("amd5536udc", (g)->name) | ||
22 | #else | ||
23 | #define gadget_is_amd5536udc(g) 0 | ||
24 | #endif | ||
25 | |||
20 | #ifdef CONFIG_USB_GADGET_DUMMY_HCD | 26 | #ifdef CONFIG_USB_GADGET_DUMMY_HCD |
21 | #define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name) | 27 | #define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name) |
22 | #else | 28 | #else |
@@ -202,7 +208,9 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget) | |||
202 | return 0x18; | 208 | return 0x18; |
203 | else if (gadget_is_fsl_usb2(gadget)) | 209 | else if (gadget_is_fsl_usb2(gadget)) |
204 | return 0x19; | 210 | return 0x19; |
205 | else if (gadget_is_m66592(gadget)) | 211 | else if (gadget_is_amd5536udc(gadget)) |
206 | return 0x20; | 212 | return 0x20; |
213 | else if (gadget_is_m66592(gadget)) | ||
214 | return 0x21; | ||
207 | return -ENOENT; | 215 | return -ENOENT; |
208 | } | 216 | } |
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 0174a322e007..700dda8a9157 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c | |||
@@ -21,26 +21,18 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/kernel.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/timer.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/list.h> | ||
32 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/delay.h> | ||
26 | #include <linux/io.h> | ||
33 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | |||
34 | #include <linux/usb/ch9.h> | 29 | #include <linux/usb/ch9.h> |
35 | #include <linux/usb_gadget.h> | 30 | #include <linux/usb_gadget.h> |
36 | 31 | ||
37 | #include <asm/io.h> | ||
38 | #include <asm/irq.h> | ||
39 | #include <asm/system.h> | ||
40 | |||
41 | #include "m66592-udc.h" | 32 | #include "m66592-udc.h" |
42 | 33 | ||
43 | MODULE_DESCRIPTION("M66592 USB gadget driiver"); | 34 | |
35 | MODULE_DESCRIPTION("M66592 USB gadget driver"); | ||
44 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
45 | MODULE_AUTHOR("Yoshihiro Shimoda"); | 37 | MODULE_AUTHOR("Yoshihiro Shimoda"); |
46 | 38 | ||
@@ -49,16 +41,21 @@ MODULE_AUTHOR("Yoshihiro Shimoda"); | |||
49 | /* module parameters */ | 41 | /* module parameters */ |
50 | static unsigned short clock = M66592_XTAL24; | 42 | static unsigned short clock = M66592_XTAL24; |
51 | module_param(clock, ushort, 0644); | 43 | module_param(clock, ushort, 0644); |
52 | MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0(default=16384)"); | 44 | MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 " |
45 | "(default=16384)"); | ||
46 | |||
53 | static unsigned short vif = M66592_LDRV; | 47 | static unsigned short vif = M66592_LDRV; |
54 | module_param(vif, ushort, 0644); | 48 | module_param(vif, ushort, 0644); |
55 | MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)"); | 49 | MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0 (default=32768)"); |
56 | static unsigned short endian = 0; | 50 | |
51 | static unsigned short endian; | ||
57 | module_param(endian, ushort, 0644); | 52 | module_param(endian, ushort, 0644); |
58 | MODULE_PARM_DESC(endian, "data endian: big=256, little=0(default=0)"); | 53 | MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)"); |
54 | |||
59 | static unsigned short irq_sense = M66592_INTL; | 55 | static unsigned short irq_sense = M66592_INTL; |
60 | module_param(irq_sense, ushort, 0644); | 56 | module_param(irq_sense, ushort, 0644); |
61 | MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0(default=2)"); | 57 | MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0 " |
58 | "(default=2)"); | ||
62 | 59 | ||
63 | static const char udc_name[] = "m66592_udc"; | 60 | static const char udc_name[] = "m66592_udc"; |
64 | static const char *m66592_ep_name[] = { | 61 | static const char *m66592_ep_name[] = { |
@@ -72,8 +69,8 @@ static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req, | |||
72 | gfp_t gfp_flags); | 69 | gfp_t gfp_flags); |
73 | 70 | ||
74 | static void transfer_complete(struct m66592_ep *ep, | 71 | static void transfer_complete(struct m66592_ep *ep, |
75 | struct m66592_request *req, | 72 | struct m66592_request *req, int status); |
76 | int status); | 73 | |
77 | /*-------------------------------------------------------------------------*/ | 74 | /*-------------------------------------------------------------------------*/ |
78 | static inline u16 get_usb_speed(struct m66592 *m66592) | 75 | static inline u16 get_usb_speed(struct m66592 *m66592) |
79 | { | 76 | { |
@@ -81,25 +78,25 @@ static inline u16 get_usb_speed(struct m66592 *m66592) | |||
81 | } | 78 | } |
82 | 79 | ||
83 | static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum, | 80 | static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum, |
84 | unsigned long reg) | 81 | unsigned long reg) |
85 | { | 82 | { |
86 | u16 tmp; | 83 | u16 tmp; |
87 | 84 | ||
88 | tmp = m66592_read(m66592, M66592_INTENB0); | 85 | tmp = m66592_read(m66592, M66592_INTENB0); |
89 | m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, | 86 | m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, |
90 | M66592_INTENB0); | 87 | M66592_INTENB0); |
91 | m66592_bset(m66592, (1 << pipenum), reg); | 88 | m66592_bset(m66592, (1 << pipenum), reg); |
92 | m66592_write(m66592, tmp, M66592_INTENB0); | 89 | m66592_write(m66592, tmp, M66592_INTENB0); |
93 | } | 90 | } |
94 | 91 | ||
95 | static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum, | 92 | static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum, |
96 | unsigned long reg) | 93 | unsigned long reg) |
97 | { | 94 | { |
98 | u16 tmp; | 95 | u16 tmp; |
99 | 96 | ||
100 | tmp = m66592_read(m66592, M66592_INTENB0); | 97 | tmp = m66592_read(m66592, M66592_INTENB0); |
101 | m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, | 98 | m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, |
102 | M66592_INTENB0); | 99 | M66592_INTENB0); |
103 | m66592_bclr(m66592, (1 << pipenum), reg); | 100 | m66592_bclr(m66592, (1 << pipenum), reg); |
104 | m66592_write(m66592, tmp, M66592_INTENB0); | 101 | m66592_write(m66592, tmp, M66592_INTENB0); |
105 | } | 102 | } |
@@ -108,17 +105,19 @@ static void m66592_usb_connect(struct m66592 *m66592) | |||
108 | { | 105 | { |
109 | m66592_bset(m66592, M66592_CTRE, M66592_INTENB0); | 106 | m66592_bset(m66592, M66592_CTRE, M66592_INTENB0); |
110 | m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, | 107 | m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, |
111 | M66592_INTENB0); | 108 | M66592_INTENB0); |
112 | m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); | 109 | m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); |
113 | 110 | ||
114 | m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG); | 111 | m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG); |
115 | } | 112 | } |
116 | 113 | ||
117 | static void m66592_usb_disconnect(struct m66592 *m66592) | 114 | static void m66592_usb_disconnect(struct m66592 *m66592) |
115 | __releases(m66592->lock) | ||
116 | __acquires(m66592->lock) | ||
118 | { | 117 | { |
119 | m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0); | 118 | m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0); |
120 | m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, | 119 | m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, |
121 | M66592_INTENB0); | 120 | M66592_INTENB0); |
122 | m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); | 121 | m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); |
123 | m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); | 122 | m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); |
124 | 123 | ||
@@ -148,7 +147,7 @@ static inline u16 control_reg_get_pid(struct m66592 *m66592, u16 pipenum) | |||
148 | } | 147 | } |
149 | 148 | ||
150 | static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum, | 149 | static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum, |
151 | u16 pid) | 150 | u16 pid) |
152 | { | 151 | { |
153 | unsigned long offset; | 152 | unsigned long offset; |
154 | 153 | ||
@@ -250,7 +249,7 @@ static inline void pipe_change(struct m66592 *m66592, u16 pipenum) | |||
250 | } | 249 | } |
251 | 250 | ||
252 | static int pipe_buffer_setting(struct m66592 *m66592, | 251 | static int pipe_buffer_setting(struct m66592 *m66592, |
253 | struct m66592_pipe_info *info) | 252 | struct m66592_pipe_info *info) |
254 | { | 253 | { |
255 | u16 bufnum = 0, buf_bsize = 0; | 254 | u16 bufnum = 0, buf_bsize = 0; |
256 | u16 pipecfg = 0; | 255 | u16 pipecfg = 0; |
@@ -287,7 +286,7 @@ static int pipe_buffer_setting(struct m66592 *m66592, | |||
287 | } | 286 | } |
288 | if (m66592->bi_bufnum > M66592_MAX_BUFNUM) { | 287 | if (m66592->bi_bufnum > M66592_MAX_BUFNUM) { |
289 | printk(KERN_ERR "m66592 pipe memory is insufficient(%d)\n", | 288 | printk(KERN_ERR "m66592 pipe memory is insufficient(%d)\n", |
290 | m66592->bi_bufnum); | 289 | m66592->bi_bufnum); |
291 | return -ENOMEM; | 290 | return -ENOMEM; |
292 | } | 291 | } |
293 | 292 | ||
@@ -328,7 +327,7 @@ static void pipe_buffer_release(struct m66592 *m66592, | |||
328 | m66592->bulk--; | 327 | m66592->bulk--; |
329 | } else | 328 | } else |
330 | printk(KERN_ERR "ep_release: unexpect pipenum (%d)\n", | 329 | printk(KERN_ERR "ep_release: unexpect pipenum (%d)\n", |
331 | info->pipe); | 330 | info->pipe); |
332 | } | 331 | } |
333 | 332 | ||
334 | static void pipe_initialize(struct m66592_ep *ep) | 333 | static void pipe_initialize(struct m66592_ep *ep) |
@@ -350,8 +349,8 @@ static void pipe_initialize(struct m66592_ep *ep) | |||
350 | } | 349 | } |
351 | 350 | ||
352 | static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, | 351 | static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, |
353 | const struct usb_endpoint_descriptor *desc, | 352 | const struct usb_endpoint_descriptor *desc, |
354 | u16 pipenum, int dma) | 353 | u16 pipenum, int dma) |
355 | { | 354 | { |
356 | if ((pipenum != 0) && dma) { | 355 | if ((pipenum != 0) && dma) { |
357 | if (m66592->num_dma == 0) { | 356 | if (m66592->num_dma == 0) { |
@@ -385,7 +384,7 @@ static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, | |||
385 | 384 | ||
386 | ep->pipectr = get_pipectr_addr(pipenum); | 385 | ep->pipectr = get_pipectr_addr(pipenum); |
387 | ep->pipenum = pipenum; | 386 | ep->pipenum = pipenum; |
388 | ep->ep.maxpacket = desc->wMaxPacketSize; | 387 | ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize); |
389 | m66592->pipenum2ep[pipenum] = ep; | 388 | m66592->pipenum2ep[pipenum] = ep; |
390 | m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep; | 389 | m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep; |
391 | INIT_LIST_HEAD(&ep->queue); | 390 | INIT_LIST_HEAD(&ep->queue); |
@@ -407,7 +406,7 @@ static void m66592_ep_release(struct m66592_ep *ep) | |||
407 | } | 406 | } |
408 | 407 | ||
409 | static int alloc_pipe_config(struct m66592_ep *ep, | 408 | static int alloc_pipe_config(struct m66592_ep *ep, |
410 | const struct usb_endpoint_descriptor *desc) | 409 | const struct usb_endpoint_descriptor *desc) |
411 | { | 410 | { |
412 | struct m66592 *m66592 = ep->m66592; | 411 | struct m66592 *m66592 = ep->m66592; |
413 | struct m66592_pipe_info info; | 412 | struct m66592_pipe_info info; |
@@ -419,15 +418,15 @@ static int alloc_pipe_config(struct m66592_ep *ep, | |||
419 | 418 | ||
420 | BUG_ON(ep->pipenum); | 419 | BUG_ON(ep->pipenum); |
421 | 420 | ||
422 | switch(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { | 421 | switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { |
423 | case USB_ENDPOINT_XFER_BULK: | 422 | case USB_ENDPOINT_XFER_BULK: |
424 | if (m66592->bulk >= M66592_MAX_NUM_BULK) { | 423 | if (m66592->bulk >= M66592_MAX_NUM_BULK) { |
425 | if (m66592->isochronous >= M66592_MAX_NUM_ISOC) { | 424 | if (m66592->isochronous >= M66592_MAX_NUM_ISOC) { |
426 | printk(KERN_ERR "bulk pipe is insufficient\n"); | 425 | printk(KERN_ERR "bulk pipe is insufficient\n"); |
427 | return -ENODEV; | 426 | return -ENODEV; |
428 | } else { | 427 | } else { |
429 | info.pipe = M66592_BASE_PIPENUM_ISOC + | 428 | info.pipe = M66592_BASE_PIPENUM_ISOC |
430 | m66592->isochronous; | 429 | + m66592->isochronous; |
431 | counter = &m66592->isochronous; | 430 | counter = &m66592->isochronous; |
432 | } | 431 | } |
433 | } else { | 432 | } else { |
@@ -462,7 +461,7 @@ static int alloc_pipe_config(struct m66592_ep *ep, | |||
462 | ep->type = info.type; | 461 | ep->type = info.type; |
463 | 462 | ||
464 | info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 463 | info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
465 | info.maxpacket = desc->wMaxPacketSize; | 464 | info.maxpacket = le16_to_cpu(desc->wMaxPacketSize); |
466 | info.interval = desc->bInterval; | 465 | info.interval = desc->bInterval; |
467 | if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) | 466 | if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) |
468 | info.dir_in = 1; | 467 | info.dir_in = 1; |
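Editor's note: the wMaxPacketSize changes in the two hunks above matter because multi-byte fields in USB descriptors are little-endian on the wire; copying them into CPU-order variables without le16_to_cpu() silently breaks big-endian machines (on little-endian CPUs the conversion is a no-op). A minimal sketch of the conversion; the descriptor field is the standard ch9 one, the wrapper function is illustrative:

#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <asm/byteorder.h>

/* Return the endpoint's max packet size in CPU byte order. */
static inline u16 ep_maxpacket(const struct usb_endpoint_descriptor *desc)
{
	/* desc->wMaxPacketSize is __le16: always convert before using it */
	return le16_to_cpu(desc->wMaxPacketSize);
}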
@@ -525,8 +524,8 @@ static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req) | |||
525 | 524 | ||
526 | pipe_change(m66592, ep->pipenum); | 525 | pipe_change(m66592, ep->pipenum); |
527 | m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0, | 526 | m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0, |
528 | (M66592_ISEL | M66592_CURPIPE), | 527 | (M66592_ISEL | M66592_CURPIPE), |
529 | M66592_CFIFOSEL); | 528 | M66592_CFIFOSEL); |
530 | m66592_write(m66592, M66592_BCLR, ep->fifoctr); | 529 | m66592_write(m66592, M66592_BCLR, ep->fifoctr); |
531 | if (req->req.length == 0) { | 530 | if (req->req.length == 0) { |
532 | m66592_bset(m66592, M66592_BVAL, ep->fifoctr); | 531 | m66592_bset(m66592, M66592_BVAL, ep->fifoctr); |
@@ -561,8 +560,8 @@ static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req) | |||
561 | 560 | ||
562 | if (ep->pipenum == 0) { | 561 | if (ep->pipenum == 0) { |
563 | m66592_mdfy(m66592, M66592_PIPE0, | 562 | m66592_mdfy(m66592, M66592_PIPE0, |
564 | (M66592_ISEL | M66592_CURPIPE), | 563 | (M66592_ISEL | M66592_CURPIPE), |
565 | M66592_CFIFOSEL); | 564 | M66592_CFIFOSEL); |
566 | m66592_write(m66592, M66592_BCLR, ep->fifoctr); | 565 | m66592_write(m66592, M66592_BCLR, ep->fifoctr); |
567 | pipe_start(m66592, pipenum); | 566 | pipe_start(m66592, pipenum); |
568 | pipe_irq_enable(m66592, pipenum); | 567 | pipe_irq_enable(m66592, pipenum); |
@@ -572,8 +571,9 @@ static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req) | |||
572 | pipe_change(m66592, pipenum); | 571 | pipe_change(m66592, pipenum); |
573 | m66592_bset(m66592, M66592_TRENB, ep->fifosel); | 572 | m66592_bset(m66592, M66592_TRENB, ep->fifosel); |
574 | m66592_write(m66592, | 573 | m66592_write(m66592, |
575 | (req->req.length + ep->ep.maxpacket - 1) / | 574 | (req->req.length + ep->ep.maxpacket - 1) |
576 | ep->ep.maxpacket, ep->fifotrn); | 575 | / ep->ep.maxpacket, |
576 | ep->fifotrn); | ||
577 | } | 577 | } |
578 | pipe_start(m66592, pipenum); /* trigger once */ | 578 | pipe_start(m66592, pipenum); /* trigger once */ |
579 | pipe_irq_enable(m66592, pipenum); | 579 | pipe_irq_enable(m66592, pipenum); |
@@ -614,7 +614,7 @@ static void start_ep0(struct m66592_ep *ep, struct m66592_request *req) | |||
614 | static void init_controller(struct m66592 *m66592) | 614 | static void init_controller(struct m66592 *m66592) |
615 | { | 615 | { |
616 | m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND), | 616 | m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND), |
617 | M66592_PINCFG); | 617 | M66592_PINCFG); |
618 | m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ | 618 | m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ |
619 | m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG); | 619 | m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG); |
620 | 620 | ||
@@ -634,7 +634,7 @@ static void init_controller(struct m66592 *m66592) | |||
634 | 634 | ||
635 | m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); | 635 | m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); |
636 | m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, | 636 | m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, |
637 | M66592_DMA0CFG); | 637 | M66592_DMA0CFG); |
638 | } | 638 | } |
639 | 639 | ||
640 | static void disable_controller(struct m66592 *m66592) | 640 | static void disable_controller(struct m66592 *m66592) |
@@ -659,8 +659,9 @@ static void m66592_start_xclock(struct m66592 *m66592) | |||
659 | 659 | ||
660 | /*-------------------------------------------------------------------------*/ | 660 | /*-------------------------------------------------------------------------*/ |
661 | static void transfer_complete(struct m66592_ep *ep, | 661 | static void transfer_complete(struct m66592_ep *ep, |
662 | struct m66592_request *req, | 662 | struct m66592_request *req, int status) |
663 | int status) | 663 | __releases(m66592->lock) |
664 | __acquires(m66592->lock) | ||
664 | { | 665 | { |
665 | int restart = 0; | 666 | int restart = 0; |
666 | 667 | ||
@@ -680,8 +681,9 @@ static void transfer_complete(struct m66592_ep *ep, | |||
680 | if (!list_empty(&ep->queue)) | 681 | if (!list_empty(&ep->queue)) |
681 | restart = 1; | 682 | restart = 1; |
682 | 683 | ||
683 | if (likely(req->req.complete)) | 684 | spin_unlock(&ep->m66592->lock); |
684 | req->req.complete(&ep->ep, &req->req); | 685 | req->req.complete(&ep->ep, &req->req); |
686 | spin_lock(&ep->m66592->lock); | ||
685 | 687 | ||
686 | if (restart) { | 688 | if (restart) { |
687 | req = list_entry(ep->queue.next, struct m66592_request, queue); | 689 | req = list_entry(ep->queue.next, struct m66592_request, queue); |
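Editor's note: dropping the spinlock around req->req.complete() is the standard gadget-driver pattern: the completion callback may re-enter the driver (for example to queue the next request), which would deadlock if the lock were still held. The __releases()/__acquires() markers simply tell sparse that the lock imbalance inside the function is intentional. A reduced, hedged sketch of the shape, using stand-in types instead of the real m66592 structures:

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/usb_gadget.h>

/* Illustrative stand-ins for struct m66592, m66592_ep and m66592_request. */
struct my_dev     { spinlock_t lock; };
struct my_ep      { struct usb_ep ep; struct my_dev *dev; };
struct my_request { struct usb_request req; struct list_head queue; };

static void complete_request(struct my_ep *ep, struct my_request *req,
		int status)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	list_del_init(&req->queue);
	req->req.status = status;

	/* the callback may call back into the driver (e.g. queue another
	 * request), so the lock must not be held across it */
	spin_unlock(&ep->dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->dev->lock);
}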
@@ -693,7 +695,7 @@ static void transfer_complete(struct m66592_ep *ep, | |||
693 | static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req) | 695 | static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req) |
694 | { | 696 | { |
695 | int i; | 697 | int i; |
696 | volatile u16 tmp; | 698 | u16 tmp; |
697 | unsigned bufsize; | 699 | unsigned bufsize; |
698 | size_t size; | 700 | size_t size; |
699 | void *buf; | 701 | void *buf; |
@@ -731,8 +733,9 @@ static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req) | |||
731 | req->req.actual += size; | 733 | req->req.actual += size; |
732 | 734 | ||
733 | /* check transfer finish */ | 735 | /* check transfer finish */ |
734 | if ((!req->req.zero && (req->req.actual == req->req.length)) || | 736 | if ((!req->req.zero && (req->req.actual == req->req.length)) |
735 | (size % ep->ep.maxpacket) || (size == 0)) { | 737 | || (size % ep->ep.maxpacket) |
738 | || (size == 0)) { | ||
736 | disable_irq_ready(m66592, pipenum); | 739 | disable_irq_ready(m66592, pipenum); |
737 | disable_irq_empty(m66592, pipenum); | 740 | disable_irq_empty(m66592, pipenum); |
738 | } else { | 741 | } else { |
@@ -768,16 +771,19 @@ static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req) | |||
768 | /* write fifo */ | 771 | /* write fifo */ |
769 | if (req->req.buf) { | 772 | if (req->req.buf) { |
770 | m66592_write_fifo(m66592, ep->fifoaddr, buf, size); | 773 | m66592_write_fifo(m66592, ep->fifoaddr, buf, size); |
771 | if ((size == 0) || ((size % ep->ep.maxpacket) != 0) || | 774 | if ((size == 0) |
772 | ((bufsize != ep->ep.maxpacket) && (bufsize > size))) | 775 | || ((size % ep->ep.maxpacket) != 0) |
776 | || ((bufsize != ep->ep.maxpacket) | ||
777 | && (bufsize > size))) | ||
773 | m66592_bset(m66592, M66592_BVAL, ep->fifoctr); | 778 | m66592_bset(m66592, M66592_BVAL, ep->fifoctr); |
774 | } | 779 | } |
775 | 780 | ||
776 | /* update parameters */ | 781 | /* update parameters */ |
777 | req->req.actual += size; | 782 | req->req.actual += size; |
778 | /* check transfer finish */ | 783 | /* check transfer finish */ |
779 | if ((!req->req.zero && (req->req.actual == req->req.length)) || | 784 | if ((!req->req.zero && (req->req.actual == req->req.length)) |
780 | (size % ep->ep.maxpacket) || (size == 0)) { | 785 | || (size % ep->ep.maxpacket) |
786 | || (size == 0)) { | ||
781 | disable_irq_ready(m66592, pipenum); | 787 | disable_irq_ready(m66592, pipenum); |
782 | enable_irq_empty(m66592, pipenum); | 788 | enable_irq_empty(m66592, pipenum); |
783 | } else { | 789 | } else { |
@@ -821,8 +827,9 @@ static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req) | |||
821 | req->req.actual += size; | 827 | req->req.actual += size; |
822 | 828 | ||
823 | /* check transfer finish */ | 829 | /* check transfer finish */ |
824 | if ((!req->req.zero && (req->req.actual == req->req.length)) || | 830 | if ((!req->req.zero && (req->req.actual == req->req.length)) |
825 | (size % ep->ep.maxpacket) || (size == 0)) { | 831 | || (size % ep->ep.maxpacket) |
832 | || (size == 0)) { | ||
826 | pipe_stop(m66592, pipenum); | 833 | pipe_stop(m66592, pipenum); |
827 | pipe_irq_disable(m66592, pipenum); | 834 | pipe_irq_disable(m66592, pipenum); |
828 | finish = 1; | 835 | finish = 1; |
@@ -850,7 +857,7 @@ static void irq_pipe_ready(struct m66592 *m66592, u16 status, u16 enb) | |||
850 | if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) { | 857 | if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) { |
851 | m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS); | 858 | m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS); |
852 | m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE, | 859 | m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE, |
853 | M66592_CFIFOSEL); | 860 | M66592_CFIFOSEL); |
854 | 861 | ||
855 | ep = &m66592->ep[0]; | 862 | ep = &m66592->ep[0]; |
856 | req = list_entry(ep->queue.next, struct m66592_request, queue); | 863 | req = list_entry(ep->queue.next, struct m66592_request, queue); |
@@ -909,23 +916,26 @@ static void irq_pipe_empty(struct m66592 *m66592, u16 status, u16 enb) | |||
909 | } | 916 | } |
910 | 917 | ||
911 | static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) | 918 | static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) |
919 | __releases(m66592->lock) | ||
920 | __acquires(m66592->lock) | ||
912 | { | 921 | { |
913 | struct m66592_ep *ep; | 922 | struct m66592_ep *ep; |
914 | u16 pid; | 923 | u16 pid; |
915 | u16 status = 0; | 924 | u16 status = 0; |
925 | u16 w_index = le16_to_cpu(ctrl->wIndex); | ||
916 | 926 | ||
917 | switch (ctrl->bRequestType & USB_RECIP_MASK) { | 927 | switch (ctrl->bRequestType & USB_RECIP_MASK) { |
918 | case USB_RECIP_DEVICE: | 928 | case USB_RECIP_DEVICE: |
919 | status = 1; /* selfpower */ | 929 | status = 1 << USB_DEVICE_SELF_POWERED; |
920 | break; | 930 | break; |
921 | case USB_RECIP_INTERFACE: | 931 | case USB_RECIP_INTERFACE: |
922 | status = 0; | 932 | status = 0; |
923 | break; | 933 | break; |
924 | case USB_RECIP_ENDPOINT: | 934 | case USB_RECIP_ENDPOINT: |
925 | ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; | 935 | ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; |
926 | pid = control_reg_get_pid(m66592, ep->pipenum); | 936 | pid = control_reg_get_pid(m66592, ep->pipenum); |
927 | if (pid == M66592_PID_STALL) | 937 | if (pid == M66592_PID_STALL) |
928 | status = 1; | 938 | status = 1 << USB_ENDPOINT_HALT; |
929 | else | 939 | else |
930 | status = 0; | 940 | status = 0; |
931 | break; | 941 | break; |
@@ -934,11 +944,13 @@ static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) | |||
934 | return; /* exit */ | 944 | return; /* exit */ |
935 | } | 945 | } |
936 | 946 | ||
937 | *m66592->ep0_buf = status; | 947 | m66592->ep0_data = cpu_to_le16(status); |
938 | m66592->ep0_req->buf = m66592->ep0_buf; | 948 | m66592->ep0_req->buf = &m66592->ep0_data; |
939 | m66592->ep0_req->length = 2; | 949 | m66592->ep0_req->length = 2; |
940 | /* AV: what happens if we get called again before that gets through? */ | 950 | /* AV: what happens if we get called again before that gets through? */ |
951 | spin_unlock(&m66592->lock); | ||
941 | m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL); | 952 | m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL); |
953 | spin_lock(&m66592->lock); | ||
942 | } | 954 | } |
943 | 955 | ||
944 | static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) | 956 | static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) |
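Editor's note: besides the locking fix, the reworked get_status() corrects the bit positions and the byte order of the two-byte GET_STATUS reply: USB_DEVICE_SELF_POWERED for device requests, USB_ENDPOINT_HALT for halted endpoints, stored with cpu_to_le16() before being queued on ep0. A hedged sketch of how such a reply can be assembled; the ch9 constants are standard, the helper itself is illustrative:

#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <asm/byteorder.h>

/* Build the two-byte GET_STATUS payload in wire (little-endian) order. */
static __le16 build_get_status(u8 recip, int self_powered, int ep_halted)
{
	u16 status = 0;

	if (recip == USB_RECIP_DEVICE && self_powered)
		status = 1 << USB_DEVICE_SELF_POWERED;
	else if (recip == USB_RECIP_ENDPOINT && ep_halted)
		status = 1 << USB_ENDPOINT_HALT;

	return cpu_to_le16(status);	/* queued as the 2-byte ep0 IN reply */
}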
@@ -953,8 +965,9 @@ static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) | |||
953 | case USB_RECIP_ENDPOINT: { | 965 | case USB_RECIP_ENDPOINT: { |
954 | struct m66592_ep *ep; | 966 | struct m66592_ep *ep; |
955 | struct m66592_request *req; | 967 | struct m66592_request *req; |
968 | u16 w_index = le16_to_cpu(ctrl->wIndex); | ||
956 | 969 | ||
957 | ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; | 970 | ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; |
958 | pipe_stop(m66592, ep->pipenum); | 971 | pipe_stop(m66592, ep->pipenum); |
959 | control_reg_sqclr(m66592, ep->pipenum); | 972 | control_reg_sqclr(m66592, ep->pipenum); |
960 | 973 | ||
@@ -989,8 +1002,9 @@ static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) | |||
989 | break; | 1002 | break; |
990 | case USB_RECIP_ENDPOINT: { | 1003 | case USB_RECIP_ENDPOINT: { |
991 | struct m66592_ep *ep; | 1004 | struct m66592_ep *ep; |
1005 | u16 w_index = le16_to_cpu(ctrl->wIndex); | ||
992 | 1006 | ||
993 | ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; | 1007 | ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; |
994 | pipe_stall(m66592, ep->pipenum); | 1008 | pipe_stall(m66592, ep->pipenum); |
995 | 1009 | ||
996 | control_end(m66592, 1); | 1010 | control_end(m66592, 1); |
@@ -1066,14 +1080,16 @@ static void irq_device_state(struct m66592 *m66592) | |||
1066 | } | 1080 | } |
1067 | if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG) | 1081 | if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG) |
1068 | m66592_update_usb_speed(m66592); | 1082 | m66592_update_usb_speed(m66592); |
1069 | if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS) && | 1083 | if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS) |
1070 | m66592->gadget.speed == USB_SPEED_UNKNOWN) | 1084 | && m66592->gadget.speed == USB_SPEED_UNKNOWN) |
1071 | m66592_update_usb_speed(m66592); | 1085 | m66592_update_usb_speed(m66592); |
1072 | 1086 | ||
1073 | m66592->old_dvsq = dvsq; | 1087 | m66592->old_dvsq = dvsq; |
1074 | } | 1088 | } |
1075 | 1089 | ||
1076 | static void irq_control_stage(struct m66592 *m66592) | 1090 | static void irq_control_stage(struct m66592 *m66592) |
1091 | __releases(m66592->lock) | ||
1092 | __acquires(m66592->lock) | ||
1077 | { | 1093 | { |
1078 | struct usb_ctrlrequest ctrl; | 1094 | struct usb_ctrlrequest ctrl; |
1079 | u16 ctsq; | 1095 | u16 ctsq; |
@@ -1095,8 +1111,10 @@ static void irq_control_stage(struct m66592 *m66592) | |||
1095 | case M66592_CS_WRDS: | 1111 | case M66592_CS_WRDS: |
1096 | case M66592_CS_WRND: | 1112 | case M66592_CS_WRND: |
1097 | if (setup_packet(m66592, &ctrl)) { | 1113 | if (setup_packet(m66592, &ctrl)) { |
1114 | spin_unlock(&m66592->lock); | ||
1098 | if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0) | 1115 | if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0) |
1099 | pipe_stall(m66592, 0); | 1116 | pipe_stall(m66592, 0); |
1117 | spin_lock(&m66592->lock); | ||
1100 | } | 1118 | } |
1101 | break; | 1119 | break; |
1102 | case M66592_CS_RDSS: | 1120 | case M66592_CS_RDSS: |
@@ -1119,6 +1137,8 @@ static irqreturn_t m66592_irq(int irq, void *_m66592) | |||
1119 | u16 savepipe; | 1137 | u16 savepipe; |
1120 | u16 mask0; | 1138 | u16 mask0; |
1121 | 1139 | ||
1140 | spin_lock(&m66592->lock); | ||
1141 | |||
1122 | intsts0 = m66592_read(m66592, M66592_INTSTS0); | 1142 | intsts0 = m66592_read(m66592, M66592_INTSTS0); |
1123 | intenb0 = m66592_read(m66592, M66592_INTENB0); | 1143 | intenb0 = m66592_read(m66592, M66592_INTENB0); |
1124 | 1144 | ||
@@ -1134,27 +1154,27 @@ static irqreturn_t m66592_irq(int irq, void *_m66592) | |||
1134 | bempenb = m66592_read(m66592, M66592_BEMPENB); | 1154 | bempenb = m66592_read(m66592, M66592_BEMPENB); |
1135 | 1155 | ||
1136 | if (mask0 & M66592_VBINT) { | 1156 | if (mask0 & M66592_VBINT) { |
1137 | m66592_write(m66592, (u16)~M66592_VBINT, | 1157 | m66592_write(m66592, 0xffff & ~M66592_VBINT, |
1138 | M66592_INTSTS0); | 1158 | M66592_INTSTS0); |
1139 | m66592_start_xclock(m66592); | 1159 | m66592_start_xclock(m66592); |
1140 | 1160 | ||
1141 | /* start vbus sampling */ | 1161 | /* start vbus sampling */ |
1142 | m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0) | 1162 | m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0) |
1143 | & M66592_VBSTS; | 1163 | & M66592_VBSTS; |
1144 | m66592->scount = M66592_MAX_SAMPLING; | 1164 | m66592->scount = M66592_MAX_SAMPLING; |
1145 | 1165 | ||
1146 | mod_timer(&m66592->timer, | 1166 | mod_timer(&m66592->timer, |
1147 | jiffies + msecs_to_jiffies(50)); | 1167 | jiffies + msecs_to_jiffies(50)); |
1148 | } | 1168 | } |
1149 | if (intsts0 & M66592_DVSQ) | 1169 | if (intsts0 & M66592_DVSQ) |
1150 | irq_device_state(m66592); | 1170 | irq_device_state(m66592); |
1151 | 1171 | ||
1152 | if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE) && | 1172 | if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE) |
1153 | (brdysts & brdyenb)) { | 1173 | && (brdysts & brdyenb)) { |
1154 | irq_pipe_ready(m66592, brdysts, brdyenb); | 1174 | irq_pipe_ready(m66592, brdysts, brdyenb); |
1155 | } | 1175 | } |
1156 | if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE) && | 1176 | if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE) |
1157 | (bempsts & bempenb)) { | 1177 | && (bempsts & bempenb)) { |
1158 | irq_pipe_empty(m66592, bempsts, bempenb); | 1178 | irq_pipe_empty(m66592, bempsts, bempenb); |
1159 | } | 1179 | } |
1160 | 1180 | ||
@@ -1164,6 +1184,7 @@ static irqreturn_t m66592_irq(int irq, void *_m66592) | |||
1164 | 1184 | ||
1165 | m66592_write(m66592, savepipe, M66592_CFIFOSEL); | 1185 | m66592_write(m66592, savepipe, M66592_CFIFOSEL); |
1166 | 1186 | ||
1187 | spin_unlock(&m66592->lock); | ||
1167 | return IRQ_HANDLED; | 1188 | return IRQ_HANDLED; |
1168 | } | 1189 | } |
1169 | 1190 | ||
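Editor's note: taking m66592->lock inside the interrupt handler closes the race between the IRQ path and the process-context paths (ep queue/dequeue and the vbus sampling timer), which keep using spin_lock_irqsave(). The handler itself runs in hard-IRQ context, so the plain spin_lock()/spin_unlock() pair is sufficient there. A compressed, hedged sketch of the handler shape; the structure, register offsets and dispatch helper are illustrative (the offsets mirror the M66592 register map further down):

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Illustrative device structure and register offsets. */
struct my_udc {
	spinlock_t	lock;
	void __iomem	*reg;
};
#define MY_INTENB0	0x30
#define MY_INTSTS0	0x40

static irqreturn_t my_udc_irq(int irq, void *dev_id)
{
	struct my_udc *udc = dev_id;
	u16 status, enabled;

	/* hard-IRQ context: plain spin_lock() here, while timer and
	 * ep_queue paths keep using spin_lock_irqsave() */
	spin_lock(&udc->lock);

	status  = readw(udc->reg + MY_INTSTS0);
	enabled = readw(udc->reg + MY_INTENB0);

	if (status & enabled) {
		/* acknowledge and dispatch the pending events here */
	}

	spin_unlock(&udc->lock);
	return IRQ_HANDLED;
}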
@@ -1191,13 +1212,13 @@ static void m66592_timer(unsigned long _m66592) | |||
1191 | m66592_usb_disconnect(m66592); | 1212 | m66592_usb_disconnect(m66592); |
1192 | } else { | 1213 | } else { |
1193 | mod_timer(&m66592->timer, | 1214 | mod_timer(&m66592->timer, |
1194 | jiffies + msecs_to_jiffies(50)); | 1215 | jiffies + msecs_to_jiffies(50)); |
1195 | } | 1216 | } |
1196 | } else { | 1217 | } else { |
1197 | m66592->scount = M66592_MAX_SAMPLING; | 1218 | m66592->scount = M66592_MAX_SAMPLING; |
1198 | m66592->old_vbus = tmp; | 1219 | m66592->old_vbus = tmp; |
1199 | mod_timer(&m66592->timer, | 1220 | mod_timer(&m66592->timer, |
1200 | jiffies + msecs_to_jiffies(50)); | 1221 | jiffies + msecs_to_jiffies(50)); |
1201 | } | 1222 | } |
1202 | } | 1223 | } |
1203 | spin_unlock_irqrestore(&m66592->lock, flags); | 1224 | spin_unlock_irqrestore(&m66592->lock, flags); |
@@ -1335,11 +1356,6 @@ out: | |||
1335 | return ret; | 1356 | return ret; |
1336 | } | 1357 | } |
1337 | 1358 | ||
1338 | static int m66592_fifo_status(struct usb_ep *_ep) | ||
1339 | { | ||
1340 | return -EOPNOTSUPP; | ||
1341 | } | ||
1342 | |||
1343 | static void m66592_fifo_flush(struct usb_ep *_ep) | 1359 | static void m66592_fifo_flush(struct usb_ep *_ep) |
1344 | { | 1360 | { |
1345 | struct m66592_ep *ep; | 1361 | struct m66592_ep *ep; |
@@ -1365,7 +1381,6 @@ static struct usb_ep_ops m66592_ep_ops = { | |||
1365 | .dequeue = m66592_dequeue, | 1381 | .dequeue = m66592_dequeue, |
1366 | 1382 | ||
1367 | .set_halt = m66592_set_halt, | 1383 | .set_halt = m66592_set_halt, |
1368 | .fifo_status = m66592_fifo_status, | ||
1369 | .fifo_flush = m66592_fifo_flush, | 1384 | .fifo_flush = m66592_fifo_flush, |
1370 | }; | 1385 | }; |
1371 | 1386 | ||
@@ -1377,11 +1392,10 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver) | |||
1377 | struct m66592 *m66592 = the_controller; | 1392 | struct m66592 *m66592 = the_controller; |
1378 | int retval; | 1393 | int retval; |
1379 | 1394 | ||
1380 | if (!driver || | 1395 | if (!driver |
1381 | driver->speed != USB_SPEED_HIGH || | 1396 | || driver->speed != USB_SPEED_HIGH |
1382 | !driver->bind || | 1397 | || !driver->bind |
1383 | !driver->unbind || | 1398 | || !driver->setup) |
1384 | !driver->setup) | ||
1385 | return -EINVAL; | 1399 | return -EINVAL; |
1386 | if (!m66592) | 1400 | if (!m66592) |
1387 | return -ENODEV; | 1401 | return -ENODEV; |
@@ -1413,8 +1427,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver) | |||
1413 | m66592->old_vbus = m66592_read(m66592, | 1427 | m66592->old_vbus = m66592_read(m66592, |
1414 | M66592_INTSTS0) & M66592_VBSTS; | 1428 | M66592_INTSTS0) & M66592_VBSTS; |
1415 | m66592->scount = M66592_MAX_SAMPLING; | 1429 | m66592->scount = M66592_MAX_SAMPLING; |
1416 | mod_timer(&m66592->timer, | 1430 | mod_timer(&m66592->timer, jiffies + msecs_to_jiffies(50)); |
1417 | jiffies + msecs_to_jiffies(50)); | ||
1418 | } | 1431 | } |
1419 | 1432 | ||
1420 | return 0; | 1433 | return 0; |
@@ -1432,6 +1445,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1432 | struct m66592 *m66592 = the_controller; | 1445 | struct m66592 *m66592 = the_controller; |
1433 | unsigned long flags; | 1446 | unsigned long flags; |
1434 | 1447 | ||
1448 | if (driver != m66592->driver || !driver->unbind) | ||
1449 | return -EINVAL; | ||
1450 | |||
1435 | spin_lock_irqsave(&m66592->lock, flags); | 1451 | spin_lock_irqsave(&m66592->lock, flags); |
1436 | if (m66592->gadget.speed != USB_SPEED_UNKNOWN) | 1452 | if (m66592->gadget.speed != USB_SPEED_UNKNOWN) |
1437 | m66592_usb_disconnect(m66592); | 1453 | m66592_usb_disconnect(m66592); |
@@ -1461,46 +1477,35 @@ static struct usb_gadget_ops m66592_gadget_ops = { | |||
1461 | .get_frame = m66592_get_frame, | 1477 | .get_frame = m66592_get_frame, |
1462 | }; | 1478 | }; |
1463 | 1479 | ||
1464 | #if defined(CONFIG_PM) | 1480 | static int __exit m66592_remove(struct platform_device *pdev) |
1465 | static int m66592_suspend(struct platform_device *pdev, pm_message_t state) | ||
1466 | { | ||
1467 | pdev->dev.power.power_state = state; | ||
1468 | return 0; | ||
1469 | } | ||
1470 | |||
1471 | static int m66592_resume(struct platform_device *pdev) | ||
1472 | { | ||
1473 | pdev->dev.power.power_state = PMSG_ON; | ||
1474 | return 0; | ||
1475 | } | ||
1476 | #else /* if defined(CONFIG_PM) */ | ||
1477 | #define m66592_suspend NULL | ||
1478 | #define m66592_resume NULL | ||
1479 | #endif | ||
1480 | |||
1481 | static int __init_or_module m66592_remove(struct platform_device *pdev) | ||
1482 | { | 1481 | { |
1483 | struct m66592 *m66592 = dev_get_drvdata(&pdev->dev); | 1482 | struct m66592 *m66592 = dev_get_drvdata(&pdev->dev); |
1484 | 1483 | ||
1485 | del_timer_sync(&m66592->timer); | 1484 | del_timer_sync(&m66592->timer); |
1486 | iounmap(m66592->reg); | 1485 | iounmap(m66592->reg); |
1487 | free_irq(platform_get_irq(pdev, 0), m66592); | 1486 | free_irq(platform_get_irq(pdev, 0), m66592); |
1487 | m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); | ||
1488 | kfree(m66592); | 1488 | kfree(m66592); |
1489 | return 0; | 1489 | return 0; |
1490 | } | 1490 | } |
1491 | 1491 | ||
1492 | static void nop_completion(struct usb_ep *ep, struct usb_request *r) | ||
1493 | { | ||
1494 | } | ||
1495 | |||
1492 | #define resource_len(r) (((r)->end - (r)->start) + 1) | 1496 | #define resource_len(r) (((r)->end - (r)->start) + 1) |
1497 | |||
1493 | static int __init m66592_probe(struct platform_device *pdev) | 1498 | static int __init m66592_probe(struct platform_device *pdev) |
1494 | { | 1499 | { |
1495 | struct resource *res = NULL; | 1500 | struct resource *res; |
1496 | int irq = -1; | 1501 | int irq; |
1497 | void __iomem *reg = NULL; | 1502 | void __iomem *reg = NULL; |
1498 | struct m66592 *m66592 = NULL; | 1503 | struct m66592 *m66592 = NULL; |
1499 | int ret = 0; | 1504 | int ret = 0; |
1500 | int i; | 1505 | int i; |
1501 | 1506 | ||
1502 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 1507 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
1503 | (char *)udc_name); | 1508 | (char *)udc_name); |
1504 | if (!res) { | 1509 | if (!res) { |
1505 | ret = -ENODEV; | 1510 | ret = -ENODEV; |
1506 | printk(KERN_ERR "platform_get_resource_byname error.\n"); | 1511 | printk(KERN_ERR "platform_get_resource_byname error.\n"); |
@@ -1548,7 +1553,7 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
1548 | m66592->bi_bufnum = M66592_BASE_BUFNUM; | 1553 | m66592->bi_bufnum = M66592_BASE_BUFNUM; |
1549 | 1554 | ||
1550 | ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED, | 1555 | ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED, |
1551 | udc_name, m66592); | 1556 | udc_name, m66592); |
1552 | if (ret < 0) { | 1557 | if (ret < 0) { |
1553 | printk(KERN_ERR "request_irq error (%d)\n", ret); | 1558 | printk(KERN_ERR "request_irq error (%d)\n", ret); |
1554 | goto clean_up; | 1559 | goto clean_up; |
@@ -1563,7 +1568,7 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
1563 | if (i != 0) { | 1568 | if (i != 0) { |
1564 | INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list); | 1569 | INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list); |
1565 | list_add_tail(&m66592->ep[i].ep.ep_list, | 1570 | list_add_tail(&m66592->ep[i].ep.ep_list, |
1566 | &m66592->gadget.ep_list); | 1571 | &m66592->gadget.ep_list); |
1567 | } | 1572 | } |
1568 | ep->m66592 = m66592; | 1573 | ep->m66592 = m66592; |
1569 | INIT_LIST_HEAD(&ep->queue); | 1574 | INIT_LIST_HEAD(&ep->queue); |
@@ -1583,20 +1588,18 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
1583 | 1588 | ||
1584 | the_controller = m66592; | 1589 | the_controller = m66592; |
1585 | 1590 | ||
1586 | /* AV: leaks */ | ||
1587 | m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); | 1591 | m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); |
1588 | if (m66592->ep0_req == NULL) | 1592 | if (m66592->ep0_req == NULL) |
1589 | goto clean_up; | 1593 | goto clean_up2; |
1590 | /* AV: leaks, and do we really need it separately allocated? */ | 1594 | m66592->ep0_req->complete = nop_completion; |
1591 | m66592->ep0_buf = kzalloc(2, GFP_KERNEL); | ||
1592 | if (m66592->ep0_buf == NULL) | ||
1593 | goto clean_up; | ||
1594 | 1595 | ||
1595 | init_controller(m66592); | 1596 | init_controller(m66592); |
1596 | 1597 | ||
1597 | printk("driver %s, %s\n", udc_name, DRIVER_VERSION); | 1598 | dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); |
1598 | return 0; | 1599 | return 0; |
1599 | 1600 | ||
1601 | clean_up2: | ||
1602 | free_irq(irq, m66592); | ||
1600 | clean_up: | 1603 | clean_up: |
1601 | if (m66592) { | 1604 | if (m66592) { |
1602 | if (m66592->ep0_req) | 1605 | if (m66592->ep0_req) |
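Editor's note: the new clean_up2 label (together with freeing ep0_req in m66592_remove()) closes the leaks flagged by the removed "AV: leaks" comments, and it keeps the probe error path unwinding in strict reverse order of acquisition. A generic, hedged sketch of that goto-unwind idiom; every name below is a stand-in, not the real driver code:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static irqreturn_t stub_irq(int irq, void *dev_id) { return IRQ_HANDLED; }

static int __init my_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	void __iomem *reg;
	void *ep0_req;
	int ret;

	reg = ioremap(0x10000000, 0x100);	/* step 1 (address illustrative) */
	if (!reg)
		return -ENOMEM;

	ret = request_irq(irq, stub_irq, IRQF_SHARED, "my_udc", pdev);	/* step 2 */
	if (ret < 0)
		goto clean_up;

	ep0_req = kzalloc(16, GFP_KERNEL);	/* step 3 */
	if (!ep0_req) {
		ret = -ENOMEM;
		goto clean_up2;			/* undo step 2 first */
	}

	return 0;	/* a real probe would stash reg/ep0_req in drvdata */

clean_up2:
	free_irq(irq, pdev);			/* reverse order: step 2 ... */
clean_up:
	iounmap(reg);				/* ... then step 1 */
	return ret;
}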
@@ -1611,10 +1614,7 @@ clean_up: | |||
1611 | 1614 | ||
1612 | /*-------------------------------------------------------------------------*/ | 1615 | /*-------------------------------------------------------------------------*/ |
1613 | static struct platform_driver m66592_driver = { | 1616 | static struct platform_driver m66592_driver = { |
1614 | .probe = m66592_probe, | 1617 | .remove = __exit_p(m66592_remove), |
1615 | .remove = m66592_remove, | ||
1616 | .suspend = m66592_suspend, | ||
1617 | .resume = m66592_resume, | ||
1618 | .driver = { | 1618 | .driver = { |
1619 | .name = (char *) udc_name, | 1619 | .name = (char *) udc_name, |
1620 | }, | 1620 | }, |
@@ -1622,7 +1622,7 @@ static struct platform_driver m66592_driver = { | |||
1622 | 1622 | ||
1623 | static int __init m66592_udc_init(void) | 1623 | static int __init m66592_udc_init(void) |
1624 | { | 1624 | { |
1625 | return platform_driver_register(&m66592_driver); | 1625 | return platform_driver_probe(&m66592_driver, m66592_probe); |
1626 | } | 1626 | } |
1627 | module_init(m66592_udc_init); | 1627 | module_init(m66592_udc_init); |
1628 | 1628 | ||
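Editor's note: switching from platform_driver_register() to platform_driver_probe() goes together with dropping .probe from the platform_driver structure (and the no-op suspend/resume handlers removed earlier): the probe routine is passed once at registration time, is never needed for later hot-plug, and can therefore stay in __init memory, while .remove is wrapped in __exit_p(). A hedged sketch of the pattern with illustrative names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init my_udc_probe(struct platform_device *pdev)
{
	/* one-shot probe: only devices already registered are bound */
	return 0;
}

static int __exit my_udc_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver my_udc_driver = {
	/* no .probe here -- it is passed to platform_driver_probe() */
	.remove	= __exit_p(my_udc_remove),
	.driver	= {
		.name = "my_udc",
	},
};

static int __init my_udc_init(void)
{
	return platform_driver_probe(&my_udc_driver, my_udc_probe);
}
module_init(my_udc_init);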
@@ -1631,4 +1631,3 @@ static void __exit m66592_udc_cleanup(void) | |||
1631 | platform_driver_unregister(&m66592_driver); | 1631 | platform_driver_unregister(&m66592_driver); |
1632 | } | 1632 | } |
1633 | module_exit(m66592_udc_cleanup); | 1633 | module_exit(m66592_udc_cleanup); |
1634 | |||
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h index 26b54f8b8945..bfa0c645f229 100644 --- a/drivers/usb/gadget/m66592-udc.h +++ b/drivers/usb/gadget/m66592-udc.h | |||
@@ -24,73 +24,73 @@ | |||
24 | #define __M66592_UDC_H__ | 24 | #define __M66592_UDC_H__ |
25 | 25 | ||
26 | #define M66592_SYSCFG 0x00 | 26 | #define M66592_SYSCFG 0x00 |
27 | #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ | 27 | #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ |
28 | #define M66592_XTAL48 0x8000 /* 48MHz */ | 28 | #define M66592_XTAL48 0x8000 /* 48MHz */ |
29 | #define M66592_XTAL24 0x4000 /* 24MHz */ | 29 | #define M66592_XTAL24 0x4000 /* 24MHz */ |
30 | #define M66592_XTAL12 0x0000 /* 12MHz */ | 30 | #define M66592_XTAL12 0x0000 /* 12MHz */ |
31 | #define M66592_XCKE 0x2000 /* b13: External clock enable */ | 31 | #define M66592_XCKE 0x2000 /* b13: External clock enable */ |
32 | #define M66592_RCKE 0x1000 /* b12: Register clock enable */ | 32 | #define M66592_RCKE 0x1000 /* b12: Register clock enable */ |
33 | #define M66592_PLLC 0x0800 /* b11: PLL control */ | 33 | #define M66592_PLLC 0x0800 /* b11: PLL control */ |
34 | #define M66592_SCKE 0x0400 /* b10: USB clock enable */ | 34 | #define M66592_SCKE 0x0400 /* b10: USB clock enable */ |
35 | #define M66592_ATCKM 0x0100 /* b8: Automatic supply functional enable */ | 35 | #define M66592_ATCKM 0x0100 /* b8: Automatic clock supply */ |
36 | #define M66592_HSE 0x0080 /* b7: Hi-speed enable */ | 36 | #define M66592_HSE 0x0080 /* b7: Hi-speed enable */ |
37 | #define M66592_DCFM 0x0040 /* b6: Controller function select */ | 37 | #define M66592_DCFM 0x0040 /* b6: Controller function select */ |
38 | #define M66592_DMRPD 0x0020 /* b5: D- pull down control */ | 38 | #define M66592_DMRPD 0x0020 /* b5: D- pull down control */ |
39 | #define M66592_DPRPU 0x0010 /* b4: D+ pull up control */ | 39 | #define M66592_DPRPU 0x0010 /* b4: D+ pull up control */ |
40 | #define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */ | 40 | #define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */ |
41 | #define M66592_PCUT 0x0002 /* b1: Low power sleep enable */ | 41 | #define M66592_PCUT 0x0002 /* b1: Low power sleep enable */ |
42 | #define M66592_USBE 0x0001 /* b0: USB module operation enable */ | 42 | #define M66592_USBE 0x0001 /* b0: USB module operation enable */ |
43 | 43 | ||
44 | #define M66592_SYSSTS 0x02 | 44 | #define M66592_SYSSTS 0x02 |
45 | #define M66592_LNST 0x0003 /* b1-0: D+, D- line status */ | 45 | #define M66592_LNST 0x0003 /* b1-0: D+, D- line status */ |
46 | #define M66592_SE1 0x0003 /* SE1 */ | 46 | #define M66592_SE1 0x0003 /* SE1 */ |
47 | #define M66592_KSTS 0x0002 /* K State */ | 47 | #define M66592_KSTS 0x0002 /* K State */ |
48 | #define M66592_JSTS 0x0001 /* J State */ | 48 | #define M66592_JSTS 0x0001 /* J State */ |
49 | #define M66592_SE0 0x0000 /* SE0 */ | 49 | #define M66592_SE0 0x0000 /* SE0 */ |
50 | 50 | ||
51 | #define M66592_DVSTCTR 0x04 | 51 | #define M66592_DVSTCTR 0x04 |
52 | #define M66592_WKUP 0x0100 /* b8: Remote wakeup */ | 52 | #define M66592_WKUP 0x0100 /* b8: Remote wakeup */ |
53 | #define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */ | 53 | #define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */ |
54 | #define M66592_USBRST 0x0040 /* b6: USB reset enable */ | 54 | #define M66592_USBRST 0x0040 /* b6: USB reset enable */ |
55 | #define M66592_RESUME 0x0020 /* b5: Resume enable */ | 55 | #define M66592_RESUME 0x0020 /* b5: Resume enable */ |
56 | #define M66592_UACT 0x0010 /* b4: USB bus enable */ | 56 | #define M66592_UACT 0x0010 /* b4: USB bus enable */ |
57 | #define M66592_RHST 0x0003 /* b1-0: Reset handshake status */ | 57 | #define M66592_RHST 0x0003 /* b1-0: Reset handshake status */ |
58 | #define M66592_HSMODE 0x0003 /* Hi-Speed mode */ | 58 | #define M66592_HSMODE 0x0003 /* Hi-Speed mode */ |
59 | #define M66592_FSMODE 0x0002 /* Full-Speed mode */ | 59 | #define M66592_FSMODE 0x0002 /* Full-Speed mode */ |
60 | #define M66592_HSPROC 0x0001 /* HS handshake is processing */ | 60 | #define M66592_HSPROC 0x0001 /* HS handshake is processing */ |
61 | 61 | ||
62 | #define M66592_TESTMODE 0x06 | 62 | #define M66592_TESTMODE 0x06 |
63 | #define M66592_UTST 0x000F /* b4-0: Test select */ | 63 | #define M66592_UTST 0x000F /* b4-0: Test select */ |
64 | #define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */ | 64 | #define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */ |
65 | #define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ | 65 | #define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ |
66 | #define M66592_H_TST_K 0x000A /* HOST TEST K */ | 66 | #define M66592_H_TST_K 0x000A /* HOST TEST K */ |
67 | #define M66592_H_TST_J 0x0009 /* HOST TEST J */ | 67 | #define M66592_H_TST_J 0x0009 /* HOST TEST J */ |
68 | #define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */ | 68 | #define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */ |
69 | #define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */ | 69 | #define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */ |
70 | #define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ | 70 | #define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ |
71 | #define M66592_P_TST_K 0x0002 /* PERI TEST K */ | 71 | #define M66592_P_TST_K 0x0002 /* PERI TEST K */ |
72 | #define M66592_P_TST_J 0x0001 /* PERI TEST J */ | 72 | #define M66592_P_TST_J 0x0001 /* PERI TEST J */ |
73 | #define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */ | 73 | #define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */ |
74 | 74 | ||
75 | #define M66592_PINCFG 0x0A | 75 | #define M66592_PINCFG 0x0A |
76 | #define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */ | 76 | #define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */ |
77 | #define M66592_BIGEND 0x0100 /* b8: Big endian mode */ | 77 | #define M66592_BIGEND 0x0100 /* b8: Big endian mode */ |
78 | 78 | ||
79 | #define M66592_DMA0CFG 0x0C | 79 | #define M66592_DMA0CFG 0x0C |
80 | #define M66592_DMA1CFG 0x0E | 80 | #define M66592_DMA1CFG 0x0E |
81 | #define M66592_DREQA 0x4000 /* b14: Dreq active select */ | 81 | #define M66592_DREQA 0x4000 /* b14: Dreq active select */ |
82 | #define M66592_BURST 0x2000 /* b13: Burst mode */ | 82 | #define M66592_BURST 0x2000 /* b13: Burst mode */ |
83 | #define M66592_DACKA 0x0400 /* b10: Dack active select */ | 83 | #define M66592_DACKA 0x0400 /* b10: Dack active select */ |
84 | #define M66592_DFORM 0x0380 /* b9-7: DMA mode select */ | 84 | #define M66592_DFORM 0x0380 /* b9-7: DMA mode select */ |
85 | #define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ | 85 | #define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ |
86 | #define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ | 86 | #define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ |
87 | #define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ | 87 | #define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ |
88 | #define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ | 88 | #define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ |
89 | #define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */ | 89 | #define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */ |
90 | #define M66592_DENDA 0x0040 /* b6: Dend active select */ | 90 | #define M66592_DENDA 0x0040 /* b6: Dend active select */ |
91 | #define M66592_PKTM 0x0020 /* b5: Packet mode */ | 91 | #define M66592_PKTM 0x0020 /* b5: Packet mode */ |
92 | #define M66592_DENDE 0x0010 /* b4: Dend enable */ | 92 | #define M66592_DENDE 0x0010 /* b4: Dend enable */ |
93 | #define M66592_OBUS 0x0004 /* b2: OUTbus mode */ | 93 | #define M66592_OBUS 0x0004 /* b2: OUTbus mode */ |
94 | 94 | ||
95 | #define M66592_CFIFO 0x10 | 95 | #define M66592_CFIFO 0x10 |
96 | #define M66592_D0FIFO 0x14 | 96 | #define M66592_D0FIFO 0x14 |
@@ -99,300 +99,300 @@ | |||
99 | #define M66592_CFIFOSEL 0x1E | 99 | #define M66592_CFIFOSEL 0x1E |
100 | #define M66592_D0FIFOSEL 0x24 | 100 | #define M66592_D0FIFOSEL 0x24 |
101 | #define M66592_D1FIFOSEL 0x2A | 101 | #define M66592_D1FIFOSEL 0x2A |
102 | #define M66592_RCNT 0x8000 /* b15: Read count mode */ | 102 | #define M66592_RCNT 0x8000 /* b15: Read count mode */ |
103 | #define M66592_REW 0x4000 /* b14: Buffer rewind */ | 103 | #define M66592_REW 0x4000 /* b14: Buffer rewind */ |
104 | #define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */ | 104 | #define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */ |
105 | #define M66592_DREQE 0x1000 /* b12: DREQ output enable */ | 105 | #define M66592_DREQE 0x1000 /* b12: DREQ output enable */ |
106 | #define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO access */ | 106 | #define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO */ |
107 | #define M66592_MBW_8 0x0000 /* 8bit */ | 107 | #define M66592_MBW_8 0x0000 /* 8bit */ |
108 | #define M66592_MBW_16 0x0400 /* 16bit */ | 108 | #define M66592_MBW_16 0x0400 /* 16bit */ |
109 | #define M66592_TRENB 0x0200 /* b9: Transaction counter enable */ | 109 | #define M66592_TRENB 0x0200 /* b9: Transaction counter enable */ |
110 | #define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */ | 110 | #define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */ |
111 | #define M66592_DEZPM 0x0080 /* b7: Zero-length packet additional mode */ | 111 | #define M66592_DEZPM 0x0080 /* b7: Zero-length packet mode */ |
112 | #define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */ | 112 | #define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */ |
113 | #define M66592_CURPIPE 0x0007 /* b2-0: PIPE select */ | 113 | #define M66592_CURPIPE 0x0007 /* b2-0: PIPE select */ |
114 | 114 | ||
115 | #define M66592_CFIFOCTR 0x20 | 115 | #define M66592_CFIFOCTR 0x20 |
116 | #define M66592_D0FIFOCTR 0x26 | 116 | #define M66592_D0FIFOCTR 0x26 |
117 | #define M66592_D1FIFOCTR 0x2c | 117 | #define M66592_D1FIFOCTR 0x2c |
118 | #define M66592_BVAL 0x8000 /* b15: Buffer valid flag */ | 118 | #define M66592_BVAL 0x8000 /* b15: Buffer valid flag */ |
119 | #define M66592_BCLR 0x4000 /* b14: Buffer clear */ | 119 | #define M66592_BCLR 0x4000 /* b14: Buffer clear */ |
120 | #define M66592_FRDY 0x2000 /* b13: FIFO ready */ | 120 | #define M66592_FRDY 0x2000 /* b13: FIFO ready */ |
121 | #define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */ | 121 | #define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */ |
122 | 122 | ||
123 | #define M66592_CFIFOSIE 0x22 | 123 | #define M66592_CFIFOSIE 0x22 |
124 | #define M66592_TGL 0x8000 /* b15: Buffer toggle */ | 124 | #define M66592_TGL 0x8000 /* b15: Buffer toggle */ |
125 | #define M66592_SCLR 0x4000 /* b14: Buffer clear */ | 125 | #define M66592_SCLR 0x4000 /* b14: Buffer clear */ |
126 | #define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */ | 126 | #define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */ |
127 | 127 | ||
128 | #define M66592_D0FIFOTRN 0x28 | 128 | #define M66592_D0FIFOTRN 0x28 |
129 | #define M66592_D1FIFOTRN 0x2E | 129 | #define M66592_D1FIFOTRN 0x2E |
130 | #define M66592_TRNCNT 0xFFFF /* b15-0: Transaction counter */ | 130 | #define M66592_TRNCNT 0xFFFF /* b15-0: Transaction counter */ |
131 | 131 | ||
132 | #define M66592_INTENB0 0x30 | 132 | #define M66592_INTENB0 0x30 |
133 | #define M66592_VBSE 0x8000 /* b15: VBUS interrupt */ | 133 | #define M66592_VBSE 0x8000 /* b15: VBUS interrupt */ |
134 | #define M66592_RSME 0x4000 /* b14: Resume interrupt */ | 134 | #define M66592_RSME 0x4000 /* b14: Resume interrupt */ |
135 | #define M66592_SOFE 0x2000 /* b13: Frame update interrupt */ | 135 | #define M66592_SOFE 0x2000 /* b13: Frame update interrupt */ |
136 | #define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */ | 136 | #define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */ |
137 | #define M66592_CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ | 137 | #define M66592_CTRE 0x0800 /* b11: Control transfer stage transition irq */ |
138 | #define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */ | 138 | #define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */ |
139 | #define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */ | 139 | #define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */ |
140 | #define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */ | 140 | #define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */ |
141 | #define M66592_URST 0x0080 /* b7: USB reset detected interrupt */ | 141 | #define M66592_URST 0x0080 /* b7: USB reset detected interrupt */ |
142 | #define M66592_SADR 0x0040 /* b6: Set address executed interrupt */ | 142 | #define M66592_SADR 0x0040 /* b6: Set address executed interrupt */ |
143 | #define M66592_SCFG 0x0020 /* b5: Set configuration executed interrupt */ | 143 | #define M66592_SCFG 0x0020 /* b5: Set configuration executed interrupt */ |
144 | #define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */ | 144 | #define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */ |
145 | #define M66592_WDST 0x0008 /* b3: Control write data stage completed interrupt */ | 145 | #define M66592_WDST 0x0008 /* b3: Control write data stage completed irq */ |
146 | #define M66592_RDST 0x0004 /* b2: Control read data stage completed interrupt */ | 146 | #define M66592_RDST 0x0004 /* b2: Control read data stage completed irq */ |
147 | #define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */ | 147 | #define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */ |
148 | #define M66592_SERR 0x0001 /* b0: Sequence error interrupt */ | 148 | #define M66592_SERR 0x0001 /* b0: Sequence error interrupt */ |
149 | 149 | ||
150 | #define M66592_INTENB1 0x32 | 150 | #define M66592_INTENB1 0x32 |
151 | #define M66592_BCHGE 0x4000 /* b14: USB us chenge interrupt */ | 151 | #define M66592_BCHGE 0x4000 /* b14: USB us chenge interrupt */ |
152 | #define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */ | 152 | #define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */ |
153 | #define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ | 153 | #define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ |
154 | #define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */ | 154 | #define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */ |
155 | #define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */ | 155 | #define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */ |
156 | #define M66592_INTL 0x0002 /* b1: Interrupt sense select */ | 156 | #define M66592_INTL 0x0002 /* b1: Interrupt sense select */ |
157 | #define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */ | 157 | #define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */ |
158 | 158 | ||
159 | #define M66592_BRDYENB 0x36 | 159 | #define M66592_BRDYENB 0x36 |
160 | #define M66592_BRDYSTS 0x46 | 160 | #define M66592_BRDYSTS 0x46 |
161 | #define M66592_BRDY7 0x0080 /* b7: PIPE7 */ | 161 | #define M66592_BRDY7 0x0080 /* b7: PIPE7 */ |
162 | #define M66592_BRDY6 0x0040 /* b6: PIPE6 */ | 162 | #define M66592_BRDY6 0x0040 /* b6: PIPE6 */ |
163 | #define M66592_BRDY5 0x0020 /* b5: PIPE5 */ | 163 | #define M66592_BRDY5 0x0020 /* b5: PIPE5 */ |
164 | #define M66592_BRDY4 0x0010 /* b4: PIPE4 */ | 164 | #define M66592_BRDY4 0x0010 /* b4: PIPE4 */ |
165 | #define M66592_BRDY3 0x0008 /* b3: PIPE3 */ | 165 | #define M66592_BRDY3 0x0008 /* b3: PIPE3 */ |
166 | #define M66592_BRDY2 0x0004 /* b2: PIPE2 */ | 166 | #define M66592_BRDY2 0x0004 /* b2: PIPE2 */ |
167 | #define M66592_BRDY1 0x0002 /* b1: PIPE1 */ | 167 | #define M66592_BRDY1 0x0002 /* b1: PIPE1 */ |
168 | #define M66592_BRDY0 0x0001 /* b1: PIPE0 */ | 168 | #define M66592_BRDY0 0x0001 /* b1: PIPE0 */ |
169 | 169 | ||
170 | #define M66592_NRDYENB 0x38 | 170 | #define M66592_NRDYENB 0x38 |
171 | #define M66592_NRDYSTS 0x48 | 171 | #define M66592_NRDYSTS 0x48 |
172 | #define M66592_NRDY7 0x0080 /* b7: PIPE7 */ | 172 | #define M66592_NRDY7 0x0080 /* b7: PIPE7 */ |
173 | #define M66592_NRDY6 0x0040 /* b6: PIPE6 */ | 173 | #define M66592_NRDY6 0x0040 /* b6: PIPE6 */ |
174 | #define M66592_NRDY5 0x0020 /* b5: PIPE5 */ | 174 | #define M66592_NRDY5 0x0020 /* b5: PIPE5 */ |
175 | #define M66592_NRDY4 0x0010 /* b4: PIPE4 */ | 175 | #define M66592_NRDY4 0x0010 /* b4: PIPE4 */ |
176 | #define M66592_NRDY3 0x0008 /* b3: PIPE3 */ | 176 | #define M66592_NRDY3 0x0008 /* b3: PIPE3 */ |
177 | #define M66592_NRDY2 0x0004 /* b2: PIPE2 */ | 177 | #define M66592_NRDY2 0x0004 /* b2: PIPE2 */ |
178 | #define M66592_NRDY1 0x0002 /* b1: PIPE1 */ | 178 | #define M66592_NRDY1 0x0002 /* b1: PIPE1 */ |
179 | #define M66592_NRDY0 0x0001 /* b1: PIPE0 */ | 179 | #define M66592_NRDY0 0x0001 /* b1: PIPE0 */ |
180 | 180 | ||
181 | #define M66592_BEMPENB 0x3A | 181 | #define M66592_BEMPENB 0x3A |
182 | #define M66592_BEMPSTS 0x4A | 182 | #define M66592_BEMPSTS 0x4A |
183 | #define M66592_BEMP7 0x0080 /* b7: PIPE7 */ | 183 | #define M66592_BEMP7 0x0080 /* b7: PIPE7 */ |
184 | #define M66592_BEMP6 0x0040 /* b6: PIPE6 */ | 184 | #define M66592_BEMP6 0x0040 /* b6: PIPE6 */ |
185 | #define M66592_BEMP5 0x0020 /* b5: PIPE5 */ | 185 | #define M66592_BEMP5 0x0020 /* b5: PIPE5 */ |
186 | #define M66592_BEMP4 0x0010 /* b4: PIPE4 */ | 186 | #define M66592_BEMP4 0x0010 /* b4: PIPE4 */ |
187 | #define M66592_BEMP3 0x0008 /* b3: PIPE3 */ | 187 | #define M66592_BEMP3 0x0008 /* b3: PIPE3 */ |
188 | #define M66592_BEMP2 0x0004 /* b2: PIPE2 */ | 188 | #define M66592_BEMP2 0x0004 /* b2: PIPE2 */ |
189 | #define M66592_BEMP1 0x0002 /* b1: PIPE1 */ | 189 | #define M66592_BEMP1 0x0002 /* b1: PIPE1 */ |
190 | #define M66592_BEMP0 0x0001 /* b0: PIPE0 */ | 190 | #define M66592_BEMP0 0x0001 /* b0: PIPE0 */ |
191 | 191 | ||
192 | #define M66592_SOFCFG 0x3C | 192 | #define M66592_SOFCFG 0x3C |
193 | #define M66592_SOFM 0x000C /* b3-2: SOF pulse mode */ | 193 | #define M66592_SOFM 0x000C /* b3-2: SOF pulse mode */ |
194 | #define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */ | 194 | #define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */ |
195 | #define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ | 195 | #define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ |
196 | #define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */ | 196 | #define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */ |
197 | 197 | ||
198 | #define M66592_INTSTS0 0x40 | 198 | #define M66592_INTSTS0 0x40 |
199 | #define M66592_VBINT 0x8000 /* b15: VBUS interrupt */ | 199 | #define M66592_VBINT 0x8000 /* b15: VBUS interrupt */ |
200 | #define M66592_RESM 0x4000 /* b14: Resume interrupt */ | 200 | #define M66592_RESM 0x4000 /* b14: Resume interrupt */ |
201 | #define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */ | 201 | #define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */ |
202 | #define M66592_DVST 0x1000 /* b12: Device state transition interrupt */ | 202 | #define M66592_DVST 0x1000 /* b12: Device state transition */ |
203 | #define M66592_CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ | 203 | #define M66592_CTRT 0x0800 /* b11: Control stage transition */ |
204 | #define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */ | 204 | #define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */ |
205 | #define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */ | 205 | #define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */ |
206 | #define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */ | 206 | #define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */ |
207 | #define M66592_VBSTS 0x0080 /* b7: VBUS input port */ | 207 | #define M66592_VBSTS 0x0080 /* b7: VBUS input port */ |
208 | #define M66592_DVSQ 0x0070 /* b6-4: Device state */ | 208 | #define M66592_DVSQ 0x0070 /* b6-4: Device state */ |
209 | #define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */ | 209 | #define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */ |
210 | #define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */ | 210 | #define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */ |
211 | #define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */ | 211 | #define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */ |
212 | #define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */ | 212 | #define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */ |
213 | #define M66592_DS_SUSP 0x0040 /* Suspend */ | 213 | #define M66592_DS_SUSP 0x0040 /* Suspend */ |
214 | #define M66592_DS_CNFG 0x0030 /* Configured */ | 214 | #define M66592_DS_CNFG 0x0030 /* Configured */ |
215 | #define M66592_DS_ADDS 0x0020 /* Address */ | 215 | #define M66592_DS_ADDS 0x0020 /* Address */ |
216 | #define M66592_DS_DFLT 0x0010 /* Default */ | 216 | #define M66592_DS_DFLT 0x0010 /* Default */ |
217 | #define M66592_DS_POWR 0x0000 /* Powered */ | 217 | #define M66592_DS_POWR 0x0000 /* Powered */ |
218 | #define M66592_DVSQS 0x0030 /* b5-4: Device state */ | 218 | #define M66592_DVSQS 0x0030 /* b5-4: Device state */ |
219 | #define M66592_VALID 0x0008 /* b3: Setup packet detected flag */ | 219 | #define M66592_VALID 0x0008 /* b3: Setup packet detected flag */ |
220 | #define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */ | 220 | #define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */ |
221 | #define M66592_CS_SQER 0x0006 /* Sequence error */ | 221 | #define M66592_CS_SQER 0x0006 /* Sequence error */ |
222 | #define M66592_CS_WRND 0x0005 /* Control write nodata status stage */ | 222 | #define M66592_CS_WRND 0x0005 /* Control write nodata status */ |
223 | #define M66592_CS_WRSS 0x0004 /* Control write status stage */ | 223 | #define M66592_CS_WRSS 0x0004 /* Control write status stage */ |
224 | #define M66592_CS_WRDS 0x0003 /* Control write data stage */ | 224 | #define M66592_CS_WRDS 0x0003 /* Control write data stage */ |
225 | #define M66592_CS_RDSS 0x0002 /* Control read status stage */ | 225 | #define M66592_CS_RDSS 0x0002 /* Control read status stage */ |
226 | #define M66592_CS_RDDS 0x0001 /* Control read data stage */ | 226 | #define M66592_CS_RDDS 0x0001 /* Control read data stage */ |
227 | #define M66592_CS_IDST 0x0000 /* Idle or setup stage */ | 227 | #define M66592_CS_IDST 0x0000 /* Idle or setup stage */ |
228 | 228 | ||
229 | #define M66592_INTSTS1 0x42 | 229 | #define M66592_INTSTS1 0x42 |
230 | #define M66592_BCHG 0x4000 /* b14: USB bus change interrupt */ | 230 | #define M66592_BCHG 0x4000 /* b14: USB bus change interrupt */ |
231 | #define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */ | 231 | #define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */ |
232 | #define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */ | 232 | #define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */ |
233 | #define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */ | 233 | #define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */ |
234 | 234 | ||
235 | #define M66592_FRMNUM 0x4C | 235 | #define M66592_FRMNUM 0x4C |
236 | #define M66592_OVRN 0x8000 /* b15: Overrun error */ | 236 | #define M66592_OVRN 0x8000 /* b15: Overrun error */ |
237 | #define M66592_CRCE 0x4000 /* b14: Received data error */ | 237 | #define M66592_CRCE 0x4000 /* b14: Received data error */ |
238 | #define M66592_SOFRM 0x0800 /* b11: SOF output mode */ | 238 | #define M66592_SOFRM 0x0800 /* b11: SOF output mode */ |
239 | #define M66592_FRNM 0x07FF /* b10-0: Frame number */ | 239 | #define M66592_FRNM 0x07FF /* b10-0: Frame number */ |
240 | 240 | ||
241 | #define M66592_UFRMNUM 0x4E | 241 | #define M66592_UFRMNUM 0x4E |
242 | #define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */ | 242 | #define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */ |
243 | 243 | ||
244 | #define M66592_RECOVER 0x50 | 244 | #define M66592_RECOVER 0x50 |
245 | #define M66592_STSRECOV 0x0700 /* Status recovery */ | 245 | #define M66592_STSRECOV 0x0700 /* Status recovery */ |
246 | #define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */ | 246 | #define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */ |
247 | #define M66592_STSR_DEFAULT 0x0100 /* Default state */ | 247 | #define M66592_STSR_DEFAULT 0x0100 /* Default state */ |
248 | #define M66592_STSR_ADDRESS 0x0200 /* Address state */ | 248 | #define M66592_STSR_ADDRESS 0x0200 /* Address state */ |
249 | #define M66592_STSR_CONFIG 0x0300 /* Configured state */ | 249 | #define M66592_STSR_CONFIG 0x0300 /* Configured state */ |
250 | #define M66592_USBADDR 0x007F /* b6-0: USB address */ | 250 | #define M66592_USBADDR 0x007F /* b6-0: USB address */ |
251 | 251 | ||
252 | #define M66592_USBREQ 0x54 | 252 | #define M66592_USBREQ 0x54 |
253 | #define M66592_bRequest 0xFF00 /* b15-8: bRequest */ | 253 | #define M66592_bRequest 0xFF00 /* b15-8: bRequest */ |
254 | #define M66592_GET_STATUS 0x0000 | 254 | #define M66592_GET_STATUS 0x0000 |
255 | #define M66592_CLEAR_FEATURE 0x0100 | 255 | #define M66592_CLEAR_FEATURE 0x0100 |
256 | #define M66592_ReqRESERVED 0x0200 | 256 | #define M66592_ReqRESERVED 0x0200 |
257 | #define M66592_SET_FEATURE 0x0300 | 257 | #define M66592_SET_FEATURE 0x0300 |
258 | #define M66592_ReqRESERVED1 0x0400 | 258 | #define M66592_ReqRESERVED1 0x0400 |
259 | #define M66592_SET_ADDRESS 0x0500 | 259 | #define M66592_SET_ADDRESS 0x0500 |
260 | #define M66592_GET_DESCRIPTOR 0x0600 | 260 | #define M66592_GET_DESCRIPTOR 0x0600 |
261 | #define M66592_SET_DESCRIPTOR 0x0700 | 261 | #define M66592_SET_DESCRIPTOR 0x0700 |
262 | #define M66592_GET_CONFIGURATION 0x0800 | 262 | #define M66592_GET_CONFIGURATION 0x0800 |
263 | #define M66592_SET_CONFIGURATION 0x0900 | 263 | #define M66592_SET_CONFIGURATION 0x0900 |
264 | #define M66592_GET_INTERFACE 0x0A00 | 264 | #define M66592_GET_INTERFACE 0x0A00 |
265 | #define M66592_SET_INTERFACE 0x0B00 | 265 | #define M66592_SET_INTERFACE 0x0B00 |
266 | #define M66592_SYNCH_FRAME 0x0C00 | 266 | #define M66592_SYNCH_FRAME 0x0C00 |
267 | #define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */ | 267 | #define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */ |
268 | #define M66592_bmRequestTypeDir 0x0080 /* b7 : Data transfer direction */ | 268 | #define M66592_bmRequestTypeDir 0x0080 /* b7 : Data direction */ |
269 | #define M66592_HOST_TO_DEVICE 0x0000 | 269 | #define M66592_HOST_TO_DEVICE 0x0000 |
270 | #define M66592_DEVICE_TO_HOST 0x0080 | 270 | #define M66592_DEVICE_TO_HOST 0x0080 |
271 | #define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */ | 271 | #define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */ |
272 | #define M66592_STANDARD 0x0000 | 272 | #define M66592_STANDARD 0x0000 |
273 | #define M66592_CLASS 0x0020 | 273 | #define M66592_CLASS 0x0020 |
274 | #define M66592_VENDOR 0x0040 | 274 | #define M66592_VENDOR 0x0040 |
275 | #define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */ | 275 | #define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */ |
276 | #define M66592_DEVICE 0x0000 | 276 | #define M66592_DEVICE 0x0000 |
277 | #define M66592_INTERFACE 0x0001 | 277 | #define M66592_INTERFACE 0x0001 |
278 | #define M66592_ENDPOINT 0x0002 | 278 | #define M66592_ENDPOINT 0x0002 |
279 | 279 | ||
280 | #define M66592_USBVAL 0x56 | 280 | #define M66592_USBVAL 0x56 |
281 | #define M66592_wValue 0xFFFF /* b15-0: wValue */ | 281 | #define M66592_wValue 0xFFFF /* b15-0: wValue */ |
282 | /* Standard Feature Selector */ | 282 | /* Standard Feature Selector */ |
283 | #define M66592_ENDPOINT_HALT 0x0000 | 283 | #define M66592_ENDPOINT_HALT 0x0000 |
284 | #define M66592_DEVICE_REMOTE_WAKEUP 0x0001 | 284 | #define M66592_DEVICE_REMOTE_WAKEUP 0x0001 |
285 | #define M66592_TEST_MODE 0x0002 | 285 | #define M66592_TEST_MODE 0x0002 |
286 | /* Descriptor Types */ | 286 | /* Descriptor Types */ |
287 | #define M66592_DT_TYPE 0xFF00 | 287 | #define M66592_DT_TYPE 0xFF00 |
288 | #define M66592_GET_DT_TYPE(v) (((v) & M66592_DT_TYPE) >> 8) | 288 | #define M66592_GET_DT_TYPE(v) (((v) & M66592_DT_TYPE) >> 8) |
289 | #define M66592_DT_DEVICE 0x01 | 289 | #define M66592_DT_DEVICE 0x01 |
290 | #define M66592_DT_CONFIGURATION 0x02 | 290 | #define M66592_DT_CONFIGURATION 0x02 |
291 | #define M66592_DT_STRING 0x03 | 291 | #define M66592_DT_STRING 0x03 |
292 | #define M66592_DT_INTERFACE 0x04 | 292 | #define M66592_DT_INTERFACE 0x04 |
293 | #define M66592_DT_ENDPOINT 0x05 | 293 | #define M66592_DT_ENDPOINT 0x05 |
294 | #define M66592_DT_DEVICE_QUALIFIER 0x06 | 294 | #define M66592_DT_DEVICE_QUALIFIER 0x06 |
295 | #define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07 | 295 | #define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07 |
296 | #define M66592_DT_INTERFACE_POWER 0x08 | 296 | #define M66592_DT_INTERFACE_POWER 0x08 |
297 | #define M66592_DT_INDEX 0x00FF | 297 | #define M66592_DT_INDEX 0x00FF |
298 | #define M66592_CONF_NUM 0x00FF | 298 | #define M66592_CONF_NUM 0x00FF |
299 | #define M66592_ALT_SET 0x00FF | 299 | #define M66592_ALT_SET 0x00FF |
300 | 300 | ||
301 | #define M66592_USBINDEX 0x58 | 301 | #define M66592_USBINDEX 0x58 |
302 | #define M66592_wIndex 0xFFFF /* b15-0: wIndex */ | 302 | #define M66592_wIndex 0xFFFF /* b15-0: wIndex */ |
303 | #define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode Selectors */ | 303 | #define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode */ |
304 | #define M66592_TEST_J 0x0100 /* Test_J */ | 304 | #define M66592_TEST_J 0x0100 /* Test_J */ |
305 | #define M66592_TEST_K 0x0200 /* Test_K */ | 305 | #define M66592_TEST_K 0x0200 /* Test_K */ |
306 | #define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */ | 306 | #define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */ |
307 | #define M66592_TEST_PACKET 0x0400 /* Test_Packet */ | 307 | #define M66592_TEST_PACKET 0x0400 /* Test_Packet */ |
308 | #define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */ | 308 | #define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */ |
309 | #define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */ | 309 | #define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */ |
310 | #define M66592_TEST_Reserved 0x4000 /* Reserved */ | 310 | #define M66592_TEST_Reserved 0x4000 /* Reserved */ |
311 | #define M66592_TEST_VSTModes 0xC000 /* Vendor-specific test modes */ | 311 | #define M66592_TEST_VSTModes 0xC000 /* Vendor-specific tests */ |
312 | #define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */ | 312 | #define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */ |
313 | #define M66592_EP_DIR_IN 0x0080 | 313 | #define M66592_EP_DIR_IN 0x0080 |
314 | #define M66592_EP_DIR_OUT 0x0000 | 314 | #define M66592_EP_DIR_OUT 0x0000 |
315 | 315 | ||
316 | #define M66592_USBLENG 0x5A | 316 | #define M66592_USBLENG 0x5A |
317 | #define M66592_wLength 0xFFFF /* b15-0: wLength */ | 317 | #define M66592_wLength 0xFFFF /* b15-0: wLength */ |
318 | 318 | ||
319 | #define M66592_DCPCFG 0x5C | 319 | #define M66592_DCPCFG 0x5C |
320 | #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode select */ | 320 | #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */ |
321 | #define M66592_DIR 0x0010 /* b4: Control transfer DIR select */ | 321 | #define M66592_DIR 0x0010 /* b4: Control transfer DIR select */ |
322 | 322 | ||
323 | #define M66592_DCPMAXP 0x5E | 323 | #define M66592_DCPMAXP 0x5E |
324 | #define M66592_DEVSEL 0xC000 /* b15-14: Device address select */ | 324 | #define M66592_DEVSEL 0xC000 /* b15-14: Device address select */ |
325 | #define M66592_DEVICE_0 0x0000 /* Device address 0 */ | 325 | #define M66592_DEVICE_0 0x0000 /* Device address 0 */ |
326 | #define M66592_DEVICE_1 0x4000 /* Device address 1 */ | 326 | #define M66592_DEVICE_1 0x4000 /* Device address 1 */ |
327 | #define M66592_DEVICE_2 0x8000 /* Device address 2 */ | 327 | #define M66592_DEVICE_2 0x8000 /* Device address 2 */ |
328 | #define M66592_DEVICE_3 0xC000 /* Device address 3 */ | 328 | #define M66592_DEVICE_3 0xC000 /* Device address 3 */ |
329 | #define M66592_MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ | 329 | #define M66592_MAXP 0x007F /* b6-0: Maxpacket size of ep0 */ |
330 | 330 | ||
331 | #define M66592_DCPCTR 0x60 | 331 | #define M66592_DCPCTR 0x60 |
332 | #define M66592_BSTS 0x8000 /* b15: Buffer status */ | 332 | #define M66592_BSTS 0x8000 /* b15: Buffer status */ |
333 | #define M66592_SUREQ 0x4000 /* b14: Send USB request */ | 333 | #define M66592_SUREQ 0x4000 /* b14: Send USB request */ |
334 | #define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ | 334 | #define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ |
335 | #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ | 335 | #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ |
336 | #define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ | 336 | #define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ |
337 | #define M66592_CCPL 0x0004 /* b2: Enable control transfer complete */ | 337 | #define M66592_CCPL 0x0004 /* b2: control transfer complete */ |
338 | #define M66592_PID 0x0003 /* b1-0: Response PID */ | 338 | #define M66592_PID 0x0003 /* b1-0: Response PID */ |
339 | #define M66592_PID_STALL 0x0002 /* STALL */ | 339 | #define M66592_PID_STALL 0x0002 /* STALL */ |
340 | #define M66592_PID_BUF 0x0001 /* BUF */ | 340 | #define M66592_PID_BUF 0x0001 /* BUF */ |
341 | #define M66592_PID_NAK 0x0000 /* NAK */ | 341 | #define M66592_PID_NAK 0x0000 /* NAK */ |
342 | 342 | ||
343 | #define M66592_PIPESEL 0x64 | 343 | #define M66592_PIPESEL 0x64 |
344 | #define M66592_PIPENM 0x0007 /* b2-0: Pipe select */ | 344 | #define M66592_PIPENM 0x0007 /* b2-0: Pipe select */ |
345 | #define M66592_PIPE0 0x0000 /* PIPE 0 */ | 345 | #define M66592_PIPE0 0x0000 /* PIPE 0 */ |
346 | #define M66592_PIPE1 0x0001 /* PIPE 1 */ | 346 | #define M66592_PIPE1 0x0001 /* PIPE 1 */ |
347 | #define M66592_PIPE2 0x0002 /* PIPE 2 */ | 347 | #define M66592_PIPE2 0x0002 /* PIPE 2 */ |
348 | #define M66592_PIPE3 0x0003 /* PIPE 3 */ | 348 | #define M66592_PIPE3 0x0003 /* PIPE 3 */ |
349 | #define M66592_PIPE4 0x0004 /* PIPE 4 */ | 349 | #define M66592_PIPE4 0x0004 /* PIPE 4 */ |
350 | #define M66592_PIPE5 0x0005 /* PIPE 5 */ | 350 | #define M66592_PIPE5 0x0005 /* PIPE 5 */ |
351 | #define M66592_PIPE6 0x0006 /* PIPE 6 */ | 351 | #define M66592_PIPE6 0x0006 /* PIPE 6 */ |
352 | #define M66592_PIPE7 0x0007 /* PIPE 7 */ | 352 | #define M66592_PIPE7 0x0007 /* PIPE 7 */ |
353 | 353 | ||
354 | #define M66592_PIPECFG 0x66 | 354 | #define M66592_PIPECFG 0x66 |
355 | #define M66592_TYP 0xC000 /* b15-14: Transfer type */ | 355 | #define M66592_TYP 0xC000 /* b15-14: Transfer type */ |
356 | #define M66592_ISO 0xC000 /* Isochronous */ | 356 | #define M66592_ISO 0xC000 /* Isochronous */ |
357 | #define M66592_INT 0x8000 /* Interrupt */ | 357 | #define M66592_INT 0x8000 /* Interrupt */ |
358 | #define M66592_BULK 0x4000 /* Bulk */ | 358 | #define M66592_BULK 0x4000 /* Bulk */ |
359 | #define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ | 359 | #define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode */ |
360 | #define M66592_DBLB 0x0200 /* b9: Double buffer mode select */ | 360 | #define M66592_DBLB 0x0200 /* b9: Double buffer mode select */ |
361 | #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode select */ | 361 | #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */ |
362 | #define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */ | 362 | #define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */ |
363 | #define M66592_DIR 0x0010 /* b4: Transfer direction select */ | 363 | #define M66592_DIR 0x0010 /* b4: Transfer direction select */ |
364 | #define M66592_DIR_H_OUT 0x0010 /* HOST OUT */ | 364 | #define M66592_DIR_H_OUT 0x0010 /* HOST OUT */ |
365 | #define M66592_DIR_P_IN 0x0010 /* PERI IN */ | 365 | #define M66592_DIR_P_IN 0x0010 /* PERI IN */ |
366 | #define M66592_DIR_H_IN 0x0000 /* HOST IN */ | 366 | #define M66592_DIR_H_IN 0x0000 /* HOST IN */ |
367 | #define M66592_DIR_P_OUT 0x0000 /* PERI OUT */ | 367 | #define M66592_DIR_P_OUT 0x0000 /* PERI OUT */ |
368 | #define M66592_EPNUM 0x000F /* b3-0: Endpoint number select */ | 368 | #define M66592_EPNUM 0x000F /* b3-0: Endpoint number select */ |
369 | #define M66592_EP1 0x0001 | 369 | #define M66592_EP1 0x0001 |
370 | #define M66592_EP2 0x0002 | 370 | #define M66592_EP2 0x0002 |
371 | #define M66592_EP3 0x0003 | 371 | #define M66592_EP3 0x0003 |
372 | #define M66592_EP4 0x0004 | 372 | #define M66592_EP4 0x0004 |
373 | #define M66592_EP5 0x0005 | 373 | #define M66592_EP5 0x0005 |
374 | #define M66592_EP6 0x0006 | 374 | #define M66592_EP6 0x0006 |
375 | #define M66592_EP7 0x0007 | 375 | #define M66592_EP7 0x0007 |
376 | #define M66592_EP8 0x0008 | 376 | #define M66592_EP8 0x0008 |
377 | #define M66592_EP9 0x0009 | 377 | #define M66592_EP9 0x0009 |
378 | #define M66592_EP10 0x000A | 378 | #define M66592_EP10 0x000A |
379 | #define M66592_EP11 0x000B | 379 | #define M66592_EP11 0x000B |
380 | #define M66592_EP12 0x000C | 380 | #define M66592_EP12 0x000C |
381 | #define M66592_EP13 0x000D | 381 | #define M66592_EP13 0x000D |
382 | #define M66592_EP14 0x000E | 382 | #define M66592_EP14 0x000E |
383 | #define M66592_EP15 0x000F | 383 | #define M66592_EP15 0x000F |
384 | 384 | ||
385 | #define M66592_PIPEBUF 0x68 | 385 | #define M66592_PIPEBUF 0x68 |
386 | #define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ | 386 | #define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ |
387 | #define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10) | 387 | #define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10) |
388 | #define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */ | 388 | #define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */ |
389 | 389 | ||
390 | #define M66592_PIPEMAXP 0x6A | 390 | #define M66592_PIPEMAXP 0x6A |
391 | #define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */ | 391 | #define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */ |
392 | 392 | ||
393 | #define M66592_PIPEPERI 0x6C | 393 | #define M66592_PIPEPERI 0x6C |
394 | #define M66592_IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ | 394 | #define M66592_IFIS 0x1000 /* b12: ISO in-buffer flush mode */ |
395 | #define M66592_IITV 0x0007 /* b2-0: Isochronous interval */ | 395 | #define M66592_IITV 0x0007 /* b2-0: ISO interval */ |
396 | 396 | ||
397 | #define M66592_PIPE1CTR 0x70 | 397 | #define M66592_PIPE1CTR 0x70 |
398 | #define M66592_PIPE2CTR 0x72 | 398 | #define M66592_PIPE2CTR 0x72 |
@@ -401,19 +401,17 @@ | |||
401 | #define M66592_PIPE5CTR 0x78 | 401 | #define M66592_PIPE5CTR 0x78 |
402 | #define M66592_PIPE6CTR 0x7A | 402 | #define M66592_PIPE6CTR 0x7A |
403 | #define M66592_PIPE7CTR 0x7C | 403 | #define M66592_PIPE7CTR 0x7C |
404 | #define M66592_BSTS 0x8000 /* b15: Buffer status */ | 404 | #define M66592_BSTS 0x8000 /* b15: Buffer status */ |
405 | #define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ | 405 | #define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (PIPE 1-5) */ |
406 | #define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */ | 406 | #define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */ |
407 | #define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ | 407 | #define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ |
408 | #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ | 408 | #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ |
409 | #define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ | 409 | #define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ |
410 | #define M66592_PID 0x0003 /* b1-0: Response PID */ | 410 | #define M66592_PID 0x0003 /* b1-0: Response PID */ |
411 | 411 | ||
412 | #define M66592_INVALID_REG 0x7E | 412 | #define M66592_INVALID_REG 0x7E |
413 | 413 | ||
414 | 414 | ||
415 | #define __iomem | ||
416 | |||
417 | #define get_pipectr_addr(pipenum) (M66592_PIPE1CTR + (pipenum - 1) * 2) | 415 | #define get_pipectr_addr(pipenum) (M66592_PIPE1CTR + (pipenum - 1) * 2) |
418 | 416 | ||
419 | #define M66592_MAX_SAMPLING 10 | 417 | #define M66592_MAX_SAMPLING 10 |
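For orientation, here is a minimal sketch of how these per-pipe macros are meant to be combined: get_pipectr_addr() locates a pipe's control register (they sit 2 bytes apart starting at PIPE1CTR), and the PID field in that register selects the handshake response. The helper below is purely illustrative and not part of the driver; it assumes the m66592_read()/m66592_write() accessors shown further down.

	/* illustrative only: make data pipe 1..7 answer every token with STALL */
	static void example_stall_pipe(struct m66592 *m66592, u16 pipenum)
	{
		unsigned long offset = get_pipectr_addr(pipenum);
		u16 ctr = m66592_read(m66592, offset);

		ctr &= ~M66592_PID;		/* clear the response PID field (b1-0) */
		ctr |= M66592_PID_STALL;	/* 10b = STALL */
		m66592_write(m66592, ctr, offset);
	}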
@@ -449,7 +447,7 @@ struct m66592_ep { | |||
449 | struct m66592 *m66592; | 447 | struct m66592 *m66592; |
450 | 448 | ||
451 | struct list_head queue; | 449 | struct list_head queue; |
452 | unsigned busy:1; | 450 | unsigned busy:1; |
453 | unsigned internal_ccpl:1; /* used only for control transfers */ | 451 | unsigned internal_ccpl:1; /* used only for control transfers */ |
454 | 452 | ||
455 | /* this member is valid only after m66592_enable */ | 453 | /* this member is valid only after m66592_enable */ |
@@ -477,7 +475,7 @@ struct m66592 { | |||
477 | struct m66592_ep *epaddr2ep[16]; | 475 | struct m66592_ep *epaddr2ep[16]; |
478 | 476 | ||
479 | struct usb_request *ep0_req; /* for internal request */ | 477 | struct usb_request *ep0_req; /* for internal request */ |
480 | u16 *ep0_buf; /* for internal request */ | 478 | u16 ep0_data; /* for internal request */ |
481 | 479 | ||
482 | struct timer_list timer; | 480 | struct timer_list timer; |
483 | 481 | ||
@@ -527,8 +525,8 @@ static inline u16 m66592_read(struct m66592 *m66592, unsigned long offset) | |||
527 | } | 525 | } |
528 | 526 | ||
529 | static inline void m66592_read_fifo(struct m66592 *m66592, | 527 | static inline void m66592_read_fifo(struct m66592 *m66592, |
530 | unsigned long offset, | 528 | unsigned long offset, |
531 | void *buf, unsigned long len) | 529 | void *buf, unsigned long len) |
532 | { | 530 | { |
533 | unsigned long fifoaddr = (unsigned long)m66592->reg + offset; | 531 | unsigned long fifoaddr = (unsigned long)m66592->reg + offset; |
534 | 532 | ||
@@ -543,8 +541,8 @@ static inline void m66592_write(struct m66592 *m66592, u16 val, | |||
543 | } | 541 | } |
544 | 542 | ||
545 | static inline void m66592_write_fifo(struct m66592 *m66592, | 543 | static inline void m66592_write_fifo(struct m66592 *m66592, |
546 | unsigned long offset, | 544 | unsigned long offset, |
547 | void *buf, unsigned long len) | 545 | void *buf, unsigned long len) |
548 | { | 546 | { |
549 | unsigned long fifoaddr = (unsigned long)m66592->reg + offset; | 547 | unsigned long fifoaddr = (unsigned long)m66592->reg + offset; |
550 | unsigned long odd = len & 0x0001; | 548 | unsigned long odd = len & 0x0001; |
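The FIFO on this controller is accessed 16 bits at a time, which is why m66592_write_fifo() computes odd = len & 0x0001: the even part of the buffer goes out as 16-bit words and a trailing odd byte needs one extra 8-bit access. A self-contained sketch of that idea follows; it uses the generic iowrite16()/iowrite8() accessors and assumes little-endian byte pairing, whereas the real driver goes through its own bus helpers.

	#include <linux/io.h>
	#include <linux/types.h>

	/* sketch: push 'len' bytes into a 16-bit wide FIFO window at 'fifo' */
	static inline void example_write_fifo(void __iomem *fifo,
					      const u8 *buf, unsigned long len)
	{
		unsigned long odd = len & 0x0001;
		unsigned long i;

		/* even part: two bytes per 16-bit write */
		for (i = 0; i + 1 < len; i += 2)
			iowrite16(buf[i] | (buf[i + 1] << 8), fifo);

		/* odd trailing byte, if any, as a single 8-bit write */
		if (odd)
			iowrite8(buf[len - 1], fifo);
	}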
@@ -558,7 +556,7 @@ static inline void m66592_write_fifo(struct m66592 *m66592, | |||
558 | } | 556 | } |
559 | 557 | ||
560 | static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, | 558 | static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, |
561 | unsigned long offset) | 559 | unsigned long offset) |
562 | { | 560 | { |
563 | u16 tmp; | 561 | u16 tmp; |
564 | tmp = m66592_read(m66592, offset); | 562 | tmp = m66592_read(m66592, offset); |
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c index 38138bb9ddb0..9cd98e73dc1d 100644 --- a/drivers/usb/gadget/serial.c +++ b/drivers/usb/gadget/serial.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/device.h> | 33 | #include <linux/device.h> |
34 | #include <linux/tty.h> | 34 | #include <linux/tty.h> |
35 | #include <linux/tty_flip.h> | 35 | #include <linux/tty_flip.h> |
36 | #include <linux/mutex.h> | ||
36 | 37 | ||
37 | #include <asm/byteorder.h> | 38 | #include <asm/byteorder.h> |
38 | #include <asm/io.h> | 39 | #include <asm/io.h> |
@@ -258,7 +259,7 @@ static const char *EP_IN_NAME; | |||
258 | static const char *EP_OUT_NAME; | 259 | static const char *EP_OUT_NAME; |
259 | static const char *EP_NOTIFY_NAME; | 260 | static const char *EP_NOTIFY_NAME; |
260 | 261 | ||
261 | static struct semaphore gs_open_close_sem[GS_NUM_PORTS]; | 262 | static struct mutex gs_open_close_lock[GS_NUM_PORTS]; |
262 | 263 | ||
263 | static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE; | 264 | static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE; |
264 | static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE; | 265 | static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE; |
@@ -595,7 +596,7 @@ static int __init gs_module_init(void) | |||
595 | tty_set_operations(gs_tty_driver, &gs_tty_ops); | 596 | tty_set_operations(gs_tty_driver, &gs_tty_ops); |
596 | 597 | ||
597 | for (i=0; i < GS_NUM_PORTS; i++) | 598 | for (i=0; i < GS_NUM_PORTS; i++) |
598 | sema_init(&gs_open_close_sem[i], 1); | 599 | mutex_init(&gs_open_close_lock[i]); |
599 | 600 | ||
600 | retval = tty_register_driver(gs_tty_driver); | 601 | retval = tty_register_driver(gs_tty_driver); |
601 | if (retval) { | 602 | if (retval) { |
@@ -635,7 +636,7 @@ static int gs_open(struct tty_struct *tty, struct file *file) | |||
635 | struct gs_port *port; | 636 | struct gs_port *port; |
636 | struct gs_dev *dev; | 637 | struct gs_dev *dev; |
637 | struct gs_buf *buf; | 638 | struct gs_buf *buf; |
638 | struct semaphore *sem; | 639 | struct mutex *mtx; |
639 | int ret; | 640 | int ret; |
640 | 641 | ||
641 | port_num = tty->index; | 642 | port_num = tty->index; |
@@ -656,10 +657,10 @@ static int gs_open(struct tty_struct *tty, struct file *file) | |||
656 | return -ENODEV; | 657 | return -ENODEV; |
657 | } | 658 | } |
658 | 659 | ||
659 | sem = &gs_open_close_sem[port_num]; | 660 | mtx = &gs_open_close_lock[port_num]; |
660 | if (down_interruptible(sem)) { | 661 | if (mutex_lock_interruptible(mtx)) { |
661 | printk(KERN_ERR | 662 | printk(KERN_ERR |
662 | "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n", | 663 | "gs_open: (%d,%p,%p) interrupted waiting for mutex\n", |
663 | port_num, tty, file); | 664 | port_num, tty, file); |
664 | return -ERESTARTSYS; | 665 | return -ERESTARTSYS; |
665 | } | 666 | } |
@@ -754,12 +755,12 @@ static int gs_open(struct tty_struct *tty, struct file *file) | |||
754 | 755 | ||
755 | exit_unlock_port: | 756 | exit_unlock_port: |
756 | spin_unlock_irqrestore(&port->port_lock, flags); | 757 | spin_unlock_irqrestore(&port->port_lock, flags); |
757 | up(sem); | 758 | mutex_unlock(mtx); |
758 | return ret; | 759 | return ret; |
759 | 760 | ||
760 | exit_unlock_dev: | 761 | exit_unlock_dev: |
761 | spin_unlock_irqrestore(&dev->dev_lock, flags); | 762 | spin_unlock_irqrestore(&dev->dev_lock, flags); |
762 | up(sem); | 763 | mutex_unlock(mtx); |
763 | return ret; | 764 | return ret; |
764 | 765 | ||
765 | } | 766 | } |
@@ -781,7 +782,7 @@ exit_unlock_dev: | |||
781 | static void gs_close(struct tty_struct *tty, struct file *file) | 782 | static void gs_close(struct tty_struct *tty, struct file *file) |
782 | { | 783 | { |
783 | struct gs_port *port = tty->driver_data; | 784 | struct gs_port *port = tty->driver_data; |
784 | struct semaphore *sem; | 785 | struct mutex *mtx; |
785 | 786 | ||
786 | if (port == NULL) { | 787 | if (port == NULL) { |
787 | printk(KERN_ERR "gs_close: NULL port pointer\n"); | 788 | printk(KERN_ERR "gs_close: NULL port pointer\n"); |
@@ -790,8 +791,8 @@ static void gs_close(struct tty_struct *tty, struct file *file) | |||
790 | 791 | ||
791 | gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file); | 792 | gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file); |
792 | 793 | ||
793 | sem = &gs_open_close_sem[port->port_num]; | 794 | mtx = &gs_open_close_lock[port->port_num]; |
794 | down(sem); | 795 | mutex_lock(mtx); |
795 | 796 | ||
796 | spin_lock_irq(&port->port_lock); | 797 | spin_lock_irq(&port->port_lock); |
797 | 798 | ||
@@ -846,7 +847,7 @@ static void gs_close(struct tty_struct *tty, struct file *file) | |||
846 | 847 | ||
847 | exit: | 848 | exit: |
848 | spin_unlock_irq(&port->port_lock); | 849 | spin_unlock_irq(&port->port_lock); |
849 | up(sem); | 850 | mutex_unlock(mtx); |
850 | } | 851 | } |
851 | 852 | ||
852 | /* | 853 | /* |
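The serial.c changes above replace a counting semaphore that was only ever used for mutual exclusion with one struct mutex per port, making the sleeping-lock intent explicit. A condensed sketch of the resulting pattern (port lookup, error paths and the spinlocked sections are omitted; the array size and helper names below are illustrative stand-ins, not the driver's own):

	#include <linux/errno.h>
	#include <linux/mutex.h>

	#define EXAMPLE_NUM_PORTS 4			/* stands in for GS_NUM_PORTS */

	static struct mutex example_open_close_lock[EXAMPLE_NUM_PORTS];

	static void example_init_locks(void)		/* as in gs_module_init() */
	{
		int i;

		for (i = 0; i < EXAMPLE_NUM_PORTS; i++)
			mutex_init(&example_open_close_lock[i]);
	}

	static int example_open(int port_num)
	{
		struct mutex *mtx = &example_open_close_lock[port_num];

		/* sleeping lock; a pending signal aborts the open */
		if (mutex_lock_interruptible(mtx))
			return -ERESTARTSYS;

		/* ... bring the port up while holding the lock ... */

		mutex_unlock(mtx);
		return 0;
	}

	static void example_close(int port_num)
	{
		struct mutex *mtx = &example_open_close_lock[port_num];

		mutex_lock(mtx);
		/* ... tear the port down ... */
		mutex_unlock(mtx);
	}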
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 46873f2534b5..5c851a36de72 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c | |||
@@ -228,7 +228,6 @@ static void preproc_atl_queue(struct isp116x *isp116x) | |||
228 | struct urb, urb_list); | 228 | struct urb, urb_list); |
229 | ptd = &ep->ptd; | 229 | ptd = &ep->ptd; |
230 | len = ep->length; | 230 | len = ep->length; |
231 | spin_lock(&urb->lock); | ||
232 | ep->data = (unsigned char *)urb->transfer_buffer | 231 | ep->data = (unsigned char *)urb->transfer_buffer |
233 | + urb->actual_length; | 232 | + urb->actual_length; |
234 | 233 | ||
@@ -264,7 +263,6 @@ static void preproc_atl_queue(struct isp116x *isp116x) | |||
264 | | PTD_EP(ep->epnum); | 263 | | PTD_EP(ep->epnum); |
265 | ptd->len = PTD_LEN(len) | PTD_DIR(dir); | 264 | ptd->len = PTD_LEN(len) | PTD_DIR(dir); |
266 | ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); | 265 | ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); |
267 | spin_unlock(&urb->lock); | ||
268 | if (!ep->active) { | 266 | if (!ep->active) { |
269 | ptd->mps |= PTD_LAST_MSK; | 267 | ptd->mps |= PTD_LAST_MSK; |
270 | isp116x->atl_last_dir = dir; | 268 | isp116x->atl_last_dir = dir; |
@@ -275,6 +273,61 @@ static void preproc_atl_queue(struct isp116x *isp116x) | |||
275 | } | 273 | } |
276 | 274 | ||
277 | /* | 275 | /* |
276 | Take done or failed requests out of schedule. Give back | ||
277 | processed urbs. | ||
278 | */ | ||
279 | static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep, | ||
280 | struct urb *urb) | ||
281 | __releases(isp116x->lock) __acquires(isp116x->lock) | ||
282 | { | ||
283 | unsigned i; | ||
284 | |||
285 | urb->hcpriv = NULL; | ||
286 | ep->error_count = 0; | ||
287 | |||
288 | if (usb_pipecontrol(urb->pipe)) | ||
289 | ep->nextpid = USB_PID_SETUP; | ||
290 | |||
291 | urb_dbg(urb, "Finish"); | ||
292 | |||
293 | spin_unlock(&isp116x->lock); | ||
294 | usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb); | ||
295 | spin_lock(&isp116x->lock); | ||
296 | |||
297 | /* take idle endpoints out of the schedule */ | ||
298 | if (!list_empty(&ep->hep->urb_list)) | ||
299 | return; | ||
300 | |||
301 | /* async deschedule */ | ||
302 | if (!list_empty(&ep->schedule)) { | ||
303 | list_del_init(&ep->schedule); | ||
304 | return; | ||
305 | } | ||
306 | |||
307 | /* periodic deschedule */ | ||
308 | DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); | ||
309 | for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { | ||
310 | struct isp116x_ep *temp; | ||
311 | struct isp116x_ep **prev = &isp116x->periodic[i]; | ||
312 | |||
313 | while (*prev && ((temp = *prev) != ep)) | ||
314 | prev = &temp->next; | ||
315 | if (*prev) | ||
316 | *prev = ep->next; | ||
317 | isp116x->load[i] -= ep->load; | ||
318 | } | ||
319 | ep->branch = PERIODIC_SIZE; | ||
320 | isp116x_to_hcd(isp116x)->self.bandwidth_allocated -= | ||
321 | ep->load / ep->period; | ||
322 | |||
323 | /* switch irq type? */ | ||
324 | if (!--isp116x->periodic_count) { | ||
325 | isp116x->irqenb &= ~HCuPINT_SOF; | ||
326 | isp116x->irqenb |= HCuPINT_ATL; | ||
327 | } | ||
328 | } | ||
329 | |||
330 | /* | ||
278 | Analyze transfer results, handle partial transfers and errors | 331 | Analyze transfer results, handle partial transfers and errors |
279 | */ | 332 | */ |
280 | static void postproc_atl_queue(struct isp116x *isp116x) | 333 | static void postproc_atl_queue(struct isp116x *isp116x) |
@@ -284,6 +337,7 @@ static void postproc_atl_queue(struct isp116x *isp116x) | |||
284 | struct usb_device *udev; | 337 | struct usb_device *udev; |
285 | struct ptd *ptd; | 338 | struct ptd *ptd; |
286 | int short_not_ok; | 339 | int short_not_ok; |
340 | int status; | ||
287 | u8 cc; | 341 | u8 cc; |
288 | 342 | ||
289 | for (ep = isp116x->atl_active; ep; ep = ep->active) { | 343 | for (ep = isp116x->atl_active; ep; ep = ep->active) { |
@@ -294,7 +348,7 @@ static void postproc_atl_queue(struct isp116x *isp116x) | |||
294 | ptd = &ep->ptd; | 348 | ptd = &ep->ptd; |
295 | cc = PTD_GET_CC(ptd); | 349 | cc = PTD_GET_CC(ptd); |
296 | short_not_ok = 1; | 350 | short_not_ok = 1; |
297 | spin_lock(&urb->lock); | 351 | status = -EINPROGRESS; |
298 | 352 | ||
299 | /* Data underrun is special. For allowed underrun | 353 | /* Data underrun is special. For allowed underrun |
300 | we clear the error and continue as normal. For | 354 | we clear the error and continue as normal. For |
@@ -302,47 +356,36 @@ static void postproc_atl_queue(struct isp116x *isp116x) | |||
302 | immediately while for control transfer, | 356 | immediately while for control transfer, |
303 | we do a STATUS stage. */ | 357 | we do a STATUS stage. */ |
304 | if (cc == TD_DATAUNDERRUN) { | 358 | if (cc == TD_DATAUNDERRUN) { |
305 | if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) { | 359 | if (!(urb->transfer_flags & URB_SHORT_NOT_OK) || |
306 | DBG("Allowed data underrun\n"); | 360 | usb_pipecontrol(urb->pipe)) { |
361 | DBG("Allowed or control data underrun\n"); | ||
307 | cc = TD_CC_NOERROR; | 362 | cc = TD_CC_NOERROR; |
308 | short_not_ok = 0; | 363 | short_not_ok = 0; |
309 | } else { | 364 | } else { |
310 | ep->error_count = 1; | 365 | ep->error_count = 1; |
311 | if (usb_pipecontrol(urb->pipe)) | 366 | usb_settoggle(udev, ep->epnum, |
312 | ep->nextpid = USB_PID_ACK; | 367 | ep->nextpid == USB_PID_OUT, |
313 | else | 368 | PTD_GET_TOGGLE(ptd)); |
314 | usb_settoggle(udev, ep->epnum, | ||
315 | ep->nextpid == | ||
316 | USB_PID_OUT, | ||
317 | PTD_GET_TOGGLE(ptd)); | ||
318 | urb->actual_length += PTD_GET_COUNT(ptd); | 369 | urb->actual_length += PTD_GET_COUNT(ptd); |
319 | urb->status = cc_to_error[TD_DATAUNDERRUN]; | 370 | status = cc_to_error[TD_DATAUNDERRUN]; |
320 | spin_unlock(&urb->lock); | 371 | goto done; |
321 | continue; | ||
322 | } | 372 | } |
323 | } | 373 | } |
324 | /* Keep underrun error through the STATUS stage */ | ||
325 | if (urb->status == cc_to_error[TD_DATAUNDERRUN]) | ||
326 | cc = TD_DATAUNDERRUN; | ||
327 | 374 | ||
328 | if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED | 375 | if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED |
329 | && (++ep->error_count >= 3 || cc == TD_CC_STALL | 376 | && (++ep->error_count >= 3 || cc == TD_CC_STALL |
330 | || cc == TD_DATAOVERRUN)) { | 377 | || cc == TD_DATAOVERRUN)) { |
331 | if (urb->status == -EINPROGRESS) | 378 | status = cc_to_error[cc]; |
332 | urb->status = cc_to_error[cc]; | ||
333 | if (ep->nextpid == USB_PID_ACK) | 379 | if (ep->nextpid == USB_PID_ACK) |
334 | ep->nextpid = 0; | 380 | ep->nextpid = 0; |
335 | spin_unlock(&urb->lock); | 381 | goto done; |
336 | continue; | ||
337 | } | 382 | } |
338 | /* According to usb spec, zero-length Int transfer signals | 383 | /* According to usb spec, zero-length Int transfer signals |
339 | finishing of the urb. Hey, does this apply only | 384 | finishing of the urb. Hey, does this apply only |
340 | for IN endpoints? */ | 385 | for IN endpoints? */ |
341 | if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) { | 386 | if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) { |
342 | if (urb->status == -EINPROGRESS) | 387 | status = 0; |
343 | urb->status = 0; | 388 | goto done; |
344 | spin_unlock(&urb->lock); | ||
345 | continue; | ||
346 | } | 389 | } |
347 | 390 | ||
348 | /* Relax after previously failed, but later succeeded | 391 | /* Relax after previously failed, but later succeeded |
@@ -381,8 +424,8 @@ static void postproc_atl_queue(struct isp116x *isp116x) | |||
381 | /* All data for this URB is transferred, let's finish */ | 424 | /* All data for this URB is transferred, let's finish */ |
382 | if (usb_pipecontrol(urb->pipe)) | 425 | if (usb_pipecontrol(urb->pipe)) |
383 | ep->nextpid = USB_PID_ACK; | 426 | ep->nextpid = USB_PID_ACK; |
384 | else if (urb->status == -EINPROGRESS) | 427 | else |
385 | urb->status = 0; | 428 | status = 0; |
386 | break; | 429 | break; |
387 | case USB_PID_SETUP: | 430 | case USB_PID_SETUP: |
388 | if (PTD_GET_ACTIVE(ptd) | 431 | if (PTD_GET_ACTIVE(ptd) |
@@ -402,69 +445,27 @@ static void postproc_atl_queue(struct isp116x *isp116x) | |||
402 | if (PTD_GET_ACTIVE(ptd) | 445 | if (PTD_GET_ACTIVE(ptd) |
403 | || (cc != TD_CC_NOERROR && cc < 0x0E)) | 446 | || (cc != TD_CC_NOERROR && cc < 0x0E)) |
404 | break; | 447 | break; |
405 | if (urb->status == -EINPROGRESS) | 448 | if ((urb->transfer_flags & URB_SHORT_NOT_OK) && |
406 | urb->status = 0; | 449 | urb->actual_length < |
450 | urb->transfer_buffer_length) | ||
451 | status = -EREMOTEIO; | ||
452 | else | ||
453 | status = 0; | ||
407 | ep->nextpid = 0; | 454 | ep->nextpid = 0; |
408 | break; | 455 | break; |
409 | default: | 456 | default: |
410 | BUG(); | 457 | BUG(); |
411 | } | 458 | } |
412 | spin_unlock(&urb->lock); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | /* | ||
417 | Take done or failed requests out of schedule. Give back | ||
418 | processed urbs. | ||
419 | */ | ||
420 | static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep, | ||
421 | struct urb *urb) | ||
422 | __releases(isp116x->lock) __acquires(isp116x->lock) | ||
423 | { | ||
424 | unsigned i; | ||
425 | |||
426 | urb->hcpriv = NULL; | ||
427 | ep->error_count = 0; | ||
428 | |||
429 | if (usb_pipecontrol(urb->pipe)) | ||
430 | ep->nextpid = USB_PID_SETUP; | ||
431 | |||
432 | urb_dbg(urb, "Finish"); | ||
433 | |||
434 | spin_unlock(&isp116x->lock); | ||
435 | usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb); | ||
436 | spin_lock(&isp116x->lock); | ||
437 | |||
438 | /* take idle endpoints out of the schedule */ | ||
439 | if (!list_empty(&ep->hep->urb_list)) | ||
440 | return; | ||
441 | |||
442 | /* async deschedule */ | ||
443 | if (!list_empty(&ep->schedule)) { | ||
444 | list_del_init(&ep->schedule); | ||
445 | return; | ||
446 | } | ||
447 | 459 | ||
448 | /* periodic deschedule */ | 460 | done: |
449 | DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); | 461 | if (status != -EINPROGRESS) { |
450 | for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { | 462 | spin_lock(&urb->lock); |
451 | struct isp116x_ep *temp; | 463 | if (urb->status == -EINPROGRESS) |
452 | struct isp116x_ep **prev = &isp116x->periodic[i]; | 464 | urb->status = status; |
453 | 465 | spin_unlock(&urb->lock); | |
454 | while (*prev && ((temp = *prev) != ep)) | 466 | } |
455 | prev = &temp->next; | 467 | if (urb->status != -EINPROGRESS) |
456 | if (*prev) | 468 | finish_request(isp116x, ep, urb); |
457 | *prev = ep->next; | ||
458 | isp116x->load[i] -= ep->load; | ||
459 | } | ||
460 | ep->branch = PERIODIC_SIZE; | ||
461 | isp116x_to_hcd(isp116x)->self.bandwidth_allocated -= | ||
462 | ep->load / ep->period; | ||
463 | |||
464 | /* switch irq type? */ | ||
465 | if (!--isp116x->periodic_count) { | ||
466 | isp116x->irqenb &= ~HCuPINT_SOF; | ||
467 | isp116x->irqenb |= HCuPINT_ATL; | ||
468 | } | 469 | } |
469 | } | 470 | } |
470 | 471 | ||
@@ -570,9 +571,6 @@ static void start_atl_transfers(struct isp116x *isp116x) | |||
570 | */ | 571 | */ |
571 | static void finish_atl_transfers(struct isp116x *isp116x) | 572 | static void finish_atl_transfers(struct isp116x *isp116x) |
572 | { | 573 | { |
573 | struct isp116x_ep *ep; | ||
574 | struct urb *urb; | ||
575 | |||
576 | if (!isp116x->atl_active) | 574 | if (!isp116x->atl_active) |
577 | return; | 575 | return; |
578 | /* Fifo not ready? */ | 576 | /* Fifo not ready? */ |
@@ -582,16 +580,6 @@ static void finish_atl_transfers(struct isp116x *isp116x) | |||
582 | atomic_inc(&isp116x->atl_finishing); | 580 | atomic_inc(&isp116x->atl_finishing); |
583 | unpack_fifo(isp116x); | 581 | unpack_fifo(isp116x); |
584 | postproc_atl_queue(isp116x); | 582 | postproc_atl_queue(isp116x); |
585 | for (ep = isp116x->atl_active; ep; ep = ep->active) { | ||
586 | urb = | ||
587 | container_of(ep->hep->urb_list.next, struct urb, urb_list); | ||
588 | /* USB_PID_ACK check here avoids finishing of | ||
589 | control transfers, for which TD_DATAUNDERRUN | ||
590 | occurred, while URB_SHORT_NOT_OK was set */ | ||
591 | if (urb && urb->status != -EINPROGRESS | ||
592 | && ep->nextpid != USB_PID_ACK) | ||
593 | finish_request(isp116x, ep, urb); | ||
594 | } | ||
595 | atomic_dec(&isp116x->atl_finishing); | 583 | atomic_dec(&isp116x->atl_finishing); |
596 | } | 584 | } |
597 | 585 | ||
@@ -821,15 +809,12 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd, | |||
821 | } | 809 | } |
822 | 810 | ||
823 | /* in case of unlink-during-submit */ | 811 | /* in case of unlink-during-submit */ |
824 | spin_lock(&urb->lock); | ||
825 | if (urb->status != -EINPROGRESS) { | 812 | if (urb->status != -EINPROGRESS) { |
826 | spin_unlock(&urb->lock); | ||
827 | finish_request(isp116x, ep, urb); | 813 | finish_request(isp116x, ep, urb); |
828 | ret = 0; | 814 | ret = 0; |
829 | goto fail; | 815 | goto fail; |
830 | } | 816 | } |
831 | urb->hcpriv = hep; | 817 | urb->hcpriv = hep; |
832 | spin_unlock(&urb->lock); | ||
833 | start_atl_transfers(isp116x); | 818 | start_atl_transfers(isp116x); |
834 | 819 | ||
835 | fail: | 820 | fail: |
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index a7a7070c6e2a..d60f1985320c 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
@@ -35,10 +35,8 @@ | |||
35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
36 | #include <linux/usb.h> | 36 | #include <linux/usb.h> |
37 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
38 | 38 | #include <linux/io.h> | |
39 | #include <asm/io.h> | 39 | #include <linux/irq.h> |
40 | #include <asm/irq.h> | ||
41 | #include <asm/system.h> | ||
42 | 40 | ||
43 | #include "../core/hcd.h" | 41 | #include "../core/hcd.h" |
44 | #include "r8a66597.h" | 42 | #include "r8a66597.h" |
@@ -54,16 +52,21 @@ static const char hcd_name[] = "r8a66597_hcd"; | |||
54 | /* module parameters */ | 52 | /* module parameters */ |
55 | static unsigned short clock = XTAL12; | 53 | static unsigned short clock = XTAL12; |
56 | module_param(clock, ushort, 0644); | 54 | module_param(clock, ushort, 0644); |
57 | MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0(default=0)"); | 55 | MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 " |
56 | "(default=0)"); | ||
57 | |||
58 | static unsigned short vif = LDRV; | 58 | static unsigned short vif = LDRV; |
59 | module_param(vif, ushort, 0644); | 59 | module_param(vif, ushort, 0644); |
60 | MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)"); | 60 | MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)"); |
61 | static unsigned short endian = 0; | 61 | |
62 | static unsigned short endian; | ||
62 | module_param(endian, ushort, 0644); | 63 | module_param(endian, ushort, 0644); |
63 | MODULE_PARM_DESC(endian, "data endian: big=256, little=0(default=0)"); | 64 | MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)"); |
65 | |||
64 | static unsigned short irq_sense = INTL; | 66 | static unsigned short irq_sense = INTL; |
65 | module_param(irq_sense, ushort, 0644); | 67 | module_param(irq_sense, ushort, 0644); |
66 | MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0(default=32)"); | 68 | MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 " |
69 | "(default=32)"); | ||
67 | 70 | ||
68 | static void packet_write(struct r8a66597 *r8a66597, u16 pipenum); | 71 | static void packet_write(struct r8a66597 *r8a66597, u16 pipenum); |
69 | static int r8a66597_get_frame(struct usb_hcd *hcd); | 72 | static int r8a66597_get_frame(struct usb_hcd *hcd); |
@@ -308,7 +311,7 @@ static int make_r8a66597_device(struct r8a66597 *r8a66597, | |||
308 | struct r8a66597_device *dev; | 311 | struct r8a66597_device *dev; |
309 | int usb_address = urb->setup_packet[2]; /* urb->pipe is address 0 */ | 312 | int usb_address = urb->setup_packet[2]; /* urb->pipe is address 0 */ |
310 | 313 | ||
311 | dev = kzalloc(sizeof(struct r8a66597_device), GFP_KERNEL); | 314 | dev = kzalloc(sizeof(struct r8a66597_device), GFP_ATOMIC); |
312 | if (dev == NULL) | 315 | if (dev == NULL) |
313 | return -ENOMEM; | 316 | return -ENOMEM; |
314 | 317 | ||
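The kzalloc() in make_r8a66597_device() switches from GFP_KERNEL to GFP_ATOMIC: the function is reached from the URB submission/interrupt path, where the caller may hold a spinlock and must not sleep (the exact calling context is an assumption here, but it is the usual reason for this change). A generic sketch of the rule:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct example_dev {
		int address;
	};

	static struct example_dev *example_alloc_in_atomic(spinlock_t *lock)
	{
		struct example_dev *dev;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		/* GFP_KERNEL could sleep here; GFP_ATOMIC never does */
		dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
		spin_unlock_irqrestore(lock, flags);

		return dev;	/* may be NULL; the caller must cope */
	}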
@@ -611,33 +614,33 @@ static u16 get_empty_pipenum(struct r8a66597 *r8a66597, | |||
611 | u16 array[R8A66597_MAX_NUM_PIPE], i = 0, min; | 614 | u16 array[R8A66597_MAX_NUM_PIPE], i = 0, min; |
612 | 615 | ||
613 | memset(array, 0, sizeof(array)); | 616 | memset(array, 0, sizeof(array)); |
614 | switch(ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { | 617 | switch (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { |
615 | case USB_ENDPOINT_XFER_BULK: | 618 | case USB_ENDPOINT_XFER_BULK: |
616 | if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) | 619 | if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) |
617 | array[i++] = 4; | 620 | array[i++] = 4; |
618 | else { | 621 | else { |
619 | array[i++] = 3; | 622 | array[i++] = 3; |
620 | array[i++] = 5; | 623 | array[i++] = 5; |
621 | } | 624 | } |
622 | break; | 625 | break; |
623 | case USB_ENDPOINT_XFER_INT: | 626 | case USB_ENDPOINT_XFER_INT: |
624 | if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) { | 627 | if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) { |
625 | array[i++] = 6; | 628 | array[i++] = 6; |
626 | array[i++] = 7; | 629 | array[i++] = 7; |
627 | array[i++] = 8; | 630 | array[i++] = 8; |
628 | } else | 631 | } else |
629 | array[i++] = 9; | 632 | array[i++] = 9; |
630 | break; | 633 | break; |
631 | case USB_ENDPOINT_XFER_ISOC: | 634 | case USB_ENDPOINT_XFER_ISOC: |
632 | if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) | 635 | if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) |
633 | array[i++] = 2; | 636 | array[i++] = 2; |
634 | else | 637 | else |
635 | array[i++] = 1; | 638 | array[i++] = 1; |
636 | break; | 639 | break; |
637 | default: | 640 | default: |
638 | err("Illegal type"); | 641 | err("Illegal type"); |
639 | return 0; | 642 | return 0; |
640 | } | 643 | } |
641 | 644 | ||
642 | i = 1; | 645 | i = 1; |
643 | min = array[0]; | 646 | min = array[0]; |
@@ -654,7 +657,7 @@ static u16 get_r8a66597_type(__u8 type) | |||
654 | { | 657 | { |
655 | u16 r8a66597_type; | 658 | u16 r8a66597_type; |
656 | 659 | ||
657 | switch(type) { | 660 | switch (type) { |
658 | case USB_ENDPOINT_XFER_BULK: | 661 | case USB_ENDPOINT_XFER_BULK: |
659 | r8a66597_type = R8A66597_BULK; | 662 | r8a66597_type = R8A66597_BULK; |
660 | break; | 663 | break; |
@@ -874,7 +877,7 @@ static void r8a66597_usb_preconnect(struct r8a66597 *r8a66597, int port) | |||
874 | { | 877 | { |
875 | r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION) | 878 | r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION) |
876 | | (1 << USB_PORT_FEAT_C_CONNECTION); | 879 | | (1 << USB_PORT_FEAT_C_CONNECTION); |
877 | r8a66597_write(r8a66597, (u16)~DTCH, get_intsts_reg(port)); | 880 | r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); |
878 | r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); | 881 | r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); |
879 | } | 882 | } |
880 | 883 | ||
@@ -917,7 +920,7 @@ static void prepare_setup_packet(struct r8a66597 *r8a66597, | |||
917 | 920 | ||
918 | r8a66597_write(r8a66597, make_devsel(td->address) | td->maxpacket, | 921 | r8a66597_write(r8a66597, make_devsel(td->address) | td->maxpacket, |
919 | DCPMAXP); | 922 | DCPMAXP); |
920 | r8a66597_write(r8a66597, (u16)~(SIGN | SACK), INTSTS1); | 923 | r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1); |
921 | 924 | ||
922 | for (i = 0; i < 4; i++) { | 925 | for (i = 0; i < 4; i++) { |
923 | r8a66597_write(r8a66597, p[i], setup_addr); | 926 | r8a66597_write(r8a66597, p[i], setup_addr); |
@@ -948,19 +951,18 @@ static void prepare_packet_read(struct r8a66597 *r8a66597, | |||
948 | pipe_irq_disable(r8a66597, td->pipenum); | 951 | pipe_irq_disable(r8a66597, td->pipenum); |
949 | pipe_setting(r8a66597, td); | 952 | pipe_setting(r8a66597, td); |
950 | pipe_stop(r8a66597, td->pipe); | 953 | pipe_stop(r8a66597, td->pipe); |
951 | r8a66597_write(r8a66597, (u16)~(1 << td->pipenum), | 954 | r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS); |
952 | BRDYSTS); | ||
953 | 955 | ||
954 | if (td->pipe->pipetre) { | 956 | if (td->pipe->pipetre) { |
955 | r8a66597_write(r8a66597, TRCLR, | 957 | r8a66597_write(r8a66597, TRCLR, |
956 | td->pipe->pipetre); | 958 | td->pipe->pipetre); |
957 | r8a66597_write(r8a66597, | 959 | r8a66597_write(r8a66597, |
958 | (urb->transfer_buffer_length | 960 | (urb->transfer_buffer_length |
959 | + td->maxpacket - 1) | 961 | + td->maxpacket - 1) |
960 | / td->maxpacket, | 962 | / td->maxpacket, |
961 | td->pipe->pipetrn); | 963 | td->pipe->pipetrn); |
962 | r8a66597_bset(r8a66597, TRENB, | 964 | r8a66597_bset(r8a66597, TRENB, |
963 | td->pipe->pipetre); | 965 | td->pipe->pipetre); |
964 | } | 966 | } |
965 | 967 | ||
966 | pipe_start(r8a66597, td->pipe); | 968 | pipe_start(r8a66597, td->pipe); |
@@ -991,7 +993,7 @@ static void prepare_packet_write(struct r8a66597 *r8a66597, | |||
991 | if (td->pipe->pipetre) | 993 | if (td->pipe->pipetre) |
992 | r8a66597_bclr(r8a66597, TRENB, td->pipe->pipetre); | 994 | r8a66597_bclr(r8a66597, TRENB, td->pipe->pipetre); |
993 | } | 995 | } |
994 | r8a66597_write(r8a66597, (u16)~(1 << td->pipenum), BRDYSTS); | 996 | r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS); |
995 | 997 | ||
996 | fifo_change_from_pipe(r8a66597, td->pipe); | 998 | fifo_change_from_pipe(r8a66597, td->pipe); |
997 | tmp = r8a66597_read(r8a66597, td->pipe->fifoctr); | 999 | tmp = r8a66597_read(r8a66597, td->pipe->fifoctr); |
@@ -1009,21 +1011,21 @@ static void prepare_status_packet(struct r8a66597 *r8a66597, | |||
1009 | struct urb *urb = td->urb; | 1011 | struct urb *urb = td->urb; |
1010 | 1012 | ||
1011 | r8a66597_pipe_toggle(r8a66597, td->pipe, 1); | 1013 | r8a66597_pipe_toggle(r8a66597, td->pipe, 1); |
1014 | pipe_stop(r8a66597, td->pipe); | ||
1012 | 1015 | ||
1013 | if (urb->setup_packet[0] & USB_ENDPOINT_DIR_MASK) { | 1016 | if (urb->setup_packet[0] & USB_ENDPOINT_DIR_MASK) { |
1014 | r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG); | 1017 | r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG); |
1015 | r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL); | 1018 | r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL); |
1016 | r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0); | 1019 | r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0); |
1017 | r8a66597_write(r8a66597, BVAL | BCLR, CFIFOCTR); | 1020 | r8a66597_write(r8a66597, ~BEMP0, BEMPSTS); |
1018 | r8a66597_write(r8a66597, (u16)~BEMP0, BEMPSTS); | 1021 | r8a66597_write(r8a66597, BCLR, CFIFOCTR); |
1022 | r8a66597_write(r8a66597, BVAL, CFIFOCTR); | ||
1019 | enable_irq_empty(r8a66597, 0); | 1023 | enable_irq_empty(r8a66597, 0); |
1020 | } else { | 1024 | } else { |
1021 | r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG); | 1025 | r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG); |
1022 | r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL); | 1026 | r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL); |
1023 | r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0); | 1027 | r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0); |
1024 | r8a66597_write(r8a66597, BCLR, CFIFOCTR); | 1028 | r8a66597_write(r8a66597, BCLR, CFIFOCTR); |
1025 | r8a66597_write(r8a66597, (u16)~BRDY0, BRDYSTS); | ||
1026 | r8a66597_write(r8a66597, (u16)~BEMP0, BEMPSTS); | ||
1027 | enable_irq_ready(r8a66597, 0); | 1029 | enable_irq_ready(r8a66597, 0); |
1028 | } | 1030 | } |
1029 | enable_irq_nrdy(r8a66597, 0); | 1031 | enable_irq_nrdy(r8a66597, 0); |
@@ -1269,7 +1271,7 @@ static void packet_write(struct r8a66597 *r8a66597, u16 pipenum) | |||
1269 | 1271 | ||
1270 | /* write fifo */ | 1272 | /* write fifo */ |
1271 | if (pipenum > 0) | 1273 | if (pipenum > 0) |
1272 | r8a66597_write(r8a66597, (u16)~(1 << pipenum), BEMPSTS); | 1274 | r8a66597_write(r8a66597, ~(1 << pipenum), BEMPSTS); |
1273 | if (urb->transfer_buffer) { | 1275 | if (urb->transfer_buffer) { |
1274 | r8a66597_write_fifo(r8a66597, td->pipe->fifoaddr, buf, size); | 1276 | r8a66597_write_fifo(r8a66597, td->pipe->fifoaddr, buf, size); |
1275 | if (!usb_pipebulk(urb->pipe) || td->maxpacket != size) | 1277 | if (!usb_pipebulk(urb->pipe) || td->maxpacket != size) |
@@ -1362,7 +1364,7 @@ static void irq_pipe_ready(struct r8a66597 *r8a66597) | |||
1362 | 1364 | ||
1363 | mask = r8a66597_read(r8a66597, BRDYSTS) | 1365 | mask = r8a66597_read(r8a66597, BRDYSTS) |
1364 | & r8a66597_read(r8a66597, BRDYENB); | 1366 | & r8a66597_read(r8a66597, BRDYENB); |
1365 | r8a66597_write(r8a66597, (u16)~mask, BRDYSTS); | 1367 | r8a66597_write(r8a66597, ~mask, BRDYSTS); |
1366 | if (mask & BRDY0) { | 1368 | if (mask & BRDY0) { |
1367 | td = r8a66597_get_td(r8a66597, 0); | 1369 | td = r8a66597_get_td(r8a66597, 0); |
1368 | if (td && td->type == USB_PID_IN) | 1370 | if (td && td->type == USB_PID_IN) |
@@ -1397,7 +1399,7 @@ static void irq_pipe_empty(struct r8a66597 *r8a66597) | |||
1397 | 1399 | ||
1398 | mask = r8a66597_read(r8a66597, BEMPSTS) | 1400 | mask = r8a66597_read(r8a66597, BEMPSTS) |
1399 | & r8a66597_read(r8a66597, BEMPENB); | 1401 | & r8a66597_read(r8a66597, BEMPENB); |
1400 | r8a66597_write(r8a66597, (u16)~mask, BEMPSTS); | 1402 | r8a66597_write(r8a66597, ~mask, BEMPSTS); |
1401 | if (mask & BEMP0) { | 1403 | if (mask & BEMP0) { |
1402 | cfifo_change(r8a66597, 0); | 1404 | cfifo_change(r8a66597, 0); |
1403 | td = r8a66597_get_td(r8a66597, 0); | 1405 | td = r8a66597_get_td(r8a66597, 0); |
@@ -1434,7 +1436,7 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597) | |||
1434 | 1436 | ||
1435 | mask = r8a66597_read(r8a66597, NRDYSTS) | 1437 | mask = r8a66597_read(r8a66597, NRDYSTS) |
1436 | & r8a66597_read(r8a66597, NRDYENB); | 1438 | & r8a66597_read(r8a66597, NRDYENB); |
1437 | r8a66597_write(r8a66597, (u16)~mask, NRDYSTS); | 1439 | r8a66597_write(r8a66597, ~mask, NRDYSTS); |
1438 | if (mask & NRDY0) { | 1440 | if (mask & NRDY0) { |
1439 | cfifo_change(r8a66597, 0); | 1441 | cfifo_change(r8a66597, 0); |
1440 | set_urb_error(r8a66597, 0); | 1442 | set_urb_error(r8a66597, 0); |
@@ -1488,14 +1490,14 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | |||
1488 | mask0 = intsts0 & intenb0 & (BEMP | NRDY | BRDY); | 1490 | mask0 = intsts0 & intenb0 & (BEMP | NRDY | BRDY); |
1489 | if (mask2) { | 1491 | if (mask2) { |
1490 | if (mask2 & ATTCH) { | 1492 | if (mask2 & ATTCH) { |
1491 | r8a66597_write(r8a66597, (u16)~ATTCH, INTSTS2); | 1493 | r8a66597_write(r8a66597, ~ATTCH, INTSTS2); |
1492 | r8a66597_bclr(r8a66597, ATTCHE, INTENB2); | 1494 | r8a66597_bclr(r8a66597, ATTCHE, INTENB2); |
1493 | 1495 | ||
1494 | /* start usb bus sampling */ | 1496 | /* start usb bus sampling */ |
1495 | start_root_hub_sampling(r8a66597, 1); | 1497 | start_root_hub_sampling(r8a66597, 1); |
1496 | } | 1498 | } |
1497 | if (mask2 & DTCH) { | 1499 | if (mask2 & DTCH) { |
1498 | r8a66597_write(r8a66597, (u16)~DTCH, INTSTS2); | 1500 | r8a66597_write(r8a66597, ~DTCH, INTSTS2); |
1499 | r8a66597_bclr(r8a66597, DTCHE, INTENB2); | 1501 | r8a66597_bclr(r8a66597, DTCHE, INTENB2); |
1500 | r8a66597_usb_disconnect(r8a66597, 1); | 1502 | r8a66597_usb_disconnect(r8a66597, 1); |
1501 | } | 1503 | } |
@@ -1503,24 +1505,24 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | |||
1503 | 1505 | ||
1504 | if (mask1) { | 1506 | if (mask1) { |
1505 | if (mask1 & ATTCH) { | 1507 | if (mask1 & ATTCH) { |
1506 | r8a66597_write(r8a66597, (u16)~ATTCH, INTSTS1); | 1508 | r8a66597_write(r8a66597, ~ATTCH, INTSTS1); |
1507 | r8a66597_bclr(r8a66597, ATTCHE, INTENB1); | 1509 | r8a66597_bclr(r8a66597, ATTCHE, INTENB1); |
1508 | 1510 | ||
1509 | /* start usb bus sampling */ | 1511 | /* start usb bus sampling */ |
1510 | start_root_hub_sampling(r8a66597, 0); | 1512 | start_root_hub_sampling(r8a66597, 0); |
1511 | } | 1513 | } |
1512 | if (mask1 & DTCH) { | 1514 | if (mask1 & DTCH) { |
1513 | r8a66597_write(r8a66597, (u16)~DTCH, INTSTS1); | 1515 | r8a66597_write(r8a66597, ~DTCH, INTSTS1); |
1514 | r8a66597_bclr(r8a66597, DTCHE, INTENB1); | 1516 | r8a66597_bclr(r8a66597, DTCHE, INTENB1); |
1515 | r8a66597_usb_disconnect(r8a66597, 0); | 1517 | r8a66597_usb_disconnect(r8a66597, 0); |
1516 | } | 1518 | } |
1517 | if (mask1 & SIGN) { | 1519 | if (mask1 & SIGN) { |
1518 | r8a66597_write(r8a66597, (u16)~SIGN, INTSTS1); | 1520 | r8a66597_write(r8a66597, ~SIGN, INTSTS1); |
1519 | set_urb_error(r8a66597, 0); | 1521 | set_urb_error(r8a66597, 0); |
1520 | check_next_phase(r8a66597); | 1522 | check_next_phase(r8a66597); |
1521 | } | 1523 | } |
1522 | if (mask1 & SACK) { | 1524 | if (mask1 & SACK) { |
1523 | r8a66597_write(r8a66597, (u16)~SACK, INTSTS1); | 1525 | r8a66597_write(r8a66597, ~SACK, INTSTS1); |
1524 | check_next_phase(r8a66597); | 1526 | check_next_phase(r8a66597); |
1525 | } | 1527 | } |
1526 | } | 1528 | } |
@@ -1663,13 +1665,9 @@ static int check_pipe_config(struct r8a66597 *r8a66597, struct urb *urb) | |||
1663 | static int r8a66597_start(struct usb_hcd *hcd) | 1665 | static int r8a66597_start(struct usb_hcd *hcd) |
1664 | { | 1666 | { |
1665 | struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); | 1667 | struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); |
1666 | int ret; | ||
1667 | 1668 | ||
1668 | hcd->state = HC_STATE_RUNNING; | 1669 | hcd->state = HC_STATE_RUNNING; |
1669 | if ((ret = enable_controller(r8a66597)) < 0) | 1670 | return enable_controller(r8a66597); |
1670 | return ret; | ||
1671 | |||
1672 | return 0; | ||
1673 | } | 1671 | } |
1674 | 1672 | ||
1675 | static void r8a66597_stop(struct usb_hcd *hcd) | 1673 | static void r8a66597_stop(struct usb_hcd *hcd) |
@@ -1696,13 +1694,12 @@ static void set_address_zero(struct r8a66597 *r8a66597, struct urb *urb) | |||
1696 | 1694 | ||
1697 | static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597, | 1695 | static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597, |
1698 | struct urb *urb, | 1696 | struct urb *urb, |
1699 | struct usb_host_endpoint *hep, | 1697 | struct usb_host_endpoint *hep) |
1700 | gfp_t mem_flags) | ||
1701 | { | 1698 | { |
1702 | struct r8a66597_td *td; | 1699 | struct r8a66597_td *td; |
1703 | u16 pipenum; | 1700 | u16 pipenum; |
1704 | 1701 | ||
1705 | td = kzalloc(sizeof(struct r8a66597_td), mem_flags); | 1702 | td = kzalloc(sizeof(struct r8a66597_td), GFP_ATOMIC); |
1706 | if (td == NULL) | 1703 | if (td == NULL) |
1707 | return NULL; | 1704 | return NULL; |
1708 | 1705 | ||
@@ -1741,7 +1738,8 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd, | |||
1741 | } | 1738 | } |
1742 | 1739 | ||
1743 | if (!hep->hcpriv) { | 1740 | if (!hep->hcpriv) { |
1744 | hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe), mem_flags); | 1741 | hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe), |
1742 | GFP_ATOMIC); | ||
1745 | if (!hep->hcpriv) { | 1743 | if (!hep->hcpriv) { |
1746 | ret = -ENOMEM; | 1744 | ret = -ENOMEM; |
1747 | goto error; | 1745 | goto error; |
@@ -1755,7 +1753,7 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd, | |||
1755 | init_pipe_config(r8a66597, urb); | 1753 | init_pipe_config(r8a66597, urb); |
1756 | 1754 | ||
1757 | set_address_zero(r8a66597, urb); | 1755 | set_address_zero(r8a66597, urb); |
1758 | td = r8a66597_make_td(r8a66597, urb, hep, mem_flags); | 1756 | td = r8a66597_make_td(r8a66597, urb, hep); |
1759 | if (td == NULL) { | 1757 | if (td == NULL) { |
1760 | ret = -ENOMEM; | 1758 | ret = -ENOMEM; |
1761 | goto error; | 1759 | goto error; |
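The r8a66597-hcd hunks above drop the mem_flags parameter and allocate the td/pipe bookkeeping with GFP_ATOMIC instead. A minimal sketch of that allocation pattern follows; my_td and my_make_td are illustrative stand-ins, not the driver's real types, and the "cannot sleep here" rationale is an assumption about the enqueue context rather than something stated in the patch.

	/* Sketch only: allocating per-URB state in a path that must not sleep. */
	#include <linux/slab.h>
	#include <linux/usb.h>

	struct my_td {
		struct urb *urb;	/* URB this descriptor belongs to */
		u16 pipenum;		/* hardware pipe it will use */
	};

	static struct my_td *my_make_td(struct urb *urb)
	{
		/* GFP_ATOMIC on the assumption that the caller may hold a
		 * spinlock or run with interrupts disabled, so a sleeping
		 * GFP_KERNEL allocation would be unsafe. */
		struct my_td *td = kzalloc(sizeof(*td), GFP_ATOMIC);

		if (!td)
			return NULL;	/* caller maps this to -ENOMEM */
		td->urb = urb;
		return td;
	}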
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index 97c2a71ac7a1..fe9ceb077d9b 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h | |||
@@ -203,14 +203,14 @@ | |||
203 | #define DTLN 0x0FFF /* b11-0: FIFO received data length */ | 203 | #define DTLN 0x0FFF /* b11-0: FIFO received data length */ |
204 | 204 | ||
205 | /* Interrupt Enable Register 0 */ | 205 | /* Interrupt Enable Register 0 */ |
206 | #define VBSE 0x8000 /* b15: VBUS interrupt */ | 206 | #define VBSE 0x8000 /* b15: VBUS interrupt */ |
207 | #define RSME 0x4000 /* b14: Resume interrupt */ | 207 | #define RSME 0x4000 /* b14: Resume interrupt */ |
208 | #define SOFE 0x2000 /* b13: Frame update interrupt */ | 208 | #define SOFE 0x2000 /* b13: Frame update interrupt */ |
209 | #define DVSE 0x1000 /* b12: Device state transition interrupt */ | 209 | #define DVSE 0x1000 /* b12: Device state transition interrupt */ |
210 | #define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ | 210 | #define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ |
211 | #define BEMPE 0x0400 /* b10: Buffer empty interrupt */ | 211 | #define BEMPE 0x0400 /* b10: Buffer empty interrupt */ |
212 | #define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ | 212 | #define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ |
213 | #define BRDYE 0x0100 /* b8: Buffer ready interrupt */ | 213 | #define BRDYE 0x0100 /* b8: Buffer ready interrupt */ |
214 | 214 | ||
215 | /* Interrupt Enable Register 1 */ | 215 | /* Interrupt Enable Register 1 */ |
216 | #define OVRCRE 0x8000 /* b15: Over-current interrupt */ | 216 | #define OVRCRE 0x8000 /* b15: Over-current interrupt */ |
@@ -268,16 +268,16 @@ | |||
268 | #define SOF_DISABLE 0x0000 /* SOF OUT Disable */ | 268 | #define SOF_DISABLE 0x0000 /* SOF OUT Disable */ |
269 | 269 | ||
270 | /* Interrupt Status Register 0 */ | 270 | /* Interrupt Status Register 0 */ |
271 | #define VBINT 0x8000 /* b15: VBUS interrupt */ | 271 | #define VBINT 0x8000 /* b15: VBUS interrupt */ |
272 | #define RESM 0x4000 /* b14: Resume interrupt */ | 272 | #define RESM 0x4000 /* b14: Resume interrupt */ |
273 | #define SOFR 0x2000 /* b13: SOF frame update interrupt */ | 273 | #define SOFR 0x2000 /* b13: SOF frame update interrupt */ |
274 | #define DVST 0x1000 /* b12: Device state transition interrupt */ | 274 | #define DVST 0x1000 /* b12: Device state transition interrupt */ |
275 | #define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ | 275 | #define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ |
276 | #define BEMP 0x0400 /* b10: Buffer empty interrupt */ | 276 | #define BEMP 0x0400 /* b10: Buffer empty interrupt */ |
277 | #define NRDY 0x0200 /* b9: Buffer not ready interrupt */ | 277 | #define NRDY 0x0200 /* b9: Buffer not ready interrupt */ |
278 | #define BRDY 0x0100 /* b8: Buffer ready interrupt */ | 278 | #define BRDY 0x0100 /* b8: Buffer ready interrupt */ |
279 | #define VBSTS 0x0080 /* b7: VBUS input port */ | 279 | #define VBSTS 0x0080 /* b7: VBUS input port */ |
280 | #define DVSQ 0x0070 /* b6-4: Device state */ | 280 | #define DVSQ 0x0070 /* b6-4: Device state */ |
281 | #define DS_SPD_CNFG 0x0070 /* Suspend Configured */ | 281 | #define DS_SPD_CNFG 0x0070 /* Suspend Configured */ |
282 | #define DS_SPD_ADDR 0x0060 /* Suspend Address */ | 282 | #define DS_SPD_ADDR 0x0060 /* Suspend Address */ |
283 | #define DS_SPD_DFLT 0x0050 /* Suspend Default */ | 283 | #define DS_SPD_DFLT 0x0050 /* Suspend Default */ |
@@ -315,13 +315,10 @@ | |||
315 | /* Micro Frame Number Register */ | 315 | /* Micro Frame Number Register */ |
316 | #define UFRNM 0x0007 /* b2-0: Micro frame number */ | 316 | #define UFRNM 0x0007 /* b2-0: Micro frame number */ |
317 | 317 | ||
318 | /* USB Address / Low Power Status Recovery Register */ | ||
319 | //#define USBADDR 0x007F /* b6-0: USB address */ | ||
320 | |||
321 | /* Default Control Pipe Maxpacket Size Register */ | 318 | /* Default Control Pipe Maxpacket Size Register */ |
322 | /* Pipe Maxpacket Size Register */ | 319 | /* Pipe Maxpacket Size Register */ |
323 | #define DEVSEL 0xF000 /* b15-14: Device address select */ | 320 | #define DEVSEL 0xF000 /* b15-14: Device address select */ |
324 | #define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ | 321 | #define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ |
325 | 322 | ||
326 | /* Default Control Pipe Control Register */ | 323 | /* Default Control Pipe Control Register */ |
327 | #define BSTS 0x8000 /* b15: Buffer status */ | 324 | #define BSTS 0x8000 /* b15: Buffer status */ |
@@ -366,21 +363,21 @@ | |||
366 | #define MXPS 0x07FF /* b10-0: Maxpacket size */ | 363 | #define MXPS 0x07FF /* b10-0: Maxpacket size */ |
367 | 364 | ||
368 | /* Pipe Cycle Configuration Register */ | 365 | /* Pipe Cycle Configuration Register */ |
369 | #define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ | 366 | #define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ |
370 | #define IITV 0x0007 /* b2-0: Isochronous interval */ | 367 | #define IITV 0x0007 /* b2-0: Isochronous interval */ |
371 | 368 | ||
372 | /* Pipex Control Register */ | 369 | /* Pipex Control Register */ |
373 | #define BSTS 0x8000 /* b15: Buffer status */ | 370 | #define BSTS 0x8000 /* b15: Buffer status */ |
374 | #define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ | 371 | #define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ |
375 | #define CSCLR 0x2000 /* b13: complete-split status clear */ | 372 | #define CSCLR 0x2000 /* b13: complete-split status clear */ |
376 | #define CSSTS 0x1000 /* b12: complete-split status */ | 373 | #define CSSTS 0x1000 /* b12: complete-split status */ |
377 | #define ATREPM 0x0400 /* b10: Auto repeat mode */ | 374 | #define ATREPM 0x0400 /* b10: Auto repeat mode */ |
378 | #define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ | 375 | #define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ |
379 | #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ | 376 | #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ |
380 | #define SQSET 0x0080 /* b7: Sequence toggle bit set */ | 377 | #define SQSET 0x0080 /* b7: Sequence toggle bit set */ |
381 | #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ | 378 | #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ |
382 | #define PBUSY 0x0020 /* b5: pipe busy */ | 379 | #define PBUSY 0x0020 /* b5: pipe busy */ |
383 | #define PID 0x0003 /* b1-0: Response PID */ | 380 | #define PID 0x0003 /* b1-0: Response PID */ |
384 | 381 | ||
385 | /* PIPExTRE */ | 382 | /* PIPExTRE */ |
386 | #define TRENB 0x0200 /* b9: Transaction counter enable */ | 383 | #define TRENB 0x0200 /* b9: Transaction counter enable */ |
@@ -407,15 +404,15 @@ | |||
407 | #define make_devsel(addr) (addr << 12) | 404 | #define make_devsel(addr) (addr << 12) |
408 | 405 | ||
409 | struct r8a66597_pipe_info { | 406 | struct r8a66597_pipe_info { |
410 | u16 pipenum; | 407 | u16 pipenum; |
411 | u16 address; /* R8A66597 HCD usb address */ | 408 | u16 address; /* R8A66597 HCD usb address */ |
412 | u16 epnum; | 409 | u16 epnum; |
413 | u16 maxpacket; | 410 | u16 maxpacket; |
414 | u16 type; | 411 | u16 type; |
415 | u16 bufnum; | 412 | u16 bufnum; |
416 | u16 buf_bsize; | 413 | u16 buf_bsize; |
417 | u16 interval; | 414 | u16 interval; |
418 | u16 dir_in; | 415 | u16 dir_in; |
419 | }; | 416 | }; |
420 | 417 | ||
421 | struct r8a66597_pipe { | 418 | struct r8a66597_pipe { |
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index e98df2ee9901..7f765ec038cd 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/workqueue.h> | 52 | #include <linux/workqueue.h> |
53 | #include <linux/platform_device.h> | 53 | #include <linux/platform_device.h> |
54 | #include <linux/pci_ids.h> | 54 | #include <linux/pci_ids.h> |
55 | #include <linux/mutex.h> | ||
55 | #include <asm/io.h> | 56 | #include <asm/io.h> |
56 | #include <asm/irq.h> | 57 | #include <asm/irq.h> |
57 | #include <asm/system.h> | 58 | #include <asm/system.h> |
@@ -83,7 +84,7 @@ static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait); | |||
83 | * u132_module_lock exists to protect access to global variables | 84 | * u132_module_lock exists to protect access to global variables |
84 | * | 85 | * |
85 | */ | 86 | */ |
86 | static struct semaphore u132_module_lock; | 87 | static struct mutex u132_module_lock; |
87 | static int u132_exiting = 0; | 88 | static int u132_exiting = 0; |
88 | static int u132_instances = 0; | 89 | static int u132_instances = 0; |
89 | static struct list_head u132_static_list; | 90 | static struct list_head u132_static_list; |
@@ -258,10 +259,10 @@ static void u132_hcd_delete(struct kref *kref) | |||
258 | struct platform_device *pdev = u132->platform_dev; | 259 | struct platform_device *pdev = u132->platform_dev; |
259 | struct usb_hcd *hcd = u132_to_hcd(u132); | 260 | struct usb_hcd *hcd = u132_to_hcd(u132); |
260 | u132->going += 1; | 261 | u132->going += 1; |
261 | down(&u132_module_lock); | 262 | mutex_lock(&u132_module_lock); |
262 | list_del_init(&u132->u132_list); | 263 | list_del_init(&u132->u132_list); |
263 | u132_instances -= 1; | 264 | u132_instances -= 1; |
264 | up(&u132_module_lock); | 265 | mutex_unlock(&u132_module_lock); |
265 | dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13" | 266 | dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13" |
266 | "2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev); | 267 | "2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev); |
267 | usb_put_hcd(hcd); | 268 | usb_put_hcd(hcd); |
@@ -3111,10 +3112,10 @@ static int __devinit u132_probe(struct platform_device *pdev) | |||
3111 | int retval = 0; | 3112 | int retval = 0; |
3112 | struct u132 *u132 = hcd_to_u132(hcd); | 3113 | struct u132 *u132 = hcd_to_u132(hcd); |
3113 | hcd->rsrc_start = 0; | 3114 | hcd->rsrc_start = 0; |
3114 | down(&u132_module_lock); | 3115 | mutex_lock(&u132_module_lock); |
3115 | list_add_tail(&u132->u132_list, &u132_static_list); | 3116 | list_add_tail(&u132->u132_list, &u132_static_list); |
3116 | u132->sequence_num = ++u132_instances; | 3117 | u132->sequence_num = ++u132_instances; |
3117 | up(&u132_module_lock); | 3118 | mutex_unlock(&u132_module_lock); |
3118 | u132_u132_init_kref(u132); | 3119 | u132_u132_init_kref(u132); |
3119 | u132_initialise(u132, pdev); | 3120 | u132_initialise(u132, pdev); |
3120 | hcd->product_desc = "ELAN U132 Host Controller"; | 3121 | hcd->product_desc = "ELAN U132 Host Controller"; |
@@ -3216,7 +3217,7 @@ static int __init u132_hcd_init(void) | |||
3216 | INIT_LIST_HEAD(&u132_static_list); | 3217 | INIT_LIST_HEAD(&u132_static_list); |
3217 | u132_instances = 0; | 3218 | u132_instances = 0; |
3218 | u132_exiting = 0; | 3219 | u132_exiting = 0; |
3219 | init_MUTEX(&u132_module_lock); | 3220 | mutex_init(&u132_module_lock); |
3220 | if (usb_disabled()) | 3221 | if (usb_disabled()) |
3221 | return -ENODEV; | 3222 | return -ENODEV; |
3222 | printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__, | 3223 | printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__, |
@@ -3232,9 +3233,9 @@ static void __exit u132_hcd_exit(void) | |||
3232 | { | 3233 | { |
3233 | struct u132 *u132; | 3234 | struct u132 *u132; |
3234 | struct u132 *temp; | 3235 | struct u132 *temp; |
3235 | down(&u132_module_lock); | 3236 | mutex_lock(&u132_module_lock); |
3236 | u132_exiting += 1; | 3237 | u132_exiting += 1; |
3237 | up(&u132_module_lock); | 3238 | mutex_unlock(&u132_module_lock); |
3238 | list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) { | 3239 | list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) { |
3239 | platform_device_unregister(u132->platform_dev); | 3240 | platform_device_unregister(u132->platform_dev); |
3240 | } platform_driver_unregister(&u132_platform_driver); | 3241 | } platform_driver_unregister(&u132_platform_driver); |
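The u132-hcd hunks replace the module-wide semaphore with a struct mutex: down()/up() become mutex_lock()/mutex_unlock() and init_MUTEX() becomes mutex_init(). A minimal sketch of the same conversion, assuming a module-scope lock guarding a list and an instance counter; the names are illustrative, and DEFINE_MUTEX is used here for brevity where the patch calls mutex_init() from module init.

	#include <linux/mutex.h>
	#include <linux/list.h>

	static DEFINE_MUTEX(example_module_lock);	/* replaces the old semaphore */
	static LIST_HEAD(example_static_list);
	static int example_instances;

	static void example_register(struct list_head *node)
	{
		mutex_lock(&example_module_lock);	/* was down() */
		list_add_tail(node, &example_static_list);
		example_instances++;
		mutex_unlock(&example_module_lock);	/* was up() */
	}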
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 4aed305982ec..3bb908ca38e9 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c | |||
@@ -827,8 +827,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
827 | * If direction is "send", change the packet ID from SETUP (0x2D) | 827 | * If direction is "send", change the packet ID from SETUP (0x2D) |
828 | * to OUT (0xE1). Else change it from SETUP to IN (0x69) and | 828 | * to OUT (0xE1). Else change it from SETUP to IN (0x69) and |
829 | * set Short Packet Detect (SPD) for all data packets. | 829 | * set Short Packet Detect (SPD) for all data packets. |
830 | * | ||
831 | * 0-length transfers always get treated as "send". | ||
830 | */ | 832 | */ |
831 | if (usb_pipeout(urb->pipe)) | 833 | if (usb_pipeout(urb->pipe) || len == 0) |
832 | destination ^= (USB_PID_SETUP ^ USB_PID_OUT); | 834 | destination ^= (USB_PID_SETUP ^ USB_PID_OUT); |
833 | else { | 835 | else { |
834 | destination ^= (USB_PID_SETUP ^ USB_PID_IN); | 836 | destination ^= (USB_PID_SETUP ^ USB_PID_IN); |
@@ -839,7 +841,12 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
839 | * Build the DATA TDs | 841 | * Build the DATA TDs |
840 | */ | 842 | */ |
841 | while (len > 0) { | 843 | while (len > 0) { |
842 | int pktsze = min(len, maxsze); | 844 | int pktsze = maxsze; |
845 | |||
846 | if (len <= pktsze) { /* The last data packet */ | ||
847 | pktsze = len; | ||
848 | status &= ~TD_CTRL_SPD; | ||
849 | } | ||
843 | 850 | ||
844 | td = uhci_alloc_td(uhci); | 851 | td = uhci_alloc_td(uhci); |
845 | if (!td) | 852 | if (!td) |
@@ -866,20 +873,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
866 | goto nomem; | 873 | goto nomem; |
867 | *plink = LINK_TO_TD(td); | 874 | *plink = LINK_TO_TD(td); |
868 | 875 | ||
869 | /* | 876 | /* Change direction for the status transaction */ |
870 | * It's IN if the pipe is an output pipe or we're not expecting | 877 | destination ^= (USB_PID_IN ^ USB_PID_OUT); |
871 | * data back. | ||
872 | */ | ||
873 | destination &= ~TD_TOKEN_PID_MASK; | ||
874 | if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length) | ||
875 | destination |= USB_PID_IN; | ||
876 | else | ||
877 | destination |= USB_PID_OUT; | ||
878 | |||
879 | destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ | 878 | destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ |
880 | 879 | ||
881 | status &= ~TD_CTRL_SPD; | ||
882 | |||
883 | uhci_add_td_to_urbp(td, urbp); | 880 | uhci_add_td_to_urbp(td, urbp); |
884 | uhci_fill_td(td, status | TD_CTRL_IOC, | 881 | uhci_fill_td(td, status | TD_CTRL_IOC, |
885 | destination | uhci_explen(0), 0); | 882 | destination | uhci_explen(0), 0); |
@@ -1185,10 +1182,18 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) | |||
1185 | } | 1182 | } |
1186 | } | 1183 | } |
1187 | 1184 | ||
1185 | /* Did we receive a short packet? */ | ||
1188 | } else if (len < uhci_expected_length(td_token(td))) { | 1186 | } else if (len < uhci_expected_length(td_token(td))) { |
1189 | 1187 | ||
1190 | /* We received a short packet */ | 1188 | /* For control transfers, go to the status TD if |
1191 | if (urb->transfer_flags & URB_SHORT_NOT_OK) | 1189 | * this isn't already the last data TD */ |
1190 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) { | ||
1191 | if (td->list.next != urbp->td_list.prev) | ||
1192 | ret = 1; | ||
1193 | } | ||
1194 | |||
1195 | /* For bulk and interrupt, this may be an error */ | ||
1196 | else if (urb->transfer_flags & URB_SHORT_NOT_OK) | ||
1192 | ret = -EREMOTEIO; | 1197 | ret = -EREMOTEIO; |
1193 | 1198 | ||
1194 | /* Fixup needed only if this isn't the URB's last TD */ | 1199 | /* Fixup needed only if this isn't the URB's last TD */ |
@@ -1208,10 +1213,6 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) | |||
1208 | 1213 | ||
1209 | err: | 1214 | err: |
1210 | if (ret < 0) { | 1215 | if (ret < 0) { |
1211 | /* In case a control transfer gets an error | ||
1212 | * during the setup stage */ | ||
1213 | urb->actual_length = max(urb->actual_length, 0); | ||
1214 | |||
1215 | /* Note that the queue has stopped and save | 1216 | /* Note that the queue has stopped and save |
1216 | * the next toggle value */ | 1217 | * the next toggle value */ |
1217 | qh->element = UHCI_PTR_TERM; | 1218 | qh->element = UHCI_PTR_TERM; |
@@ -1489,9 +1490,25 @@ __acquires(uhci->lock) | |||
1489 | { | 1490 | { |
1490 | struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; | 1491 | struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; |
1491 | 1492 | ||
1493 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) { | ||
1494 | |||
1495 | /* urb->actual_length < 0 means the setup transaction didn't | ||
1496 | * complete successfully. Either it failed or the URB was | ||
1497 | * unlinked first. Regardless, don't confuse people with a | ||
1498 | * negative length. */ | ||
1499 | urb->actual_length = max(urb->actual_length, 0); | ||
1500 | |||
1501 | /* Report erroneous short transfers */ | ||
1502 | if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && | ||
1503 | urb->actual_length < | ||
1504 | urb->transfer_buffer_length && | ||
1505 | urb->status == 0)) | ||
1506 | urb->status = -EREMOTEIO; | ||
1507 | } | ||
1508 | |||
1492 | /* When giving back the first URB in an Isochronous queue, | 1509 | /* When giving back the first URB in an Isochronous queue, |
1493 | * reinitialize the QH's iso-related members for the next URB. */ | 1510 | * reinitialize the QH's iso-related members for the next URB. */ |
1494 | if (qh->type == USB_ENDPOINT_XFER_ISOC && | 1511 | else if (qh->type == USB_ENDPOINT_XFER_ISOC && |
1495 | urbp->node.prev == &qh->queue && | 1512 | urbp->node.prev == &qh->queue && |
1496 | urbp->node.next != &qh->queue) { | 1513 | urbp->node.next != &qh->queue) { |
1497 | struct urb *nurb = list_entry(urbp->node.next, | 1514 | struct urb *nurb = list_entry(urbp->node.next, |
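The uhci-q hunks keep Short Packet Detect enabled on all data TDs of a control transfer except the final one, where a short packet is expected, and then simply toggle the token direction for the status stage. A sketch of that per-packet loop is below; EX_CTRL_SPD and build_data_stage are assumed names for illustration, not the UHCI driver's real symbols.

	#include <linux/types.h>

	#define EX_CTRL_SPD	0x20000000u	/* assumed flag value, for illustration */

	static void build_data_stage(unsigned int len, unsigned int maxsze, u32 status)
	{
		while (len > 0) {
			unsigned int pktsze = maxsze;

			if (len <= pktsze) {		/* the last data packet */
				pktsze = len;
				status &= ~EX_CTRL_SPD;	/* a short final packet is not an error */
			}
			/* ... allocate and queue one TD of pktsze bytes using status ... */
			len -= pktsze;
		}
	}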
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index 36502a06f73a..d1131a87a5b1 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c | |||
@@ -284,9 +284,9 @@ static void mdc800_usb_irq (struct urb *urb) | |||
284 | int data_received=0, wake_up; | 284 | int data_received=0, wake_up; |
285 | unsigned char* b=urb->transfer_buffer; | 285 | unsigned char* b=urb->transfer_buffer; |
286 | struct mdc800_data* mdc800=urb->context; | 286 | struct mdc800_data* mdc800=urb->context; |
287 | int status = urb->status; | ||
287 | 288 | ||
288 | if (urb->status >= 0) | 289 | if (status >= 0) { |
289 | { | ||
290 | 290 | ||
291 | //dbg ("%i %i %i %i %i %i %i %i \n",b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]); | 291 | //dbg ("%i %i %i %i %i %i %i %i \n",b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]); |
292 | 292 | ||
@@ -324,7 +324,7 @@ static void mdc800_usb_irq (struct urb *urb) | |||
324 | || | 324 | || |
325 | ((mdc800->camera_request_ready == 3) && (mdc800->camera_busy)) | 325 | ((mdc800->camera_request_ready == 3) && (mdc800->camera_busy)) |
326 | || | 326 | || |
327 | (urb->status < 0) | 327 | (status < 0) |
328 | ); | 328 | ); |
329 | 329 | ||
330 | if (wake_up) | 330 | if (wake_up) |
@@ -376,15 +376,12 @@ static int mdc800_usb_waitForIRQ (int mode, int msec) | |||
376 | static void mdc800_usb_write_notify (struct urb *urb) | 376 | static void mdc800_usb_write_notify (struct urb *urb) |
377 | { | 377 | { |
378 | struct mdc800_data* mdc800=urb->context; | 378 | struct mdc800_data* mdc800=urb->context; |
379 | int status = urb->status; | ||
379 | 380 | ||
380 | if (urb->status != 0) | 381 | if (status != 0) |
381 | { | 382 | err ("writing command fails (status=%i)", status); |
382 | err ("writing command fails (status=%i)", urb->status); | ||
383 | } | ||
384 | else | 383 | else |
385 | { | ||
386 | mdc800->state=READY; | 384 | mdc800->state=READY; |
387 | } | ||
388 | mdc800->written = 1; | 385 | mdc800->written = 1; |
389 | wake_up (&mdc800->write_wait); | 386 | wake_up (&mdc800->write_wait); |
390 | } | 387 | } |
@@ -396,9 +393,9 @@ static void mdc800_usb_write_notify (struct urb *urb) | |||
396 | static void mdc800_usb_download_notify (struct urb *urb) | 393 | static void mdc800_usb_download_notify (struct urb *urb) |
397 | { | 394 | { |
398 | struct mdc800_data* mdc800=urb->context; | 395 | struct mdc800_data* mdc800=urb->context; |
396 | int status = urb->status; | ||
399 | 397 | ||
400 | if (urb->status == 0) | 398 | if (status == 0) { |
401 | { | ||
402 | /* Fill output buffer with these data */ | 399 | /* Fill output buffer with these data */ |
403 | memcpy (mdc800->out, urb->transfer_buffer, 64); | 400 | memcpy (mdc800->out, urb->transfer_buffer, 64); |
404 | mdc800->out_count=64; | 401 | mdc800->out_count=64; |
@@ -408,10 +405,8 @@ static void mdc800_usb_download_notify (struct urb *urb) | |||
408 | { | 405 | { |
409 | mdc800->state=READY; | 406 | mdc800->state=READY; |
410 | } | 407 | } |
411 | } | 408 | } else { |
412 | else | 409 | err ("request bytes fails (status:%i)", status); |
413 | { | ||
414 | err ("request bytes fails (status:%i)", urb->status); | ||
415 | } | 410 | } |
416 | mdc800->downloaded = 1; | 411 | mdc800->downloaded = 1; |
417 | wake_up (&mdc800->download_wait); | 412 | wake_up (&mdc800->download_wait); |
@@ -649,9 +644,9 @@ static int mdc800_device_open (struct inode* inode, struct file *file) | |||
649 | 644 | ||
650 | retval=0; | 645 | retval=0; |
651 | mdc800->irq_urb->dev = mdc800->dev; | 646 | mdc800->irq_urb->dev = mdc800->dev; |
652 | if (usb_submit_urb (mdc800->irq_urb, GFP_KERNEL)) | 647 | retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL); |
653 | { | 648 | if (retval) { |
654 | err ("request USB irq fails (submit_retval=%i urb_status=%i).",retval, mdc800->irq_urb->status); | 649 | err ("request USB irq fails (submit_retval=%i).", retval); |
655 | errn = -EIO; | 650 | errn = -EIO; |
656 | goto error_out; | 651 | goto error_out; |
657 | } | 652 | } |
@@ -698,6 +693,7 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l | |||
698 | { | 693 | { |
699 | size_t left=len, sts=len; /* single transfer size */ | 694 | size_t left=len, sts=len; /* single transfer size */ |
700 | char __user *ptr = buf; | 695 | char __user *ptr = buf; |
696 | int retval; | ||
701 | 697 | ||
702 | mutex_lock(&mdc800->io_lock); | 698 | mutex_lock(&mdc800->io_lock); |
703 | if (mdc800->state == NOT_CONNECTED) | 699 | if (mdc800->state == NOT_CONNECTED) |
@@ -737,9 +733,9 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l | |||
737 | 733 | ||
738 | /* Download -> Request new bytes */ | 734 | /* Download -> Request new bytes */ |
739 | mdc800->download_urb->dev = mdc800->dev; | 735 | mdc800->download_urb->dev = mdc800->dev; |
740 | if (usb_submit_urb (mdc800->download_urb, GFP_KERNEL)) | 736 | retval = usb_submit_urb (mdc800->download_urb, GFP_KERNEL); |
741 | { | 737 | if (retval) { |
742 | err ("Can't submit download urb (status=%i)",mdc800->download_urb->status); | 738 | err ("Can't submit download urb (retval=%i)",retval); |
743 | mutex_unlock(&mdc800->io_lock); | 739 | mutex_unlock(&mdc800->io_lock); |
744 | return len-left; | 740 | return len-left; |
745 | } | 741 | } |
@@ -788,6 +784,7 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l | |||
788 | static ssize_t mdc800_device_write (struct file *file, const char __user *buf, size_t len, loff_t *pos) | 784 | static ssize_t mdc800_device_write (struct file *file, const char __user *buf, size_t len, loff_t *pos) |
789 | { | 785 | { |
790 | size_t i=0; | 786 | size_t i=0; |
787 | int retval; | ||
791 | 788 | ||
792 | mutex_lock(&mdc800->io_lock); | 789 | mutex_lock(&mdc800->io_lock); |
793 | if (mdc800->state != READY) | 790 | if (mdc800->state != READY) |
@@ -854,9 +851,9 @@ static ssize_t mdc800_device_write (struct file *file, const char __user *buf, s | |||
854 | mdc800->state=WORKING; | 851 | mdc800->state=WORKING; |
855 | memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8); | 852 | memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8); |
856 | mdc800->write_urb->dev = mdc800->dev; | 853 | mdc800->write_urb->dev = mdc800->dev; |
857 | if (usb_submit_urb (mdc800->write_urb, GFP_KERNEL)) | 854 | retval = usb_submit_urb (mdc800->write_urb, GFP_KERNEL); |
858 | { | 855 | if (retval) { |
859 | err ("submitting write urb fails (status=%i)", mdc800->write_urb->status); | 856 | err ("submitting write urb fails (retval=%i)", retval); |
860 | mutex_unlock(&mdc800->io_lock); | 857 | mutex_unlock(&mdc800->io_lock); |
861 | return -EIO; | 858 | return -EIO; |
862 | } | 859 | } |
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 51bd80d2b8cc..768b2c11a231 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c | |||
@@ -189,7 +189,7 @@ static struct usb_driver mts_usb_driver = { | |||
189 | #define MTS_DEBUG_INT() \ | 189 | #define MTS_DEBUG_INT() \ |
190 | do { MTS_DEBUG_GOT_HERE(); \ | 190 | do { MTS_DEBUG_GOT_HERE(); \ |
191 | MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ | 191 | MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ |
192 | MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",(int)transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ | 192 | MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ |
193 | mts_debug_dump(context->instance);\ | 193 | mts_debug_dump(context->instance);\ |
194 | } while(0) | 194 | } while(0) |
195 | #else | 195 | #else |
@@ -393,8 +393,6 @@ void mts_int_submit_urb (struct urb* transfer, | |||
393 | context | 393 | context |
394 | ); | 394 | ); |
395 | 395 | ||
396 | transfer->status = 0; | ||
397 | |||
398 | res = usb_submit_urb( transfer, GFP_ATOMIC ); | 396 | res = usb_submit_urb( transfer, GFP_ATOMIC ); |
399 | if ( unlikely(res) ) { | 397 | if ( unlikely(res) ) { |
400 | MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res ); | 398 | MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res ); |
@@ -444,12 +442,13 @@ static void mts_get_status( struct urb *transfer ) | |||
444 | static void mts_data_done( struct urb* transfer ) | 442 | static void mts_data_done( struct urb* transfer ) |
445 | /* Interrupt context! */ | 443 | /* Interrupt context! */ |
446 | { | 444 | { |
445 | int status = transfer->status; | ||
447 | MTS_INT_INIT(); | 446 | MTS_INT_INIT(); |
448 | 447 | ||
449 | if ( context->data_length != transfer->actual_length ) { | 448 | if ( context->data_length != transfer->actual_length ) { |
450 | context->srb->resid = context->data_length - transfer->actual_length; | 449 | context->srb->resid = context->data_length - transfer->actual_length; |
451 | } else if ( unlikely(transfer->status) ) { | 450 | } else if ( unlikely(status) ) { |
452 | context->srb->result = (transfer->status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; | 451 | context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; |
453 | } | 452 | } |
454 | 453 | ||
455 | mts_get_status(transfer); | 454 | mts_get_status(transfer); |
@@ -461,10 +460,11 @@ static void mts_data_done( struct urb* transfer ) | |||
461 | static void mts_command_done( struct urb *transfer ) | 460 | static void mts_command_done( struct urb *transfer ) |
462 | /* Interrupt context! */ | 461 | /* Interrupt context! */ |
463 | { | 462 | { |
463 | int status = transfer->status; | ||
464 | MTS_INT_INIT(); | 464 | MTS_INT_INIT(); |
465 | 465 | ||
466 | if ( unlikely(transfer->status) ) { | 466 | if ( unlikely(status) ) { |
467 | if (transfer->status == -ENOENT) { | 467 | if (status == -ENOENT) { |
468 | /* We are being killed */ | 468 | /* We are being killed */ |
469 | MTS_DEBUG_GOT_HERE(); | 469 | MTS_DEBUG_GOT_HERE(); |
470 | context->srb->result = DID_ABORT<<16; | 470 | context->srb->result = DID_ABORT<<16; |
@@ -502,12 +502,13 @@ static void mts_command_done( struct urb *transfer ) | |||
502 | static void mts_do_sg (struct urb* transfer) | 502 | static void mts_do_sg (struct urb* transfer) |
503 | { | 503 | { |
504 | struct scatterlist * sg; | 504 | struct scatterlist * sg; |
505 | int status = transfer->status; | ||
505 | MTS_INT_INIT(); | 506 | MTS_INT_INIT(); |
506 | 507 | ||
507 | MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,context->srb->use_sg); | 508 | MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,context->srb->use_sg); |
508 | 509 | ||
509 | if (unlikely(transfer->status)) { | 510 | if (unlikely(status)) { |
510 | context->srb->result = (transfer->status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; | 511 | context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; |
511 | mts_transfer_cleanup(transfer); | 512 | mts_transfer_cleanup(transfer); |
512 | } | 513 | } |
513 | 514 | ||
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index d72c42e5f22d..e9fdbc8997b3 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/usb.h> | 26 | #include <linux/usb.h> |
27 | #include <linux/mutex.h> | ||
27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
28 | 29 | ||
29 | #ifdef CONFIG_USB_DEBUG | 30 | #ifdef CONFIG_USB_DEBUG |
@@ -80,7 +81,7 @@ MODULE_DEVICE_TABLE(usb, device_table); | |||
80 | 81 | ||
81 | /* Structure to hold all of our device specific stuff */ | 82 | /* Structure to hold all of our device specific stuff */ |
82 | struct adu_device { | 83 | struct adu_device { |
83 | struct semaphore sem; /* locks this structure */ | 84 | struct mutex mtx; /* locks this structure */ |
84 | struct usb_device* udev; /* save off the usb device pointer */ | 85 | struct usb_device* udev; /* save off the usb device pointer */ |
85 | struct usb_interface* interface; | 86 | struct usb_interface* interface; |
86 | unsigned char minor; /* the starting minor number for this device */ | 87 | unsigned char minor; /* the starting minor number for this device */ |
@@ -178,17 +179,18 @@ static void adu_delete(struct adu_device *dev) | |||
178 | static void adu_interrupt_in_callback(struct urb *urb) | 179 | static void adu_interrupt_in_callback(struct urb *urb) |
179 | { | 180 | { |
180 | struct adu_device *dev = urb->context; | 181 | struct adu_device *dev = urb->context; |
182 | int status = urb->status; | ||
181 | 183 | ||
182 | dbg(4," %s : enter, status %d", __FUNCTION__, urb->status); | 184 | dbg(4," %s : enter, status %d", __FUNCTION__, status); |
183 | adu_debug_data(5, __FUNCTION__, urb->actual_length, | 185 | adu_debug_data(5, __FUNCTION__, urb->actual_length, |
184 | urb->transfer_buffer); | 186 | urb->transfer_buffer); |
185 | 187 | ||
186 | spin_lock(&dev->buflock); | 188 | spin_lock(&dev->buflock); |
187 | 189 | ||
188 | if (urb->status != 0) { | 190 | if (status != 0) { |
189 | if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET)) { | 191 | if ((status != -ENOENT) && (status != -ECONNRESET)) { |
190 | dbg(1," %s : nonzero status received: %d", | 192 | dbg(1," %s : nonzero status received: %d", |
191 | __FUNCTION__, urb->status); | 193 | __FUNCTION__, status); |
192 | } | 194 | } |
193 | goto exit; | 195 | goto exit; |
194 | } | 196 | } |
@@ -216,21 +218,22 @@ exit: | |||
216 | wake_up_interruptible(&dev->read_wait); | 218 | wake_up_interruptible(&dev->read_wait); |
217 | adu_debug_data(5, __FUNCTION__, urb->actual_length, | 219 | adu_debug_data(5, __FUNCTION__, urb->actual_length, |
218 | urb->transfer_buffer); | 220 | urb->transfer_buffer); |
219 | dbg(4," %s : leave, status %d", __FUNCTION__, urb->status); | 221 | dbg(4," %s : leave, status %d", __FUNCTION__, status); |
220 | } | 222 | } |
221 | 223 | ||
222 | static void adu_interrupt_out_callback(struct urb *urb) | 224 | static void adu_interrupt_out_callback(struct urb *urb) |
223 | { | 225 | { |
224 | struct adu_device *dev = urb->context; | 226 | struct adu_device *dev = urb->context; |
227 | int status = urb->status; | ||
225 | 228 | ||
226 | dbg(4," %s : enter, status %d", __FUNCTION__, urb->status); | 229 | dbg(4," %s : enter, status %d", __FUNCTION__, status); |
227 | adu_debug_data(5,__FUNCTION__, urb->actual_length, urb->transfer_buffer); | 230 | adu_debug_data(5,__FUNCTION__, urb->actual_length, urb->transfer_buffer); |
228 | 231 | ||
229 | if (urb->status != 0) { | 232 | if (status != 0) { |
230 | if ((urb->status != -ENOENT) && | 233 | if ((status != -ENOENT) && |
231 | (urb->status != -ECONNRESET)) { | 234 | (status != -ECONNRESET)) { |
232 | dbg(1, " %s :nonzero status received: %d", | 235 | dbg(1, " %s :nonzero status received: %d", |
233 | __FUNCTION__, urb->status); | 236 | __FUNCTION__, status); |
234 | } | 237 | } |
235 | goto exit; | 238 | goto exit; |
236 | } | 239 | } |
@@ -240,7 +243,7 @@ exit: | |||
240 | 243 | ||
241 | adu_debug_data(5, __FUNCTION__, urb->actual_length, | 244 | adu_debug_data(5, __FUNCTION__, urb->actual_length, |
242 | urb->transfer_buffer); | 245 | urb->transfer_buffer); |
243 | dbg(4," %s : leave, status %d", __FUNCTION__, urb->status); | 246 | dbg(4," %s : leave, status %d", __FUNCTION__, status); |
244 | } | 247 | } |
245 | 248 | ||
246 | static int adu_open(struct inode *inode, struct file *file) | 249 | static int adu_open(struct inode *inode, struct file *file) |
@@ -269,8 +272,8 @@ static int adu_open(struct inode *inode, struct file *file) | |||
269 | } | 272 | } |
270 | 273 | ||
271 | /* lock this device */ | 274 | /* lock this device */ |
272 | if ((retval = down_interruptible(&dev->sem))) { | 275 | if ((retval = mutex_lock_interruptible(&dev->mtx))) { |
273 | dbg(2, "%s : sem down failed", __FUNCTION__); | 276 | dbg(2, "%s : mutex lock failed", __FUNCTION__); |
274 | goto exit_no_device; | 277 | goto exit_no_device; |
275 | } | 278 | } |
276 | 279 | ||
@@ -299,7 +302,7 @@ static int adu_open(struct inode *inode, struct file *file) | |||
299 | if (retval) | 302 | if (retval) |
300 | --dev->open_count; | 303 | --dev->open_count; |
301 | } | 304 | } |
302 | up(&dev->sem); | 305 | mutex_unlock(&dev->mtx); |
303 | 306 | ||
304 | exit_no_device: | 307 | exit_no_device: |
305 | dbg(2,"%s : leave, return value %d ", __FUNCTION__, retval); | 308 | dbg(2,"%s : leave, return value %d ", __FUNCTION__, retval); |
@@ -347,7 +350,7 @@ static int adu_release(struct inode *inode, struct file *file) | |||
347 | } | 350 | } |
348 | 351 | ||
349 | /* lock our device */ | 352 | /* lock our device */ |
350 | down(&dev->sem); /* not interruptible */ | 353 | mutex_lock(&dev->mtx); /* not interruptible */ |
351 | 354 | ||
352 | if (dev->open_count <= 0) { | 355 | if (dev->open_count <= 0) { |
353 | dbg(1," %s : device not opened", __FUNCTION__); | 356 | dbg(1," %s : device not opened", __FUNCTION__); |
@@ -357,7 +360,7 @@ static int adu_release(struct inode *inode, struct file *file) | |||
357 | 360 | ||
358 | if (dev->udev == NULL) { | 361 | if (dev->udev == NULL) { |
359 | /* the device was unplugged before the file was released */ | 362 | /* the device was unplugged before the file was released */ |
360 | up(&dev->sem); | 363 | mutex_unlock(&dev->mtx); |
361 | adu_delete(dev); | 364 | adu_delete(dev); |
362 | dev = NULL; | 365 | dev = NULL; |
363 | } else { | 366 | } else { |
@@ -367,7 +370,7 @@ static int adu_release(struct inode *inode, struct file *file) | |||
367 | 370 | ||
368 | exit: | 371 | exit: |
369 | if (dev) | 372 | if (dev) |
370 | up(&dev->sem); | 373 | mutex_unlock(&dev->mtx); |
371 | dbg(2," %s : leave, return value %d", __FUNCTION__, retval); | 374 | dbg(2," %s : leave, return value %d", __FUNCTION__, retval); |
372 | return retval; | 375 | return retval; |
373 | } | 376 | } |
@@ -390,7 +393,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, | |||
390 | dev = file->private_data; | 393 | dev = file->private_data; |
391 | dbg(2," %s : dev=%p", __FUNCTION__, dev); | 394 | dbg(2," %s : dev=%p", __FUNCTION__, dev); |
392 | /* lock this object */ | 395 | /* lock this object */ |
393 | if (down_interruptible(&dev->sem)) | 396 | if (mutex_lock_interruptible(&dev->mtx)) |
394 | return -ERESTARTSYS; | 397 | return -ERESTARTSYS; |
395 | 398 | ||
396 | /* verify that the device wasn't unplugged */ | 399 | /* verify that the device wasn't unplugged */ |
@@ -522,7 +525,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, | |||
522 | 525 | ||
523 | exit: | 526 | exit: |
524 | /* unlock the device */ | 527 | /* unlock the device */ |
525 | up(&dev->sem); | 528 | mutex_unlock(&dev->mtx); |
526 | 529 | ||
527 | dbg(2," %s : leave, return value %d", __FUNCTION__, retval); | 530 | dbg(2," %s : leave, return value %d", __FUNCTION__, retval); |
528 | return retval; | 531 | return retval; |
@@ -543,7 +546,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer, | |||
543 | dev = file->private_data; | 546 | dev = file->private_data; |
544 | 547 | ||
545 | /* lock this object */ | 548 | /* lock this object */ |
546 | retval = down_interruptible(&dev->sem); | 549 | retval = mutex_lock_interruptible(&dev->mtx); |
547 | if (retval) | 550 | if (retval) |
548 | goto exit_nolock; | 551 | goto exit_nolock; |
549 | 552 | ||
@@ -571,9 +574,9 @@ static ssize_t adu_write(struct file *file, const __user char *buffer, | |||
571 | retval = -EINTR; | 574 | retval = -EINTR; |
572 | goto exit; | 575 | goto exit; |
573 | } | 576 | } |
574 | up(&dev->sem); | 577 | mutex_unlock(&dev->mtx); |
575 | timeout = interruptible_sleep_on_timeout(&dev->write_wait, timeout); | 578 | timeout = interruptible_sleep_on_timeout(&dev->write_wait, timeout); |
576 | retval = down_interruptible(&dev->sem); | 579 | retval = mutex_lock_interruptible(&dev->mtx); |
577 | if (retval) { | 580 | if (retval) { |
578 | retval = bytes_written ? bytes_written : retval; | 581 | retval = bytes_written ? bytes_written : retval; |
579 | goto exit_nolock; | 582 | goto exit_nolock; |
@@ -638,7 +641,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer, | |||
638 | 641 | ||
639 | exit: | 642 | exit: |
640 | /* unlock the device */ | 643 | /* unlock the device */ |
641 | up(&dev->sem); | 644 | mutex_unlock(&dev->mtx); |
642 | exit_nolock: | 645 | exit_nolock: |
643 | 646 | ||
644 | dbg(2," %s : leave, return value %d", __FUNCTION__, retval); | 647 | dbg(2," %s : leave, return value %d", __FUNCTION__, retval); |
@@ -698,7 +701,7 @@ static int adu_probe(struct usb_interface *interface, | |||
698 | goto exit; | 701 | goto exit; |
699 | } | 702 | } |
700 | 703 | ||
701 | init_MUTEX(&dev->sem); | 704 | mutex_init(&dev->mtx); |
702 | spin_lock_init(&dev->buflock); | 705 | spin_lock_init(&dev->buflock); |
703 | dev->udev = udev; | 706 | dev->udev = udev; |
704 | init_waitqueue_head(&dev->read_wait); | 707 | init_waitqueue_head(&dev->read_wait); |
@@ -835,16 +838,16 @@ static void adu_disconnect(struct usb_interface *interface) | |||
835 | usb_deregister_dev(interface, &adu_class); | 838 | usb_deregister_dev(interface, &adu_class); |
836 | dev->minor = 0; | 839 | dev->minor = 0; |
837 | 840 | ||
838 | down(&dev->sem); /* not interruptible */ | 841 | mutex_lock(&dev->mtx); /* not interruptible */ |
839 | 842 | ||
840 | /* if the device is not opened, then we clean up right now */ | 843 | /* if the device is not opened, then we clean up right now */ |
841 | dbg(2," %s : open count %d", __FUNCTION__, dev->open_count); | 844 | dbg(2," %s : open count %d", __FUNCTION__, dev->open_count); |
842 | if (!dev->open_count) { | 845 | if (!dev->open_count) { |
843 | up(&dev->sem); | 846 | mutex_unlock(&dev->mtx); |
844 | adu_delete(dev); | 847 | adu_delete(dev); |
845 | } else { | 848 | } else { |
846 | dev->udev = NULL; | 849 | dev->udev = NULL; |
847 | up(&dev->sem); | 850 | mutex_unlock(&dev->mtx); |
848 | } | 851 | } |
849 | 852 | ||
850 | dev_info(&interface->dev, "ADU device adutux%d now disconnected", | 853 | dev_info(&interface->dev, "ADU device adutux%d now disconnected", |
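The adutux hunks likewise convert the per-device semaphore to a mutex, using the interruptible variant in the file-operation paths so a blocked open/read/write can be broken by a signal. A sketch of that usage with placeholder struct and function names:

	#include <linux/mutex.h>
	#include <linux/errno.h>

	struct example_dev {
		struct mutex mtx;	/* initialized with mutex_init() at probe time */
		int open_count;
	};

	static int example_open(struct example_dev *dev)
	{
		if (mutex_lock_interruptible(&dev->mtx))
			return -ERESTARTSYS;	/* interrupted by a signal */
		dev->open_count++;
		mutex_unlock(&dev->mtx);
		return 0;
	}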
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index cf70c16f0e3f..b09c83568c1a 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c | |||
@@ -88,9 +88,10 @@ static void appledisplay_complete(struct urb *urb) | |||
88 | { | 88 | { |
89 | struct appledisplay *pdata = urb->context; | 89 | struct appledisplay *pdata = urb->context; |
90 | unsigned long flags; | 90 | unsigned long flags; |
91 | int status = urb->status; | ||
91 | int retval; | 92 | int retval; |
92 | 93 | ||
93 | switch (urb->status) { | 94 | switch (status) { |
94 | case 0: | 95 | case 0: |
95 | /* success */ | 96 | /* success */ |
96 | break; | 97 | break; |
@@ -102,12 +103,12 @@ static void appledisplay_complete(struct urb *urb) | |||
102 | case -ENOENT: | 103 | case -ENOENT: |
103 | case -ESHUTDOWN: | 104 | case -ESHUTDOWN: |
104 | /* This urb is terminated, clean up */ | 105 | /* This urb is terminated, clean up */ |
105 | dbg("%s - urb shutting down with status: %d", | 106 | dbg("%s - urb shutting down with status: %d", |
106 | __FUNCTION__, urb->status); | 107 | __FUNCTION__, status); |
107 | return; | 108 | return; |
108 | default: | 109 | default: |
109 | dbg("%s - nonzero urb status received: %d", | 110 | dbg("%s - nonzero urb status received: %d", |
110 | __FUNCTION__, urb->status); | 111 | __FUNCTION__, status); |
111 | goto exit; | 112 | goto exit; |
112 | } | 113 | } |
113 | 114 | ||
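The appledisplay, auerswald and ldusb hunks share the same completion-handler shape: snapshot urb->status, treat the unlink/shutdown codes as a quiet exit, and log anything else before deciding whether to resubmit. A sketch of that switch with a hypothetical handler name:

	#include <linux/usb.h>

	static void example_int_callback(struct urb *urb)
	{
		int status = urb->status;

		switch (status) {
		case 0:
			break;			/* success, handle the data below */
		case -ECONNRESET:		/* asynchronous unlink */
		case -ENOENT:			/* synchronous unlink */
		case -ESHUTDOWN:		/* hardware going away */
			return;			/* urb is terminated, do not resubmit */
		default:
			dev_dbg(&urb->dev->dev, "nonzero status %d\n", status);
			break;			/* fall through and try to resubmit */
		}
		/* ... process urb->transfer_buffer, then usb_submit_urb(urb, GFP_ATOMIC) ... */
	}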
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c index 42d4e6454a77..df7e1ecc810a 100644 --- a/drivers/usb/misc/auerswald.c +++ b/drivers/usb/misc/auerswald.c | |||
@@ -862,14 +862,16 @@ static void auerswald_ctrlread_wretcomplete (struct urb * urb) | |||
862 | pauerbuf_t bp = (pauerbuf_t) urb->context; | 862 | pauerbuf_t bp = (pauerbuf_t) urb->context; |
863 | pauerswald_t cp; | 863 | pauerswald_t cp; |
864 | int ret; | 864 | int ret; |
865 | int status = urb->status; | ||
866 | |||
865 | dbg ("auerswald_ctrlread_wretcomplete called"); | 867 | dbg ("auerswald_ctrlread_wretcomplete called"); |
866 | dbg ("complete with status: %d", urb->status); | 868 | dbg ("complete with status: %d", status); |
867 | cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); | 869 | cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); |
868 | 870 | ||
869 | /* check if it is possible to advance */ | 871 | /* check if it is possible to advance */ |
870 | if (!auerswald_status_retry (urb->status) || !cp->usbdev) { | 872 | if (!auerswald_status_retry(status) || !cp->usbdev) { |
871 | /* reuse the buffer */ | 873 | /* reuse the buffer */ |
872 | err ("control dummy: transmission error %d, can not retry", urb->status); | 874 | err ("control dummy: transmission error %d, can not retry", status); |
873 | auerbuf_releasebuf (bp); | 875 | auerbuf_releasebuf (bp); |
874 | /* Wake up all processes waiting for a buffer */ | 876 | /* Wake up all processes waiting for a buffer */ |
875 | wake_up (&cp->bufferwait); | 877 | wake_up (&cp->bufferwait); |
@@ -902,21 +904,23 @@ static void auerswald_ctrlread_complete (struct urb * urb) | |||
902 | pauerswald_t cp; | 904 | pauerswald_t cp; |
903 | pauerscon_t scp; | 905 | pauerscon_t scp; |
904 | pauerbuf_t bp = (pauerbuf_t) urb->context; | 906 | pauerbuf_t bp = (pauerbuf_t) urb->context; |
907 | int status = urb->status; | ||
905 | int ret; | 908 | int ret; |
909 | |||
906 | dbg ("auerswald_ctrlread_complete called"); | 910 | dbg ("auerswald_ctrlread_complete called"); |
907 | 911 | ||
908 | cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); | 912 | cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); |
909 | 913 | ||
910 | /* check if there is valid data in this urb */ | 914 | /* check if there is valid data in this urb */ |
911 | if (urb->status) { | 915 | if (status) { |
912 | dbg ("complete with non-zero status: %d", urb->status); | 916 | dbg ("complete with non-zero status: %d", status); |
913 | /* should we do a retry? */ | 917 | /* should we do a retry? */ |
914 | if (!auerswald_status_retry (urb->status) | 918 | if (!auerswald_status_retry(status) |
915 | || !cp->usbdev | 919 | || !cp->usbdev |
916 | || (cp->version < AUV_RETRY) | 920 | || (cp->version < AUV_RETRY) |
917 | || (bp->retries >= AU_RETRIES)) { | 921 | || (bp->retries >= AU_RETRIES)) { |
918 | /* reuse the buffer */ | 922 | /* reuse the buffer */ |
919 | err ("control read: transmission error %d, can not retry", urb->status); | 923 | err ("control read: transmission error %d, can not retry", status); |
920 | auerbuf_releasebuf (bp); | 924 | auerbuf_releasebuf (bp); |
921 | /* Wake up all processes waiting for a buffer */ | 925 | /* Wake up all processes waiting for a buffer */ |
922 | wake_up (&cp->bufferwait); | 926 | wake_up (&cp->bufferwait); |
@@ -974,12 +978,13 @@ static void auerswald_int_complete (struct urb * urb) | |||
974 | unsigned int channelid; | 978 | unsigned int channelid; |
975 | unsigned int bytecount; | 979 | unsigned int bytecount; |
976 | int ret; | 980 | int ret; |
981 | int status = urb->status; | ||
977 | pauerbuf_t bp = NULL; | 982 | pauerbuf_t bp = NULL; |
978 | pauerswald_t cp = (pauerswald_t) urb->context; | 983 | pauerswald_t cp = (pauerswald_t) urb->context; |
979 | 984 | ||
980 | dbg ("%s called", __FUNCTION__); | 985 | dbg ("%s called", __FUNCTION__); |
981 | 986 | ||
982 | switch (urb->status) { | 987 | switch (status) { |
983 | case 0: | 988 | case 0: |
984 | /* success */ | 989 | /* success */ |
985 | break; | 990 | break; |
@@ -987,10 +992,10 @@ static void auerswald_int_complete (struct urb * urb) | |||
987 | case -ENOENT: | 992 | case -ENOENT: |
988 | case -ESHUTDOWN: | 993 | case -ESHUTDOWN: |
989 | /* this urb is terminated, clean up */ | 994 | /* this urb is terminated, clean up */ |
990 | dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); | 995 | dbg("%s - urb shutting down with status: %d", __FUNCTION__, status); |
991 | return; | 996 | return; |
992 | default: | 997 | default: |
993 | dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); | 998 | dbg("%s - nonzero urb status received: %d", __FUNCTION__, status); |
994 | goto exit; | 999 | goto exit; |
995 | } | 1000 | } |
996 | 1001 | ||
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index e0f122e131d7..538b535e955b 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/module.h> | 45 | #include <linux/module.h> |
46 | #include <linux/kref.h> | 46 | #include <linux/kref.h> |
47 | #include <linux/mutex.h> | ||
47 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
48 | #include <linux/usb.h> | 49 | #include <linux/usb.h> |
49 | #include <linux/workqueue.h> | 50 | #include <linux/workqueue.h> |
@@ -64,7 +65,7 @@ static struct workqueue_struct *respond_queue; | |||
64 | * ftdi_module_lock exists to protect access to global variables | 65 | * ftdi_module_lock exists to protect access to global variables |
65 | * | 66 | * |
66 | */ | 67 | */ |
67 | static struct semaphore ftdi_module_lock; | 68 | static struct mutex ftdi_module_lock; |
68 | static int ftdi_instances = 0; | 69 | static int ftdi_instances = 0; |
69 | static struct list_head ftdi_static_list; | 70 | static struct list_head ftdi_static_list; |
70 | /* | 71 | /* |
@@ -199,10 +200,10 @@ static void ftdi_elan_delete(struct kref *kref) | |||
199 | dev_warn(&ftdi->udev->dev, "FREEING ftdi=%p\n", ftdi); | 200 | dev_warn(&ftdi->udev->dev, "FREEING ftdi=%p\n", ftdi); |
200 | usb_put_dev(ftdi->udev); | 201 | usb_put_dev(ftdi->udev); |
201 | ftdi->disconnected += 1; | 202 | ftdi->disconnected += 1; |
202 | down(&ftdi_module_lock); | 203 | mutex_lock(&ftdi_module_lock); |
203 | list_del_init(&ftdi->ftdi_list); | 204 | list_del_init(&ftdi->ftdi_list); |
204 | ftdi_instances -= 1; | 205 | ftdi_instances -= 1; |
205 | up(&ftdi_module_lock); | 206 | mutex_unlock(&ftdi_module_lock); |
206 | kfree(ftdi->bulk_in_buffer); | 207 | kfree(ftdi->bulk_in_buffer); |
207 | ftdi->bulk_in_buffer = NULL; | 208 | ftdi->bulk_in_buffer = NULL; |
208 | } | 209 | } |
@@ -746,10 +747,12 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer, | |||
746 | static void ftdi_elan_write_bulk_callback(struct urb *urb) | 747 | static void ftdi_elan_write_bulk_callback(struct urb *urb) |
747 | { | 748 | { |
748 | struct usb_ftdi *ftdi = (struct usb_ftdi *)urb->context; | 749 | struct usb_ftdi *ftdi = (struct usb_ftdi *)urb->context; |
749 | if (urb->status && !(urb->status == -ENOENT || urb->status == | 750 | int status = urb->status; |
750 | -ECONNRESET || urb->status == -ESHUTDOWN)) { | 751 | |
752 | if (status && !(status == -ENOENT || status == -ECONNRESET || | ||
753 | status == -ESHUTDOWN)) { | ||
751 | dev_err(&ftdi->udev->dev, "urb=%p write bulk status received: %" | 754 | dev_err(&ftdi->udev->dev, "urb=%p write bulk status received: %" |
752 | "d\n", urb, urb->status); | 755 | "d\n", urb, status); |
753 | } | 756 | } |
754 | usb_buffer_free(urb->dev, urb->transfer_buffer_length, | 757 | usb_buffer_free(urb->dev, urb->transfer_buffer_length, |
755 | urb->transfer_buffer, urb->transfer_dma); | 758 | urb->transfer_buffer, urb->transfer_dma); |
@@ -2780,10 +2783,10 @@ static int ftdi_elan_probe(struct usb_interface *interface, | |||
2780 | return -ENOMEM; | 2783 | return -ENOMEM; |
2781 | } | 2784 | } |
2782 | memset(ftdi, 0x00, sizeof(struct usb_ftdi)); | 2785 | memset(ftdi, 0x00, sizeof(struct usb_ftdi)); |
2783 | down(&ftdi_module_lock); | 2786 | mutex_lock(&ftdi_module_lock); |
2784 | list_add_tail(&ftdi->ftdi_list, &ftdi_static_list); | 2787 | list_add_tail(&ftdi->ftdi_list, &ftdi_static_list); |
2785 | ftdi->sequence_num = ++ftdi_instances; | 2788 | ftdi->sequence_num = ++ftdi_instances; |
2786 | up(&ftdi_module_lock); | 2789 | mutex_unlock(&ftdi_module_lock); |
2787 | ftdi_elan_init_kref(ftdi); | 2790 | ftdi_elan_init_kref(ftdi); |
2788 | init_MUTEX(&ftdi->sw_lock); | 2791 | init_MUTEX(&ftdi->sw_lock); |
2789 | ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); | 2792 | ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); |
@@ -2909,7 +2912,7 @@ static int __init ftdi_elan_init(void) | |||
2909 | int result; | 2912 | int result; |
2910 | printk(KERN_INFO "driver %s built at %s on %s\n", ftdi_elan_driver.name, | 2913 | printk(KERN_INFO "driver %s built at %s on %s\n", ftdi_elan_driver.name, |
2911 | __TIME__, __DATE__); | 2914 | __TIME__, __DATE__); |
2912 | init_MUTEX(&ftdi_module_lock); | 2915 | mutex_init(&ftdi_module_lock); |
2913 | INIT_LIST_HEAD(&ftdi_static_list); | 2916 | INIT_LIST_HEAD(&ftdi_static_list); |
2914 | status_queue = create_singlethread_workqueue("ftdi-status-control"); | 2917 | status_queue = create_singlethread_workqueue("ftdi-status-control"); |
2915 | if (!status_queue) | 2918 | if (!status_queue) |
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 28548d186712..46d9f27ec173 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
@@ -158,9 +158,10 @@ static void iowarrior_callback(struct urb *urb) | |||
158 | int read_idx; | 158 | int read_idx; |
159 | int aux_idx; | 159 | int aux_idx; |
160 | int offset; | 160 | int offset; |
161 | int status; | 161 | int status = urb->status; |
162 | int retval; | ||
162 | 163 | ||
163 | switch (urb->status) { | 164 | switch (status) { |
164 | case 0: | 165 | case 0: |
165 | /* success */ | 166 | /* success */ |
166 | break; | 167 | break; |
@@ -213,10 +214,10 @@ static void iowarrior_callback(struct urb *urb) | |||
213 | wake_up_interruptible(&dev->read_wait); | 214 | wake_up_interruptible(&dev->read_wait); |
214 | 215 | ||
215 | exit: | 216 | exit: |
216 | status = usb_submit_urb(urb, GFP_ATOMIC); | 217 | retval = usb_submit_urb(urb, GFP_ATOMIC); |
217 | if (status) | 218 | if (retval) |
218 | dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d", | 219 | dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d", |
219 | __FUNCTION__, status); | 220 | __FUNCTION__, retval); |
220 | 221 | ||
221 | } | 222 | } |
222 | 223 | ||
@@ -226,13 +227,15 @@ exit: | |||
226 | static void iowarrior_write_callback(struct urb *urb) | 227 | static void iowarrior_write_callback(struct urb *urb) |
227 | { | 228 | { |
228 | struct iowarrior *dev; | 229 | struct iowarrior *dev; |
230 | int status = urb->status; | ||
231 | |||
229 | dev = (struct iowarrior *)urb->context; | 232 | dev = (struct iowarrior *)urb->context; |
230 | /* sync/async unlink faults aren't errors */ | 233 | /* sync/async unlink faults aren't errors */ |
231 | if (urb->status && | 234 | if (status && |
232 | !(urb->status == -ENOENT || | 235 | !(status == -ENOENT || |
233 | urb->status == -ECONNRESET || urb->status == -ESHUTDOWN)) { | 236 | status == -ECONNRESET || status == -ESHUTDOWN)) { |
234 | dbg("%s - nonzero write bulk status received: %d", | 237 | dbg("%s - nonzero write bulk status received: %d", |
235 | __func__, urb->status); | 238 | __func__, status); |
236 | } | 239 | } |
237 | /* free up our allocated buffer */ | 240 | /* free up our allocated buffer */ |
238 | usb_buffer_free(urb->dev, urb->transfer_buffer_length, | 241 | usb_buffer_free(urb->dev, urb->transfer_buffer_length, |
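Editor's note: the iowarrior hunks above, and the matching callback changes in the drivers that follow, apply the same treewide cleanup: read urb->status once into a local variable at the top of the completion handler instead of dereferencing the URB repeatedly. A minimal sketch of the resulting callback shape, using a hypothetical handler name:

#include <linux/usb.h>

/* Hypothetical write-completion handler showing the status-caching idiom. */
static void example_write_callback(struct urb *urb)
{
	int status = urb->status;	/* read once, used throughout */

	/* sync/async unlink faults aren't errors */
	if (status && !(status == -ENOENT ||
			status == -ECONNRESET ||
			status == -ESHUTDOWN))
		dev_err(&urb->dev->dev,
			"nonzero write status received: %d\n", status);

	/* ... free the transfer buffer, wake up any waiting writer ... */
}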
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 5e950b90c541..8208496dfc63 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c | |||
@@ -219,16 +219,17 @@ static void ld_usb_interrupt_in_callback(struct urb *urb) | |||
219 | struct ld_usb *dev = urb->context; | 219 | struct ld_usb *dev = urb->context; |
220 | size_t *actual_buffer; | 220 | size_t *actual_buffer; |
221 | unsigned int next_ring_head; | 221 | unsigned int next_ring_head; |
222 | int status = urb->status; | ||
222 | int retval; | 223 | int retval; |
223 | 224 | ||
224 | if (urb->status) { | 225 | if (status) { |
225 | if (urb->status == -ENOENT || | 226 | if (status == -ENOENT || |
226 | urb->status == -ECONNRESET || | 227 | status == -ECONNRESET || |
227 | urb->status == -ESHUTDOWN) { | 228 | status == -ESHUTDOWN) { |
228 | goto exit; | 229 | goto exit; |
229 | } else { | 230 | } else { |
230 | dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n", | 231 | dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n", |
231 | __FUNCTION__, urb->status); | 232 | __FUNCTION__, status); |
232 | spin_lock(&dev->rbsl); | 233 | spin_lock(&dev->rbsl); |
233 | goto resubmit; /* maybe we can recover */ | 234 | goto resubmit; /* maybe we can recover */ |
234 | } | 235 | } |
@@ -275,14 +276,15 @@ exit: | |||
275 | static void ld_usb_interrupt_out_callback(struct urb *urb) | 276 | static void ld_usb_interrupt_out_callback(struct urb *urb) |
276 | { | 277 | { |
277 | struct ld_usb *dev = urb->context; | 278 | struct ld_usb *dev = urb->context; |
279 | int status = urb->status; | ||
278 | 280 | ||
279 | /* sync/async unlink faults aren't errors */ | 281 | /* sync/async unlink faults aren't errors */ |
280 | if (urb->status && !(urb->status == -ENOENT || | 282 | if (status && !(status == -ENOENT || |
281 | urb->status == -ECONNRESET || | 283 | status == -ECONNRESET || |
282 | urb->status == -ESHUTDOWN)) | 284 | status == -ESHUTDOWN)) |
283 | dbg_info(&dev->intf->dev, | 285 | dbg_info(&dev->intf->dev, |
284 | "%s - nonzero write interrupt status received: %d\n", | 286 | "%s - nonzero write interrupt status received: %d\n", |
285 | __FUNCTION__, urb->status); | 287 | __FUNCTION__, status); |
286 | 288 | ||
287 | dev->interrupt_out_busy = 0; | 289 | dev->interrupt_out_busy = 0; |
288 | wake_up_interruptible(&dev->write_wait); | 290 | wake_up_interruptible(&dev->write_wait); |
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 2ed0daea894c..561970b889a5 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c | |||
@@ -742,19 +742,20 @@ exit: | |||
742 | static void tower_interrupt_in_callback (struct urb *urb) | 742 | static void tower_interrupt_in_callback (struct urb *urb) |
743 | { | 743 | { |
744 | struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context; | 744 | struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context; |
745 | int status = urb->status; | ||
745 | int retval; | 746 | int retval; |
746 | 747 | ||
747 | dbg(4, "%s: enter, status %d", __FUNCTION__, urb->status); | 748 | dbg(4, "%s: enter, status %d", __FUNCTION__, status); |
748 | 749 | ||
749 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); | 750 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); |
750 | 751 | ||
751 | if (urb->status) { | 752 | if (status) { |
752 | if (urb->status == -ENOENT || | 753 | if (status == -ENOENT || |
753 | urb->status == -ECONNRESET || | 754 | status == -ECONNRESET || |
754 | urb->status == -ESHUTDOWN) { | 755 | status == -ESHUTDOWN) { |
755 | goto exit; | 756 | goto exit; |
756 | } else { | 757 | } else { |
757 | dbg(1, "%s: nonzero status received: %d", __FUNCTION__, urb->status); | 758 | dbg(1, "%s: nonzero status received: %d", __FUNCTION__, status); |
758 | goto resubmit; /* maybe we can recover */ | 759 | goto resubmit; /* maybe we can recover */ |
759 | } | 760 | } |
760 | } | 761 | } |
@@ -788,7 +789,7 @@ exit: | |||
788 | wake_up_interruptible (&dev->read_wait); | 789 | wake_up_interruptible (&dev->read_wait); |
789 | 790 | ||
790 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); | 791 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); |
791 | dbg(4, "%s: leave, status %d", __FUNCTION__, urb->status); | 792 | dbg(4, "%s: leave, status %d", __FUNCTION__, status); |
792 | } | 793 | } |
793 | 794 | ||
794 | 795 | ||
@@ -798,23 +799,24 @@ exit: | |||
798 | static void tower_interrupt_out_callback (struct urb *urb) | 799 | static void tower_interrupt_out_callback (struct urb *urb) |
799 | { | 800 | { |
800 | struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context; | 801 | struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context; |
802 | int status = urb->status; | ||
801 | 803 | ||
802 | dbg(4, "%s: enter, status %d", __FUNCTION__, urb->status); | 804 | dbg(4, "%s: enter, status %d", __FUNCTION__, status); |
803 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); | 805 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); |
804 | 806 | ||
805 | /* sync/async unlink faults aren't errors */ | 807 | /* sync/async unlink faults aren't errors */ |
806 | if (urb->status && !(urb->status == -ENOENT || | 808 | if (status && !(status == -ENOENT || |
807 | urb->status == -ECONNRESET || | 809 | status == -ECONNRESET || |
808 | urb->status == -ESHUTDOWN)) { | 810 | status == -ESHUTDOWN)) { |
809 | dbg(1, "%s - nonzero write bulk status received: %d", | 811 | dbg(1, "%s - nonzero write bulk status received: %d", |
810 | __FUNCTION__, urb->status); | 812 | __FUNCTION__, status); |
811 | } | 813 | } |
812 | 814 | ||
813 | dev->interrupt_out_busy = 0; | 815 | dev->interrupt_out_busy = 0; |
814 | wake_up_interruptible(&dev->write_wait); | 816 | wake_up_interruptible(&dev->write_wait); |
815 | 817 | ||
816 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); | 818 | lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); |
817 | dbg(4, "%s: leave, status %d", __FUNCTION__, urb->status); | 819 | dbg(4, "%s: leave, status %d", __FUNCTION__, status); |
818 | } | 820 | } |
819 | 821 | ||
820 | 822 | ||
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c index 371bf2b1197d..aa9bcceabe74 100644 --- a/drivers/usb/misc/phidgetkit.c +++ b/drivers/usb/misc/phidgetkit.c | |||
@@ -305,9 +305,10 @@ static void interfacekit_irq(struct urb *urb) | |||
305 | struct interfacekit *kit = urb->context; | 305 | struct interfacekit *kit = urb->context; |
306 | unsigned char *buffer = kit->data; | 306 | unsigned char *buffer = kit->data; |
307 | int i, level, sensor; | 307 | int i, level, sensor; |
308 | int status; | 308 | int retval; |
309 | int status = urb->status; | ||
309 | 310 | ||
310 | switch (urb->status) { | 311 | switch (status) { |
311 | case 0: /* success */ | 312 | case 0: /* success */ |
312 | break; | 313 | break; |
313 | case -ECONNRESET: /* unlink */ | 314 | case -ECONNRESET: /* unlink */ |
@@ -377,11 +378,11 @@ static void interfacekit_irq(struct urb *urb) | |||
377 | schedule_delayed_work(&kit->do_notify, 0); | 378 | schedule_delayed_work(&kit->do_notify, 0); |
378 | 379 | ||
379 | resubmit: | 380 | resubmit: |
380 | status = usb_submit_urb(urb, GFP_ATOMIC); | 381 | retval = usb_submit_urb(urb, GFP_ATOMIC); |
381 | if (status) | 382 | if (retval) |
382 | err("can't resubmit intr, %s-%s/interfacekit0, status %d", | 383 | err("can't resubmit intr, %s-%s/interfacekit0, retval %d", |
383 | kit->udev->bus->bus_name, | 384 | kit->udev->bus->bus_name, |
384 | kit->udev->devpath, status); | 385 | kit->udev->devpath, retval); |
385 | } | 386 | } |
386 | 387 | ||
387 | static void do_notify(struct work_struct *work) | 388 | static void do_notify(struct work_struct *work) |
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c index 5727e1ea2f91..df0ebcdb9d6a 100644 --- a/drivers/usb/misc/phidgetmotorcontrol.c +++ b/drivers/usb/misc/phidgetmotorcontrol.c | |||
@@ -95,9 +95,10 @@ static void motorcontrol_irq(struct urb *urb) | |||
95 | struct motorcontrol *mc = urb->context; | 95 | struct motorcontrol *mc = urb->context; |
96 | unsigned char *buffer = mc->data; | 96 | unsigned char *buffer = mc->data; |
97 | int i, level; | 97 | int i, level; |
98 | int status; | 98 | int retval; |
99 | int status = urb->status; | ||
99 | 100 | ||
100 | switch (urb->status) { | 101 | switch (status) { |
101 | case 0: /* success */ | 102 | case 0: /* success */ |
102 | break; | 103 | break; |
103 | case -ECONNRESET: /* unlink */ | 104 | case -ECONNRESET: /* unlink */ |
@@ -151,12 +152,12 @@ static void motorcontrol_irq(struct urb *urb) | |||
151 | schedule_delayed_work(&mc->do_notify, 0); | 152 | schedule_delayed_work(&mc->do_notify, 0); |
152 | 153 | ||
153 | resubmit: | 154 | resubmit: |
154 | status = usb_submit_urb(urb, GFP_ATOMIC); | 155 | retval = usb_submit_urb(urb, GFP_ATOMIC); |
155 | if (status) | 156 | if (retval) |
156 | dev_err(&mc->intf->dev, | 157 | dev_err(&mc->intf->dev, |
157 | "can't resubmit intr, %s-%s/motorcontrol0, status %d", | 158 | "can't resubmit intr, %s-%s/motorcontrol0, retval %d", |
158 | mc->udev->bus->bus_name, | 159 | mc->udev->bus->bus_name, |
159 | mc->udev->devpath, status); | 160 | mc->udev->devpath, retval); |
160 | } | 161 | } |
161 | 162 | ||
162 | static void do_notify(struct work_struct *work) | 163 | static void do_notify(struct work_struct *work) |
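Editor's note: the phidget hunks also stop reusing one variable for two different things: "status" now holds the URB's completion code while a separate "retval" holds only the usb_submit_urb() return value. A hedged sketch of the resulting interrupt handler, with hypothetical names:

#include <linux/usb.h>

/* Hypothetical interrupt-in handler: "status" is the URB's completion code,
 * "retval" is only the result of the resubmission attempt. */
static void example_irq(struct urb *urb)
{
	int status = urb->status;
	int retval;

	switch (status) {
	case 0:			/* success */
		break;
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -ESHUTDOWN:
		return;		/* urb is going away, don't resubmit */
	default:		/* transient error, try again */
		goto resubmit;
	}

	/* ... process urb->transfer_buffer here ... */

resubmit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&urb->dev->dev,
			"can't resubmit intr urb, retval %d\n", retval);
}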
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index 504f7221b0d0..719842032712 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c | |||
@@ -176,16 +176,17 @@ static int lcd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u | |||
176 | static void lcd_write_bulk_callback(struct urb *urb) | 176 | static void lcd_write_bulk_callback(struct urb *urb) |
177 | { | 177 | { |
178 | struct usb_lcd *dev; | 178 | struct usb_lcd *dev; |
179 | int status = urb->status; | ||
179 | 180 | ||
180 | dev = (struct usb_lcd *)urb->context; | 181 | dev = (struct usb_lcd *)urb->context; |
181 | 182 | ||
182 | /* sync/async unlink faults aren't errors */ | 183 | /* sync/async unlink faults aren't errors */ |
183 | if (urb->status && | 184 | if (status && |
184 | !(urb->status == -ENOENT || | 185 | !(status == -ENOENT || |
185 | urb->status == -ECONNRESET || | 186 | status == -ECONNRESET || |
186 | urb->status == -ESHUTDOWN)) { | 187 | status == -ESHUTDOWN)) { |
187 | dbg("USBLCD: %s - nonzero write bulk status received: %d", | 188 | dbg("USBLCD: %s - nonzero write bulk status received: %d", |
188 | __FUNCTION__, urb->status); | 189 | __FUNCTION__, status); |
189 | } | 190 | } |
190 | 191 | ||
191 | /* free up our allocated buffer */ | 192 | /* free up our allocated buffer */ |
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index fb321864a92d..e901d31e051b 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c | |||
@@ -768,8 +768,8 @@ static void ctrl_complete (struct urb *urb) | |||
768 | 768 | ||
769 | /* some faults are allowed, not required */ | 769 | /* some faults are allowed, not required */ |
770 | if (subcase->expected > 0 && ( | 770 | if (subcase->expected > 0 && ( |
771 | ((urb->status == -subcase->expected /* happened */ | 771 | ((status == -subcase->expected /* happened */ |
772 | || urb->status == 0)))) /* didn't */ | 772 | || status == 0)))) /* didn't */ |
773 | status = 0; | 773 | status = 0; |
774 | /* sometimes more than one fault is allowed */ | 774 | /* sometimes more than one fault is allowed */ |
775 | else if (subcase->number == 12 && status == -EPIPE) | 775 | else if (subcase->number == 12 && status == -EPIPE) |
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 1a60f9c473ad..2734fe2b9c43 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c | |||
@@ -111,12 +111,13 @@ static void async_complete(struct urb *urb) | |||
111 | struct uss720_async_request *rq; | 111 | struct uss720_async_request *rq; |
112 | struct parport *pp; | 112 | struct parport *pp; |
113 | struct parport_uss720_private *priv; | 113 | struct parport_uss720_private *priv; |
114 | int status = urb->status; | ||
114 | 115 | ||
115 | rq = urb->context; | 116 | rq = urb->context; |
116 | priv = rq->priv; | 117 | priv = rq->priv; |
117 | pp = priv->pp; | 118 | pp = priv->pp; |
118 | if (urb->status) { | 119 | if (status) { |
119 | err("async_complete: urb error %d", urb->status); | 120 | err("async_complete: urb error %d", status); |
120 | } else if (rq->dr.bRequest == 3) { | 121 | } else if (rq->dr.bRequest == 3) { |
121 | memcpy(priv->reg, rq->reg, sizeof(priv->reg)); | 122 | memcpy(priv->reg, rq->reg, sizeof(priv->reg)); |
122 | #if 0 | 123 | #if 0 |
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 0d3903691e8c..b8670905bc3a 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c | |||
@@ -2794,16 +2794,14 @@ static void edge_shutdown (struct usb_serial *serial) | |||
2794 | 2794 | ||
2795 | dbg ("%s", __FUNCTION__); | 2795 | dbg ("%s", __FUNCTION__); |
2796 | 2796 | ||
2797 | for (i=0; i < serial->num_ports; ++i) { | 2797 | for (i = 0; i < serial->num_ports; ++i) { |
2798 | edge_port = usb_get_serial_port_data(serial->port[i]); | 2798 | edge_port = usb_get_serial_port_data(serial->port[i]); |
2799 | edge_remove_sysfs_attrs(edge_port->port); | 2799 | edge_remove_sysfs_attrs(edge_port->port); |
2800 | if (edge_port) { | 2800 | edge_buf_free(edge_port->ep_out_buf); |
2801 | edge_buf_free(edge_port->ep_out_buf); | 2801 | kfree(edge_port); |
2802 | kfree(edge_port); | ||
2803 | } | ||
2804 | usb_set_serial_port_data(serial->port[i], NULL); | 2802 | usb_set_serial_port_data(serial->port[i], NULL); |
2805 | } | 2803 | } |
2806 | kfree (usb_get_serial_data(serial)); | 2804 | kfree(usb_get_serial_data(serial)); |
2807 | usb_set_serial_data(serial, NULL); | 2805 | usb_set_serial_data(serial, NULL); |
2808 | } | 2806 | } |
2809 | 2807 | ||
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 231b584f6d0f..01e811becec4 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
@@ -110,11 +110,6 @@ static void mos7720_interrupt_callback(struct urb *urb) | |||
110 | 110 | ||
111 | dbg("%s"," : Entering\n"); | 111 | dbg("%s"," : Entering\n"); |
112 | 112 | ||
113 | if (!urb) { | ||
114 | dbg("%s","Invalid Pointer !!!!:\n"); | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | switch (status) { | 113 | switch (status) { |
119 | case 0: | 114 | case 0: |
120 | /* success */ | 115 | /* success */ |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 37f41f576d3d..f76480f1455d 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -436,11 +436,6 @@ static void mos7840_control_callback(struct urb *urb) | |||
436 | int result = 0; | 436 | int result = 0; |
437 | int status = urb->status; | 437 | int status = urb->status; |
438 | 438 | ||
439 | if (!urb) { | ||
440 | dbg("%s", "Invalid Pointer !!!!:\n"); | ||
441 | return; | ||
442 | } | ||
443 | |||
444 | mos7840_port = (struct moschip_port *)urb->context; | 439 | mos7840_port = (struct moschip_port *)urb->context; |
445 | 440 | ||
446 | switch (status) { | 441 | switch (status) { |
@@ -525,10 +520,6 @@ static void mos7840_interrupt_callback(struct urb *urb) | |||
525 | int status = urb->status; | 520 | int status = urb->status; |
526 | 521 | ||
527 | dbg("%s", " : Entering\n"); | 522 | dbg("%s", " : Entering\n"); |
528 | if (!urb) { | ||
529 | dbg("%s", "Invalid Pointer !!!!:\n"); | ||
530 | return; | ||
531 | } | ||
532 | 523 | ||
533 | switch (status) { | 524 | switch (status) { |
534 | case 0: | 525 | case 0: |
@@ -676,11 +667,6 @@ static void mos7840_bulk_in_callback(struct urb *urb) | |||
676 | struct tty_struct *tty; | 667 | struct tty_struct *tty; |
677 | int status = urb->status; | 668 | int status = urb->status; |
678 | 669 | ||
679 | if (!urb) { | ||
680 | dbg("%s", "Invalid Pointer !!!!:\n"); | ||
681 | return; | ||
682 | } | ||
683 | |||
684 | if (status) { | 670 | if (status) { |
685 | dbg("nonzero read bulk status received: %d", status); | 671 | dbg("nonzero read bulk status received: %d", status); |
686 | return; | 672 | return; |
@@ -753,11 +739,6 @@ static void mos7840_bulk_out_data_callback(struct urb *urb) | |||
753 | int status = urb->status; | 739 | int status = urb->status; |
754 | int i; | 740 | int i; |
755 | 741 | ||
756 | if (!urb) { | ||
757 | dbg("%s", "Invalid Pointer !!!!:\n"); | ||
758 | return; | ||
759 | } | ||
760 | |||
761 | mos7840_port = (struct moschip_port *)urb->context; | 742 | mos7840_port = (struct moschip_port *)urb->context; |
762 | spin_lock(&mos7840_port->pool_lock); | 743 | spin_lock(&mos7840_port->pool_lock); |
763 | for (i = 0; i < NUM_URBS; i++) { | 744 | for (i = 0; i < NUM_URBS; i++) { |
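Editor's note: the mos7720/mos7840 hunks above drop "if (!urb)" guards from completion handlers; the USB core only completes URBs it was handed, so the pointer can never be NULL there. A minimal sketch of the simplified handler entry, with hypothetical names:

#include <linux/usb.h>

/* Hypothetical per-port state standing in for moschip_port above. */
struct example_port {
	int open_count;
};

/* A completion handler never sees urb == NULL; the removed guards were
 * unreachable, so the handler can start directly from urb->context. */
static void example_bulk_in_callback(struct urb *urb)
{
	struct example_port *eport = urb->context;
	int status = urb->status;

	if (status) {
		printk(KERN_DEBUG "nonzero read bulk status received: %d\n",
		       status);
		return;
	}

	/* ... hand urb->transfer_buffer off to the tty layer for eport ... */
	(void)eport;
}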
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index e7db20343d1a..0794ccdebfd4 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | USB Driver for Sierra Wireless | 2 | USB Driver for Sierra Wireless |
3 | 3 | ||
4 | Copyright (C) 2006 Kevin Lloyd <linux@sierrawireless.com> | 4 | Copyright (C) 2006, 2007 Kevin Lloyd <linux@sierrawireless.com> |
5 | 5 | ||
6 | IMPORTANT DISCLAIMER: This driver is not commercially supported by | 6 | IMPORTANT DISCLAIMER: This driver is not commercially supported by |
7 | Sierra Wireless. Use at your own risk. | 7 | Sierra Wireless. Use at your own risk. |
@@ -12,10 +12,9 @@ | |||
12 | 12 | ||
13 | Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de> | 13 | Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de> |
14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> | 14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> |
15 | |||
16 | */ | 15 | */ |
17 | 16 | ||
18 | #define DRIVER_VERSION "v.1.0.6" | 17 | #define DRIVER_VERSION "v.1.2.5b" |
19 | #define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>" | 18 | #define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>" |
20 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" | 19 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" |
21 | 20 | ||
@@ -28,23 +27,98 @@ | |||
28 | #include <linux/usb.h> | 27 | #include <linux/usb.h> |
29 | #include <linux/usb/serial.h> | 28 | #include <linux/usb/serial.h> |
30 | 29 | ||
30 | #define SWIMS_USB_REQUEST_SetMode 0x0B | ||
31 | #define SWIMS_USB_REQUEST_TYPE_SetMode 0x40 | ||
32 | #define SWIMS_USB_INDEX_SetMode 0x0000 | ||
33 | #define SWIMS_SET_MODE_Modem 0x0001 | ||
34 | |||
35 | /* per port private data */ | ||
36 | #define N_IN_URB 4 | ||
37 | #define N_OUT_URB 4 | ||
38 | #define IN_BUFLEN 4096 | ||
39 | |||
40 | static int debug; | ||
41 | |||
42 | enum devicetype { | ||
43 | DEVICE_3_PORT = 0, | ||
44 | DEVICE_1_PORT = 1, | ||
45 | DEVICE_INSTALLER = 2, | ||
46 | }; | ||
47 | |||
48 | int sierra_set_power_state(struct usb_device *udev, __u16 swiState) | ||
49 | { | ||
50 | int result; | ||
51 | dev_dbg(&udev->dev, "%s", "SET POWER STATE"); | ||
52 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
53 | 0x00, /* __u8 request */ | ||
54 | 0x40, /* __u8 request type */ | ||
55 | swiState, /* __u16 value */ | ||
56 | 0, /* __u16 index */ | ||
57 | NULL, /* void *data */ | ||
58 | 0, /* __u16 size */ | ||
59 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
60 | return result; | ||
61 | } | ||
62 | |||
63 | int sierra_set_ms_mode(struct usb_device *udev, __u16 eSocMode) | ||
64 | { | ||
65 | int result; | ||
66 | dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH"); | ||
67 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
68 | SWIMS_USB_REQUEST_SetMode, /* __u8 request */ | ||
69 | SWIMS_USB_REQUEST_TYPE_SetMode, /* __u8 request type */ | ||
70 | eSocMode, /* __u16 value */ | ||
71 | SWIMS_USB_INDEX_SetMode, /* __u16 index */ | ||
72 | NULL, /* void *data */ | ||
73 | 0, /* __u16 size */ | ||
74 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
75 | return result; | ||
76 | } | ||
77 | |||
78 | int sierra_probe(struct usb_interface *iface, const struct usb_device_id *id) | ||
79 | { | ||
80 | int result; | ||
81 | struct usb_device *udev; | ||
82 | |||
83 | udev = usb_get_dev(interface_to_usbdev(iface)); | ||
84 | |||
85 | /* Check if in installer mode */ | ||
86 | if (id->driver_info == DEVICE_INSTALLER) { | ||
87 | dev_dbg(&udev->dev, "%s", "FOUND DEVICE(SW)\n"); | ||
88 | result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); | ||
89 | /*We do not want to bind to the device when in installer mode*/ | ||
90 | return -EIO; | ||
91 | } | ||
92 | |||
93 | return usb_serial_probe(iface, id); | ||
94 | } | ||
31 | 95 | ||
32 | static struct usb_device_id id_table [] = { | 96 | static struct usb_device_id id_table [] = { |
33 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ | 97 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ |
34 | { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ | 98 | { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ |
35 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ | 99 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ |
100 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ | ||
36 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ | 101 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ |
37 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ | 102 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ |
38 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless AirCard 595U */ | ||
39 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ | 103 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ |
104 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ | ||
105 | |||
40 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ | 106 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ |
41 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ | 107 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ |
42 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ | 108 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ |
43 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ | 109 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ |
44 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ | 110 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ |
111 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ | ||
112 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ | ||
113 | { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ | ||
114 | { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ | ||
115 | { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ | ||
116 | { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ | ||
45 | 117 | ||
46 | { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ | 118 | { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ |
47 | { USB_DEVICE(0x0F3D, 0x0112) }, /* AirPrime/Sierra PC 5220 */ | 119 | { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ |
120 | |||
121 | { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER}, | ||
48 | { } | 122 | { } |
49 | }; | 123 | }; |
50 | MODULE_DEVICE_TABLE(usb, id_table); | 124 | MODULE_DEVICE_TABLE(usb, id_table); |
@@ -58,35 +132,36 @@ static struct usb_device_id id_table_1port [] = { | |||
58 | static struct usb_device_id id_table_3port [] = { | 132 | static struct usb_device_id id_table_3port [] = { |
59 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ | 133 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ |
60 | { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ | 134 | { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ |
135 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ | ||
61 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ | 136 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ |
62 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ | 137 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ |
63 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ | 138 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ |
64 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless AirCard 595U */ | ||
65 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ | 139 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ |
140 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U*/ | ||
141 | |||
66 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ | 142 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ |
67 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ | 143 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ |
68 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ | 144 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ |
69 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ | 145 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ |
70 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ | 146 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ |
147 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ | ||
148 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ | ||
149 | { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ | ||
150 | { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ | ||
151 | { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880E */ | ||
152 | { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881E */ | ||
71 | { } | 153 | { } |
72 | }; | 154 | }; |
73 | 155 | ||
74 | static struct usb_driver sierra_driver = { | 156 | static struct usb_driver sierra_driver = { |
75 | .name = "sierra", | 157 | .name = "sierra", |
76 | .probe = usb_serial_probe, | 158 | .probe = sierra_probe, |
77 | .disconnect = usb_serial_disconnect, | 159 | .disconnect = usb_serial_disconnect, |
78 | .id_table = id_table, | 160 | .id_table = id_table, |
79 | .no_dynamic_id = 1, | 161 | .no_dynamic_id = 1, |
80 | }; | 162 | }; |
81 | 163 | ||
82 | 164 | ||
83 | static int debug; | ||
84 | |||
85 | /* per port private data */ | ||
86 | #define N_IN_URB 4 | ||
87 | #define N_OUT_URB 4 | ||
88 | #define IN_BUFLEN 4096 | ||
89 | |||
90 | struct sierra_port_private { | 165 | struct sierra_port_private { |
91 | spinlock_t lock; /* lock the structure */ | 166 | spinlock_t lock; /* lock the structure */ |
92 | int outstanding_urbs; /* number of out urbs in flight */ | 167 | int outstanding_urbs; /* number of out urbs in flight */ |
@@ -421,7 +496,6 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp) | |||
421 | int i; | 496 | int i; |
422 | struct urb *urb; | 497 | struct urb *urb; |
423 | int result; | 498 | int result; |
424 | __u16 set_mode_dzero = 0x0000; | ||
425 | 499 | ||
426 | portdata = usb_get_serial_port_data(port); | 500 | portdata = usb_get_serial_port_data(port); |
427 | 501 | ||
@@ -457,12 +531,6 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp) | |||
457 | 531 | ||
458 | port->tty->low_latency = 1; | 532 | port->tty->low_latency = 1; |
459 | 533 | ||
460 | /* set mode to D0 */ | ||
461 | result = usb_control_msg(serial->dev, | ||
462 | usb_rcvctrlpipe(serial->dev, 0), | ||
463 | 0x00, 0x40, set_mode_dzero, 0, NULL, | ||
464 | 0, USB_CTRL_SET_TIMEOUT); | ||
465 | |||
466 | sierra_send_setup(port); | 534 | sierra_send_setup(port); |
467 | 535 | ||
468 | /* start up the interrupt endpoint if we have one */ | 536 | /* start up the interrupt endpoint if we have one */ |
@@ -510,6 +578,9 @@ static int sierra_startup(struct usb_serial *serial) | |||
510 | 578 | ||
511 | dbg("%s", __FUNCTION__); | 579 | dbg("%s", __FUNCTION__); |
512 | 580 | ||
581 | /*Set Device mode to D0 */ | ||
582 | sierra_set_power_state(serial->dev, 0x0000); | ||
583 | |||
513 | /* Now setup per port private data */ | 584 | /* Now setup per port private data */ |
514 | for (i = 0; i < serial->num_ports; i++) { | 585 | for (i = 0; i < serial->num_ports; i++) { |
515 | port = serial->port[i]; | 586 | port = serial->port[i]; |
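Editor's note: the .driver_info values added to the Sierra id table are just integers the USB core hands back to probe() along with the entry that matched, which is how sierra_probe() above recognizes an installer-mode device and refuses to bind. A minimal sketch of that mechanism, with hypothetical IDs and return value (the real driver returns -EIO):

#include <linux/module.h>
#include <linux/usb.h>

enum example_type { EXAMPLE_MODEM = 0, EXAMPLE_INSTALLER = 2 };

/* Hypothetical match table, not the Sierra table above. */
static struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x1234, 0x0001) },	/* driver_info defaults to 0 */
	{ USB_DEVICE(0x1234, 0x0fff), .driver_info = EXAMPLE_INSTALLER },
	{ }
};
MODULE_DEVICE_TABLE(usb, example_ids);

static int example_probe(struct usb_interface *iface,
			 const struct usb_device_id *id)
{
	if (id->driver_info == EXAMPLE_INSTALLER)
		return -ENODEV;	/* decline to bind in flash-disk mode */
	return 0;		/* normal bind path */
}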
diff --git a/drivers/usb/storage/dpcm.c b/drivers/usb/storage/dpcm.c index 1628cb258562..9a410b5a6e5b 100644 --- a/drivers/usb/storage/dpcm.c +++ b/drivers/usb/storage/dpcm.c | |||
@@ -46,43 +46,43 @@ | |||
46 | */ | 46 | */ |
47 | int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us) | 47 | int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us) |
48 | { | 48 | { |
49 | int ret; | 49 | int ret; |
50 | 50 | ||
51 | if(srb == NULL) | 51 | if (srb == NULL) |
52 | return USB_STOR_TRANSPORT_ERROR; | 52 | return USB_STOR_TRANSPORT_ERROR; |
53 | 53 | ||
54 | US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun); | 54 | US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun); |
55 | 55 | ||
56 | switch(srb->device->lun) { | 56 | switch (srb->device->lun) { |
57 | case 0: | 57 | case 0: |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * LUN 0 corresponds to the CompactFlash card reader. | 60 | * LUN 0 corresponds to the CompactFlash card reader. |
61 | */ | 61 | */ |
62 | ret = usb_stor_CB_transport(srb, us); | 62 | ret = usb_stor_CB_transport(srb, us); |
63 | break; | 63 | break; |
64 | 64 | ||
65 | #ifdef CONFIG_USB_STORAGE_SDDR09 | 65 | #ifdef CONFIG_USB_STORAGE_SDDR09 |
66 | case 1: | 66 | case 1: |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * LUN 1 corresponds to the SmartMedia card reader. | 69 | * LUN 1 corresponds to the SmartMedia card reader. |
70 | */ | 70 | */ |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Set the LUN to 0 (just in case). | 73 | * Set the LUN to 0 (just in case). |
74 | */ | 74 | */ |
75 | srb->device->lun = 0; us->srb->device->lun = 0; | 75 | srb->device->lun = 0; us->srb->device->lun = 0; |
76 | ret = sddr09_transport(srb, us); | 76 | ret = sddr09_transport(srb, us); |
77 | srb->device->lun = 1; us->srb->device->lun = 1; | 77 | srb->device->lun = 1; us->srb->device->lun = 1; |
78 | break; | 78 | break; |
79 | 79 | ||
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | default: | 82 | default: |
83 | US_DEBUGP("dpcm_transport: Invalid LUN %d\n", srb->device->lun); | 83 | US_DEBUGP("dpcm_transport: Invalid LUN %d\n", srb->device->lun); |
84 | ret = USB_STOR_TRANSPORT_ERROR; | 84 | ret = USB_STOR_TRANSPORT_ERROR; |
85 | break; | 85 | break; |
86 | } | 86 | } |
87 | return ret; | 87 | return ret; |
88 | } | 88 | } |
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c index d35369392fed..dfd42fe9e5f0 100644 --- a/drivers/usb/storage/onetouch.c +++ b/drivers/usb/storage/onetouch.c | |||
@@ -57,9 +57,10 @@ static void usb_onetouch_irq(struct urb *urb) | |||
57 | struct usb_onetouch *onetouch = urb->context; | 57 | struct usb_onetouch *onetouch = urb->context; |
58 | signed char *data = onetouch->data; | 58 | signed char *data = onetouch->data; |
59 | struct input_dev *dev = onetouch->dev; | 59 | struct input_dev *dev = onetouch->dev; |
60 | int status; | 60 | int status = urb->status; |
61 | int retval; | ||
61 | 62 | ||
62 | switch (urb->status) { | 63 | switch (status) { |
63 | case 0: /* success */ | 64 | case 0: /* success */ |
64 | break; | 65 | break; |
65 | case -ECONNRESET: /* unlink */ | 66 | case -ECONNRESET: /* unlink */ |
@@ -75,11 +76,11 @@ static void usb_onetouch_irq(struct urb *urb) | |||
75 | input_sync(dev); | 76 | input_sync(dev); |
76 | 77 | ||
77 | resubmit: | 78 | resubmit: |
78 | status = usb_submit_urb (urb, GFP_ATOMIC); | 79 | retval = usb_submit_urb (urb, GFP_ATOMIC); |
79 | if (status) | 80 | if (retval) |
80 | err ("can't resubmit intr, %s-%s/input0, status %d", | 81 | err ("can't resubmit intr, %s-%s/input0, retval %d", |
81 | onetouch->udev->bus->bus_name, | 82 | onetouch->udev->bus->bus_name, |
82 | onetouch->udev->devpath, status); | 83 | onetouch->udev->devpath, retval); |
83 | } | 84 | } |
84 | 85 | ||
85 | static int usb_onetouch_open(struct input_dev *dev) | 86 | static int usb_onetouch_open(struct input_dev *dev) |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index b6bf31a97b60..a624e72f81dc 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -313,6 +313,13 @@ UNUSUAL_DEV( 0x04b0, 0x0301, 0x0010, 0x0010, | |||
313 | US_SC_DEVICE, US_PR_DEVICE,NULL, | 313 | US_SC_DEVICE, US_PR_DEVICE,NULL, |
314 | US_FL_NOT_LOCKABLE ), | 314 | US_FL_NOT_LOCKABLE ), |
315 | 315 | ||
316 | /* Reported by Stefan de Konink <skinkie@xs4all.nl> */ | ||
317 | UNUSUAL_DEV( 0x04b0, 0x0401, 0x0200, 0x0200, | ||
318 | "NIKON", | ||
319 | "NIKON DSC D100", | ||
320 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
321 | US_FL_FIX_CAPACITY), | ||
322 | |||
316 | /* Reported by Andreas Bockhold <andreas@bockionline.de> */ | 323 | /* Reported by Andreas Bockhold <andreas@bockionline.de> */ |
317 | UNUSUAL_DEV( 0x04b0, 0x0405, 0x0100, 0x0100, | 324 | UNUSUAL_DEV( 0x04b0, 0x0405, 0x0100, 0x0100, |
318 | "NIKON", | 325 | "NIKON", |
@@ -1384,6 +1391,17 @@ UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x0110, | |||
1384 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, | 1391 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, |
1385 | 0 ), | 1392 | 0 ), |
1386 | 1393 | ||
1394 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> | ||
1395 | * Entry is needed for the initializer function override, | ||
1396 | * which instructs the device to load as a modem | ||
1397 | * device. | ||
1398 | */ | ||
1399 | UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, | ||
1400 | "Sierra Wireless", | ||
1401 | "USB MMC Storage", | ||
1402 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1403 | US_FL_IGNORE_DEVICE), | ||
1404 | |||
1387 | /* Reported by Jaco Kroon <jaco@kroon.co.za> | 1405 | /* Reported by Jaco Kroon <jaco@kroon.co.za> |
1388 | * The usb-storage module found on the Digitech GNX4 (and supposedly other | 1406 | * The usb-storage module found on the Digitech GNX4 (and supposedly other |
1389 | * devices) misbehaves and causes a bunch of invalid I/O errors. | 1407 | * devices) misbehaves and causes a bunch of invalid I/O errors. |
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c index e9bbc3455c94..1b3f6586bc9f 100644 --- a/drivers/video/backlight/cr_bllcd.c +++ b/drivers/video/backlight/cr_bllcd.c | |||
@@ -174,7 +174,7 @@ static int cr_backlight_probe(struct platform_device *pdev) | |||
174 | struct cr_panel *crp; | 174 | struct cr_panel *crp; |
175 | u8 dev_en; | 175 | u8 dev_en; |
176 | 176 | ||
177 | crp = kzalloc(sizeof(crp), GFP_KERNEL); | 177 | crp = kzalloc(sizeof(*crp), GFP_KERNEL); |
178 | if (crp == NULL) | 178 | if (crp == NULL) |
179 | return -ENOMEM; | 179 | return -ENOMEM; |
180 | 180 | ||
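Editor's note: the cr_bllcd fix above is the classic sizeof(pointer) vs. sizeof(object) allocation bug; kzalloc(sizeof(crp), ...) only allocates pointer-sized storage (4 or 8 bytes) instead of a whole struct cr_panel. A short illustration with a hypothetical struct:

#include <linux/slab.h>

/* Hypothetical struct standing in for cr_panel above. */
struct example_panel {
	int intensity;
	char name[32];
};

static struct example_panel *example_alloc(void)
{
	struct example_panel *p;

	/* Wrong: sizeof(p) is the size of the pointer (4 or 8 bytes),
	 * leaving most of the struct unallocated:
	 *	p = kzalloc(sizeof(p), GFP_KERNEL);
	 * Right: sizeof(*p) always tracks the pointed-to type. */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	return p;	/* NULL on allocation failure */
}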