Diffstat (limited to 'drivers/usb/gadget/udc/net2280.c')

 drivers/usb/gadget/udc/net2280.c | 533
 1 file changed, 130 insertions(+), 403 deletions(-)
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index d6411e0a8e03..d2c0bf65e345 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -12,11 +12,7 @@
  * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
  * as well as Gadget Zero and Gadgetfs.
  *
- * DMA is enabled by default. Drivers using transfer queues might use
- * DMA chaining to remove IRQ latencies between transfers. (Except when
- * short OUT transfers happen.) Drivers can use the req->no_interrupt
- * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
- * and DMA chaining is enabled.
+ * DMA is enabled by default.
  *
  * MSI is enabled by default. The legacy IRQ is used if MSI couldn't
  * be enabled.
@@ -84,23 +80,6 @@ static const char *const ep_name[] = {
 	"ep-e", "ep-f", "ep-g", "ep-h",
 };
 
-/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
- * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
- *
- * The net2280 DMA engines are not tightly integrated with their FIFOs;
- * not all cases are (yet) handled well in this driver or the silicon.
- * Some gadget drivers work better with the dma support here than others.
- * These two parameters let you use PIO or more aggressive DMA.
- */
-static bool use_dma = true;
-static bool use_dma_chaining;
-static bool use_msi = true;
-
-/* "modprobe net2280 use_dma=n" etc */
-module_param(use_dma, bool, 0444);
-module_param(use_dma_chaining, bool, 0444);
-module_param(use_msi, bool, 0444);
-
 /* mode 0 == ep-{a,b,c,d} 1K fifo each
  * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
  * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
@@ -120,11 +99,6 @@ static bool enable_suspend;
 /* "modprobe net2280 enable_suspend=1" etc */
 module_param(enable_suspend, bool, 0444);
 
-/* force full-speed operation */
-static bool full_speed;
-module_param(full_speed, bool, 0444);
-MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");
-
 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
 
 static char *type_string(u8 bmAttributes)
@@ -202,15 +176,6 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 	/* set speed-dependent max packet; may kick in high bandwidth */
 	set_max_speed(ep, max);
 
-	/* FIFO lines can't go to different packets. PIO is ok, so
-	 * use it instead of troublesome (non-bulk) multi-packet DMA.
-	 */
-	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
-		ep_dbg(ep->dev, "%s, no dma for maxpacket %d\n",
-			ep->ep.name, ep->ep.maxpacket);
-		ep->dma = NULL;
-	}
-
 	/* set type, direction, address; reset fifo counters */
 	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
 	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
@@ -478,7 +443,7 @@ static int net2280_disable(struct usb_ep *_ep)
 	/* synch memory views with the device */
 	(void)readl(&ep->cfg->ep_cfg);
 
-	if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
+	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
 		ep->dma = &ep->dev->dma[ep->num - 1];
 
 	spin_unlock_irqrestore(&ep->dev->lock, flags);
@@ -610,9 +575,15 @@ static void out_flush(struct net2280_ep *ep)
 	u32 __iomem *statp;
 	u32 tmp;
 
-	ASSERT_OUT_NAKING(ep);
-
 	statp = &ep->regs->ep_stat;
+
+	tmp = readl(statp);
+	if (tmp & BIT(NAK_OUT_PACKETS)) {
+		ep_dbg(ep->dev, "%s %s %08x !NAK\n",
+			ep->ep.name, __func__, tmp);
+		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+	}
+
 	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
 		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
 		statp);
@@ -747,8 +718,7 @@ static void fill_dma_desc(struct net2280_ep *ep,
 	req->valid = valid;
 	if (valid)
 		dmacount |= BIT(VALID_BIT);
-	if (likely(!req->req.no_interrupt || !use_dma_chaining))
-		dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
+	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
 
 	/* td->dmadesc = previously set by caller */
 	td->dmaaddr = cpu_to_le32 (req->req.dma);
@@ -862,27 +832,11 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
 	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
 	fill_dma_desc(ep, req, 1);
 
-	if (!use_dma_chaining)
-		req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
+	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
 
 	start_queue(ep, tmp, req->td_dma);
 }
 
-static inline void resume_dma(struct net2280_ep *ep)
-{
-	writel(readl(&ep->dma->dmactl) | BIT(DMA_ENABLE), &ep->dma->dmactl);
-
-	ep->dma_started = true;
-}
-
-static inline void ep_stop_dma(struct net2280_ep *ep)
-{
-	writel(readl(&ep->dma->dmactl) & ~BIT(DMA_ENABLE), &ep->dma->dmactl);
-	spin_stop_dma(ep->dma);
-
-	ep->dma_started = false;
-}
-
 static inline void
 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
 {
@@ -973,10 +927,8 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 		return ret;
 	}
 
-#if 0
 	ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
 			_ep->name, _req, _req->length, _req->buf);
-#endif
 
 	spin_lock_irqsave(&dev->lock, flags);
 
@@ -984,24 +936,12 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 	_req->actual = 0;
 
 	/* kickstart this i/o queue? */
-	if (list_empty(&ep->queue) && !ep->stopped) {
-		/* DMA request while EP halted */
-		if (ep->dma &&
-		    (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
-			(dev->quirks & PLX_SUPERSPEED)) {
-			int valid = 1;
-			if (ep->is_in) {
-				int expect;
-				expect = likely(req->req.zero ||
-						((req->req.length %
-						ep->ep.maxpacket) != 0));
-				if (expect != ep->in_fifo_validate)
-					valid = 0;
-			}
-			queue_dma(ep, req, valid);
-		}
+	if (list_empty(&ep->queue) && !ep->stopped &&
+		!((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
+		  (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
+
 		/* use DMA if the endpoint supports it, else pio */
-		else if (ep->dma)
+		if (ep->dma)
 			start_dma(ep, req);
 		else {
 			/* maybe there's no control data, just status ack */
@@ -1084,8 +1024,6 @@ dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
 	done(ep, req, status);
 }
 
-static void restart_dma(struct net2280_ep *ep);
-
 static void scan_dma_completions(struct net2280_ep *ep)
 {
 	/* only look at descriptors that were "naturally" retired,
@@ -1117,9 +1055,8 @@ static void scan_dma_completions(struct net2280_ep *ep)
 			dma_done(ep, req, tmp, 0);
 			break;
 		} else if (!ep->is_in &&
-				(req->req.length % ep->ep.maxpacket) != 0) {
-			if (ep->dev->quirks & PLX_SUPERSPEED)
-				return dma_done(ep, req, tmp, 0);
+			   (req->req.length % ep->ep.maxpacket) &&
+			   !(ep->dev->quirks & PLX_SUPERSPEED)) {
 
 			tmp = readl(&ep->regs->ep_stat);
 			/* AVOID TROUBLE HERE by not issuing short reads from
@@ -1150,67 +1087,15 @@ static void scan_dma_completions(struct net2280_ep *ep)
 static void restart_dma(struct net2280_ep *ep)
 {
 	struct net2280_request *req;
-	u32 dmactl = dmactl_default;
 
 	if (ep->stopped)
 		return;
 	req = list_entry(ep->queue.next, struct net2280_request, queue);
 
-	if (!use_dma_chaining) {
-		start_dma(ep, req);
-		return;
-	}
-
-	/* the 2280 will be processing the queue unless queue hiccups after
-	 * the previous transfer:
-	 *  IN: wanted automagic zlp, head doesn't (or vice versa)
-	 *     DMA_FIFO_VALIDATE doesn't init from dma descriptors.
-	 *  OUT: was "usb-short", we must restart.
-	 */
-	if (ep->is_in && !req->valid) {
-		struct net2280_request *entry, *prev = NULL;
-		int reqmode, done = 0;
-
-		ep_dbg(ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
-		ep->in_fifo_validate = likely(req->req.zero ||
-				(req->req.length % ep->ep.maxpacket) != 0);
-		if (ep->in_fifo_validate)
-			dmactl |= BIT(DMA_FIFO_VALIDATE);
-		list_for_each_entry(entry, &ep->queue, queue) {
-			__le32 dmacount;
-
-			if (entry == req)
-				continue;
-			dmacount = entry->td->dmacount;
-			if (!done) {
-				reqmode = likely(entry->req.zero ||
-					(entry->req.length % ep->ep.maxpacket));
-				if (reqmode == ep->in_fifo_validate) {
-					entry->valid = 1;
-					dmacount |= valid_bit;
-					entry->td->dmacount = dmacount;
-					prev = entry;
-					continue;
-				} else {
-					/* force a hiccup */
-					prev->td->dmacount |= dma_done_ie;
-					done = 1;
-				}
-			}
-
-			/* walk the rest of the queue so unlinks behave */
-			entry->valid = 0;
-			dmacount &= ~valid_bit;
-			entry->td->dmacount = dmacount;
-			prev = entry;
-		}
-	}
-
-	writel(0, &ep->dma->dmactl);
-	start_queue(ep, dmactl, req->td_dma);
+	start_dma(ep, req);
 }
 
-static void abort_dma_228x(struct net2280_ep *ep)
+static void abort_dma(struct net2280_ep *ep)
 {
 	/* abort the current transfer */
 	if (likely(!list_empty(&ep->queue))) {
@@ -1222,19 +1107,6 @@ static void abort_dma_228x(struct net2280_ep *ep)
 		scan_dma_completions(ep);
 }
 
-static void abort_dma_338x(struct net2280_ep *ep)
-{
-	writel(BIT(DMA_ABORT), &ep->dma->dmastat);
-	spin_stop_dma(ep->dma);
-}
-
-static void abort_dma(struct net2280_ep *ep)
-{
-	if (ep->dev->quirks & PLX_LEGACY)
-		return abort_dma_228x(ep);
-	return abort_dma_338x(ep);
-}
-
 /* dequeue ALL requests */
 static void nuke(struct net2280_ep *ep)
 {
@@ -1306,25 +1178,6 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 			done(ep, req, -ECONNRESET);
 		}
 		req = NULL;
-
-	/* patch up hardware chaining data */
-	} else if (ep->dma && use_dma_chaining) {
-		if (req->queue.prev == ep->queue.next) {
-			writel(le32_to_cpu(req->td->dmadesc),
-				&ep->dma->dmadesc);
-			if (req->td->dmacount & dma_done_ie)
-				writel(readl(&ep->dma->dmacount) |
-					le32_to_cpu(dma_done_ie),
-					&ep->dma->dmacount);
-		} else {
-			struct net2280_request *prev;
-
-			prev = list_entry(req->queue.prev,
-				struct net2280_request, queue);
-			prev->td->dmadesc = req->td->dmadesc;
-			if (req->td->dmacount & dma_done_ie)
-				prev->td->dmacount |= dma_done_ie;
-		}
 	}
 
 	if (req)
@@ -1512,10 +1365,10 @@ static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
 	tmp = readl(&dev->usb->usbctl);
 	if (value) {
 		tmp |= BIT(SELF_POWERED_STATUS);
-		dev->selfpowered = 1;
+		_gadget->is_selfpowered = 1;
 	} else {
 		tmp &= ~BIT(SELF_POWERED_STATUS);
-		dev->selfpowered = 0;
+		_gadget->is_selfpowered = 0;
 	}
 	writel(tmp, &dev->usb->usbctl);
 	spin_unlock_irqrestore(&dev->lock, flags);
@@ -1604,14 +1457,11 @@ static ssize_t registers_show(struct device *_dev,
 
 	/* Main Control Registers */
 	t = scnprintf(next, size, "%s version " DRIVER_VERSION
-			", chiprev %04x, dma %s\n\n"
+			", chiprev %04x\n\n"
 			"devinit %03x fifoctl %08x gadget '%s'\n"
 			"pci irqenb0 %02x irqenb1 %08x "
 			"irqstat0 %04x irqstat1 %08x\n",
 			driver_name, dev->chiprev,
-			use_dma
-				? (use_dma_chaining ? "chaining" : "enabled")
-				: "disabled",
 			readl(&dev->regs->devinit),
 			readl(&dev->regs->fifoctl),
 			s,
@@ -1913,76 +1763,73 @@ static void defect7374_disable_data_eps(struct net2280 *dev)
 static void defect7374_enable_data_eps_zero(struct net2280 *dev)
 {
 	u32 tmp = 0, tmp_reg;
-	u32 fsmvalue, scratch;
+	u32 scratch;
 	int i;
 	unsigned char ep_sel;
 
 	scratch = get_idx_reg(dev->regs, SCRATCH);
-	fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
+
+	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
+		== DEFECT7374_FSM_SS_CONTROL_READ);
+
 	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
 
-	/*See if firmware needs to set up for workaround*/
-	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
-		ep_warn(dev, "Operate Defect 7374 workaround soft this time");
-		ep_warn(dev, "It will operate on cold-reboot and SS connect");
-
-		/*GPEPs:*/
-		tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
-			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
-			((dev->enhanced_mode) ?
-			BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
-			BIT(IN_ENDPOINT_ENABLE));
-
-		for (i = 1; i < 5; i++)
-			writel(tmp, &dev->ep[i].cfg->ep_cfg);
-
-		/* CSRIN, PCIIN, STATIN, RCIN*/
-		tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
-		writel(tmp, &dev->dep[1].dep_cfg);
-		writel(tmp, &dev->dep[3].dep_cfg);
-		writel(tmp, &dev->dep[4].dep_cfg);
-		writel(tmp, &dev->dep[5].dep_cfg);
-
-		/*Implemented for development and debug.
-		 * Can be refined/tuned later.*/
-		for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
-			/* Select an endpoint for subsequent operations: */
-			tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
-			writel(((tmp_reg & ~0x1f) | ep_sel),
-				&dev->plregs->pl_ep_ctrl);
-
-			if (ep_sel == 1) {
-				tmp =
-				(readl(&dev->plregs->pl_ep_ctrl) |
-					BIT(CLEAR_ACK_ERROR_CODE) | 0);
-				writel(tmp, &dev->plregs->pl_ep_ctrl);
-				continue;
-			}
+	ep_warn(dev, "Operate Defect 7374 workaround soft this time");
+	ep_warn(dev, "It will operate on cold-reboot and SS connect");
 
-			if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
-					ep_sel == 18 || ep_sel == 20)
-				continue;
+	/*GPEPs:*/
+	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
+		(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
+		((dev->enhanced_mode) ?
+		BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
+		BIT(IN_ENDPOINT_ENABLE));
 
-			tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
-				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
-			writel(tmp, &dev->plregs->pl_ep_cfg_4);
+	for (i = 1; i < 5; i++)
+		writel(tmp, &dev->ep[i].cfg->ep_cfg);
 
-			tmp = readl(&dev->plregs->pl_ep_ctrl) &
-				~BIT(EP_INITIALIZED);
-			writel(tmp, &dev->plregs->pl_ep_ctrl);
+	/* CSRIN, PCIIN, STATIN, RCIN*/
+	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
+	writel(tmp, &dev->dep[1].dep_cfg);
+	writel(tmp, &dev->dep[3].dep_cfg);
+	writel(tmp, &dev->dep[4].dep_cfg);
+	writel(tmp, &dev->dep[5].dep_cfg);
+
+	/*Implemented for development and debug.
+	 * Can be refined/tuned later.*/
+	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
+		/* Select an endpoint for subsequent operations: */
+		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
+		writel(((tmp_reg & ~0x1f) | ep_sel),
+			&dev->plregs->pl_ep_ctrl);
 
+		if (ep_sel == 1) {
+			tmp =
+			(readl(&dev->plregs->pl_ep_ctrl) |
+				BIT(CLEAR_ACK_ERROR_CODE) | 0);
+			writel(tmp, &dev->plregs->pl_ep_ctrl);
+			continue;
 		}
 
-		/* Set FSM to focus on the first Control Read:
-		 * - Tip: Connection speed is known upon the first
-		 * setup request.*/
-		scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
-		set_idx_reg(dev->regs, SCRATCH, scratch);
+		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
+				ep_sel == 18 || ep_sel == 20)
+			continue;
+
+		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
+			BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
+		writel(tmp, &dev->plregs->pl_ep_cfg_4);
+
+		tmp = readl(&dev->plregs->pl_ep_ctrl) &
+			~BIT(EP_INITIALIZED);
+		writel(tmp, &dev->plregs->pl_ep_ctrl);
 
-	} else{
-		ep_warn(dev, "Defect 7374 workaround soft will NOT operate");
-		ep_warn(dev, "It will operate on cold-reboot and SS connect");
 	}
+
+	/* Set FSM to focus on the first Control Read:
+	 * - Tip: Connection speed is known upon the first
+	 * setup request.*/
+	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
+	set_idx_reg(dev->regs, SCRATCH, scratch);
+
 }
 
 /* keeping it simple:
@@ -2033,21 +1880,13 @@ static void usb_reset_228x(struct net2280 *dev)
 static void usb_reset_338x(struct net2280 *dev)
 {
 	u32 tmp;
-	u32 fsmvalue;
 
 	dev->gadget.speed = USB_SPEED_UNKNOWN;
 	(void)readl(&dev->usb->usbctl);
 
 	net2280_led_init(dev);
 
-	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
-			(0xf << DEFECT7374_FSM_FIELD);
-
-	/* See if firmware needs to set up for workaround: */
-	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
-		ep_info(dev, "%s: Defect 7374 FsmValue 0x%08x\n", __func__,
-			fsmvalue);
-	} else {
+	if (dev->bug7734_patched) {
 		/* disable automatic responses, and irqs */
 		writel(0, &dev->usb->stdrsp);
 		writel(0, &dev->regs->pciirqenb0);
@@ -2064,7 +1903,7 @@ static void usb_reset_338x(struct net2280 *dev)
 
 	writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);
 
-	if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
+	if (dev->bug7734_patched) {
 		/* reset, and enable pci */
 		tmp = readl(&dev->regs->devinit) |
 			BIT(PCI_ENABLE) |
@@ -2093,10 +1932,6 @@ static void usb_reset(struct net2280 *dev)
 static void usb_reinit_228x(struct net2280 *dev)
 {
 	u32 tmp;
-	int init_dma;
-
-	/* use_dma changes are ignored till next device re-init */
-	init_dma = use_dma;
 
 	/* basic endpoint init */
 	for (tmp = 0; tmp < 7; tmp++) {
@@ -2108,8 +1943,7 @@ static void usb_reinit_228x(struct net2280 *dev)
 
 		if (tmp > 0 && tmp <= 4) {
 			ep->fifo_size = 1024;
-			if (init_dma)
-				ep->dma = &dev->dma[tmp - 1];
+			ep->dma = &dev->dma[tmp - 1];
 		} else
 			ep->fifo_size = 64;
 		ep->regs = &dev->epregs[tmp];
@@ -2133,17 +1967,12 @@
 
 static void usb_reinit_338x(struct net2280 *dev)
 {
-	int init_dma;
 	int i;
 	u32 tmp, val;
-	u32 fsmvalue;
 	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
 	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
 						0x00, 0xC0, 0x00, 0xC0 };
 
-	/* use_dma changes are ignored till next device re-init */
-	init_dma = use_dma;
-
 	/* basic endpoint init */
 	for (i = 0; i < dev->n_ep; i++) {
 		struct net2280_ep *ep = &dev->ep[i];
@@ -2152,7 +1981,7 @@ static void usb_reinit_338x(struct net2280 *dev)
 		ep->dev = dev;
 		ep->num = i;
 
-		if (i > 0 && i <= 4 && init_dma)
+		if (i > 0 && i <= 4)
 			ep->dma = &dev->dma[i - 1];
 
 		if (dev->enhanced_mode) {
@@ -2177,14 +2006,7 @@ static void usb_reinit_338x(struct net2280 *dev)
 	dev->ep[0].stopped = 0;
 
 	/* Link layer set up */
-	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
-				(0xf << DEFECT7374_FSM_FIELD);
-
-	/* See if driver needs to set up for workaround: */
-	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
-		ep_info(dev, "%s: Defect 7374 FsmValue %08x\n",
-				__func__, fsmvalue);
-	else {
+	if (dev->bug7734_patched) {
 		tmp = readl(&dev->usb_ext->usbctl2) &
 			~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
 		writel(tmp, &dev->usb_ext->usbctl2);
@@ -2291,15 +2113,8 @@ static void ep0_start_228x(struct net2280 *dev)
 
 static void ep0_start_338x(struct net2280 *dev)
 {
-	u32 fsmvalue;
-
-	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
-			(0xf << DEFECT7374_FSM_FIELD);
 
-	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
-		ep_info(dev, "%s: Defect 7374 FsmValue %08x\n", __func__,
-			fsmvalue);
-	else
+	if (dev->bug7734_patched)
 		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
 			BIT(SET_EP_HIDE_STATUS_PHASE),
 			&dev->epregs[0].ep_rsp);
@@ -2382,16 +2197,12 @@ static int net2280_start(struct usb_gadget *_gadget,
 	if (retval)
 		goto err_func;
 
-	/* Enable force-full-speed testing mode, if desired */
-	if (full_speed && (dev->quirks & PLX_LEGACY))
-		writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag);
-
-	/* ... then enable host detection and ep0; and we're ready
+	/* enable host detection and ep0; and we're ready
 	 * for set_configuration as well as eventual disconnect.
 	 */
 	net2280_led_active(dev, 1);
 
-	if (dev->quirks & PLX_SUPERSPEED)
+	if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
 		defect7374_enable_data_eps_zero(dev);
 
 	ep0_start(dev);
@@ -2444,10 +2255,6 @@ static int net2280_stop(struct usb_gadget *_gadget)
 
 	net2280_led_active(dev, 0);
 
-	/* Disable full-speed test mode */
-	if (dev->quirks & PLX_LEGACY)
-		writel(0, &dev->usb->xcvrdiag);
-
 	device_remove_file(&dev->pdev->dev, &dev_attr_function);
 	device_remove_file(&dev->pdev->dev, &dev_attr_queues);
 
@@ -2478,10 +2285,10 @@ static void handle_ep_small(struct net2280_ep *ep)
 	/* ack all, and handle what we care about */
 	t = readl(&ep->regs->ep_stat);
 	ep->irqs++;
-#if 0
+
 	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
-			ep->ep.name, t, req ? &req->req : 0);
-#endif
+			ep->ep.name, t, req ? &req->req : NULL);
+
 	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
 		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
 	else
@@ -2717,6 +2524,7 @@ static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
 		 * run after the next USB connection.
 		 */
 		scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
+		dev->bug7734_patched = 1;
 		goto restore_data_eps;
 	}
 
@@ -2730,6 +2538,7 @@ static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
 		if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
 			(state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
 			scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
+			dev->bug7734_patched = 1;
 			break;
 		}
 
@@ -2766,80 +2575,19 @@ restore_data_eps:
 	return;
 }
 
-static void ep_stall(struct net2280_ep *ep, int stall)
+static void ep_clear_seqnum(struct net2280_ep *ep)
 {
 	struct net2280 *dev = ep->dev;
 	u32 val;
 	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
 
-	if (stall) {
-		writel(BIT(SET_ENDPOINT_HALT) |
-			/* BIT(SET_NAK_PACKETS) | */
-			BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
-			&ep->regs->ep_rsp);
-		ep->is_halt = 1;
-	} else {
-		if (dev->gadget.speed == USB_SPEED_SUPER) {
-			/*
-			 * Workaround for SS SeqNum not cleared via
-			 * Endpoint Halt (Clear) bit. select endpoint
-			 */
-			val = readl(&dev->plregs->pl_ep_ctrl);
-			val = (val & ~0x1f) | ep_pl[ep->num];
-			writel(val, &dev->plregs->pl_ep_ctrl);
-
-			val |= BIT(SEQUENCE_NUMBER_RESET);
-			writel(val, &dev->plregs->pl_ep_ctrl);
-		}
-		val = readl(&ep->regs->ep_rsp);
-		val |= BIT(CLEAR_ENDPOINT_HALT) |
-			BIT(CLEAR_ENDPOINT_TOGGLE);
-		writel(val,
-			/* | BIT(CLEAR_NAK_PACKETS),*/
-			&ep->regs->ep_rsp);
-		ep->is_halt = 0;
-		val = readl(&ep->regs->ep_rsp);
-	}
-}
-
-static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged)
-{
-	/* set/clear, then synch memory views with the device */
-	if (value) {
-		ep->stopped = 1;
-		if (ep->num == 0)
-			ep->dev->protocol_stall = 1;
-		else {
-			if (ep->dma)
-				ep_stop_dma(ep);
-			ep_stall(ep, true);
-		}
-
-		if (wedged)
-			ep->wedged = 1;
-	} else {
-		ep->stopped = 0;
-		ep->wedged = 0;
-
-		ep_stall(ep, false);
+	val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
+	val |= ep_pl[ep->num];
+	writel(val, &dev->plregs->pl_ep_ctrl);
+	val |= BIT(SEQUENCE_NUMBER_RESET);
+	writel(val, &dev->plregs->pl_ep_ctrl);
 
-		/* Flush the queue */
-		if (!list_empty(&ep->queue)) {
-			struct net2280_request *req =
-				list_entry(ep->queue.next, struct net2280_request,
-					queue);
-			if (ep->dma)
-				resume_dma(ep);
-			else {
-				if (ep->is_in)
-					write_fifo(ep, &req->req);
-				else {
-					if (read_fifo(ep, req))
-						done(ep, req, 0);
-				}
-			}
-		}
-	}
+	return;
 }
 
 static void handle_stat0_irqs_superspeed(struct net2280 *dev,
@@ -2863,7 +2611,7 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev,
 	switch (r.bRequestType) {
 	case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
 		status = dev->wakeup_enable ? 0x02 : 0x00;
-		if (dev->selfpowered)
+		if (dev->gadget.is_selfpowered)
 			status |= BIT(0);
 		status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
 						dev->ltm_enable << 4);
@@ -2940,7 +2688,12 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev,
 			if (w_value != USB_ENDPOINT_HALT)
 				goto do_stall3;
 			ep_vdbg(dev, "%s clear halt\n", e->ep.name);
-			ep_stall(e, false);
+			/*
+			 * Workaround for SS SeqNum not cleared via
+			 * Endpoint Halt (Clear) bit. select endpoint
+			 */
+			ep_clear_seqnum(e);
+			clear_halt(e);
 			if (!list_empty(&e->queue) && e->td_dma)
 				restart_dma(e);
 			allow_status(ep);
@@ -2998,7 +2751,14 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev,
 			e = get_ep_by_addr(dev, w_index);
 			if (!e || (w_value != USB_ENDPOINT_HALT))
 				goto do_stall3;
-			ep_stdrsp(e, true, false);
+			ep->stopped = 1;
+			if (ep->num == 0)
+				ep->dev->protocol_stall = 1;
+			else {
+				if (ep->dma)
+					abort_dma(ep);
+				set_halt(ep);
+			}
 			allow_status_338x(ep);
 			break;
 
@@ -3026,7 +2786,7 @@ do_stall3:
 			r.bRequestType, r.bRequest, tmp);
 		dev->protocol_stall = 1;
 		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
-		ep_stall(ep, true);
+		set_halt(ep);
 	}
 
 next_endpoints3:
@@ -3091,9 +2851,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
 		}
 		ep->stopped = 0;
 		dev->protocol_stall = 0;
-		if (dev->quirks & PLX_SUPERSPEED)
-			ep->is_halt = 0;
-		else{
+		if (!(dev->quirks & PLX_SUPERSPEED)) {
 			if (ep->dev->quirks & PLX_2280)
 				tmp = BIT(FIFO_OVERFLOW) |
 					BIT(FIFO_UNDERFLOW);
@@ -3120,7 +2878,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
 		cpu_to_le32s(&u.raw[0]);
 		cpu_to_le32s(&u.raw[1]);
 
-		if (dev->quirks & PLX_SUPERSPEED)
+		if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
 			defect7374_workaround(dev, u.r);
 
 		tmp = 0;
@@ -3423,17 +3181,12 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
 				continue;
 		}
 
-		/* chaining should stop on abort, short OUT from fifo,
-		 * or (stat0 codepath) short OUT transfer.
-		 */
-		if (!use_dma_chaining) {
-			if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
-				ep_dbg(ep->dev, "%s no xact done? %08x\n",
-					ep->ep.name, tmp);
-				continue;
-			}
-			stop_dma(ep->dma);
+		if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
+			ep_dbg(ep->dev, "%s no xact done? %08x\n",
+				ep->ep.name, tmp);
+			continue;
 		}
+		stop_dma(ep->dma);
 
 		/* OUT transfers terminate when the data from the
 		 * host is in our memory. Process whatever's done.
@@ -3448,30 +3201,9 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
 		scan_dma_completions(ep);
 
 		/* disable dma on inactive queues; else maybe restart */
-		if (list_empty(&ep->queue)) {
-			if (use_dma_chaining)
-				stop_dma(ep->dma);
-		} else {
+		if (!list_empty(&ep->queue)) {
 			tmp = readl(&dma->dmactl);
-			if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0)
-				restart_dma(ep);
-			else if (ep->is_in && use_dma_chaining) {
-				struct net2280_request *req;
-				__le32 dmacount;
-
-				/* the descriptor at the head of the chain
-				 * may still have VALID_BIT clear; that's
-				 * used to trigger changing DMA_FIFO_VALIDATE
-				 * (affects automagic zlp writes).
-				 */
-				req = list_entry(ep->queue.next,
-						struct net2280_request, queue);
-				dmacount = req->td->dmacount;
-				dmacount &= cpu_to_le32(BIT(VALID_BIT) |
-						DMA_BYTE_COUNT_MASK);
-				if (dmacount && (dmacount & valid_bit) == 0)
-					restart_dma(ep);
-			}
+			restart_dma(ep);
 		}
 		ep->irqs++;
 	}
@@ -3556,7 +3288,7 @@ static void net2280_remove(struct pci_dev *pdev)
 	}
 	if (dev->got_irq)
 		free_irq(pdev->irq, dev);
-	if (use_msi && dev->quirks & PLX_SUPERSPEED)
+	if (dev->quirks & PLX_SUPERSPEED)
 		pci_disable_msi(pdev);
 	if (dev->regs)
 		iounmap(dev->regs);
@@ -3581,9 +3313,6 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	void __iomem *base = NULL;
 	int retval, i;
 
-	if (!use_dma)
-		use_dma_chaining = 0;
-
 	/* alloc, and start init */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL) {
@@ -3663,9 +3392,12 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
 					(0xf << DEFECT7374_FSM_FIELD);
 		/* See if firmware needs to set up for workaround: */
-		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ)
+		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
+			dev->bug7734_patched = 1;
 			writel(0, &dev->usb->usbctl);
-	} else{
+		} else
+			dev->bug7734_patched = 0;
+	} else {
 		dev->enhanced_mode = 0;
 		dev->n_ep = 7;
 		/* put into initial config, link up all endpoints */
@@ -3682,7 +3414,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto done;
 	}
 
-	if (use_msi && (dev->quirks & PLX_SUPERSPEED))
+	if (dev->quirks & PLX_SUPERSPEED)
 		if (pci_enable_msi(pdev))
 			ep_err(dev, "Failed to enable MSI mode\n");
 
@@ -3741,9 +3473,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	ep_info(dev, "%s\n", driver_desc);
 	ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
 			pdev->irq, base, dev->chiprev);
-	ep_info(dev, "version: " DRIVER_VERSION "; dma %s %s\n",
-			use_dma ? (use_dma_chaining ? "chaining" : "enabled")
-				: "disabled",
+	ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
 			dev->enhanced_mode ? "enhanced mode" : "legacy mode");
 	retval = device_create_file(&pdev->dev, &dev_attr_registers);
 	if (retval)
@@ -3776,9 +3506,6 @@ static void net2280_shutdown(struct pci_dev *pdev)
 	/* disable the pullup so the host will think we're gone */
 	writel(0, &dev->usb->usbctl);
 
-	/* Disable full-speed test mode */
-	if (dev->quirks & PLX_LEGACY)
-		writel(0, &dev->usb->xcvrdiag);
 }
 
 