author    Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>    2014-05-20 12:30:09 -0400
committer Felipe Balbi <balbi@ti.com>                            2014-06-30 13:33:34 -0400
commit    fae3c158800339765a2580ac5d6236ae116ec5cb (patch)
tree      09dd0b63df391f85fa34b9c80105a9132ac8441e
parent    a27f37a13cfbcfaeb987a910661a860f8d2f915e (diff)
usb: gadget: net2280: Pass checkpatch.pl test
Fix Code Style using checkpatch.pl criteria

Signed-off-by: Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
-rw-r--r--  drivers/usb/gadget/net2280.c | 1119
-rw-r--r--  drivers/usb/gadget/net2280.h |   90
2 files changed, 602 insertions, 607 deletions
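The cleanup below was driven by the kernel's own style checker. As a rough illustration (exact flags may vary between kernel versions), the whole file can be checked from the top of a kernel source tree with:

    ./scripts/checkpatch.pl --file --terse drivers/usb/gadget/net2280.c

The classes of warnings addressed in this patch include spaces between a function name and its opening parenthesis, explicit zero-initialization of static variables, and C99-style // comments.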
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index c3205ec9560c..d1d4f4fc9da7 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -62,9 +62,9 @@
62#include <linux/usb/ch9.h> 62#include <linux/usb/ch9.h>
63#include <linux/usb/gadget.h> 63#include <linux/usb/gadget.h>
64#include <linux/prefetch.h> 64#include <linux/prefetch.h>
65#include <linux/io.h>
65 66
66#include <asm/byteorder.h> 67#include <asm/byteorder.h>
67#include <asm/io.h>
68#include <asm/irq.h> 68#include <asm/irq.h>
69#include <asm/unaligned.h> 69#include <asm/unaligned.h>
70 70
@@ -76,12 +76,12 @@
76#define USE_RDK_LEDS /* GPIO pins control three LEDs */ 76#define USE_RDK_LEDS /* GPIO pins control three LEDs */
77 77
78 78
79static const char driver_name [] = "net2280"; 79static const char driver_name[] = "net2280";
80static const char driver_desc [] = DRIVER_DESC; 80static const char driver_desc[] = DRIVER_DESC;
81 81
82static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 }; 82static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
83static const char ep0name [] = "ep0"; 83static const char ep0name[] = "ep0";
84static const char *const ep_name [] = { 84static const char *const ep_name[] = {
85 ep0name, 85 ep0name,
86 "ep-a", "ep-b", "ep-c", "ep-d", 86 "ep-a", "ep-b", "ep-c", "ep-d",
87 "ep-e", "ep-f", "ep-g", "ep-h", 87 "ep-e", "ep-f", "ep-g", "ep-h",
@@ -100,15 +100,15 @@ static bool use_dma_chaining;
100static bool use_msi = true; 100static bool use_msi = true;
101 101
102/* "modprobe net2280 use_dma=n" etc */ 102/* "modprobe net2280 use_dma=n" etc */
103module_param (use_dma, bool, S_IRUGO); 103module_param(use_dma, bool, S_IRUGO);
104module_param (use_dma_chaining, bool, S_IRUGO); 104module_param(use_dma_chaining, bool, S_IRUGO);
105module_param(use_msi, bool, S_IRUGO); 105module_param(use_msi, bool, S_IRUGO);
106 106
107/* mode 0 == ep-{a,b,c,d} 1K fifo each 107/* mode 0 == ep-{a,b,c,d} 1K fifo each
108 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable 108 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
109 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable 109 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
110 */ 110 */
111static ushort fifo_mode = 0; 111static ushort fifo_mode;
112 112
113/* "modprobe net2280 fifo_mode=1" etc */ 113/* "modprobe net2280 fifo_mode=1" etc */
114module_param (fifo_mode, ushort, 0644); 114module_param (fifo_mode, ushort, 0644);
@@ -121,7 +121,7 @@ module_param (fifo_mode, ushort, 0644);
121static bool enable_suspend; 121static bool enable_suspend;
122 122
123/* "modprobe net2280 enable_suspend=1" etc */ 123/* "modprobe net2280 enable_suspend=1" etc */
124module_param (enable_suspend, bool, S_IRUGO); 124module_param(enable_suspend, bool, S_IRUGO);
125 125
126/* force full-speed operation */ 126/* force full-speed operation */
127static bool full_speed; 127static bool full_speed;
@@ -130,8 +130,7 @@ MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");
130 130
131#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out") 131#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
132 132
133#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG) 133static char *type_string(u8 bmAttributes)
134static char *type_string (u8 bmAttributes)
135{ 134{
136 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 135 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
137 case USB_ENDPOINT_XFER_BULK: return "bulk"; 136 case USB_ENDPOINT_XFER_BULK: return "bulk";
@@ -140,7 +139,6 @@ static char *type_string (u8 bmAttributes)
140 } 139 }
141 return "control"; 140 return "control";
142} 141}
143#endif
144 142
145#include "net2280.h" 143#include "net2280.h"
146 144
@@ -162,7 +160,7 @@ static inline void enable_pciirqenb(struct net2280_ep *ep)
162} 160}
163 161
164static int 162static int
165net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 163net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
166{ 164{
167 struct net2280 *dev; 165 struct net2280 *dev;
168 struct net2280_ep *ep; 166 struct net2280_ep *ep;
@@ -170,7 +168,7 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
170 unsigned long flags; 168 unsigned long flags;
171 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 }; 169 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
172 170
173 ep = container_of (_ep, struct net2280_ep, ep); 171 ep = container_of(_ep, struct net2280_ep, ep);
174 if (!_ep || !desc || ep->desc || _ep->name == ep0name 172 if (!_ep || !desc || ep->desc || _ep->name == ep0name
175 || desc->bDescriptorType != USB_DT_ENDPOINT) 173 || desc->bDescriptorType != USB_DT_ENDPOINT)
176 return -EINVAL; 174 return -EINVAL;
@@ -191,12 +189,12 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
191 } 189 }
192 190
193 /* sanity check ep-e/ep-f since their fifos are small */ 191 /* sanity check ep-e/ep-f since their fifos are small */
194 max = usb_endpoint_maxp (desc) & 0x1fff; 192 max = usb_endpoint_maxp(desc) & 0x1fff;
195 if (ep->num > 4 && max > 64 && 193 if (ep->num > 4 && max > 64 &&
196 (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)) 194 (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY))
197 return -ERANGE; 195 return -ERANGE;
198 196
199 spin_lock_irqsave (&dev->lock, flags); 197 spin_lock_irqsave(&dev->lock, flags);
200 _ep->maxpacket = max & 0x7ff; 198 _ep->maxpacket = max & 0x7ff;
201 ep->desc = desc; 199 ep->desc = desc;
202 200
@@ -212,7 +210,7 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
212 * use it instead of troublesome (non-bulk) multi-packet DMA. 210 * use it instead of troublesome (non-bulk) multi-packet DMA.
213 */ 211 */
214 if (ep->dma && (max % 4) != 0 && use_dma_chaining) { 212 if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
215 DEBUG (ep->dev, "%s, no dma for maxpacket %d\n", 213 DEBUG(ep->dev, "%s, no dma for maxpacket %d\n",
216 ep->ep.name, ep->ep.maxpacket); 214 ep->ep.name, ep->ep.maxpacket);
217 ep->dma = NULL; 215 ep->dma = NULL;
218 } 216 }
@@ -236,7 +234,7 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
236 return -ERANGE; 234 return -ERANGE;
237 } 235 }
238 } 236 }
239 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0; 237 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
240 /* Enable this endpoint */ 238 /* Enable this endpoint */
241 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) { 239 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) {
242 tmp <<= ENDPOINT_TYPE; 240 tmp <<= ENDPOINT_TYPE;
@@ -285,12 +283,12 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
285 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) | 283 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
286 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE); 284 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
287 if (dev->pdev->device == 0x2280) 285 if (dev->pdev->device == 0x2280)
288 tmp |= readl (&ep->regs->ep_irqenb); 286 tmp |= readl(&ep->regs->ep_irqenb);
289 writel (tmp, &ep->regs->ep_irqenb); 287 writel(tmp, &ep->regs->ep_irqenb);
290 } else { /* dma, per-request */ 288 } else { /* dma, per-request */
291 tmp = BIT((8 + ep->num)); /* completion */ 289 tmp = BIT((8 + ep->num)); /* completion */
292 tmp |= readl (&dev->regs->pciirqenb1); 290 tmp |= readl(&dev->regs->pciirqenb1);
293 writel (tmp, &dev->regs->pciirqenb1); 291 writel(tmp, &dev->regs->pciirqenb1);
294 292
295 /* for short OUT transfers, dma completions can't 293 /* for short OUT transfers, dma completions can't
296 * advance the queue; do it pio-style, by hand. 294 * advance the queue; do it pio-style, by hand.
@@ -298,35 +296,35 @@ net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
298 */ 296 */
299 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) { 297 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
300 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE); 298 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
301 writel (tmp, &ep->regs->ep_irqenb); 299 writel(tmp, &ep->regs->ep_irqenb);
302 300
303 enable_pciirqenb(ep); 301 enable_pciirqenb(ep);
304 } 302 }
305 } 303 }
306 304
307 tmp = desc->bEndpointAddress; 305 tmp = desc->bEndpointAddress;
308 DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n", 306 DEBUG(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
309 _ep->name, tmp & 0x0f, DIR_STRING (tmp), 307 _ep->name, tmp & 0x0f, DIR_STRING(tmp),
310 type_string (desc->bmAttributes), 308 type_string(desc->bmAttributes),
311 ep->dma ? "dma" : "pio", max); 309 ep->dma ? "dma" : "pio", max);
312 310
313 /* pci writes may still be posted */ 311 /* pci writes may still be posted */
314 spin_unlock_irqrestore (&dev->lock, flags); 312 spin_unlock_irqrestore(&dev->lock, flags);
315 return 0; 313 return 0;
316} 314}
317 315
318static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec) 316static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
319{ 317{
320 u32 result; 318 u32 result;
321 319
322 do { 320 do {
323 result = readl (ptr); 321 result = readl(ptr);
324 if (result == ~(u32)0) /* "device unplugged" */ 322 if (result == ~(u32)0) /* "device unplugged" */
325 return -ENODEV; 323 return -ENODEV;
326 result &= mask; 324 result &= mask;
327 if (result == done) 325 if (result == done)
328 return 0; 326 return 0;
329 udelay (1); 327 udelay(1);
330 usec--; 328 usec--;
331 } while (usec > 0); 329 } while (usec > 0);
332 return -ETIMEDOUT; 330 return -ETIMEDOUT;
@@ -340,28 +338,28 @@ static void ep_reset_228x(struct net2280_regs __iomem *regs,
340 u32 tmp; 338 u32 tmp;
341 339
342 ep->desc = NULL; 340 ep->desc = NULL;
343 INIT_LIST_HEAD (&ep->queue); 341 INIT_LIST_HEAD(&ep->queue);
344 342
345 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 343 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
346 ep->ep.ops = &net2280_ep_ops; 344 ep->ep.ops = &net2280_ep_ops;
347 345
348 /* disable the dma, irqs, endpoint... */ 346 /* disable the dma, irqs, endpoint... */
349 if (ep->dma) { 347 if (ep->dma) {
350 writel (0, &ep->dma->dmactl); 348 writel(0, &ep->dma->dmactl);
351 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) | 349 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
352 BIT(DMA_TRANSACTION_DONE_INTERRUPT) | 350 BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
353 BIT(DMA_ABORT), 351 BIT(DMA_ABORT),
354 &ep->dma->dmastat); 352 &ep->dma->dmastat);
355 353
356 tmp = readl (&regs->pciirqenb0); 354 tmp = readl(&regs->pciirqenb0);
357 tmp &= ~BIT(ep->num); 355 tmp &= ~BIT(ep->num);
358 writel (tmp, &regs->pciirqenb0); 356 writel(tmp, &regs->pciirqenb0);
359 } else { 357 } else {
360 tmp = readl (&regs->pciirqenb1); 358 tmp = readl(&regs->pciirqenb1);
361 tmp &= ~BIT((8 + ep->num)); /* completion */ 359 tmp &= ~BIT((8 + ep->num)); /* completion */
362 writel (tmp, &regs->pciirqenb1); 360 writel(tmp, &regs->pciirqenb1);
363 } 361 }
364 writel (0, &ep->regs->ep_irqenb); 362 writel(0, &ep->regs->ep_irqenb);
365 363
366 /* init to our chosen defaults, notably so that we NAK OUT 364 /* init to our chosen defaults, notably so that we NAK OUT
367 * packets until the driver queues a read (+note erratum 0112) 365 * packets until the driver queues a read (+note erratum 0112)
@@ -383,7 +381,7 @@ static void ep_reset_228x(struct net2280_regs __iomem *regs,
383 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) | 381 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
384 BIT(CLEAR_ENDPOINT_HALT); 382 BIT(CLEAR_ENDPOINT_HALT);
385 } 383 }
386 writel (tmp, &ep->regs->ep_rsp); 384 writel(tmp, &ep->regs->ep_rsp);
387 385
388 /* scrub most status bits, and flush any fifo state */ 386 /* scrub most status bits, and flush any fifo state */
389 if (ep->dev->pdev->device == 0x2280) 387 if (ep->dev->pdev->device == 0x2280)
@@ -459,64 +457,64 @@ static void ep_reset_338x(struct net2280_regs __iomem *regs,
459 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat); 457 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
460} 458}
461 459
462static void nuke (struct net2280_ep *); 460static void nuke(struct net2280_ep *);
463 461
464static int net2280_disable (struct usb_ep *_ep) 462static int net2280_disable(struct usb_ep *_ep)
465{ 463{
466 struct net2280_ep *ep; 464 struct net2280_ep *ep;
467 unsigned long flags; 465 unsigned long flags;
468 466
469 ep = container_of (_ep, struct net2280_ep, ep); 467 ep = container_of(_ep, struct net2280_ep, ep);
470 if (!_ep || !ep->desc || _ep->name == ep0name) 468 if (!_ep || !ep->desc || _ep->name == ep0name)
471 return -EINVAL; 469 return -EINVAL;
472 470
473 spin_lock_irqsave (&ep->dev->lock, flags); 471 spin_lock_irqsave(&ep->dev->lock, flags);
474 nuke (ep); 472 nuke(ep);
475 473
476 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX) 474 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
477 ep_reset_338x(ep->dev->regs, ep); 475 ep_reset_338x(ep->dev->regs, ep);
478 else 476 else
479 ep_reset_228x(ep->dev->regs, ep); 477 ep_reset_228x(ep->dev->regs, ep);
480 478
481 VDEBUG (ep->dev, "disabled %s %s\n", 479 VDEBUG(ep->dev, "disabled %s %s\n",
482 ep->dma ? "dma" : "pio", _ep->name); 480 ep->dma ? "dma" : "pio", _ep->name);
483 481
484 /* synch memory views with the device */ 482 /* synch memory views with the device */
485 (void)readl(&ep->cfg->ep_cfg); 483 (void)readl(&ep->cfg->ep_cfg);
486 484
487 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4) 485 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
488 ep->dma = &ep->dev->dma [ep->num - 1]; 486 ep->dma = &ep->dev->dma[ep->num - 1];
489 487
490 spin_unlock_irqrestore (&ep->dev->lock, flags); 488 spin_unlock_irqrestore(&ep->dev->lock, flags);
491 return 0; 489 return 0;
492} 490}
493 491
494/*-------------------------------------------------------------------------*/ 492/*-------------------------------------------------------------------------*/
495 493
496static struct usb_request * 494static struct usb_request
497net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) 495*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
498{ 496{
499 struct net2280_ep *ep; 497 struct net2280_ep *ep;
500 struct net2280_request *req; 498 struct net2280_request *req;
501 499
502 if (!_ep) 500 if (!_ep)
503 return NULL; 501 return NULL;
504 ep = container_of (_ep, struct net2280_ep, ep); 502 ep = container_of(_ep, struct net2280_ep, ep);
505 503
506 req = kzalloc(sizeof(*req), gfp_flags); 504 req = kzalloc(sizeof(*req), gfp_flags);
507 if (!req) 505 if (!req)
508 return NULL; 506 return NULL;
509 507
510 INIT_LIST_HEAD (&req->queue); 508 INIT_LIST_HEAD(&req->queue);
511 509
512 /* this dma descriptor may be swapped with the previous dummy */ 510 /* this dma descriptor may be swapped with the previous dummy */
513 if (ep->dma) { 511 if (ep->dma) {
514 struct net2280_dma *td; 512 struct net2280_dma *td;
515 513
516 td = pci_pool_alloc (ep->dev->requests, gfp_flags, 514 td = pci_pool_alloc(ep->dev->requests, gfp_flags,
517 &req->td_dma); 515 &req->td_dma);
518 if (!td) { 516 if (!td) {
519 kfree (req); 517 kfree(req);
520 return NULL; 518 return NULL;
521 } 519 }
522 td->dmacount = 0; /* not VALID */ 520 td->dmacount = 0; /* not VALID */
@@ -526,21 +524,20 @@ net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
526 return &req->req; 524 return &req->req;
527} 525}
528 526
529static void 527static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
530net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
531{ 528{
532 struct net2280_ep *ep; 529 struct net2280_ep *ep;
533 struct net2280_request *req; 530 struct net2280_request *req;
534 531
535 ep = container_of (_ep, struct net2280_ep, ep); 532 ep = container_of(_ep, struct net2280_ep, ep);
536 if (!_ep || !_req) 533 if (!_ep || !_req)
537 return; 534 return;
538 535
539 req = container_of (_req, struct net2280_request, req); 536 req = container_of(_req, struct net2280_request, req);
540 WARN_ON (!list_empty (&req->queue)); 537 WARN_ON(!list_empty(&req->queue));
541 if (req->td) 538 if (req->td)
542 pci_pool_free (ep->dev->requests, req->td, req->td_dma); 539 pci_pool_free(ep->dev->requests, req->td, req->td_dma);
543 kfree (req); 540 kfree(req);
544} 541}
545 542
546/*-------------------------------------------------------------------------*/ 543/*-------------------------------------------------------------------------*/
@@ -552,8 +549,7 @@ net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
552 * at a time, but this code is simpler because it knows it only writes 549 * at a time, but this code is simpler because it knows it only writes
553 * one packet. ep-a..ep-d should use dma instead. 550 * one packet. ep-a..ep-d should use dma instead.
554 */ 551 */
555static void 552static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
556write_fifo (struct net2280_ep *ep, struct usb_request *req)
557{ 553{
558 struct net2280_ep_regs __iomem *regs = ep->regs; 554 struct net2280_ep_regs __iomem *regs = ep->regs;
559 u8 *buf; 555 u8 *buf;
@@ -564,7 +560,7 @@ write_fifo (struct net2280_ep *ep, struct usb_request *req)
564 560
565 if (req) { 561 if (req) {
566 buf = req->buf + req->actual; 562 buf = req->buf + req->actual;
567 prefetch (buf); 563 prefetch(buf);
568 total = req->length - req->actual; 564 total = req->length - req->actual;
569 } else { 565 } else {
570 total = 0; 566 total = 0;
@@ -576,7 +572,7 @@ write_fifo (struct net2280_ep *ep, struct usb_request *req)
576 if (count > total) /* min() cannot be used on a bitfield */ 572 if (count > total) /* min() cannot be used on a bitfield */
577 count = total; 573 count = total;
578 574
579 VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", 575 VDEBUG(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
580 ep->ep.name, count, 576 ep->ep.name, count,
581 (count != ep->ep.maxpacket) ? " (short)" : "", 577 (count != ep->ep.maxpacket) ? " (short)" : "",
582 req); 578 req);
@@ -585,9 +581,9 @@ write_fifo (struct net2280_ep *ep, struct usb_request *req)
585 * should normally be full (4 bytes) and successive partial 581 * should normally be full (4 bytes) and successive partial
586 * lines are ok only in certain cases. 582 * lines are ok only in certain cases.
587 */ 583 */
588 tmp = get_unaligned ((u32 *)buf); 584 tmp = get_unaligned((u32 *)buf);
589 cpu_to_le32s (&tmp); 585 cpu_to_le32s(&tmp);
590 writel (tmp, &regs->ep_data); 586 writel(tmp, &regs->ep_data);
591 buf += 4; 587 buf += 4;
592 count -= 4; 588 count -= 4;
593 } 589 }
@@ -597,10 +593,10 @@ write_fifo (struct net2280_ep *ep, struct usb_request *req)
597 * when maxpacket is not a multiple of 4 bytes. 593 * when maxpacket is not a multiple of 4 bytes.
598 */ 594 */
599 if (count || total < ep->ep.maxpacket) { 595 if (count || total < ep->ep.maxpacket) {
600 tmp = count ? get_unaligned ((u32 *)buf) : count; 596 tmp = count ? get_unaligned((u32 *)buf) : count;
601 cpu_to_le32s (&tmp); 597 cpu_to_le32s(&tmp);
602 set_fifo_bytecount (ep, count & 0x03); 598 set_fifo_bytecount(ep, count & 0x03);
603 writel (tmp, &regs->ep_data); 599 writel(tmp, &regs->ep_data);
604 } 600 }
605 601
606 /* pci writes may still be posted */ 602 /* pci writes may still be posted */
@@ -613,20 +609,21 @@ write_fifo (struct net2280_ep *ep, struct usb_request *req)
613 * NOTE: also used in cases where that erratum doesn't apply: 609 * NOTE: also used in cases where that erratum doesn't apply:
614 * where the host wrote "too much" data to us. 610 * where the host wrote "too much" data to us.
615 */ 611 */
616static void out_flush (struct net2280_ep *ep) 612static void out_flush(struct net2280_ep *ep)
617{ 613{
618 u32 __iomem *statp; 614 u32 __iomem *statp;
619 u32 tmp; 615 u32 tmp;
620 616
621 ASSERT_OUT_NAKING (ep); 617 ASSERT_OUT_NAKING(ep);
622 618
623 statp = &ep->regs->ep_stat; 619 statp = &ep->regs->ep_stat;
624 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 620 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
625 BIT(DATA_PACKET_RECEIVED_INTERRUPT) 621 BIT(DATA_PACKET_RECEIVED_INTERRUPT)
626 , statp); 622 , statp);
627 writel(BIT(FIFO_FLUSH), statp); 623 writel(BIT(FIFO_FLUSH), statp);
628 mb (); 624 /* Make sure that stap is written */
629 tmp = readl (statp); 625 mb();
626 tmp = readl(statp);
630 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) 627 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)
631 /* high speed did bulk NYET; fifo isn't filling */ 628 /* high speed did bulk NYET; fifo isn't filling */
632 && ep->dev->gadget.speed == USB_SPEED_FULL) { 629 && ep->dev->gadget.speed == USB_SPEED_FULL) {
@@ -646,8 +643,7 @@ static void out_flush (struct net2280_ep *ep)
646 * for ep-a..ep-d this will read multiple packets out when they 643 * for ep-a..ep-d this will read multiple packets out when they
647 * have been accepted. 644 * have been accepted.
648 */ 645 */
649static int 646static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
650read_fifo (struct net2280_ep *ep, struct net2280_request *req)
651{ 647{
652 struct net2280_ep_regs __iomem *regs = ep->regs; 648 struct net2280_ep_regs __iomem *regs = ep->regs;
653 u8 *buf = req->req.buf + req->req.actual; 649 u8 *buf = req->req.buf + req->req.actual;
@@ -659,12 +655,12 @@ read_fifo (struct net2280_ep *ep, struct net2280_request *req)
659 */ 655 */
660 if (ep->dev->chiprev == 0x0100 656 if (ep->dev->chiprev == 0x0100
661 && ep->dev->gadget.speed == USB_SPEED_FULL) { 657 && ep->dev->gadget.speed == USB_SPEED_FULL) {
662 udelay (1); 658 udelay(1);
663 tmp = readl (&ep->regs->ep_stat); 659 tmp = readl(&ep->regs->ep_stat);
664 if ((tmp & BIT(NAK_OUT_PACKETS))) 660 if ((tmp & BIT(NAK_OUT_PACKETS)))
665 cleanup = 1; 661 cleanup = 1;
666 else if ((tmp & BIT(FIFO_FULL))) { 662 else if ((tmp & BIT(FIFO_FULL))) {
667 start_out_naking (ep); 663 start_out_naking(ep);
668 prevent = 1; 664 prevent = 1;
669 } 665 }
670 /* else: hope we don't see the problem */ 666 /* else: hope we don't see the problem */
@@ -673,12 +669,12 @@ read_fifo (struct net2280_ep *ep, struct net2280_request *req)
673 /* never overflow the rx buffer. the fifo reads packets until 669 /* never overflow the rx buffer. the fifo reads packets until
674 * it sees a short one; we might not be ready for them all. 670 * it sees a short one; we might not be ready for them all.
675 */ 671 */
676 prefetchw (buf); 672 prefetchw(buf);
677 count = readl (&regs->ep_avail); 673 count = readl(&regs->ep_avail);
678 if (unlikely (count == 0)) { 674 if (unlikely(count == 0)) {
679 udelay (1); 675 udelay(1);
680 tmp = readl (&ep->regs->ep_stat); 676 tmp = readl(&ep->regs->ep_stat);
681 count = readl (&regs->ep_avail); 677 count = readl(&regs->ep_avail);
682 /* handled that data already? */ 678 /* handled that data already? */
683 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0) 679 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
684 return 0; 680 return 0;
@@ -688,7 +684,7 @@ read_fifo (struct net2280_ep *ep, struct net2280_request *req)
688 if (count > tmp) { 684 if (count > tmp) {
689 /* as with DMA, data overflow gets flushed */ 685 /* as with DMA, data overflow gets flushed */
690 if ((tmp % ep->ep.maxpacket) != 0) { 686 if ((tmp % ep->ep.maxpacket) != 0) {
691 ERROR (ep->dev, 687 ERROR(ep->dev,
692 "%s out fifo %d bytes, expected %d\n", 688 "%s out fifo %d bytes, expected %d\n",
693 ep->ep.name, count, tmp); 689 ep->ep.name, count, tmp);
694 req->req.status = -EOVERFLOW; 690 req->req.status = -EOVERFLOW;
@@ -703,20 +699,20 @@ read_fifo (struct net2280_ep *ep, struct net2280_request *req)
703 699
704 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0); 700 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
705 701
706 VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n", 702 VDEBUG(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
707 ep->ep.name, count, is_short ? " (short)" : "", 703 ep->ep.name, count, is_short ? " (short)" : "",
708 cleanup ? " flush" : "", prevent ? " nak" : "", 704 cleanup ? " flush" : "", prevent ? " nak" : "",
709 req, req->req.actual, req->req.length); 705 req, req->req.actual, req->req.length);
710 706
711 while (count >= 4) { 707 while (count >= 4) {
712 tmp = readl (&regs->ep_data); 708 tmp = readl(&regs->ep_data);
713 cpu_to_le32s (&tmp); 709 cpu_to_le32s(&tmp);
714 put_unaligned (tmp, (u32 *)buf); 710 put_unaligned(tmp, (u32 *)buf);
715 buf += 4; 711 buf += 4;
716 count -= 4; 712 count -= 4;
717 } 713 }
718 if (count) { 714 if (count) {
719 tmp = readl (&regs->ep_data); 715 tmp = readl(&regs->ep_data);
720 /* LE conversion is implicit here: */ 716 /* LE conversion is implicit here: */
721 do { 717 do {
722 *buf++ = (u8) tmp; 718 *buf++ = (u8) tmp;
@@ -724,10 +720,10 @@ read_fifo (struct net2280_ep *ep, struct net2280_request *req)
724 } while (--count); 720 } while (--count);
725 } 721 }
726 if (cleanup) 722 if (cleanup)
727 out_flush (ep); 723 out_flush(ep);
728 if (prevent) { 724 if (prevent) {
729 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 725 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
730 (void) readl (&ep->regs->ep_rsp); 726 (void) readl(&ep->regs->ep_rsp);
731 } 727 }
732 728
733 return is_short || ((req->req.actual == req->req.length) 729 return is_short || ((req->req.actual == req->req.length)
@@ -735,8 +731,8 @@ read_fifo (struct net2280_ep *ep, struct net2280_request *req)
735} 731}
736 732
737/* fill out dma descriptor to match a given request */ 733/* fill out dma descriptor to match a given request */
738static void 734static void fill_dma_desc(struct net2280_ep *ep,
739fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid) 735 struct net2280_request *req, int valid)
740{ 736{
741 struct net2280_dma *td = req->td; 737 struct net2280_dma *td = req->td;
742 u32 dmacount = req->req.length; 738 u32 dmacount = req->req.length;
@@ -762,7 +758,7 @@ fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
762 td->dmaaddr = cpu_to_le32 (req->req.dma); 758 td->dmaaddr = cpu_to_le32 (req->req.dma);
763 759
764 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */ 760 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
765 wmb (); 761 wmb();
766 td->dmacount = cpu_to_le32(dmacount); 762 td->dmacount = cpu_to_le32(dmacount);
767} 763}
768 764
@@ -777,18 +773,18 @@ static const u32 dmactl_default =
777 /* erratum 0116 workaround part 2 (no AUTOSTART) */ 773 /* erratum 0116 workaround part 2 (no AUTOSTART) */
778 BIT(DMA_ENABLE); 774 BIT(DMA_ENABLE);
779 775
780static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma) 776static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
781{ 777{
782 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50); 778 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
783} 779}
784 780
785static inline void stop_dma (struct net2280_dma_regs __iomem *dma) 781static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
786{ 782{
787 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl); 783 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
788 spin_stop_dma (dma); 784 spin_stop_dma(dma);
789} 785}
790 786
791static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma) 787static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
792{ 788{
793 struct net2280_dma_regs __iomem *dma = ep->dma; 789 struct net2280_dma_regs __iomem *dma = ep->dma;
794 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION); 790 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
@@ -796,24 +792,24 @@ static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
796 if (ep->dev->pdev->device != 0x2280) 792 if (ep->dev->pdev->device != 0x2280)
797 tmp |= BIT(END_OF_CHAIN); 793 tmp |= BIT(END_OF_CHAIN);
798 794
799 writel (tmp, &dma->dmacount); 795 writel(tmp, &dma->dmacount);
800 writel (readl (&dma->dmastat), &dma->dmastat); 796 writel(readl(&dma->dmastat), &dma->dmastat);
801 797
802 writel (td_dma, &dma->dmadesc); 798 writel(td_dma, &dma->dmadesc);
803 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX) 799 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
804 dmactl |= BIT(DMA_REQUEST_OUTSTANDING); 800 dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
805 writel (dmactl, &dma->dmactl); 801 writel(dmactl, &dma->dmactl);
806 802
807 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */ 803 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
808 (void) readl (&ep->dev->pci->pcimstctl); 804 (void) readl(&ep->dev->pci->pcimstctl);
809 805
810 writel(BIT(DMA_START), &dma->dmastat); 806 writel(BIT(DMA_START), &dma->dmastat);
811 807
812 if (!ep->is_in) 808 if (!ep->is_in)
813 stop_out_naking (ep); 809 stop_out_naking(ep);
814} 810}
815 811
816static void start_dma (struct net2280_ep *ep, struct net2280_request *req) 812static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
817{ 813{
818 u32 tmp; 814 u32 tmp;
819 struct net2280_dma_regs __iomem *dma = ep->dma; 815 struct net2280_dma_regs __iomem *dma = ep->dma;
@@ -822,24 +818,24 @@ static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
822 818
823 /* on this path we "know" there's no dma active (yet) */ 819 /* on this path we "know" there's no dma active (yet) */
824 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE)); 820 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
825 writel (0, &ep->dma->dmactl); 821 writel(0, &ep->dma->dmactl);
826 822
827 /* previous OUT packet might have been short */ 823 /* previous OUT packet might have been short */
828 if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat)) 824 if (!ep->is_in && (readl(&ep->regs->ep_stat) &
829 & BIT(NAK_OUT_PACKETS)) != 0) { 825 BIT(NAK_OUT_PACKETS))) {
830 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT), 826 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
831 &ep->regs->ep_stat); 827 &ep->regs->ep_stat);
832 828
833 tmp = readl (&ep->regs->ep_avail); 829 tmp = readl(&ep->regs->ep_avail);
834 if (tmp) { 830 if (tmp) {
835 writel (readl (&dma->dmastat), &dma->dmastat); 831 writel(readl(&dma->dmastat), &dma->dmastat);
836 832
837 /* transfer all/some fifo data */ 833 /* transfer all/some fifo data */
838 writel (req->req.dma, &dma->dmaaddr); 834 writel(req->req.dma, &dma->dmaaddr);
839 tmp = min (tmp, req->req.length); 835 tmp = min(tmp, req->req.length);
840 836
841 /* dma irq, faking scatterlist status */ 837 /* dma irq, faking scatterlist status */
842 req->td->dmacount = cpu_to_le32 (req->req.length - tmp); 838 req->td->dmacount = cpu_to_le32(req->req.length - tmp);
843 writel(BIT(DMA_DONE_INTERRUPT_ENABLE) 839 writel(BIT(DMA_DONE_INTERRUPT_ENABLE)
844 | tmp, &dma->dmacount); 840 | tmp, &dma->dmacount);
845 req->td->dmadesc = 0; 841 req->td->dmadesc = 0;
@@ -858,8 +854,8 @@ static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
858 * (zero length) unless the driver explicitly said to do that. 854 * (zero length) unless the driver explicitly said to do that.
859 */ 855 */
860 if (ep->is_in) { 856 if (ep->is_in) {
861 if (likely ((req->req.length % ep->ep.maxpacket) != 0 857 if (likely((req->req.length % ep->ep.maxpacket) ||
862 || req->req.zero)) { 858 req->req.zero)){
863 tmp |= BIT(DMA_FIFO_VALIDATE); 859 tmp |= BIT(DMA_FIFO_VALIDATE);
864 ep->in_fifo_validate = 1; 860 ep->in_fifo_validate = 1;
865 } else 861 } else
@@ -868,12 +864,12 @@ static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
868 864
869 /* init req->td, pointing to the current dummy */ 865 /* init req->td, pointing to the current dummy */
870 req->td->dmadesc = cpu_to_le32 (ep->td_dma); 866 req->td->dmadesc = cpu_to_le32 (ep->td_dma);
871 fill_dma_desc (ep, req, 1); 867 fill_dma_desc(ep, req, 1);
872 868
873 if (!use_dma_chaining) 869 if (!use_dma_chaining)
874 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN)); 870 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
875 871
876 start_queue (ep, tmp, req->td_dma); 872 start_queue(ep, tmp, req->td_dma);
877} 873}
878 874
879static inline void resume_dma(struct net2280_ep *ep) 875static inline void resume_dma(struct net2280_ep *ep)
@@ -892,7 +888,7 @@ static inline void ep_stop_dma(struct net2280_ep *ep)
892} 888}
893 889
894static inline void 890static inline void
895queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid) 891queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
896{ 892{
897 struct net2280_dma *end; 893 struct net2280_dma *end;
898 dma_addr_t tmp; 894 dma_addr_t tmp;
@@ -908,16 +904,16 @@ queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
908 904
909 end->dmadesc = cpu_to_le32 (ep->td_dma); 905 end->dmadesc = cpu_to_le32 (ep->td_dma);
910 906
911 fill_dma_desc (ep, req, valid); 907 fill_dma_desc(ep, req, valid);
912} 908}
913 909
914static void 910static void
915done (struct net2280_ep *ep, struct net2280_request *req, int status) 911done(struct net2280_ep *ep, struct net2280_request *req, int status)
916{ 912{
917 struct net2280 *dev; 913 struct net2280 *dev;
918 unsigned stopped = ep->stopped; 914 unsigned stopped = ep->stopped;
919 915
920 list_del_init (&req->queue); 916 list_del_init(&req->queue);
921 917
922 if (req->req.status == -EINPROGRESS) 918 if (req->req.status == -EINPROGRESS)
923 req->req.status = status; 919 req->req.status = status;
@@ -929,22 +925,22 @@ done (struct net2280_ep *ep, struct net2280_request *req, int status)
929 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); 925 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
930 926
931 if (status && status != -ESHUTDOWN) 927 if (status && status != -ESHUTDOWN)
932 VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n", 928 VDEBUG(dev, "complete %s req %p stat %d len %u/%u\n",
933 ep->ep.name, &req->req, status, 929 ep->ep.name, &req->req, status,
934 req->req.actual, req->req.length); 930 req->req.actual, req->req.length);
935 931
936 /* don't modify queue heads during completion callback */ 932 /* don't modify queue heads during completion callback */
937 ep->stopped = 1; 933 ep->stopped = 1;
938 spin_unlock (&dev->lock); 934 spin_unlock(&dev->lock);
939 req->req.complete (&ep->ep, &req->req); 935 req->req.complete(&ep->ep, &req->req);
940 spin_lock (&dev->lock); 936 spin_lock(&dev->lock);
941 ep->stopped = stopped; 937 ep->stopped = stopped;
942} 938}
943 939
944/*-------------------------------------------------------------------------*/ 940/*-------------------------------------------------------------------------*/
945 941
946static int 942static int
947net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 943net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
948{ 944{
949 struct net2280_request *req; 945 struct net2280_request *req;
950 struct net2280_ep *ep; 946 struct net2280_ep *ep;
@@ -954,13 +950,13 @@ net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
954 /* we always require a cpu-view buffer, so that we can 950 /* we always require a cpu-view buffer, so that we can
955 * always use pio (as fallback or whatever). 951 * always use pio (as fallback or whatever).
956 */ 952 */
957 req = container_of (_req, struct net2280_request, req); 953 req = container_of(_req, struct net2280_request, req);
958 if (!_req || !_req->complete || !_req->buf 954 if (!_req || !_req->complete || !_req->buf ||
959 || !list_empty (&req->queue)) 955 !list_empty(&req->queue))
960 return -EINVAL; 956 return -EINVAL;
961 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) 957 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
962 return -EDOM; 958 return -EDOM;
963 ep = container_of (_ep, struct net2280_ep, ep); 959 ep = container_of(_ep, struct net2280_ep, ep);
964 if (!_ep || (!ep->desc && ep->num != 0)) 960 if (!_ep || (!ep->desc && ep->num != 0))
965 return -EINVAL; 961 return -EINVAL;
966 dev = ep->dev; 962 dev = ep->dev;
@@ -982,17 +978,17 @@ net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
982 } 978 }
983 979
984#if 0 980#if 0
985 VDEBUG (dev, "%s queue req %p, len %d buf %p\n", 981 VDEBUG(dev, "%s queue req %p, len %d buf %p\n",
986 _ep->name, _req, _req->length, _req->buf); 982 _ep->name, _req, _req->length, _req->buf);
987#endif 983#endif
988 984
989 spin_lock_irqsave (&dev->lock, flags); 985 spin_lock_irqsave(&dev->lock, flags);
990 986
991 _req->status = -EINPROGRESS; 987 _req->status = -EINPROGRESS;
992 _req->actual = 0; 988 _req->actual = 0;
993 989
994 /* kickstart this i/o queue? */ 990 /* kickstart this i/o queue? */
995 if (list_empty (&ep->queue) && !ep->stopped) { 991 if (list_empty(&ep->queue) && !ep->stopped) {
996 /* DMA request while EP halted */ 992 /* DMA request while EP halted */
997 if (ep->dma && 993 if (ep->dma &&
998 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) && 994 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
@@ -1010,24 +1006,24 @@ net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1010 } 1006 }
1011 /* use DMA if the endpoint supports it, else pio */ 1007 /* use DMA if the endpoint supports it, else pio */
1012 else if (ep->dma) 1008 else if (ep->dma)
1013 start_dma (ep, req); 1009 start_dma(ep, req);
1014 else { 1010 else {
1015 /* maybe there's no control data, just status ack */ 1011 /* maybe there's no control data, just status ack */
1016 if (ep->num == 0 && _req->length == 0) { 1012 if (ep->num == 0 && _req->length == 0) {
1017 allow_status (ep); 1013 allow_status(ep);
1018 done (ep, req, 0); 1014 done(ep, req, 0);
1019 VDEBUG (dev, "%s status ack\n", ep->ep.name); 1015 VDEBUG(dev, "%s status ack\n", ep->ep.name);
1020 goto done; 1016 goto done;
1021 } 1017 }
1022 1018
1023 /* PIO ... stuff the fifo, or unblock it. */ 1019 /* PIO ... stuff the fifo, or unblock it. */
1024 if (ep->is_in) 1020 if (ep->is_in)
1025 write_fifo (ep, _req); 1021 write_fifo(ep, _req);
1026 else if (list_empty (&ep->queue)) { 1022 else if (list_empty(&ep->queue)) {
1027 u32 s; 1023 u32 s;
1028 1024
1029 /* OUT FIFO might have packet(s) buffered */ 1025 /* OUT FIFO might have packet(s) buffered */
1030 s = readl (&ep->regs->ep_stat); 1026 s = readl(&ep->regs->ep_stat);
1031 if ((s & BIT(FIFO_EMPTY)) == 0) { 1027 if ((s & BIT(FIFO_EMPTY)) == 0) {
1032 /* note: _req->short_not_ok is 1028 /* note: _req->short_not_ok is
1033 * ignored here since PIO _always_ 1029 * ignored here since PIO _always_
@@ -1035,14 +1031,18 @@ net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1035 * _req->status doesn't change for 1031 * _req->status doesn't change for
1036 * short reads (only _req->actual) 1032 * short reads (only _req->actual)
1037 */ 1033 */
1038 if (read_fifo (ep, req)) { 1034 if (read_fifo(ep, req) &&
1039 done (ep, req, 0); 1035 ep->num == 0) {
1040 if (ep->num == 0) 1036 done(ep, req, 0);
1041 allow_status (ep); 1037 allow_status(ep);
1042 /* don't queue it */ 1038 /* don't queue it */
1043 req = NULL; 1039 req = NULL;
1040 } else if (read_fifo(ep, req) &&
1041 ep->num != 0) {
1042 done(ep, req, 0);
1043 req = NULL;
1044 } else 1044 } else
1045 s = readl (&ep->regs->ep_stat); 1045 s = readl(&ep->regs->ep_stat);
1046 } 1046 }
1047 1047
1048 /* don't NAK, let the fifo fill */ 1048 /* don't NAK, let the fifo fill */
@@ -1061,54 +1061,50 @@ net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1061 /* preventing magic zlps is per-engine state, not 1061 /* preventing magic zlps is per-engine state, not
1062 * per-transfer; irq logic must recover hiccups. 1062 * per-transfer; irq logic must recover hiccups.
1063 */ 1063 */
1064 expect = likely (req->req.zero 1064 expect = likely(req->req.zero ||
1065 || (req->req.length % ep->ep.maxpacket) != 0); 1065 (req->req.length % ep->ep.maxpacket));
1066 if (expect != ep->in_fifo_validate) 1066 if (expect != ep->in_fifo_validate)
1067 valid = 0; 1067 valid = 0;
1068 } 1068 }
1069 queue_dma (ep, req, valid); 1069 queue_dma(ep, req, valid);
1070 1070
1071 } /* else the irq handler advances the queue. */ 1071 } /* else the irq handler advances the queue. */
1072 1072
1073 ep->responded = 1; 1073 ep->responded = 1;
1074 if (req) 1074 if (req)
1075 list_add_tail (&req->queue, &ep->queue); 1075 list_add_tail(&req->queue, &ep->queue);
1076done: 1076done:
1077 spin_unlock_irqrestore (&dev->lock, flags); 1077 spin_unlock_irqrestore(&dev->lock, flags);
1078 1078
1079 /* pci writes may still be posted */ 1079 /* pci writes may still be posted */
1080 return 0; 1080 return 0;
1081} 1081}
1082 1082
1083static inline void 1083static inline void
1084dma_done ( 1084dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
1085 struct net2280_ep *ep, 1085 int status)
1086 struct net2280_request *req,
1087 u32 dmacount,
1088 int status
1089)
1090{ 1086{
1091 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount); 1087 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
1092 done (ep, req, status); 1088 done(ep, req, status);
1093} 1089}
1094 1090
1095static void restart_dma (struct net2280_ep *ep); 1091static void restart_dma(struct net2280_ep *ep);
1096 1092
1097static void scan_dma_completions (struct net2280_ep *ep) 1093static void scan_dma_completions(struct net2280_ep *ep)
1098{ 1094{
1099 /* only look at descriptors that were "naturally" retired, 1095 /* only look at descriptors that were "naturally" retired,
1100 * so fifo and list head state won't matter 1096 * so fifo and list head state won't matter
1101 */ 1097 */
1102 while (!list_empty (&ep->queue)) { 1098 while (!list_empty(&ep->queue)) {
1103 struct net2280_request *req; 1099 struct net2280_request *req;
1104 u32 tmp; 1100 u32 tmp;
1105 1101
1106 req = list_entry (ep->queue.next, 1102 req = list_entry(ep->queue.next,
1107 struct net2280_request, queue); 1103 struct net2280_request, queue);
1108 if (!req->valid) 1104 if (!req->valid)
1109 break; 1105 break;
1110 rmb (); 1106 rmb();
1111 tmp = le32_to_cpup (&req->td->dmacount); 1107 tmp = le32_to_cpup(&req->td->dmacount);
1112 if ((tmp & BIT(VALID_BIT)) != 0) 1108 if ((tmp & BIT(VALID_BIT)) != 0)
1113 break; 1109 break;
1114 1110
@@ -1116,17 +1112,17 @@ static void scan_dma_completions (struct net2280_ep *ep)
1116 * cases where DMA must be aborted; this code handles 1112 * cases where DMA must be aborted; this code handles
1117 * all non-abort DMA completions. 1113 * all non-abort DMA completions.
1118 */ 1114 */
1119 if (unlikely (req->td->dmadesc == 0)) { 1115 if (unlikely(req->td->dmadesc == 0)) {
1120 /* paranoia */ 1116 /* paranoia */
1121 tmp = readl (&ep->dma->dmacount); 1117 tmp = readl(&ep->dma->dmacount);
1122 if (tmp & DMA_BYTE_COUNT_MASK) 1118 if (tmp & DMA_BYTE_COUNT_MASK)
1123 break; 1119 break;
1124 /* single transfer mode */ 1120 /* single transfer mode */
1125 dma_done (ep, req, tmp, 0); 1121 dma_done(ep, req, tmp, 0);
1126 break; 1122 break;
1127 } else if (!ep->is_in 1123 } else if (!ep->is_in
1128 && (req->req.length % ep->ep.maxpacket) != 0) { 1124 && (req->req.length % ep->ep.maxpacket) != 0) {
1129 tmp = readl (&ep->regs->ep_stat); 1125 tmp = readl(&ep->regs->ep_stat);
1130 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX) 1126 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
1131 return dma_done(ep, req, tmp, 0); 1127 return dma_done(ep, req, tmp, 0);
1132 1128
@@ -1135,33 +1131,37 @@ static void scan_dma_completions (struct net2280_ep *ep)
1135 * 0122, and 0124; not all cases trigger the warning. 1131 * 0122, and 0124; not all cases trigger the warning.
1136 */ 1132 */
1137 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { 1133 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
1138 WARNING (ep->dev, "%s lost packet sync!\n", 1134 WARNING(ep->dev, "%s lost packet sync!\n",
1139 ep->ep.name); 1135 ep->ep.name);
1140 req->req.status = -EOVERFLOW; 1136 req->req.status = -EOVERFLOW;
1141 } else if ((tmp = readl (&ep->regs->ep_avail)) != 0) { 1137 } else {
1142 /* fifo gets flushed later */ 1138 tmp = readl(&ep->regs->ep_avail);
1143 ep->out_overflow = 1; 1139 if (tmp) {
1144 DEBUG (ep->dev, "%s dma, discard %d len %d\n", 1140 /* fifo gets flushed later */
1141 ep->out_overflow = 1;
1142 DEBUG(ep->dev,
1143 "%s dma, discard %d len %d\n",
1145 ep->ep.name, tmp, 1144 ep->ep.name, tmp,
1146 req->req.length); 1145 req->req.length);
1147 req->req.status = -EOVERFLOW; 1146 req->req.status = -EOVERFLOW;
1147 }
1148 } 1148 }
1149 } 1149 }
1150 dma_done (ep, req, tmp, 0); 1150 dma_done(ep, req, tmp, 0);
1151 } 1151 }
1152} 1152}
1153 1153
1154static void restart_dma (struct net2280_ep *ep) 1154static void restart_dma(struct net2280_ep *ep)
1155{ 1155{
1156 struct net2280_request *req; 1156 struct net2280_request *req;
1157 u32 dmactl = dmactl_default; 1157 u32 dmactl = dmactl_default;
1158 1158
1159 if (ep->stopped) 1159 if (ep->stopped)
1160 return; 1160 return;
1161 req = list_entry (ep->queue.next, struct net2280_request, queue); 1161 req = list_entry(ep->queue.next, struct net2280_request, queue);
1162 1162
1163 if (!use_dma_chaining) { 1163 if (!use_dma_chaining) {
1164 start_dma (ep, req); 1164 start_dma(ep, req);
1165 return; 1165 return;
1166 } 1166 }
1167 1167
@@ -1175,21 +1175,20 @@ static void restart_dma (struct net2280_ep *ep)
1175 struct net2280_request *entry, *prev = NULL; 1175 struct net2280_request *entry, *prev = NULL;
1176 int reqmode, done = 0; 1176 int reqmode, done = 0;
1177 1177
1178 DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td); 1178 DEBUG(ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
1179 ep->in_fifo_validate = likely (req->req.zero 1179 ep->in_fifo_validate = likely(req->req.zero ||
1180 || (req->req.length % ep->ep.maxpacket) != 0); 1180 (req->req.length % ep->ep.maxpacket) != 0);
1181 if (ep->in_fifo_validate) 1181 if (ep->in_fifo_validate)
1182 dmactl |= BIT(DMA_FIFO_VALIDATE); 1182 dmactl |= BIT(DMA_FIFO_VALIDATE);
1183 list_for_each_entry (entry, &ep->queue, queue) { 1183 list_for_each_entry(entry, &ep->queue, queue) {
1184 __le32 dmacount; 1184 __le32 dmacount;
1185 1185
1186 if (entry == req) 1186 if (entry == req)
1187 continue; 1187 continue;
1188 dmacount = entry->td->dmacount; 1188 dmacount = entry->td->dmacount;
1189 if (!done) { 1189 if (!done) {
1190 reqmode = likely (entry->req.zero 1190 reqmode = likely(entry->req.zero ||
1191 || (entry->req.length 1191 (entry->req.length % ep->ep.maxpacket));
1192 % ep->ep.maxpacket) != 0);
1193 if (reqmode == ep->in_fifo_validate) { 1192 if (reqmode == ep->in_fifo_validate) {
1194 entry->valid = 1; 1193 entry->valid = 1;
1195 dmacount |= valid_bit; 1194 dmacount |= valid_bit;
@@ -1211,20 +1210,20 @@ static void restart_dma (struct net2280_ep *ep)
1211 } 1210 }
1212 } 1211 }
1213 1212
1214 writel (0, &ep->dma->dmactl); 1213 writel(0, &ep->dma->dmactl);
1215 start_queue (ep, dmactl, req->td_dma); 1214 start_queue(ep, dmactl, req->td_dma);
1216} 1215}
1217 1216
1218static void abort_dma_228x(struct net2280_ep *ep) 1217static void abort_dma_228x(struct net2280_ep *ep)
1219{ 1218{
1220 /* abort the current transfer */ 1219 /* abort the current transfer */
1221 if (likely (!list_empty (&ep->queue))) { 1220 if (likely(!list_empty(&ep->queue))) {
1222 /* FIXME work around errata 0121, 0122, 0124 */ 1221 /* FIXME work around errata 0121, 0122, 0124 */
1223 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 1222 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
1224 spin_stop_dma (ep->dma); 1223 spin_stop_dma(ep->dma);
1225 } else 1224 } else
1226 stop_dma (ep->dma); 1225 stop_dma(ep->dma);
1227 scan_dma_completions (ep); 1226 scan_dma_completions(ep);
1228} 1227}
1229 1228
1230static void abort_dma_338x(struct net2280_ep *ep) 1229static void abort_dma_338x(struct net2280_ep *ep)
@@ -1241,24 +1240,24 @@ static void abort_dma(struct net2280_ep *ep)
1241} 1240}
1242 1241
1243/* dequeue ALL requests */ 1242/* dequeue ALL requests */
1244static void nuke (struct net2280_ep *ep) 1243static void nuke(struct net2280_ep *ep)
1245{ 1244{
1246 struct net2280_request *req; 1245 struct net2280_request *req;
1247 1246
1248 /* called with spinlock held */ 1247 /* called with spinlock held */
1249 ep->stopped = 1; 1248 ep->stopped = 1;
1250 if (ep->dma) 1249 if (ep->dma)
1251 abort_dma (ep); 1250 abort_dma(ep);
1252 while (!list_empty (&ep->queue)) { 1251 while (!list_empty(&ep->queue)) {
1253 req = list_entry (ep->queue.next, 1252 req = list_entry(ep->queue.next,
1254 struct net2280_request, 1253 struct net2280_request,
1255 queue); 1254 queue);
1256 done (ep, req, -ESHUTDOWN); 1255 done(ep, req, -ESHUTDOWN);
1257 } 1256 }
1258} 1257}
1259 1258
1260/* dequeue JUST ONE request */ 1259/* dequeue JUST ONE request */
1261static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req) 1260static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1262{ 1261{
1263 struct net2280_ep *ep; 1262 struct net2280_ep *ep;
1264 struct net2280_request *req; 1263 struct net2280_request *req;
@@ -1266,65 +1265,65 @@ static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
1266 u32 dmactl; 1265 u32 dmactl;
1267 int stopped; 1266 int stopped;
1268 1267
1269 ep = container_of (_ep, struct net2280_ep, ep); 1268 ep = container_of(_ep, struct net2280_ep, ep);
1270 if (!_ep || (!ep->desc && ep->num != 0) || !_req) 1269 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
1271 return -EINVAL; 1270 return -EINVAL;
1272 1271
1273 spin_lock_irqsave (&ep->dev->lock, flags); 1272 spin_lock_irqsave(&ep->dev->lock, flags);
1274 stopped = ep->stopped; 1273 stopped = ep->stopped;
1275 1274
1276 /* quiesce dma while we patch the queue */ 1275 /* quiesce dma while we patch the queue */
1277 dmactl = 0; 1276 dmactl = 0;
1278 ep->stopped = 1; 1277 ep->stopped = 1;
1279 if (ep->dma) { 1278 if (ep->dma) {
1280 dmactl = readl (&ep->dma->dmactl); 1279 dmactl = readl(&ep->dma->dmactl);
1281 /* WARNING erratum 0127 may kick in ... */ 1280 /* WARNING erratum 0127 may kick in ... */
1282 stop_dma (ep->dma); 1281 stop_dma(ep->dma);
1283 scan_dma_completions (ep); 1282 scan_dma_completions(ep);
1284 } 1283 }
1285 1284
1286 /* make sure it's still queued on this endpoint */ 1285 /* make sure it's still queued on this endpoint */
1287 list_for_each_entry (req, &ep->queue, queue) { 1286 list_for_each_entry(req, &ep->queue, queue) {
1288 if (&req->req == _req) 1287 if (&req->req == _req)
1289 break; 1288 break;
1290 } 1289 }
1291 if (&req->req != _req) { 1290 if (&req->req != _req) {
1292 spin_unlock_irqrestore (&ep->dev->lock, flags); 1291 spin_unlock_irqrestore(&ep->dev->lock, flags);
1293 return -EINVAL; 1292 return -EINVAL;
1294 } 1293 }
1295 1294
1296 /* queue head may be partially complete. */ 1295 /* queue head may be partially complete. */
1297 if (ep->queue.next == &req->queue) { 1296 if (ep->queue.next == &req->queue) {
1298 if (ep->dma) { 1297 if (ep->dma) {
1299 DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name); 1298 DEBUG(ep->dev, "unlink (%s) dma\n", _ep->name);
1300 _req->status = -ECONNRESET; 1299 _req->status = -ECONNRESET;
1301 abort_dma (ep); 1300 abort_dma(ep);
1302 if (likely (ep->queue.next == &req->queue)) { 1301 if (likely(ep->queue.next == &req->queue)) {
1303 // NOTE: misreports single-transfer mode 1302 /* NOTE: misreports single-transfer mode*/
1304 req->td->dmacount = 0; /* invalidate */ 1303 req->td->dmacount = 0; /* invalidate */
1305 dma_done (ep, req, 1304 dma_done(ep, req,
1306 readl (&ep->dma->dmacount), 1305 readl(&ep->dma->dmacount),
1307 -ECONNRESET); 1306 -ECONNRESET);
1308 } 1307 }
1309 } else { 1308 } else {
1310 DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name); 1309 DEBUG(ep->dev, "unlink (%s) pio\n", _ep->name);
1311 done (ep, req, -ECONNRESET); 1310 done(ep, req, -ECONNRESET);
1312 } 1311 }
1313 req = NULL; 1312 req = NULL;
1314 1313
1315 /* patch up hardware chaining data */ 1314 /* patch up hardware chaining data */
1316 } else if (ep->dma && use_dma_chaining) { 1315 } else if (ep->dma && use_dma_chaining) {
1317 if (req->queue.prev == ep->queue.next) { 1316 if (req->queue.prev == ep->queue.next) {
1318 writel (le32_to_cpu (req->td->dmadesc), 1317 writel(le32_to_cpu(req->td->dmadesc),
1319 &ep->dma->dmadesc); 1318 &ep->dma->dmadesc);
1320 if (req->td->dmacount & dma_done_ie) 1319 if (req->td->dmacount & dma_done_ie)
1321 writel (readl (&ep->dma->dmacount) 1320 writel(readl(&ep->dma->dmacount)
1322 | le32_to_cpu(dma_done_ie), 1321 | le32_to_cpu(dma_done_ie),
1323 &ep->dma->dmacount); 1322 &ep->dma->dmacount);
1324 } else { 1323 } else {
1325 struct net2280_request *prev; 1324 struct net2280_request *prev;
1326 1325
1327 prev = list_entry (req->queue.prev, 1326 prev = list_entry(req->queue.prev,
1328 struct net2280_request, queue); 1327 struct net2280_request, queue);
1329 prev->td->dmadesc = req->td->dmadesc; 1328 prev->td->dmadesc = req->td->dmadesc;
1330 if (req->td->dmacount & dma_done_ie) 1329 if (req->td->dmacount & dma_done_ie)
@@ -1333,30 +1332,30 @@ static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
1333 } 1332 }
1334 1333
1335 if (req) 1334 if (req)
1336 done (ep, req, -ECONNRESET); 1335 done(ep, req, -ECONNRESET);
1337 ep->stopped = stopped; 1336 ep->stopped = stopped;
1338 1337
1339 if (ep->dma) { 1338 if (ep->dma) {
1340 /* turn off dma on inactive queues */ 1339 /* turn off dma on inactive queues */
1341 if (list_empty (&ep->queue)) 1340 if (list_empty(&ep->queue))
1342 stop_dma (ep->dma); 1341 stop_dma(ep->dma);
1343 else if (!ep->stopped) { 1342 else if (!ep->stopped) {
1344 /* resume current request, or start new one */ 1343 /* resume current request, or start new one */
1345 if (req) 1344 if (req)
1346 writel (dmactl, &ep->dma->dmactl); 1345 writel(dmactl, &ep->dma->dmactl);
1347 else 1346 else
1348 start_dma (ep, list_entry (ep->queue.next, 1347 start_dma(ep, list_entry(ep->queue.next,
1349 struct net2280_request, queue)); 1348 struct net2280_request, queue));
1350 } 1349 }
1351 } 1350 }
1352 1351
1353 spin_unlock_irqrestore (&ep->dev->lock, flags); 1352 spin_unlock_irqrestore(&ep->dev->lock, flags);
1354 return 0; 1353 return 0;
1355} 1354}
1356 1355
1357/*-------------------------------------------------------------------------*/ 1356/*-------------------------------------------------------------------------*/
1358 1357
1359static int net2280_fifo_status (struct usb_ep *_ep); 1358static int net2280_fifo_status(struct usb_ep *_ep);
1360 1359
1361static int 1360static int
1362net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) 1361net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
@@ -1365,7 +1364,7 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1365 unsigned long flags; 1364 unsigned long flags;
1366 int retval = 0; 1365 int retval = 0;
1367 1366
1368 ep = container_of (_ep, struct net2280_ep, ep); 1367 ep = container_of(_ep, struct net2280_ep, ep);
1369 if (!_ep || (!ep->desc && ep->num != 0)) 1368 if (!_ep || (!ep->desc && ep->num != 0))
1370 return -EINVAL; 1369 return -EINVAL;
1371 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1370 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
@@ -1374,13 +1373,13 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1374 == USB_ENDPOINT_XFER_ISOC) 1373 == USB_ENDPOINT_XFER_ISOC)
1375 return -EINVAL; 1374 return -EINVAL;
1376 1375
1377 spin_lock_irqsave (&ep->dev->lock, flags); 1376 spin_lock_irqsave(&ep->dev->lock, flags);
1378 if (!list_empty (&ep->queue)) 1377 if (!list_empty(&ep->queue))
1379 retval = -EAGAIN; 1378 retval = -EAGAIN;
1380 else if (ep->is_in && value && net2280_fifo_status (_ep) != 0) 1379 else if (ep->is_in && value && net2280_fifo_status(_ep) != 0)
1381 retval = -EAGAIN; 1380 retval = -EAGAIN;
1382 else { 1381 else {
1383 VDEBUG (ep->dev, "%s %s %s\n", _ep->name, 1382 VDEBUG(ep->dev, "%s %s %s\n", _ep->name,
1384 value ? "set" : "clear", 1383 value ? "set" : "clear",
1385 wedged ? "wedge" : "halt"); 1384 wedged ? "wedge" : "halt");
1386 /* set/clear, then synch memory views with the device */ 1385 /* set/clear, then synch memory views with the device */
@@ -1388,44 +1387,41 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1388 if (ep->num == 0) 1387 if (ep->num == 0)
1389 ep->dev->protocol_stall = 1; 1388 ep->dev->protocol_stall = 1;
1390 else 1389 else
1391 set_halt (ep); 1390 set_halt(ep);
1392 if (wedged) 1391 if (wedged)
1393 ep->wedged = 1; 1392 ep->wedged = 1;
1394 } else { 1393 } else {
1395 clear_halt (ep); 1394 clear_halt(ep);
1396 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX && 1395 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX &&
1397 !list_empty(&ep->queue) && ep->td_dma) 1396 !list_empty(&ep->queue) && ep->td_dma)
1398 restart_dma(ep); 1397 restart_dma(ep);
1399 ep->wedged = 0; 1398 ep->wedged = 0;
1400 } 1399 }
1401 (void) readl (&ep->regs->ep_rsp); 1400 (void) readl(&ep->regs->ep_rsp);
1402 } 1401 }
1403 spin_unlock_irqrestore (&ep->dev->lock, flags); 1402 spin_unlock_irqrestore(&ep->dev->lock, flags);
1404 1403
1405 return retval; 1404 return retval;
1406} 1405}
1407 1406
1408static int 1407static int net2280_set_halt(struct usb_ep *_ep, int value)
1409net2280_set_halt(struct usb_ep *_ep, int value)
1410{ 1408{
1411 return net2280_set_halt_and_wedge(_ep, value, 0); 1409 return net2280_set_halt_and_wedge(_ep, value, 0);
1412} 1410}
1413 1411
1414static int 1412static int net2280_set_wedge(struct usb_ep *_ep)
1415net2280_set_wedge(struct usb_ep *_ep)
1416{ 1413{
1417 if (!_ep || _ep->name == ep0name) 1414 if (!_ep || _ep->name == ep0name)
1418 return -EINVAL; 1415 return -EINVAL;
1419 return net2280_set_halt_and_wedge(_ep, 1, 1); 1416 return net2280_set_halt_and_wedge(_ep, 1, 1);
1420} 1417}
1421 1418
1422static int 1419static int net2280_fifo_status(struct usb_ep *_ep)
1423net2280_fifo_status (struct usb_ep *_ep)
1424{ 1420{
1425 struct net2280_ep *ep; 1421 struct net2280_ep *ep;
1426 u32 avail; 1422 u32 avail;
1427 1423
1428 ep = container_of (_ep, struct net2280_ep, ep); 1424 ep = container_of(_ep, struct net2280_ep, ep);
1429 if (!_ep || (!ep->desc && ep->num != 0)) 1425 if (!_ep || (!ep->desc && ep->num != 0))
1430 return -ENODEV; 1426 return -ENODEV;
1431 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1427 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
@@ -1439,19 +1435,18 @@ net2280_fifo_status (struct usb_ep *_ep)
1439 return avail; 1435 return avail;
1440} 1436}
1441 1437
1442static void 1438static void net2280_fifo_flush(struct usb_ep *_ep)
1443net2280_fifo_flush (struct usb_ep *_ep)
1444{ 1439{
1445 struct net2280_ep *ep; 1440 struct net2280_ep *ep;
1446 1441
1447 ep = container_of (_ep, struct net2280_ep, ep); 1442 ep = container_of(_ep, struct net2280_ep, ep);
1448 if (!_ep || (!ep->desc && ep->num != 0)) 1443 if (!_ep || (!ep->desc && ep->num != 0))
1449 return; 1444 return;
1450 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1445 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1451 return; 1446 return;
1452 1447
1453 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); 1448 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1454 (void) readl (&ep->regs->ep_rsp); 1449 (void) readl(&ep->regs->ep_rsp);
1455} 1450}
1456 1451
1457static const struct usb_ep_ops net2280_ep_ops = { 1452static const struct usb_ep_ops net2280_ep_ops = {
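Most of the churn in the hunks above comes from two checkpatch complaints: "space prohibited between function name and open parenthesis" at call sites (container_of(), readl(), spin_lock_irqsave(), ...), and keeping a definition's storage class, return type and name on a single line instead of splitting them across two. A minimal, self-contained sketch of the preferred form, using invented names (struct widget, widget_set_halt) rather than net2280 code:

        struct widget { int halted; };

        /* single-line declaration, no space before '(' in calls or definitions */
        static int widget_set_halt(struct widget *w, int value)
        {
                w->halted = value;
                return 0;
        }
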
@@ -1472,7 +1467,7 @@ static const struct usb_ep_ops net2280_ep_ops = {
1472 1467
1473/*-------------------------------------------------------------------------*/ 1468/*-------------------------------------------------------------------------*/
1474 1469
1475static int net2280_get_frame (struct usb_gadget *_gadget) 1470static int net2280_get_frame(struct usb_gadget *_gadget)
1476{ 1471{
1477 struct net2280 *dev; 1472 struct net2280 *dev;
1478 unsigned long flags; 1473 unsigned long flags;
@@ -1480,14 +1475,14 @@ static int net2280_get_frame (struct usb_gadget *_gadget)
1480 1475
1481 if (!_gadget) 1476 if (!_gadget)
1482 return -ENODEV; 1477 return -ENODEV;
1483 dev = container_of (_gadget, struct net2280, gadget); 1478 dev = container_of(_gadget, struct net2280, gadget);
1484 spin_lock_irqsave (&dev->lock, flags); 1479 spin_lock_irqsave(&dev->lock, flags);
1485 retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff; 1480 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
1486 spin_unlock_irqrestore (&dev->lock, flags); 1481 spin_unlock_irqrestore(&dev->lock, flags);
1487 return retval; 1482 return retval;
1488} 1483}
1489 1484
1490static int net2280_wakeup (struct usb_gadget *_gadget) 1485static int net2280_wakeup(struct usb_gadget *_gadget)
1491{ 1486{
1492 struct net2280 *dev; 1487 struct net2280 *dev;
1493 u32 tmp; 1488 u32 tmp;
@@ -1495,19 +1490,19 @@ static int net2280_wakeup (struct usb_gadget *_gadget)
1495 1490
1496 if (!_gadget) 1491 if (!_gadget)
1497 return 0; 1492 return 0;
1498 dev = container_of (_gadget, struct net2280, gadget); 1493 dev = container_of(_gadget, struct net2280, gadget);
1499 1494
1500 spin_lock_irqsave (&dev->lock, flags); 1495 spin_lock_irqsave(&dev->lock, flags);
1501 tmp = readl (&dev->usb->usbctl); 1496 tmp = readl(&dev->usb->usbctl);
1502 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE)) 1497 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1503 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat); 1498 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
1504 spin_unlock_irqrestore (&dev->lock, flags); 1499 spin_unlock_irqrestore(&dev->lock, flags);
1505 1500
1506 /* pci writes may still be posted */ 1501 /* pci writes may still be posted */
1507 return 0; 1502 return 0;
1508} 1503}
1509 1504
1510static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value) 1505static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
1511{ 1506{
1512 struct net2280 *dev; 1507 struct net2280 *dev;
1513 u32 tmp; 1508 u32 tmp;
@@ -1515,10 +1510,10 @@ static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1515 1510
1516 if (!_gadget) 1511 if (!_gadget)
1517 return 0; 1512 return 0;
1518 dev = container_of (_gadget, struct net2280, gadget); 1513 dev = container_of(_gadget, struct net2280, gadget);
1519 1514
1520 spin_lock_irqsave (&dev->lock, flags); 1515 spin_lock_irqsave(&dev->lock, flags);
1521 tmp = readl (&dev->usb->usbctl); 1516 tmp = readl(&dev->usb->usbctl);
1522 if (value) { 1517 if (value) {
1523 tmp |= BIT(SELF_POWERED_STATUS); 1518 tmp |= BIT(SELF_POWERED_STATUS);
1524 dev->selfpowered = 1; 1519 dev->selfpowered = 1;
@@ -1526,8 +1521,8 @@ static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1526 tmp &= ~BIT(SELF_POWERED_STATUS); 1521 tmp &= ~BIT(SELF_POWERED_STATUS);
1527 dev->selfpowered = 0; 1522 dev->selfpowered = 0;
1528 } 1523 }
1529 writel (tmp, &dev->usb->usbctl); 1524 writel(tmp, &dev->usb->usbctl);
1530 spin_unlock_irqrestore (&dev->lock, flags); 1525 spin_unlock_irqrestore(&dev->lock, flags);
1531 1526
1532 return 0; 1527 return 0;
1533} 1528}
@@ -1540,17 +1535,17 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1540 1535
1541 if (!_gadget) 1536 if (!_gadget)
1542 return -ENODEV; 1537 return -ENODEV;
1543 dev = container_of (_gadget, struct net2280, gadget); 1538 dev = container_of(_gadget, struct net2280, gadget);
1544 1539
1545 spin_lock_irqsave (&dev->lock, flags); 1540 spin_lock_irqsave(&dev->lock, flags);
1546 tmp = readl (&dev->usb->usbctl); 1541 tmp = readl(&dev->usb->usbctl);
1547 dev->softconnect = (is_on != 0); 1542 dev->softconnect = (is_on != 0);
1548 if (is_on) 1543 if (is_on)
1549 tmp |= BIT(USB_DETECT_ENABLE); 1544 tmp |= BIT(USB_DETECT_ENABLE);
1550 else 1545 else
1551 tmp &= ~BIT(USB_DETECT_ENABLE); 1546 tmp &= ~BIT(USB_DETECT_ENABLE);
1552 writel (tmp, &dev->usb->usbctl); 1547 writel(tmp, &dev->usb->usbctl);
1553 spin_unlock_irqrestore (&dev->lock, flags); 1548 spin_unlock_irqrestore(&dev->lock, flags);
1554 1549
1555 return 0; 1550 return 0;
1556} 1551}
@@ -1582,13 +1577,12 @@ static const struct usb_gadget_ops net2280_ops = {
1582static ssize_t function_show(struct device *_dev, struct device_attribute *attr, 1577static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
1583 char *buf) 1578 char *buf)
1584{ 1579{
1585 struct net2280 *dev = dev_get_drvdata (_dev); 1580 struct net2280 *dev = dev_get_drvdata(_dev);
1586 1581
1587 if (!dev->driver 1582 if (!dev->driver || !dev->driver->function ||
1588 || !dev->driver->function 1583 strlen(dev->driver->function) > PAGE_SIZE)
1589 || strlen (dev->driver->function) > PAGE_SIZE)
1590 return 0; 1584 return 0;
1591 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function); 1585 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1592} 1586}
1593static DEVICE_ATTR_RO(function); 1587static DEVICE_ATTR_RO(function);
1594 1588
@@ -1603,10 +1597,10 @@ static ssize_t registers_show(struct device *_dev,
1603 u32 t1, t2; 1597 u32 t1, t2;
1604 const char *s; 1598 const char *s;
1605 1599
1606 dev = dev_get_drvdata (_dev); 1600 dev = dev_get_drvdata(_dev);
1607 next = buf; 1601 next = buf;
1608 size = PAGE_SIZE; 1602 size = PAGE_SIZE;
1609 spin_lock_irqsave (&dev->lock, flags); 1603 spin_lock_irqsave(&dev->lock, flags);
1610 1604
1611 if (dev->driver) 1605 if (dev->driver)
1612 s = dev->driver->driver.name; 1606 s = dev->driver->driver.name;
@@ -1614,7 +1608,7 @@ static ssize_t registers_show(struct device *_dev,
1614 s = "(none)"; 1608 s = "(none)";
1615 1609
1616 /* Main Control Registers */ 1610 /* Main Control Registers */
1617 t = scnprintf (next, size, "%s version " DRIVER_VERSION 1611 t = scnprintf(next, size, "%s version " DRIVER_VERSION
1618 ", chiprev %04x, dma %s\n\n" 1612 ", chiprev %04x, dma %s\n\n"
1619 "devinit %03x fifoctl %08x gadget '%s'\n" 1613 "devinit %03x fifoctl %08x gadget '%s'\n"
1620 "pci irqenb0 %02x irqenb1 %08x " 1614 "pci irqenb0 %02x irqenb1 %08x "
@@ -1623,19 +1617,19 @@ static ssize_t registers_show(struct device *_dev,
1623 use_dma 1617 use_dma
1624 ? (use_dma_chaining ? "chaining" : "enabled") 1618 ? (use_dma_chaining ? "chaining" : "enabled")
1625 : "disabled", 1619 : "disabled",
1626 readl (&dev->regs->devinit), 1620 readl(&dev->regs->devinit),
1627 readl (&dev->regs->fifoctl), 1621 readl(&dev->regs->fifoctl),
1628 s, 1622 s,
1629 readl (&dev->regs->pciirqenb0), 1623 readl(&dev->regs->pciirqenb0),
1630 readl (&dev->regs->pciirqenb1), 1624 readl(&dev->regs->pciirqenb1),
1631 readl (&dev->regs->irqstat0), 1625 readl(&dev->regs->irqstat0),
1632 readl (&dev->regs->irqstat1)); 1626 readl(&dev->regs->irqstat1));
1633 size -= t; 1627 size -= t;
1634 next += t; 1628 next += t;
1635 1629
1636 /* USB Control Registers */ 1630 /* USB Control Registers */
1637 t1 = readl (&dev->usb->usbctl); 1631 t1 = readl(&dev->usb->usbctl);
1638 t2 = readl (&dev->usb->usbstat); 1632 t2 = readl(&dev->usb->usbstat);
1639 if (t1 & BIT(VBUS_PIN)) { 1633 if (t1 & BIT(VBUS_PIN)) {
1640 if (t2 & BIT(HIGH_SPEED)) 1634 if (t2 & BIT(HIGH_SPEED))
1641 s = "high speed"; 1635 s = "high speed";
@@ -1646,11 +1640,11 @@ static ssize_t registers_show(struct device *_dev,
1646 /* full speed bit (6) not working?? */ 1640 /* full speed bit (6) not working?? */
1647 } else 1641 } else
1648 s = "not attached"; 1642 s = "not attached";
1649 t = scnprintf (next, size, 1643 t = scnprintf(next, size,
1650 "stdrsp %08x usbctl %08x usbstat %08x " 1644 "stdrsp %08x usbctl %08x usbstat %08x "
1651 "addr 0x%02x (%s)\n", 1645 "addr 0x%02x (%s)\n",
1652 readl (&dev->usb->stdrsp), t1, t2, 1646 readl(&dev->usb->stdrsp), t1, t2,
1653 readl (&dev->usb->ouraddr), s); 1647 readl(&dev->usb->ouraddr), s);
1654 size -= t; 1648 size -= t;
1655 next += t; 1649 next += t;
1656 1650
@@ -1662,13 +1656,13 @@ static ssize_t registers_show(struct device *_dev,
1662 for (i = 0; i < dev->n_ep; i++) { 1656 for (i = 0; i < dev->n_ep; i++) {
1663 struct net2280_ep *ep; 1657 struct net2280_ep *ep;
1664 1658
1665 ep = &dev->ep [i]; 1659 ep = &dev->ep[i];
1666 if (i && !ep->desc) 1660 if (i && !ep->desc)
1667 continue; 1661 continue;
1668 1662
1669 t1 = readl(&ep->cfg->ep_cfg); 1663 t1 = readl(&ep->cfg->ep_cfg);
1670 t2 = readl (&ep->regs->ep_rsp) & 0xff; 1664 t2 = readl(&ep->regs->ep_rsp) & 0xff;
1671 t = scnprintf (next, size, 1665 t = scnprintf(next, size,
1672 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s" 1666 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1673 "irqenb %02x\n", 1667 "irqenb %02x\n",
1674 ep->ep.name, t1, t2, 1668 ep->ep.name, t1, t2,
@@ -1688,17 +1682,17 @@ static ssize_t registers_show(struct device *_dev,
1688 ? "DATA1 " : "DATA0 ", 1682 ? "DATA1 " : "DATA0 ",
1689 (t2 & BIT(CLEAR_ENDPOINT_HALT)) 1683 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1690 ? "HALT " : "", 1684 ? "HALT " : "",
1691 readl (&ep->regs->ep_irqenb)); 1685 readl(&ep->regs->ep_irqenb));
1692 size -= t; 1686 size -= t;
1693 next += t; 1687 next += t;
1694 1688
1695 t = scnprintf (next, size, 1689 t = scnprintf(next, size,
1696 "\tstat %08x avail %04x " 1690 "\tstat %08x avail %04x "
1697 "(ep%d%s-%s)%s\n", 1691 "(ep%d%s-%s)%s\n",
1698 readl (&ep->regs->ep_stat), 1692 readl(&ep->regs->ep_stat),
1699 readl (&ep->regs->ep_avail), 1693 readl(&ep->regs->ep_avail),
1700 t1 & 0x0f, DIR_STRING (t1), 1694 t1 & 0x0f, DIR_STRING(t1),
1701 type_string (t1 >> 8), 1695 type_string(t1 >> 8),
1702 ep->stopped ? "*" : ""); 1696 ep->stopped ? "*" : "");
1703 size -= t; 1697 size -= t;
1704 next += t; 1698 next += t;
@@ -1706,42 +1700,41 @@ static ssize_t registers_show(struct device *_dev,
1706 if (!ep->dma) 1700 if (!ep->dma)
1707 continue; 1701 continue;
1708 1702
1709 t = scnprintf (next, size, 1703 t = scnprintf(next, size,
1710 " dma\tctl %08x stat %08x count %08x\n" 1704 " dma\tctl %08x stat %08x count %08x\n"
1711 "\taddr %08x desc %08x\n", 1705 "\taddr %08x desc %08x\n",
1712 readl (&ep->dma->dmactl), 1706 readl(&ep->dma->dmactl),
1713 readl (&ep->dma->dmastat), 1707 readl(&ep->dma->dmastat),
1714 readl (&ep->dma->dmacount), 1708 readl(&ep->dma->dmacount),
1715 readl (&ep->dma->dmaaddr), 1709 readl(&ep->dma->dmaaddr),
1716 readl (&ep->dma->dmadesc)); 1710 readl(&ep->dma->dmadesc));
1717 size -= t; 1711 size -= t;
1718 next += t; 1712 next += t;
1719 1713
1720 } 1714 }
1721 1715
1722 /* Indexed Registers */ 1716 /* Indexed Registers (none yet) */
1723 // none yet
1724 1717
1725 /* Statistics */ 1718 /* Statistics */
1726 t = scnprintf (next, size, "\nirqs: "); 1719 t = scnprintf(next, size, "\nirqs: ");
1727 size -= t; 1720 size -= t;
1728 next += t; 1721 next += t;
1729 for (i = 0; i < dev->n_ep; i++) { 1722 for (i = 0; i < dev->n_ep; i++) {
1730 struct net2280_ep *ep; 1723 struct net2280_ep *ep;
1731 1724
1732 ep = &dev->ep [i]; 1725 ep = &dev->ep[i];
1733 if (i && !ep->irqs) 1726 if (i && !ep->irqs)
1734 continue; 1727 continue;
1735 t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs); 1728 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
1736 size -= t; 1729 size -= t;
1737 next += t; 1730 next += t;
1738 1731
1739 } 1732 }
1740 t = scnprintf (next, size, "\n"); 1733 t = scnprintf(next, size, "\n");
1741 size -= t; 1734 size -= t;
1742 next += t; 1735 next += t;
1743 1736
1744 spin_unlock_irqrestore (&dev->lock, flags); 1737 spin_unlock_irqrestore(&dev->lock, flags);
1745 1738
1746 return PAGE_SIZE - size; 1739 return PAGE_SIZE - size;
1747} 1740}
@@ -1756,13 +1749,13 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1756 unsigned long flags; 1749 unsigned long flags;
1757 int i; 1750 int i;
1758 1751
1759 dev = dev_get_drvdata (_dev); 1752 dev = dev_get_drvdata(_dev);
1760 next = buf; 1753 next = buf;
1761 size = PAGE_SIZE; 1754 size = PAGE_SIZE;
1762 spin_lock_irqsave (&dev->lock, flags); 1755 spin_lock_irqsave(&dev->lock, flags);
1763 1756
1764 for (i = 0; i < dev->n_ep; i++) { 1757 for (i = 0; i < dev->n_ep; i++) {
1765 struct net2280_ep *ep = &dev->ep [i]; 1758 struct net2280_ep *ep = &dev->ep[i];
1766 struct net2280_request *req; 1759 struct net2280_request *req;
1767 int t; 1760 int t;
1768 1761
@@ -1773,40 +1766,40 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1773 if (!d) 1766 if (!d)
1774 continue; 1767 continue;
1775 t = d->bEndpointAddress; 1768 t = d->bEndpointAddress;
1776 t = scnprintf (next, size, 1769 t = scnprintf(next, size,
1777 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n", 1770 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1778 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, 1771 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1779 (t & USB_DIR_IN) ? "in" : "out", 1772 (t & USB_DIR_IN) ? "in" : "out",
1780 type_string(d->bmAttributes), 1773 type_string(d->bmAttributes),
1781 usb_endpoint_maxp (d) & 0x1fff, 1774 usb_endpoint_maxp(d) & 0x1fff,
1782 ep->dma ? "dma" : "pio", ep->fifo_size 1775 ep->dma ? "dma" : "pio", ep->fifo_size
1783 ); 1776 );
1784 } else /* ep0 should only have one transfer queued */ 1777 } else /* ep0 should only have one transfer queued */
1785 t = scnprintf (next, size, "ep0 max 64 pio %s\n", 1778 t = scnprintf(next, size, "ep0 max 64 pio %s\n",
1786 ep->is_in ? "in" : "out"); 1779 ep->is_in ? "in" : "out");
1787 if (t <= 0 || t > size) 1780 if (t <= 0 || t > size)
1788 goto done; 1781 goto done;
1789 size -= t; 1782 size -= t;
1790 next += t; 1783 next += t;
1791 1784
1792 if (list_empty (&ep->queue)) { 1785 if (list_empty(&ep->queue)) {
1793 t = scnprintf (next, size, "\t(nothing queued)\n"); 1786 t = scnprintf(next, size, "\t(nothing queued)\n");
1794 if (t <= 0 || t > size) 1787 if (t <= 0 || t > size)
1795 goto done; 1788 goto done;
1796 size -= t; 1789 size -= t;
1797 next += t; 1790 next += t;
1798 continue; 1791 continue;
1799 } 1792 }
1800 list_for_each_entry (req, &ep->queue, queue) { 1793 list_for_each_entry(req, &ep->queue, queue) {
1801 if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc)) 1794 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
1802 t = scnprintf (next, size, 1795 t = scnprintf(next, size,
1803 "\treq %p len %d/%d " 1796 "\treq %p len %d/%d "
1804 "buf %p (dmacount %08x)\n", 1797 "buf %p (dmacount %08x)\n",
1805 &req->req, req->req.actual, 1798 &req->req, req->req.actual,
1806 req->req.length, req->req.buf, 1799 req->req.length, req->req.buf,
1807 readl (&ep->dma->dmacount)); 1800 readl(&ep->dma->dmacount));
1808 else 1801 else
1809 t = scnprintf (next, size, 1802 t = scnprintf(next, size,
1810 "\treq %p len %d/%d buf %p\n", 1803 "\treq %p len %d/%d buf %p\n",
1811 &req->req, req->req.actual, 1804 &req->req, req->req.actual,
1812 req->req.length, req->req.buf); 1805 req->req.length, req->req.buf);
@@ -1819,12 +1812,12 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1819 struct net2280_dma *td; 1812 struct net2280_dma *td;
1820 1813
1821 td = req->td; 1814 td = req->td;
1822 t = scnprintf (next, size, "\t td %08x " 1815 t = scnprintf(next, size, "\t td %08x "
1823 " count %08x buf %08x desc %08x\n", 1816 " count %08x buf %08x desc %08x\n",
1824 (u32) req->td_dma, 1817 (u32) req->td_dma,
1825 le32_to_cpu (td->dmacount), 1818 le32_to_cpu(td->dmacount),
1826 le32_to_cpu (td->dmaaddr), 1819 le32_to_cpu(td->dmaaddr),
1827 le32_to_cpu (td->dmadesc)); 1820 le32_to_cpu(td->dmadesc));
1828 if (t <= 0 || t > size) 1821 if (t <= 0 || t > size)
1829 goto done; 1822 goto done;
1830 size -= t; 1823 size -= t;
@@ -1834,7 +1827,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1834 } 1827 }
1835 1828
1836done: 1829done:
1837 spin_unlock_irqrestore (&dev->lock, flags); 1830 spin_unlock_irqrestore(&dev->lock, flags);
1838 return PAGE_SIZE - size; 1831 return PAGE_SIZE - size;
1839} 1832}
1840static DEVICE_ATTR_RO(queues); 1833static DEVICE_ATTR_RO(queues);
@@ -1842,8 +1835,8 @@ static DEVICE_ATTR_RO(queues);
1842 1835
1843#else 1836#else
1844 1837
1845#define device_create_file(a,b) (0) 1838#define device_create_file(a, b) (0)
1846#define device_remove_file(a,b) do { } while (0) 1839#define device_remove_file(a, b) do { } while (0)
1847 1840
1848#endif 1841#endif
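The sysfs stub macros above pick up checkpatch's "space required after that ','" rule, which applies to macro parameter lists just like ordinary argument lists. A one-line illustration with a made-up macro name:

        /* checkpatch wants "stub_file(a, b)", not "stub_file(a,b)" */
        #define stub_file(a, b) (0)
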
1849 1842
@@ -1853,33 +1846,33 @@ static DEVICE_ATTR_RO(queues);
1853 * to/from another device fifo instead of to/from memory. 1846 * to/from another device fifo instead of to/from memory.
1854 */ 1847 */
1855 1848
1856static void set_fifo_mode (struct net2280 *dev, int mode) 1849static void set_fifo_mode(struct net2280 *dev, int mode)
1857{ 1850{
1858 /* keeping high bits preserves BAR2 */ 1851 /* keeping high bits preserves BAR2 */
1859 writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl); 1852 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1860 1853
1861 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */ 1854 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1862 INIT_LIST_HEAD (&dev->gadget.ep_list); 1855 INIT_LIST_HEAD(&dev->gadget.ep_list);
1863 list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list); 1856 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1864 list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list); 1857 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1865 switch (mode) { 1858 switch (mode) {
1866 case 0: 1859 case 0:
1867 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list); 1860 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1868 list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list); 1861 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
1869 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024; 1862 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1870 break; 1863 break;
1871 case 1: 1864 case 1:
1872 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048; 1865 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
1873 break; 1866 break;
1874 case 2: 1867 case 2:
1875 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list); 1868 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1876 dev->ep [1].fifo_size = 2048; 1869 dev->ep[1].fifo_size = 2048;
1877 dev->ep [2].fifo_size = 1024; 1870 dev->ep[2].fifo_size = 1024;
1878 break; 1871 break;
1879 } 1872 }
1880 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */ 1873 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1881 list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list); 1874 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
1882 list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list); 1875 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
1883} 1876}
1884 1877
1885static void defect7374_disable_data_eps(struct net2280 *dev) 1878static void defect7374_disable_data_eps(struct net2280 *dev)
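set_fifo_mode() above is mostly the "space prohibited before open square bracket" rule: dev->ep [1] becomes dev->ep[1]. The same rule in a tiny self-contained example (fifo_size and second_fifo are invented, not driver symbols):

        static int fifo_size[3] = { 1024, 2048, 1024 };

        static int second_fifo(void)
        {
                /* no space between the array name and '[' */
                return fifo_size[1];
        }
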
@@ -2011,14 +2004,14 @@ static void usb_reset_228x(struct net2280 *dev)
2011 u32 tmp; 2004 u32 tmp;
2012 2005
2013 dev->gadget.speed = USB_SPEED_UNKNOWN; 2006 dev->gadget.speed = USB_SPEED_UNKNOWN;
2014 (void) readl (&dev->usb->usbctl); 2007 (void) readl(&dev->usb->usbctl);
2015 2008
2016 net2280_led_init (dev); 2009 net2280_led_init(dev);
2017 2010
2018 /* disable automatic responses, and irqs */ 2011 /* disable automatic responses, and irqs */
2019 writel (0, &dev->usb->stdrsp); 2012 writel(0, &dev->usb->stdrsp);
2020 writel (0, &dev->regs->pciirqenb0); 2013 writel(0, &dev->regs->pciirqenb0);
2021 writel (0, &dev->regs->pciirqenb1); 2014 writel(0, &dev->regs->pciirqenb1);
2022 2015
2023 /* clear old dma and irq state */ 2016 /* clear old dma and irq state */
2024 for (tmp = 0; tmp < 4; tmp++) { 2017 for (tmp = 0; tmp < 4; tmp++) {
@@ -2027,7 +2020,7 @@ static void usb_reset_228x(struct net2280 *dev)
2027 abort_dma(ep); 2020 abort_dma(ep);
2028 } 2021 }
2029 2022
2030 writel (~0, &dev->regs->irqstat0), 2023 writel(~0, &dev->regs->irqstat0),
2031 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1), 2024 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
2032 2025
2033 /* reset, and enable pci */ 2026 /* reset, and enable pci */
@@ -2036,10 +2029,10 @@ static void usb_reset_228x(struct net2280 *dev)
2036 BIT(FIFO_SOFT_RESET) | 2029 BIT(FIFO_SOFT_RESET) |
2037 BIT(USB_SOFT_RESET) | 2030 BIT(USB_SOFT_RESET) |
2038 BIT(M8051_RESET); 2031 BIT(M8051_RESET);
2039 writel (tmp, &dev->regs->devinit); 2032 writel(tmp, &dev->regs->devinit);
2040 2033
2041 /* standard fifo and endpoint allocations */ 2034 /* standard fifo and endpoint allocations */
2042 set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0); 2035 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
2043} 2036}
2044 2037
2045static void usb_reset_338x(struct net2280 *dev) 2038static void usb_reset_338x(struct net2280 *dev)
@@ -2112,35 +2105,35 @@ static void usb_reinit_228x(struct net2280 *dev)
2112 2105
2113 /* basic endpoint init */ 2106 /* basic endpoint init */
2114 for (tmp = 0; tmp < 7; tmp++) { 2107 for (tmp = 0; tmp < 7; tmp++) {
2115 struct net2280_ep *ep = &dev->ep [tmp]; 2108 struct net2280_ep *ep = &dev->ep[tmp];
2116 2109
2117 ep->ep.name = ep_name [tmp]; 2110 ep->ep.name = ep_name[tmp];
2118 ep->dev = dev; 2111 ep->dev = dev;
2119 ep->num = tmp; 2112 ep->num = tmp;
2120 2113
2121 if (tmp > 0 && tmp <= 4) { 2114 if (tmp > 0 && tmp <= 4) {
2122 ep->fifo_size = 1024; 2115 ep->fifo_size = 1024;
2123 if (init_dma) 2116 if (init_dma)
2124 ep->dma = &dev->dma [tmp - 1]; 2117 ep->dma = &dev->dma[tmp - 1];
2125 } else 2118 } else
2126 ep->fifo_size = 64; 2119 ep->fifo_size = 64;
2127 ep->regs = &dev->epregs [tmp]; 2120 ep->regs = &dev->epregs[tmp];
2128 ep->cfg = &dev->epregs[tmp]; 2121 ep->cfg = &dev->epregs[tmp];
2129 ep_reset_228x(dev->regs, ep); 2122 ep_reset_228x(dev->regs, ep);
2130 } 2123 }
2131 usb_ep_set_maxpacket_limit(&dev->ep [0].ep, 64); 2124 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
2132 usb_ep_set_maxpacket_limit(&dev->ep [5].ep, 64); 2125 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
2133 usb_ep_set_maxpacket_limit(&dev->ep [6].ep, 64); 2126 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
2134 2127
2135 dev->gadget.ep0 = &dev->ep [0].ep; 2128 dev->gadget.ep0 = &dev->ep[0].ep;
2136 dev->ep [0].stopped = 0; 2129 dev->ep[0].stopped = 0;
2137 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list); 2130 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2138 2131
2139 /* we want to prevent lowlevel/insecure access from the USB host, 2132 /* we want to prevent lowlevel/insecure access from the USB host,
2140 * but erratum 0119 means this enable bit is ignored 2133 * but erratum 0119 means this enable bit is ignored
2141 */ 2134 */
2142 for (tmp = 0; tmp < 5; tmp++) 2135 for (tmp = 0; tmp < 5; tmp++)
2143 writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg); 2136 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
2144} 2137}
2145 2138
2146static void usb_reinit_338x(struct net2280 *dev) 2139static void usb_reinit_338x(struct net2280 *dev)
@@ -2263,7 +2256,7 @@ static void ep0_start_228x(struct net2280 *dev)
2263 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) | 2256 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
2264 BIT(CLEAR_NAK_OUT_PACKETS) | 2257 BIT(CLEAR_NAK_OUT_PACKETS) |
2265 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE) 2258 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
2266 , &dev->epregs [0].ep_rsp); 2259 , &dev->epregs[0].ep_rsp);
2267 2260
2268 /* 2261 /*
2269 * hardware optionally handles a bunch of standard requests 2262 * hardware optionally handles a bunch of standard requests
@@ -2298,7 +2291,7 @@ static void ep0_start_228x(struct net2280 *dev)
2298 &dev->regs->pciirqenb1); 2291 &dev->regs->pciirqenb1);
2299 2292
2300 /* don't leave any writes posted */ 2293 /* don't leave any writes posted */
2301 (void) readl (&dev->usb->usbctl); 2294 (void) readl(&dev->usb->usbctl);
2302} 2295}
2303 2296
2304static void ep0_start_338x(struct net2280 *dev) 2297static void ep0_start_338x(struct net2280 *dev)
@@ -2377,20 +2370,22 @@ static int net2280_start(struct usb_gadget *_gadget,
2377 || !driver->setup) 2370 || !driver->setup)
2378 return -EINVAL; 2371 return -EINVAL;
2379 2372
2380 dev = container_of (_gadget, struct net2280, gadget); 2373 dev = container_of(_gadget, struct net2280, gadget);
2381 2374
2382 for (i = 0; i < dev->n_ep; i++) 2375 for (i = 0; i < dev->n_ep; i++)
2383 dev->ep [i].irqs = 0; 2376 dev->ep[i].irqs = 0;
2384 2377
2385 /* hook up the driver ... */ 2378 /* hook up the driver ... */
2386 dev->softconnect = 1; 2379 dev->softconnect = 1;
2387 driver->driver.bus = NULL; 2380 driver->driver.bus = NULL;
2388 dev->driver = driver; 2381 dev->driver = driver;
2389 2382
2390 retval = device_create_file (&dev->pdev->dev, &dev_attr_function); 2383 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2391 if (retval) goto err_unbind; 2384 if (retval)
2392 retval = device_create_file (&dev->pdev->dev, &dev_attr_queues); 2385 goto err_unbind;
2393 if (retval) goto err_func; 2386 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2387 if (retval)
2388 goto err_func;
2394 2389
2395 /* Enable force-full-speed testing mode, if desired */ 2390 /* Enable force-full-speed testing mode, if desired */
2396 if (full_speed && dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) 2391 if (full_speed && dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
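In net2280_start() above, "if (retval) goto err_unbind;" is split so the goto sits on its own line; checkpatch reports "trailing statements should be on next line" whenever the branch body shares a line with the conditional. A hypothetical stand-alone example (clamp_positive is not a driver function):

        static int clamp_positive(int v)
        {
                /* branch body on its own line, never "if (v < 0) return 0;" */
                if (v < 0)
                        return 0;
                return v;
        }
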
@@ -2399,30 +2394,29 @@ static int net2280_start(struct usb_gadget *_gadget,
2399 /* ... then enable host detection and ep0; and we're ready 2394 /* ... then enable host detection and ep0; and we're ready
2400 * for set_configuration as well as eventual disconnect. 2395 * for set_configuration as well as eventual disconnect.
2401 */ 2396 */
2402 net2280_led_active (dev, 1); 2397 net2280_led_active(dev, 1);
2403 2398
2404 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) 2399 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
2405 defect7374_enable_data_eps_zero(dev); 2400 defect7374_enable_data_eps_zero(dev);
2406 2401
2407 ep0_start (dev); 2402 ep0_start(dev);
2408 2403
2409 DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n", 2404 DEBUG(dev, "%s ready, usbctl %08x stdrsp %08x\n",
2410 driver->driver.name, 2405 driver->driver.name,
2411 readl (&dev->usb->usbctl), 2406 readl(&dev->usb->usbctl),
2412 readl (&dev->usb->stdrsp)); 2407 readl(&dev->usb->stdrsp));
2413 2408
2414 /* pci writes may still be posted */ 2409 /* pci writes may still be posted */
2415 return 0; 2410 return 0;
2416 2411
2417err_func: 2412err_func:
2418 device_remove_file (&dev->pdev->dev, &dev_attr_function); 2413 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2419err_unbind: 2414err_unbind:
2420 dev->driver = NULL; 2415 dev->driver = NULL;
2421 return retval; 2416 return retval;
2422} 2417}
2423 2418
2424static void 2419static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2425stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
2426{ 2420{
2427 int i; 2421 int i;
2428 2422
@@ -2433,9 +2427,9 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
2433 /* stop hardware; prevent new request submissions; 2427 /* stop hardware; prevent new request submissions;
2434 * and kill any outstanding requests. 2428 * and kill any outstanding requests.
2435 */ 2429 */
2436 usb_reset (dev); 2430 usb_reset(dev);
2437 for (i = 0; i < dev->n_ep; i++) 2431 for (i = 0; i < dev->n_ep; i++)
2438 nuke (&dev->ep [i]); 2432 nuke(&dev->ep[i]);
2439 2433
2440 /* report disconnect; the driver is already quiesced */ 2434 /* report disconnect; the driver is already quiesced */
2441 if (driver) { 2435 if (driver) {
@@ -2444,7 +2438,7 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
2444 spin_lock(&dev->lock); 2438 spin_lock(&dev->lock);
2445 } 2439 }
2446 2440
2447 usb_reinit (dev); 2441 usb_reinit(dev);
2448} 2442}
2449 2443
2450static int net2280_stop(struct usb_gadget *_gadget, 2444static int net2280_stop(struct usb_gadget *_gadget,
@@ -2453,22 +2447,22 @@ static int net2280_stop(struct usb_gadget *_gadget,
2453 struct net2280 *dev; 2447 struct net2280 *dev;
2454 unsigned long flags; 2448 unsigned long flags;
2455 2449
2456 dev = container_of (_gadget, struct net2280, gadget); 2450 dev = container_of(_gadget, struct net2280, gadget);
2457 2451
2458 spin_lock_irqsave (&dev->lock, flags); 2452 spin_lock_irqsave(&dev->lock, flags);
2459 stop_activity (dev, driver); 2453 stop_activity(dev, driver);
2460 spin_unlock_irqrestore (&dev->lock, flags); 2454 spin_unlock_irqrestore(&dev->lock, flags);
2461 2455
2462 dev->driver = NULL; 2456 dev->driver = NULL;
2463 2457
2464 net2280_led_active (dev, 0); 2458 net2280_led_active(dev, 0);
2465 2459
2466 /* Disable full-speed test mode */ 2460 /* Disable full-speed test mode */
2467 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) 2461 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
2468 writel(0, &dev->usb->xcvrdiag); 2462 writel(0, &dev->usb->xcvrdiag);
2469 2463
2470 device_remove_file (&dev->pdev->dev, &dev_attr_function); 2464 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2471 device_remove_file (&dev->pdev->dev, &dev_attr_queues); 2465 device_remove_file(&dev->pdev->dev, &dev_attr_queues);
2472 2466
2473 DEBUG(dev, "unregistered driver '%s'\n", 2467 DEBUG(dev, "unregistered driver '%s'\n",
2474 driver ? driver->driver.name : ""); 2468 driver ? driver->driver.name : "");
@@ -2482,31 +2476,31 @@ static int net2280_stop(struct usb_gadget *_gadget,
2482 * also works for dma-capable endpoints, in pio mode or just 2476 * also works for dma-capable endpoints, in pio mode or just
2483 * to manually advance the queue after short OUT transfers. 2477 * to manually advance the queue after short OUT transfers.
2484 */ 2478 */
2485static void handle_ep_small (struct net2280_ep *ep) 2479static void handle_ep_small(struct net2280_ep *ep)
2486{ 2480{
2487 struct net2280_request *req; 2481 struct net2280_request *req;
2488 u32 t; 2482 u32 t;
2489 /* 0 error, 1 mid-data, 2 done */ 2483 /* 0 error, 1 mid-data, 2 done */
2490 int mode = 1; 2484 int mode = 1;
2491 2485
2492 if (!list_empty (&ep->queue)) 2486 if (!list_empty(&ep->queue))
2493 req = list_entry (ep->queue.next, 2487 req = list_entry(ep->queue.next,
2494 struct net2280_request, queue); 2488 struct net2280_request, queue);
2495 else 2489 else
2496 req = NULL; 2490 req = NULL;
2497 2491
2498 /* ack all, and handle what we care about */ 2492 /* ack all, and handle what we care about */
2499 t = readl (&ep->regs->ep_stat); 2493 t = readl(&ep->regs->ep_stat);
2500 ep->irqs++; 2494 ep->irqs++;
2501#if 0 2495#if 0
2502 VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n", 2496 VDEBUG(ep->dev, "%s ack ep_stat %08x, req %p\n",
2503 ep->ep.name, t, req ? &req->req : 0); 2497 ep->ep.name, t, req ? &req->req : 0);
2504#endif 2498#endif
2505 if (!ep->is_in || ep->dev->pdev->device == 0x2280) 2499 if (!ep->is_in || ep->dev->pdev->device == 0x2280)
2506 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat); 2500 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
2507 else 2501 else
2508 /* Added for 2282 */ 2502 /* Added for 2282 */
2509 writel (t, &ep->regs->ep_stat); 2503 writel(t, &ep->regs->ep_stat);
2510 2504
2511 /* for ep0, monitor token irqs to catch data stage length errors 2505 /* for ep0, monitor token irqs to catch data stage length errors
2512 * and to synchronize on status. 2506 * and to synchronize on status.
@@ -2518,33 +2512,33 @@ static void handle_ep_small (struct net2280_ep *ep)
2518 * control requests could be slightly faster without token synch for 2512 * control requests could be slightly faster without token synch for
2519 * status, but status can jam up that way. 2513 * status, but status can jam up that way.
2520 */ 2514 */
2521 if (unlikely (ep->num == 0)) { 2515 if (unlikely(ep->num == 0)) {
2522 if (ep->is_in) { 2516 if (ep->is_in) {
2523 /* status; stop NAKing */ 2517 /* status; stop NAKing */
2524 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) { 2518 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
2525 if (ep->dev->protocol_stall) { 2519 if (ep->dev->protocol_stall) {
2526 ep->stopped = 1; 2520 ep->stopped = 1;
2527 set_halt (ep); 2521 set_halt(ep);
2528 } 2522 }
2529 if (!req) 2523 if (!req)
2530 allow_status (ep); 2524 allow_status(ep);
2531 mode = 2; 2525 mode = 2;
2532 /* reply to extra IN data tokens with a zlp */ 2526 /* reply to extra IN data tokens with a zlp */
2533 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2527 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2534 if (ep->dev->protocol_stall) { 2528 if (ep->dev->protocol_stall) {
2535 ep->stopped = 1; 2529 ep->stopped = 1;
2536 set_halt (ep); 2530 set_halt(ep);
2537 mode = 2; 2531 mode = 2;
2538 } else if (ep->responded && 2532 } else if (ep->responded &&
2539 !req && !ep->stopped) 2533 !req && !ep->stopped)
2540 write_fifo (ep, NULL); 2534 write_fifo(ep, NULL);
2541 } 2535 }
2542 } else { 2536 } else {
2543 /* status; stop NAKing */ 2537 /* status; stop NAKing */
2544 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) { 2538 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2545 if (ep->dev->protocol_stall) { 2539 if (ep->dev->protocol_stall) {
2546 ep->stopped = 1; 2540 ep->stopped = 1;
2547 set_halt (ep); 2541 set_halt(ep);
2548 } 2542 }
2549 mode = 2; 2543 mode = 2;
2550 /* an extra OUT token is an error */ 2544 /* an extra OUT token is an error */
@@ -2553,20 +2547,20 @@ static void handle_ep_small (struct net2280_ep *ep)
2553 && req->req.actual == req->req.length) 2547 && req->req.actual == req->req.length)
2554 || (ep->responded && !req)) { 2548 || (ep->responded && !req)) {
2555 ep->dev->protocol_stall = 1; 2549 ep->dev->protocol_stall = 1;
2556 set_halt (ep); 2550 set_halt(ep);
2557 ep->stopped = 1; 2551 ep->stopped = 1;
2558 if (req) 2552 if (req)
2559 done (ep, req, -EOVERFLOW); 2553 done(ep, req, -EOVERFLOW);
2560 req = NULL; 2554 req = NULL;
2561 } 2555 }
2562 } 2556 }
2563 } 2557 }
2564 2558
2565 if (unlikely (!req)) 2559 if (unlikely(!req))
2566 return; 2560 return;
2567 2561
2568 /* manual DMA queue advance after short OUT */ 2562 /* manual DMA queue advance after short OUT */
2569 if (likely (ep->dma)) { 2563 if (likely(ep->dma)) {
2570 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) { 2564 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
2571 u32 count; 2565 u32 count;
2572 int stopped = ep->stopped; 2566 int stopped = ep->stopped;
@@ -2576,27 +2570,27 @@ static void handle_ep_small (struct net2280_ep *ep)
2576 * iff (M < N) we won't ever see a DMA interrupt. 2570 * iff (M < N) we won't ever see a DMA interrupt.
2577 */ 2571 */
2578 ep->stopped = 1; 2572 ep->stopped = 1;
2579 for (count = 0; ; t = readl (&ep->regs->ep_stat)) { 2573 for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
2580 2574
2581 /* any preceding dma transfers must finish. 2575 /* any preceding dma transfers must finish.
2582 * dma handles (M >= N), may empty the queue 2576 * dma handles (M >= N), may empty the queue
2583 */ 2577 */
2584 scan_dma_completions (ep); 2578 scan_dma_completions(ep);
2585 if (unlikely (list_empty (&ep->queue) 2579 if (unlikely(list_empty(&ep->queue)
2586 || ep->out_overflow)) { 2580 || ep->out_overflow)) {
2587 req = NULL; 2581 req = NULL;
2588 break; 2582 break;
2589 } 2583 }
2590 req = list_entry (ep->queue.next, 2584 req = list_entry(ep->queue.next,
2591 struct net2280_request, queue); 2585 struct net2280_request, queue);
2592 2586
2593 /* here either (M < N), a "real" short rx; 2587 /* here either (M < N), a "real" short rx;
2594 * or (M == N) and the queue didn't empty 2588 * or (M == N) and the queue didn't empty
2595 */ 2589 */
2596 if (likely(t & BIT(FIFO_EMPTY))) { 2590 if (likely(t & BIT(FIFO_EMPTY))) {
2597 count = readl (&ep->dma->dmacount); 2591 count = readl(&ep->dma->dmacount);
2598 count &= DMA_BYTE_COUNT_MASK; 2592 count &= DMA_BYTE_COUNT_MASK;
2599 if (readl (&ep->dma->dmadesc) 2593 if (readl(&ep->dma->dmadesc)
2600 != req->td_dma) 2594 != req->td_dma)
2601 req = NULL; 2595 req = NULL;
2602 break; 2596 break;
@@ -2606,37 +2600,37 @@ static void handle_ep_small (struct net2280_ep *ep)
2606 2600
2607 /* stop DMA, leave ep NAKing */ 2601 /* stop DMA, leave ep NAKing */
2608 writel(BIT(DMA_ABORT), &ep->dma->dmastat); 2602 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
2609 spin_stop_dma (ep->dma); 2603 spin_stop_dma(ep->dma);
2610 2604
2611 if (likely (req)) { 2605 if (likely(req)) {
2612 req->td->dmacount = 0; 2606 req->td->dmacount = 0;
2613 t = readl (&ep->regs->ep_avail); 2607 t = readl(&ep->regs->ep_avail);
2614 dma_done (ep, req, count, 2608 dma_done(ep, req, count,
2615 (ep->out_overflow || t) 2609 (ep->out_overflow || t)
2616 ? -EOVERFLOW : 0); 2610 ? -EOVERFLOW : 0);
2617 } 2611 }
2618 2612
2619 /* also flush to prevent erratum 0106 trouble */ 2613 /* also flush to prevent erratum 0106 trouble */
2620 if (unlikely (ep->out_overflow 2614 if (unlikely(ep->out_overflow
2621 || (ep->dev->chiprev == 0x0100 2615 || (ep->dev->chiprev == 0x0100
2622 && ep->dev->gadget.speed 2616 && ep->dev->gadget.speed
2623 == USB_SPEED_FULL))) { 2617 == USB_SPEED_FULL))) {
2624 out_flush (ep); 2618 out_flush(ep);
2625 ep->out_overflow = 0; 2619 ep->out_overflow = 0;
2626 } 2620 }
2627 2621
2628 /* (re)start dma if needed, stop NAKing */ 2622 /* (re)start dma if needed, stop NAKing */
2629 ep->stopped = stopped; 2623 ep->stopped = stopped;
2630 if (!list_empty (&ep->queue)) 2624 if (!list_empty(&ep->queue))
2631 restart_dma (ep); 2625 restart_dma(ep);
2632 } else 2626 } else
2633 DEBUG (ep->dev, "%s dma ep_stat %08x ??\n", 2627 DEBUG(ep->dev, "%s dma ep_stat %08x ??\n",
2634 ep->ep.name, t); 2628 ep->ep.name, t);
2635 return; 2629 return;
2636 2630
2637 /* data packet(s) received (in the fifo, OUT) */ 2631 /* data packet(s) received (in the fifo, OUT) */
2638 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) { 2632 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
2639 if (read_fifo (ep, req) && ep->num != 0) 2633 if (read_fifo(ep, req) && ep->num != 0)
2640 mode = 2; 2634 mode = 2;
2641 2635
2642 /* data packet(s) transmitted (IN) */ 2636 /* data packet(s) transmitted (IN) */
@@ -2649,12 +2643,10 @@ static void handle_ep_small (struct net2280_ep *ep)
2649 req->req.actual += len; 2643 req->req.actual += len;
2650 2644
2651 /* if we wrote it all, we're usually done */ 2645 /* if we wrote it all, we're usually done */
2652 if (req->req.actual == req->req.length) { 2646 /* send zlps until the status stage */
2653 if (ep->num == 0) { 2647 if ((req->req.actual == req->req.length) &&
2654 /* send zlps until the status stage */ 2648 (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
2655 } else if (!req->req.zero || len != ep->ep.maxpacket)
2656 mode = 2; 2649 mode = 2;
2657 }
2658 2650
2659 /* there was nothing to do ... */ 2651 /* there was nothing to do ... */
2660 } else if (mode == 1) 2652 } else if (mode == 1)
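This hunk does slightly more than whitespace: the nested test in the IN-completion path is flattened. The empty ep0 branch ("send zlps until the status stage") is folded into one condition, so mode only advances to 2 when the whole request was written, no terminating zlp is still owed (!req->req.zero || len != ep->ep.maxpacket), and the endpoint is not ep0. The same flattening pattern in a generic, self-contained form (classify() and its arguments are invented):

        static int classify(int wrote_all, int zlp_owed, int is_ep0)
        {
                int mode = 1;

                /*
                 * nested: if (wrote_all) { if (!is_ep0) { if (!zlp_owed) mode = 2; } }
                 * flattened into one condition, as the patch does:
                 */
                if (wrote_all && !zlp_owed && !is_ep0)
                        mode = 2;

                return mode;
        }
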
@@ -2663,7 +2655,7 @@ static void handle_ep_small (struct net2280_ep *ep)
2663 /* done */ 2655 /* done */
2664 if (mode == 2) { 2656 if (mode == 2) {
2665 /* stream endpoints often resubmit/unlink in completion */ 2657 /* stream endpoints often resubmit/unlink in completion */
2666 done (ep, req, 0); 2658 done(ep, req, 0);
2667 2659
2668 /* maybe advance queue to next request */ 2660 /* maybe advance queue to next request */
2669 if (ep->num == 0) { 2661 if (ep->num == 0) {
@@ -2672,16 +2664,16 @@ static void handle_ep_small (struct net2280_ep *ep)
2672 * them control that, the api doesn't (yet) allow it. 2664 * them control that, the api doesn't (yet) allow it.
2673 */ 2665 */
2674 if (!ep->stopped) 2666 if (!ep->stopped)
2675 allow_status (ep); 2667 allow_status(ep);
2676 req = NULL; 2668 req = NULL;
2677 } else { 2669 } else {
2678 if (!list_empty (&ep->queue) && !ep->stopped) 2670 if (!list_empty(&ep->queue) && !ep->stopped)
2679 req = list_entry (ep->queue.next, 2671 req = list_entry(ep->queue.next,
2680 struct net2280_request, queue); 2672 struct net2280_request, queue);
2681 else 2673 else
2682 req = NULL; 2674 req = NULL;
2683 if (req && !ep->is_in) 2675 if (req && !ep->is_in)
2684 stop_out_naking (ep); 2676 stop_out_naking(ep);
2685 } 2677 }
2686 } 2678 }
2687 2679
@@ -2692,18 +2684,17 @@ static void handle_ep_small (struct net2280_ep *ep)
2692 2684
2693 /* load IN fifo with next packet (may be zlp) */ 2685 /* load IN fifo with next packet (may be zlp) */
2694 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) 2686 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
2695 write_fifo (ep, &req->req); 2687 write_fifo(ep, &req->req);
2696 } 2688 }
2697} 2689}
2698 2690
2699static struct net2280_ep * 2691static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
2700get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2701{ 2692{
2702 struct net2280_ep *ep; 2693 struct net2280_ep *ep;
2703 2694
2704 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 2695 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2705 return &dev->ep [0]; 2696 return &dev->ep[0];
2706 list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) { 2697 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2707 u8 bEndpointAddress; 2698 u8 bEndpointAddress;
2708 2699
2709 if (!ep->desc) 2700 if (!ep->desc)
@@ -3061,7 +3052,7 @@ next_endpoints3:
3061 return; 3052 return;
3062} 3053}
3063 3054
3064static void handle_stat0_irqs (struct net2280 *dev, u32 stat) 3055static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
3065{ 3056{
3066 struct net2280_ep *ep; 3057 struct net2280_ep *ep;
3067 u32 num, scratch; 3058 u32 num, scratch;
@@ -3070,12 +3061,12 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3070 stat &= ~BIT(INTA_ASSERTED); 3061 stat &= ~BIT(INTA_ASSERTED);
3071 if (!stat) 3062 if (!stat)
3072 return; 3063 return;
3073 // DEBUG (dev, "irqstat0 %04x\n", stat); 3064 /* DEBUG(dev, "irqstat0 %04x\n", stat); */
3074 3065
3075 /* starting a control request? */ 3066 /* starting a control request? */
3076 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) { 3067 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
3077 union { 3068 union {
3078 u32 raw [2]; 3069 u32 raw[2];
3079 struct usb_ctrlrequest r; 3070 struct usb_ctrlrequest r;
3080 } u; 3071 } u;
3081 int tmp; 3072 int tmp;
@@ -3096,19 +3087,20 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3096 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 3087 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
3097 EP0_HS_MAX_PACKET_SIZE); 3088 EP0_HS_MAX_PACKET_SIZE);
3098 } 3089 }
3099 net2280_led_speed (dev, dev->gadget.speed); 3090 net2280_led_speed(dev, dev->gadget.speed);
3100 DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed)); 3091 DEBUG(dev, "%s\n",
3092 usb_speed_string(dev->gadget.speed));
3101 } 3093 }
3102 3094
3103 ep = &dev->ep [0]; 3095 ep = &dev->ep[0];
3104 ep->irqs++; 3096 ep->irqs++;
3105 3097
3106 /* make sure any leftover request state is cleared */ 3098 /* make sure any leftover request state is cleared */
3107 stat &= ~BIT(ENDPOINT_0_INTERRUPT); 3099 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
3108 while (!list_empty (&ep->queue)) { 3100 while (!list_empty(&ep->queue)) {
3109 req = list_entry (ep->queue.next, 3101 req = list_entry(ep->queue.next,
3110 struct net2280_request, queue); 3102 struct net2280_request, queue);
3111 done (ep, req, (req->req.actual == req->req.length) 3103 done(ep, req, (req->req.actual == req->req.length)
3112 ? 0 : -EPROTO); 3104 ? 0 : -EPROTO);
3113 } 3105 }
3114 ep->stopped = 0; 3106 ep->stopped = 0;
@@ -3139,8 +3131,8 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3139 u.raw[0] = readl(&dev->usb->setup0123); 3131 u.raw[0] = readl(&dev->usb->setup0123);
3140 u.raw[1] = readl(&dev->usb->setup4567); 3132 u.raw[1] = readl(&dev->usb->setup4567);
3141 3133
3142 cpu_to_le32s (&u.raw [0]); 3134 cpu_to_le32s(&u.raw[0]);
3143 cpu_to_le32s (&u.raw [1]); 3135 cpu_to_le32s(&u.raw[1]);
3144 3136
3145 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) 3137 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
3146 defect7374_workaround(dev, u.r); 3138 defect7374_workaround(dev, u.r);
@@ -3165,12 +3157,12 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3165 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) | 3157 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3166 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3158 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3167 BIT(DATA_IN_TOKEN_INTERRUPT); 3159 BIT(DATA_IN_TOKEN_INTERRUPT);
3168 stop_out_naking (ep); 3160 stop_out_naking(ep);
3169 } else 3161 } else
3170 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) | 3162 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3171 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) | 3163 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3172 BIT(DATA_IN_TOKEN_INTERRUPT); 3164 BIT(DATA_IN_TOKEN_INTERRUPT);
3173 writel (scratch, &dev->epregs [0].ep_irqenb); 3165 writel(scratch, &dev->epregs[0].ep_irqenb);
3174 3166
3175 /* we made the hardware handle most lowlevel requests; 3167 /* we made the hardware handle most lowlevel requests;
3176 * everything else goes uplevel to the gadget code. 3168 * everything else goes uplevel to the gadget code.
@@ -3190,21 +3182,21 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3190 /* hw handles device and interface status */ 3182 /* hw handles device and interface status */
3191 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT)) 3183 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
3192 goto delegate; 3184 goto delegate;
3193 if ((e = get_ep_by_addr (dev, w_index)) == NULL 3185 e = get_ep_by_addr(dev, w_index);
3194 || w_length > 2) 3186 if (!e || w_length > 2)
3195 goto do_stall; 3187 goto do_stall;
3196 3188
3197 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT)) 3189 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
3198 status = cpu_to_le32 (1); 3190 status = cpu_to_le32(1);
3199 else 3191 else
3200 status = cpu_to_le32 (0); 3192 status = cpu_to_le32(0);
3201 3193
3202 /* don't bother with a request object! */ 3194 /* don't bother with a request object! */
3203 writel (0, &dev->epregs [0].ep_irqenb); 3195 writel(0, &dev->epregs[0].ep_irqenb);
3204 set_fifo_bytecount (ep, w_length); 3196 set_fifo_bytecount(ep, w_length);
3205 writel ((__force u32)status, &dev->epregs [0].ep_data); 3197 writel((__force u32)status, &dev->epregs[0].ep_data);
3206 allow_status (ep); 3198 allow_status(ep);
3207 VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status); 3199 VDEBUG(dev, "%s stat %02x\n", ep->ep.name, status);
3208 goto next_endpoints; 3200 goto next_endpoints;
3209 } 3201 }
3210 break; 3202 break;
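The GET_STATUS branch above (and the CLEAR_FEATURE/SET_FEATURE branches below) also drops the assignment-inside-a-condition idiom: "if ((e = get_ep_by_addr (dev, w_index)) == NULL || w_length > 2)" becomes a plain assignment followed by "if (!e || w_length > 2)", which is both a checkpatch error ("do not use assignment in if condition") and easier to read. A generic sketch under invented names (struct endpoint, lookup, check are not driver symbols):

        #include <stddef.h>

        struct endpoint { int num; };
        static struct endpoint endpoints[4];

        /* stand-in for a lookup that can fail, like get_ep_by_addr() */
        static struct endpoint *lookup(unsigned int index)
        {
                return (index < 4) ? &endpoints[index] : NULL;
        }

        static int check(unsigned int index)
        {
                struct endpoint *e;

                /* assign first, then test; '!e' instead of '== NULL' */
                e = lookup(index);
                if (!e)
                        return -1;
                return e->num;
        }
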
@@ -3217,7 +3209,8 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3217 if (w_value != USB_ENDPOINT_HALT 3209 if (w_value != USB_ENDPOINT_HALT
3218 || w_length != 0) 3210 || w_length != 0)
3219 goto do_stall; 3211 goto do_stall;
3220 if ((e = get_ep_by_addr (dev, w_index)) == NULL) 3212 e = get_ep_by_addr(dev, w_index);
3213 if (!e)
3221 goto do_stall; 3214 goto do_stall;
3222 if (e->wedged) { 3215 if (e->wedged) {
3223 VDEBUG(dev, "%s wedged, halt not cleared\n", 3216 VDEBUG(dev, "%s wedged, halt not cleared\n",
@@ -3230,7 +3223,7 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3230 !list_empty(&e->queue) && e->td_dma) 3223 !list_empty(&e->queue) && e->td_dma)
3231 restart_dma(e); 3224 restart_dma(e);
3232 } 3225 }
3233 allow_status (ep); 3226 allow_status(ep);
3234 goto next_endpoints; 3227 goto next_endpoints;
3235 } 3228 }
3236 break; 3229 break;
@@ -3243,35 +3236,36 @@ static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
3243 if (w_value != USB_ENDPOINT_HALT 3236 if (w_value != USB_ENDPOINT_HALT
3244 || w_length != 0) 3237 || w_length != 0)
3245 goto do_stall; 3238 goto do_stall;
3246 if ((e = get_ep_by_addr (dev, w_index)) == NULL) 3239 e = get_ep_by_addr(dev, w_index);
3240 if (!e)
3247 goto do_stall; 3241 goto do_stall;
3248 if (e->ep.name == ep0name) 3242 if (e->ep.name == ep0name)
3249 goto do_stall; 3243 goto do_stall;
3250 set_halt (e); 3244 set_halt(e);
3251 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX && e->dma) 3245 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX && e->dma)
3252 abort_dma(e); 3246 abort_dma(e);
3253 allow_status (ep); 3247 allow_status(ep);
3254 VDEBUG (dev, "%s set halt\n", ep->ep.name); 3248 VDEBUG(dev, "%s set halt\n", ep->ep.name);
3255 goto next_endpoints; 3249 goto next_endpoints;
3256 } 3250 }
3257 break; 3251 break;
3258 default: 3252 default:
3259delegate: 3253delegate:
3260 VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x " 3254 VDEBUG(dev, "setup %02x.%02x v%04x i%04x l%04x "
3261 "ep_cfg %08x\n", 3255 "ep_cfg %08x\n",
3262 u.r.bRequestType, u.r.bRequest, 3256 u.r.bRequestType, u.r.bRequest,
3263 w_value, w_index, w_length, 3257 w_value, w_index, w_length,
3264 readl(&ep->cfg->ep_cfg)); 3258 readl(&ep->cfg->ep_cfg));
3265 ep->responded = 0; 3259 ep->responded = 0;
3266 spin_unlock (&dev->lock); 3260 spin_unlock(&dev->lock);
3267 tmp = dev->driver->setup (&dev->gadget, &u.r); 3261 tmp = dev->driver->setup(&dev->gadget, &u.r);
3268 spin_lock (&dev->lock); 3262 spin_lock(&dev->lock);
3269 } 3263 }
3270 3264
3271 /* stall ep0 on error */ 3265 /* stall ep0 on error */
3272 if (tmp < 0) { 3266 if (tmp < 0) {
3273do_stall: 3267do_stall:
3274 VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n", 3268 VDEBUG(dev, "req %02x.%02x protocol STALL; stat %d\n",
3275 u.r.bRequestType, u.r.bRequest, tmp); 3269 u.r.bRequestType, u.r.bRequest, tmp);
3276 dev->protocol_stall = 1; 3270 dev->protocol_stall = 1;
3277 } 3271 }
@@ -3299,12 +3293,12 @@ next_endpoints:
3299 continue; 3293 continue;
3300 scratch ^= t; 3294 scratch ^= t;
3301 3295
3302 ep = &dev->ep [num]; 3296 ep = &dev->ep[num];
3303 handle_ep_small (ep); 3297 handle_ep_small(ep);
3304 } 3298 }
3305 3299
3306 if (stat) 3300 if (stat)
3307 DEBUG (dev, "unhandled irqstat0 %08x\n", stat); 3301 DEBUG(dev, "unhandled irqstat0 %08x\n", stat);
3308} 3302}
3309 3303
3310#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \ 3304#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
@@ -3316,7 +3310,7 @@ next_endpoints:
3316 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \ 3310 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3317 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3311 BIT(PCI_RETRY_ABORT_INTERRUPT))
3318 3312
3319static void handle_stat1_irqs (struct net2280 *dev, u32 stat) 3313static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3320{ 3314{
3321 struct net2280_ep *ep; 3315 struct net2280_ep *ep;
3322 u32 tmp, num, mask, scratch; 3316 u32 tmp, num, mask, scratch;
@@ -3331,17 +3325,17 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3331 * only indicates a change in the reset state). 3325 * only indicates a change in the reset state).
3332 */ 3326 */
3333 if (stat & tmp) { 3327 if (stat & tmp) {
3334 writel (tmp, &dev->regs->irqstat1); 3328 writel(tmp, &dev->regs->irqstat1);
3335 if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) 3329 if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT))
3336 && ((readl (&dev->usb->usbstat) & mask) 3330 && ((readl(&dev->usb->usbstat) & mask)
3337 == 0)) 3331 == 0))
3338 || ((readl (&dev->usb->usbctl) 3332 || ((readl(&dev->usb->usbctl)
3339 & BIT(VBUS_PIN)) == 0) 3333 & BIT(VBUS_PIN)) == 0)
3340 ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) { 3334 ) && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
3341 DEBUG (dev, "disconnect %s\n", 3335 DEBUG(dev, "disconnect %s\n",
3342 dev->driver->driver.name); 3336 dev->driver->driver.name);
3343 stop_activity (dev, dev->driver); 3337 stop_activity(dev, dev->driver);
3344 ep0_start (dev); 3338 ep0_start(dev);
3345 return; 3339 return;
3346 } 3340 }
3347 stat &= ~tmp; 3341 stat &= ~tmp;
@@ -3358,15 +3352,15 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3358 */ 3352 */
3359 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3353 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
3360 if (stat & tmp) { 3354 if (stat & tmp) {
3361 writel (tmp, &dev->regs->irqstat1); 3355 writel(tmp, &dev->regs->irqstat1);
3362 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3356 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
3363 if (dev->driver->suspend) 3357 if (dev->driver->suspend)
3364 dev->driver->suspend (&dev->gadget); 3358 dev->driver->suspend(&dev->gadget);
3365 if (!enable_suspend) 3359 if (!enable_suspend)
3366 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT); 3360 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
3367 } else { 3361 } else {
3368 if (dev->driver->resume) 3362 if (dev->driver->resume)
3369 dev->driver->resume (&dev->gadget); 3363 dev->driver->resume(&dev->gadget);
3370 /* at high speed, note erratum 0133 */ 3364 /* at high speed, note erratum 0133 */
3371 } 3365 }
3372 stat &= ~tmp; 3366 stat &= ~tmp;
@@ -3374,7 +3368,7 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3374 3368
3375 /* clear any other status/irqs */ 3369 /* clear any other status/irqs */
3376 if (stat) 3370 if (stat)
3377 writel (stat, &dev->regs->irqstat1); 3371 writel(stat, &dev->regs->irqstat1);
3378 3372
3379 /* some status we can just ignore */ 3373 /* some status we can just ignore */
3380 if (dev->pdev->device == 0x2280) 3374 if (dev->pdev->device == 0x2280)
@@ -3390,7 +3384,7 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3390 3384
3391 if (!stat) 3385 if (!stat)
3392 return; 3386 return;
3393 // DEBUG (dev, "irqstat1 %08x\n", stat); 3387 /* DEBUG(dev, "irqstat1 %08x\n", stat);*/
3394 3388
3395 /* DMA status, for ep-{a,b,c,d} */ 3389 /* DMA status, for ep-{a,b,c,d} */
3396 scratch = stat & DMA_INTERRUPTS; 3390 scratch = stat & DMA_INTERRUPTS;
@@ -3404,15 +3398,15 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3404 continue; 3398 continue;
3405 scratch ^= tmp; 3399 scratch ^= tmp;
3406 3400
3407 ep = &dev->ep [num + 1]; 3401 ep = &dev->ep[num + 1];
3408 dma = ep->dma; 3402 dma = ep->dma;
3409 3403
3410 if (!dma) 3404 if (!dma)
3411 continue; 3405 continue;
3412 3406
3413 /* clear ep's dma status */ 3407 /* clear ep's dma status */
3414 tmp = readl (&dma->dmastat); 3408 tmp = readl(&dma->dmastat);
3415 writel (tmp, &dma->dmastat); 3409 writel(tmp, &dma->dmastat);
3416 3410
3417 /* dma sync*/ 3411 /* dma sync*/
3418 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) { 3412 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
@@ -3427,11 +3421,11 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3427 */ 3421 */
3428 if (!use_dma_chaining) { 3422 if (!use_dma_chaining) {
3429 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) { 3423 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
3430 DEBUG (ep->dev, "%s no xact done? %08x\n", 3424 DEBUG(ep->dev, "%s no xact done? %08x\n",
3431 ep->ep.name, tmp); 3425 ep->ep.name, tmp);
3432 continue; 3426 continue;
3433 } 3427 }
3434 stop_dma (ep->dma); 3428 stop_dma(ep->dma);
3435 } 3429 }
3436 3430
3437 /* OUT transfers terminate when the data from the 3431 /* OUT transfers terminate when the data from the
@@ -3444,16 +3438,16 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3444 * long time ... we ignore that for now, accounting 3438 * long time ... we ignore that for now, accounting
3445 * precisely (like PIO does) needs per-packet irqs 3439 * precisely (like PIO does) needs per-packet irqs
3446 */ 3440 */
3447 scan_dma_completions (ep); 3441 scan_dma_completions(ep);
3448 3442
3449 /* disable dma on inactive queues; else maybe restart */ 3443 /* disable dma on inactive queues; else maybe restart */
3450 if (list_empty (&ep->queue)) { 3444 if (list_empty(&ep->queue)) {
3451 if (use_dma_chaining) 3445 if (use_dma_chaining)
3452 stop_dma (ep->dma); 3446 stop_dma(ep->dma);
3453 } else { 3447 } else {
3454 tmp = readl (&dma->dmactl); 3448 tmp = readl(&dma->dmactl);
3455 if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0) 3449 if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0)
3456 restart_dma (ep); 3450 restart_dma(ep);
3457 else if (ep->is_in && use_dma_chaining) { 3451 else if (ep->is_in && use_dma_chaining) {
3458 struct net2280_request *req; 3452 struct net2280_request *req;
3459 __le32 dmacount; 3453 __le32 dmacount;
@@ -3463,13 +3457,13 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3463 * used to trigger changing DMA_FIFO_VALIDATE 3457 * used to trigger changing DMA_FIFO_VALIDATE
3464 * (affects automagic zlp writes). 3458 * (affects automagic zlp writes).
3465 */ 3459 */
3466 req = list_entry (ep->queue.next, 3460 req = list_entry(ep->queue.next,
3467 struct net2280_request, queue); 3461 struct net2280_request, queue);
3468 dmacount = req->td->dmacount; 3462 dmacount = req->td->dmacount;
3469 dmacount &= cpu_to_le32(BIT(VALID_BIT) | 3463 dmacount &= cpu_to_le32(BIT(VALID_BIT) |
3470 DMA_BYTE_COUNT_MASK); 3464 DMA_BYTE_COUNT_MASK);
3471 if (dmacount && (dmacount & valid_bit) == 0) 3465 if (dmacount && (dmacount & valid_bit) == 0)
3472 restart_dma (ep); 3466 restart_dma(ep);
3473 } 3467 }
3474 } 3468 }
3475 ep->irqs++; 3469 ep->irqs++;
@@ -3479,21 +3473,21 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
3479 * if they appear very often, here's where to try recovering. 3473 * if they appear very often, here's where to try recovering.
3480 */ 3474 */
3481 if (stat & PCI_ERROR_INTERRUPTS) { 3475 if (stat & PCI_ERROR_INTERRUPTS) {
3482 ERROR (dev, "pci dma error; stat %08x\n", stat); 3476 ERROR(dev, "pci dma error; stat %08x\n", stat);
3483 stat &= ~PCI_ERROR_INTERRUPTS; 3477 stat &= ~PCI_ERROR_INTERRUPTS;
3484 /* these are fatal errors, but "maybe" they won't 3478 /* these are fatal errors, but "maybe" they won't
3485 * happen again ... 3479 * happen again ...
3486 */ 3480 */
3487 stop_activity (dev, dev->driver); 3481 stop_activity(dev, dev->driver);
3488 ep0_start (dev); 3482 ep0_start(dev);
3489 stat = 0; 3483 stat = 0;
3490 } 3484 }
3491 3485
3492 if (stat) 3486 if (stat)
3493 DEBUG (dev, "unhandled irqstat1 %08x\n", stat); 3487 DEBUG(dev, "unhandled irqstat1 %08x\n", stat);
3494} 3488}
3495 3489
3496static irqreturn_t net2280_irq (int irq, void *_dev) 3490static irqreturn_t net2280_irq(int irq, void *_dev)
3497{ 3491{
3498 struct net2280 *dev = _dev; 3492 struct net2280 *dev = _dev;
3499 3493
@@ -3502,13 +3496,13 @@ static irqreturn_t net2280_irq (int irq, void *_dev)
3502 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED)))) 3496 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
3503 return IRQ_NONE; 3497 return IRQ_NONE;
3504 3498
3505 spin_lock (&dev->lock); 3499 spin_lock(&dev->lock);
3506 3500
3507 /* handle disconnect, dma, and more */ 3501 /* handle disconnect, dma, and more */
3508 handle_stat1_irqs (dev, readl (&dev->regs->irqstat1)); 3502 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
3509 3503
3510 /* control requests and PIO */ 3504 /* control requests and PIO */
3511 handle_stat0_irqs (dev, readl (&dev->regs->irqstat0)); 3505 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
3512 3506
3513 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) { 3507 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
3514 /* re-enable interrupt to trigger any possible new interrupt */ 3508 /* re-enable interrupt to trigger any possible new interrupt */
@@ -3517,54 +3511,54 @@ static irqreturn_t net2280_irq (int irq, void *_dev)
3517 writel(pciirqenb1, &dev->regs->pciirqenb1); 3511 writel(pciirqenb1, &dev->regs->pciirqenb1);
3518 } 3512 }
3519 3513
3520 spin_unlock (&dev->lock); 3514 spin_unlock(&dev->lock);
3521 3515
3522 return IRQ_HANDLED; 3516 return IRQ_HANDLED;
3523} 3517}
3524 3518
3525/*-------------------------------------------------------------------------*/ 3519/*-------------------------------------------------------------------------*/
3526 3520
3527static void gadget_release (struct device *_dev) 3521static void gadget_release(struct device *_dev)
3528{ 3522{
3529 struct net2280 *dev = dev_get_drvdata (_dev); 3523 struct net2280 *dev = dev_get_drvdata(_dev);
3530 3524
3531 kfree (dev); 3525 kfree(dev);
3532} 3526}
3533 3527
3534/* tear down the binding between this driver and the pci device */ 3528/* tear down the binding between this driver and the pci device */
3535 3529
3536static void net2280_remove (struct pci_dev *pdev) 3530static void net2280_remove(struct pci_dev *pdev)
3537{ 3531{
3538 struct net2280 *dev = pci_get_drvdata (pdev); 3532 struct net2280 *dev = pci_get_drvdata(pdev);
3539 3533
3540 usb_del_gadget_udc(&dev->gadget); 3534 usb_del_gadget_udc(&dev->gadget);
3541 3535
3542 BUG_ON(dev->driver); 3536 BUG_ON(dev->driver);
3543 3537
3544 /* then clean up the resources we allocated during probe() */ 3538 /* then clean up the resources we allocated during probe() */
3545 net2280_led_shutdown (dev); 3539 net2280_led_shutdown(dev);
3546 if (dev->requests) { 3540 if (dev->requests) {
3547 int i; 3541 int i;
3548 for (i = 1; i < 5; i++) { 3542 for (i = 1; i < 5; i++) {
3549 if (!dev->ep [i].dummy) 3543 if (!dev->ep[i].dummy)
3550 continue; 3544 continue;
3551 pci_pool_free (dev->requests, dev->ep [i].dummy, 3545 pci_pool_free(dev->requests, dev->ep[i].dummy,
3552 dev->ep [i].td_dma); 3546 dev->ep[i].td_dma);
3553 } 3547 }
3554 pci_pool_destroy (dev->requests); 3548 pci_pool_destroy(dev->requests);
3555 } 3549 }
3556 if (dev->got_irq) 3550 if (dev->got_irq)
3557 free_irq (pdev->irq, dev); 3551 free_irq(pdev->irq, dev);
3558 if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX) 3552 if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
3559 pci_disable_msi(pdev); 3553 pci_disable_msi(pdev);
3560 if (dev->regs) 3554 if (dev->regs)
3561 iounmap (dev->regs); 3555 iounmap(dev->regs);
3562 if (dev->region) 3556 if (dev->region)
3563 release_mem_region (pci_resource_start (pdev, 0), 3557 release_mem_region(pci_resource_start(pdev, 0),
3564 pci_resource_len (pdev, 0)); 3558 pci_resource_len(pdev, 0));
3565 if (dev->enabled) 3559 if (dev->enabled)
3566 pci_disable_device (pdev); 3560 pci_disable_device(pdev);
3567 device_remove_file (&pdev->dev, &dev_attr_registers); 3561 device_remove_file(&pdev->dev, &dev_attr_registers);
3568 3562
3569 INFO (dev, "unbind\n"); 3563 INFO (dev, "unbind\n");
3570} 3564}
@@ -3573,7 +3567,7 @@ static void net2280_remove (struct pci_dev *pdev)
3573 * don't respond over USB until a gadget driver binds to us. 3567 * don't respond over USB until a gadget driver binds to us.
3574 */ 3568 */
3575 3569
3576static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) 3570static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3577{ 3571{
3578 struct net2280 *dev; 3572 struct net2280 *dev;
3579 unsigned long resource, len; 3573 unsigned long resource, len;
@@ -3584,14 +3578,14 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3584 use_dma_chaining = 0; 3578 use_dma_chaining = 0;
3585 3579
3586 /* alloc, and start init */ 3580 /* alloc, and start init */
3587 dev = kzalloc (sizeof *dev, GFP_KERNEL); 3581 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3588 if (dev == NULL){ 3582 if (dev == NULL) {
3589 retval = -ENOMEM; 3583 retval = -ENOMEM;
3590 goto done; 3584 goto done;
3591 } 3585 }
3592 3586
3593 pci_set_drvdata (pdev, dev); 3587 pci_set_drvdata(pdev, dev);
3594 spin_lock_init (&dev->lock); 3588 spin_lock_init(&dev->lock);
3595 dev->pdev = pdev; 3589 dev->pdev = pdev;
3596 dev->gadget.ops = &net2280_ops; 3590 dev->gadget.ops = &net2280_ops;
3597 dev->gadget.max_speed = (dev->pdev->vendor == PCI_VENDOR_ID_PLX) ? 3591 dev->gadget.max_speed = (dev->pdev->vendor == PCI_VENDOR_ID_PLX) ?
@@ -3601,8 +3595,8 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3601 dev->gadget.name = driver_name; 3595 dev->gadget.name = driver_name;
3602 3596
3603 /* now all the pci goodies ... */ 3597 /* now all the pci goodies ... */
3604 if (pci_enable_device (pdev) < 0) { 3598 if (pci_enable_device(pdev) < 0) {
3605 retval = -ENODEV; 3599 retval = -ENODEV;
3606 goto done; 3600 goto done;
3607 } 3601 }
3608 dev->enabled = 1; 3602 dev->enabled = 1;
@@ -3611,10 +3605,10 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3611 * BAR 1 is 8051 memory; unused here (note erratum 0103) 3605 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3612 * BAR 2 is fifo memory; unused here 3606 * BAR 2 is fifo memory; unused here
3613 */ 3607 */
3614 resource = pci_resource_start (pdev, 0); 3608 resource = pci_resource_start(pdev, 0);
3615 len = pci_resource_len (pdev, 0); 3609 len = pci_resource_len(pdev, 0);
3616 if (!request_mem_region (resource, len, driver_name)) { 3610 if (!request_mem_region(resource, len, driver_name)) {
3617 DEBUG (dev, "controller already in use\n"); 3611 DEBUG(dev, "controller already in use\n");
3618 retval = -EBUSY; 3612 retval = -EBUSY;
3619 goto done; 3613 goto done;
3620 } 3614 }
@@ -3624,9 +3618,9 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3624 * 8051 code into the chip, e.g. to turn on PCI PM. 3618 * 8051 code into the chip, e.g. to turn on PCI PM.
3625 */ 3619 */
3626 3620
3627 base = ioremap_nocache (resource, len); 3621 base = ioremap_nocache(resource, len);
3628 if (base == NULL) { 3622 if (base == NULL) {
3629 DEBUG (dev, "can't map memory\n"); 3623 DEBUG(dev, "can't map memory\n");
3630 retval = -EFAULT; 3624 retval = -EFAULT;
3631 goto done; 3625 goto done;
3632 } 3626 }
@@ -3655,7 +3649,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3655 dev->plregs = (struct usb338x_pl_regs __iomem *) 3649 dev->plregs = (struct usb338x_pl_regs __iomem *)
3656 (base + 0x0800); 3650 (base + 0x0800);
3657 usbstat = readl(&dev->usb->usbstat); 3651 usbstat = readl(&dev->usb->usbstat);
3658 dev->enhanced_mode = (usbstat & BIT(11)) ? 1 : 0; 3652 dev->enhanced_mode = !!(usbstat & BIT(11));
3659 dev->n_ep = (dev->enhanced_mode) ? 9 : 5; 3653 dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
3660 /* put into initial config, link up all endpoints */ 3654 /* put into initial config, link up all endpoints */
3661 fsmvalue = get_idx_reg(dev->regs, SCRATCH) & 3655 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
@@ -3670,12 +3664,12 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3670 writel(0, &dev->usb->usbctl); 3664 writel(0, &dev->usb->usbctl);
3671 } 3665 }
3672 3666
3673 usb_reset (dev); 3667 usb_reset(dev);
3674 usb_reinit (dev); 3668 usb_reinit(dev);
3675 3669
3676 /* irq setup after old hardware is cleaned up */ 3670 /* irq setup after old hardware is cleaned up */
3677 if (!pdev->irq) { 3671 if (!pdev->irq) {
3678 ERROR (dev, "No IRQ. Check PCI setup!\n"); 3672 ERROR(dev, "No IRQ. Check PCI setup!\n");
3679 retval = -ENODEV; 3673 retval = -ENODEV;
3680 goto done; 3674 goto done;
3681 } 3675 }
@@ -3684,9 +3678,9 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3684 if (pci_enable_msi(pdev)) 3678 if (pci_enable_msi(pdev))
3685 ERROR(dev, "Failed to enable MSI mode\n"); 3679 ERROR(dev, "Failed to enable MSI mode\n");
3686 3680
3687 if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev) 3681 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
3688 != 0) { 3682 driver_name, dev)) {
3689 ERROR (dev, "request interrupt %d failed\n", pdev->irq); 3683 ERROR(dev, "request interrupt %d failed\n", pdev->irq);
3690 retval = -EBUSY; 3684 retval = -EBUSY;
3691 goto done; 3685 goto done;
3692 } 3686 }
@@ -3694,28 +3688,28 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3694 3688
3695 /* DMA setup */ 3689 /* DMA setup */
3696 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */ 3690 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
3697 dev->requests = pci_pool_create ("requests", pdev, 3691 dev->requests = pci_pool_create("requests", pdev,
3698 sizeof (struct net2280_dma), 3692 sizeof(struct net2280_dma),
3699 0 /* no alignment requirements */, 3693 0 /* no alignment requirements */,
3700 0 /* or page-crossing issues */); 3694 0 /* or page-crossing issues */);
3701 if (!dev->requests) { 3695 if (!dev->requests) {
3702 DEBUG (dev, "can't get request pool\n"); 3696 DEBUG(dev, "can't get request pool\n");
3703 retval = -ENOMEM; 3697 retval = -ENOMEM;
3704 goto done; 3698 goto done;
3705 } 3699 }
3706 for (i = 1; i < 5; i++) { 3700 for (i = 1; i < 5; i++) {
3707 struct net2280_dma *td; 3701 struct net2280_dma *td;
3708 3702
3709 td = pci_pool_alloc (dev->requests, GFP_KERNEL, 3703 td = pci_pool_alloc(dev->requests, GFP_KERNEL,
3710 &dev->ep [i].td_dma); 3704 &dev->ep[i].td_dma);
3711 if (!td) { 3705 if (!td) {
3712 DEBUG (dev, "can't get dummy %d\n", i); 3706 DEBUG(dev, "can't get dummy %d\n", i);
3713 retval = -ENOMEM; 3707 retval = -ENOMEM;
3714 goto done; 3708 goto done;
3715 } 3709 }
3716 td->dmacount = 0; /* not VALID */ 3710 td->dmacount = 0; /* not VALID */
3717 td->dmadesc = td->dmaaddr; 3711 td->dmadesc = td->dmaaddr;
3718 dev->ep [i].dummy = td; 3712 dev->ep[i].dummy = td;
3719 } 3713 }
3720 3714
3721 /* enable lower-overhead pci memory bursts during DMA */ 3715 /* enable lower-overhead pci memory bursts during DMA */
@@ -3729,22 +3723,23 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3729 BIT(DMA_READ_LINE_ENABLE), 3723 BIT(DMA_READ_LINE_ENABLE),
3730 &dev->pci->pcimstctl); 3724 &dev->pci->pcimstctl);
3731 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */ 3725 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
3732 pci_set_master (pdev); 3726 pci_set_master(pdev);
3733 pci_try_set_mwi (pdev); 3727 pci_try_set_mwi(pdev);
3734 3728
3735 /* ... also flushes any posted pci writes */ 3729 /* ... also flushes any posted pci writes */
3736 dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff; 3730 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
3737 3731
3738 /* done */ 3732 /* done */
3739 INFO (dev, "%s\n", driver_desc); 3733 INFO(dev, "%s\n", driver_desc);
3740 INFO (dev, "irq %d, pci mem %p, chip rev %04x\n", 3734 INFO(dev, "irq %d, pci mem %p, chip rev %04x\n",
3741 pdev->irq, base, dev->chiprev); 3735 pdev->irq, base, dev->chiprev);
3742 INFO(dev, "version: " DRIVER_VERSION "; dma %s %s\n", 3736 INFO(dev, "version: " DRIVER_VERSION "; dma %s %s\n",
3743 use_dma ? (use_dma_chaining ? "chaining" : "enabled") 3737 use_dma ? (use_dma_chaining ? "chaining" : "enabled")
3744 : "disabled", 3738 : "disabled",
3745 dev->enhanced_mode ? "enhanced mode" : "legacy mode"); 3739 dev->enhanced_mode ? "enhanced mode" : "legacy mode");
3746 retval = device_create_file (&pdev->dev, &dev_attr_registers); 3740 retval = device_create_file(&pdev->dev, &dev_attr_registers);
3747 if (retval) goto done; 3741 if (retval)
3742 goto done;
3748 3743
3749 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, 3744 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3750 gadget_release); 3745 gadget_release);
@@ -3754,7 +3749,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
3754 3749
3755done: 3750done:
3756 if (dev) 3751 if (dev)
3757 net2280_remove (pdev); 3752 net2280_remove(pdev);
3758 return retval; 3753 return retval;
3759} 3754}
3760 3755
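
Note on the error handling visible in the probe() hunks above: every failure jumps to the single done: label, which calls net2280_remove() even when only part of the initialization has completed, and the remove path guards each release with the flag or pointer that records the matching acquire (dev->enabled, dev->region, dev->regs, dev->got_irq, dev->requests). A minimal sketch of the same idiom follows; the names are hypothetical and it assumes only plain C, it is not code from the driver:

	#include <stdlib.h>

	struct ctx {
		void *mem;		/* set only once the buffer is allocated */
		int irq_claimed;	/* set only once the irq is claimed */
	};

	/* Safe on a partially initialized context: each release is guarded
	 * by the field that records the matching acquire, like net2280_remove().
	 */
	static void ctx_teardown(struct ctx *c)
	{
		if (c->irq_claimed)
			c->irq_claimed = 0;	/* stand-in for free_irq() */
		if (c->mem)
			free(c->mem);
	}

	static int ctx_setup(struct ctx *c)	/* caller passes a zeroed struct */
	{
		int retval = 0;

		c->mem = malloc(64);
		if (!c->mem) {
			retval = -1;
			goto done;		/* single exit, like probe() */
		}
		c->irq_claimed = 1;		/* stand-in for request_irq() */
		return 0;
	done:
		ctx_teardown(c);		/* tolerant of partial setup */
		return retval;
	}
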
@@ -3762,16 +3757,16 @@ done:
3762 * generating IRQs across the upcoming reboot. 3757 * generating IRQs across the upcoming reboot.
3763 */ 3758 */
3764 3759
3765static void net2280_shutdown (struct pci_dev *pdev) 3760static void net2280_shutdown(struct pci_dev *pdev)
3766{ 3761{
3767 struct net2280 *dev = pci_get_drvdata (pdev); 3762 struct net2280 *dev = pci_get_drvdata(pdev);
3768 3763
3769 /* disable IRQs */ 3764 /* disable IRQs */
3770 writel (0, &dev->regs->pciirqenb0); 3765 writel(0, &dev->regs->pciirqenb0);
3771 writel (0, &dev->regs->pciirqenb1); 3766 writel(0, &dev->regs->pciirqenb1);
3772 3767
3773 /* disable the pullup so the host will think we're gone */ 3768 /* disable the pullup so the host will think we're gone */
3774 writel (0, &dev->usb->usbctl); 3769 writel(0, &dev->usb->usbctl);
3775 3770
3776 /* Disable full-speed test mode */ 3771 /* Disable full-speed test mode */
3777 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) 3772 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
@@ -3781,7 +3776,7 @@ static void net2280_shutdown (struct pci_dev *pdev)
3781 3776
3782/*-------------------------------------------------------------------------*/ 3777/*-------------------------------------------------------------------------*/
3783 3778
3784static const struct pci_device_id pci_ids [] = { { 3779static const struct pci_device_id pci_ids[] = { {
3785 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 3780 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3786 .class_mask = ~0, 3781 .class_mask = ~0,
3787 .vendor = PCI_VENDOR_ID_PLX_LEGACY, 3782 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
@@ -3814,7 +3809,7 @@ static const struct pci_device_id pci_ids [] = { {
3814 }, 3809 },
3815{ /* end: all zeroes */ } 3810{ /* end: all zeroes */ }
3816}; 3811};
3817MODULE_DEVICE_TABLE (pci, pci_ids); 3812MODULE_DEVICE_TABLE(pci, pci_ids);
3818 3813
3819/* pci driver glue; this is a "new style" PCI driver module */ 3814/* pci driver glue; this is a "new style" PCI driver module */
3820static struct pci_driver net2280_pci_driver = { 3815static struct pci_driver net2280_pci_driver = {
@@ -3830,6 +3825,6 @@ static struct pci_driver net2280_pci_driver = {
3830 3825
3831module_pci_driver(net2280_pci_driver); 3826module_pci_driver(net2280_pci_driver);
3832 3827
3833MODULE_DESCRIPTION (DRIVER_DESC); 3828MODULE_DESCRIPTION(DRIVER_DESC);
3834MODULE_AUTHOR ("David Brownell"); 3829MODULE_AUTHOR("David Brownell");
3835MODULE_LICENSE ("GPL"); 3830MODULE_LICENSE("GPL");
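
The net2280.c hunks above are mechanical cleanups of the kind checkpatch.pl flags: no space between a function name and its opening parenthesis, no space before an array subscript, sizeof with parentheses, block comments instead of //, a space before an opening brace, and one statement per line in if bodies. A standalone sketch of the same conventions (illustrative only; the identifiers are made up and not taken from the driver):

	#include <stdlib.h>
	#include <string.h>

	struct state {
		int regs[4];
	};

	static struct state *state_alloc(void)
	{
		/* preferred form: sizeof(*st) in parentheses, no space before
		 * '(' or '[', block comments, and the if body on its own line
		 */
		struct state *st = malloc(sizeof(*st));

		if (!st)
			return NULL;
		memset(st->regs, 0, sizeof(st->regs));
		st->regs[0] = 1;
		return st;
	}
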
diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h
index e1c5d1a5a7d0..f019d6c74fc3 100644
--- a/drivers/usb/gadget/net2280.h
+++ b/drivers/usb/gadget/net2280.h
@@ -25,19 +25,18 @@
25 * caller must own the device lock. 25 * caller must own the device lock.
26 */ 26 */
27 27
28static inline u32 28static inline u32 get_idx_reg(struct net2280_regs __iomem *regs, u32 index)
29get_idx_reg (struct net2280_regs __iomem *regs, u32 index)
30{ 29{
31 writel (index, &regs->idxaddr); 30 writel(index, &regs->idxaddr);
32 /* NOTE: synchs device/cpu memory views */ 31 /* NOTE: synchs device/cpu memory views */
33 return readl (&regs->idxdata); 32 return readl(&regs->idxdata);
34} 33}
35 34
36static inline void 35static inline void
37set_idx_reg (struct net2280_regs __iomem *regs, u32 index, u32 value) 36set_idx_reg(struct net2280_regs __iomem *regs, u32 index, u32 value)
38{ 37{
39 writel (index, &regs->idxaddr); 38 writel(index, &regs->idxaddr);
40 writel (value, &regs->idxdata); 39 writel(value, &regs->idxdata);
41 /* posted, may not be visible yet */ 40 /* posted, may not be visible yet */
42} 41}
43 42
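
get_idx_reg() and set_idx_reg() implement the controller's indexed register window: the register index is written to idxaddr and the value then moves through idxdata, which is why the surrounding comment requires the caller to own the device lock, since the two accesses must not interleave with another context's use of the window. One call site from the net2280.c portion of this patch shows the typical read:

	/* from net2280_probe(): select REG_CHIPREV through idxaddr,
	 * then read the revision back via the idxdata window
	 */
	dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
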
@@ -81,7 +80,7 @@ struct net2280_dma {
81 __le32 dmaaddr; /* the buffer */ 80 __le32 dmaaddr; /* the buffer */
82 __le32 dmadesc; /* next dma descriptor */ 81 __le32 dmadesc; /* next dma descriptor */
83 __le32 _reserved; 82 __le32 _reserved;
84} __attribute__ ((aligned (16))); 83} __aligned(16);
85 84
86/*-------------------------------------------------------------------------*/ 85/*-------------------------------------------------------------------------*/
87 86
@@ -113,7 +112,7 @@ struct net2280_ep {
113 responded : 1; 112 responded : 1;
114}; 113};
115 114
116static inline void allow_status (struct net2280_ep *ep) 115static inline void allow_status(struct net2280_ep *ep)
117{ 116{
118 /* ep0 only */ 117 /* ep0 only */
119 writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE) | 118 writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE) |
@@ -152,7 +151,7 @@ struct net2280 {
152 struct usb_gadget gadget; 151 struct usb_gadget gadget;
153 spinlock_t lock; 152 spinlock_t lock;
154 struct net2280_ep ep[9]; 153 struct net2280_ep ep[9];
155 struct usb_gadget_driver *driver; 154 struct usb_gadget_driver *driver;
156 unsigned enabled : 1, 155 unsigned enabled : 1,
157 protocol_stall : 1, 156 protocol_stall : 1,
158 softconnect : 1, 157 softconnect : 1,
@@ -185,10 +184,10 @@ struct net2280 {
185 struct usb338x_pl_regs __iomem *plregs; 184 struct usb338x_pl_regs __iomem *plregs;
186 185
187 struct pci_pool *requests; 186 struct pci_pool *requests;
188 // statistics... 187 /* statistics...*/
189}; 188};
190 189
191static inline void set_halt (struct net2280_ep *ep) 190static inline void set_halt(struct net2280_ep *ep)
192{ 191{
193 /* ep0 and bulk/intr endpoints */ 192 /* ep0 and bulk/intr endpoints */
194 writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE) | 193 writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE) |
@@ -198,7 +197,7 @@ static inline void set_halt (struct net2280_ep *ep)
198 &ep->regs->ep_rsp); 197 &ep->regs->ep_rsp);
199} 198}
200 199
201static inline void clear_halt (struct net2280_ep *ep) 200static inline void clear_halt(struct net2280_ep *ep)
202{ 201{
203 /* ep0 and bulk/intr endpoints */ 202 /* ep0 and bulk/intr endpoints */
204 writel(BIT(CLEAR_ENDPOINT_HALT) | 203 writel(BIT(CLEAR_ENDPOINT_HALT) |
@@ -250,7 +249,7 @@ static inline void clear_halt (struct net2280_ep *ep)
250 249
251#ifdef USE_RDK_LEDS 250#ifdef USE_RDK_LEDS
252 251
253static inline void net2280_led_init (struct net2280 *dev) 252static inline void net2280_led_init(struct net2280 *dev)
254{ 253{
255 /* LED3 (green) is on during USB activity. note erratum 0113. */ 254 /* LED3 (green) is on during USB activity. note erratum 0113. */
256 writel(BIT(GPIO3_LED_SELECT) | 255 writel(BIT(GPIO3_LED_SELECT) |
@@ -263,9 +262,9 @@ static inline void net2280_led_init (struct net2280 *dev)
263 262
264/* indicate speed with bi-color LED 0/1 */ 263/* indicate speed with bi-color LED 0/1 */
265static inline 264static inline
266void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed) 265void net2280_led_speed(struct net2280 *dev, enum usb_device_speed speed)
267{ 266{
268 u32 val = readl (&dev->regs->gpioctl); 267 u32 val = readl(&dev->regs->gpioctl);
269 switch (speed) { 268 switch (speed) {
270 case USB_SPEED_SUPER: /* green + red */ 269 case USB_SPEED_SUPER: /* green + red */
271 val |= BIT(GPIO0_DATA) | BIT(GPIO1_DATA); 270 val |= BIT(GPIO0_DATA) | BIT(GPIO1_DATA);
@@ -282,25 +281,26 @@ void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed)
282 val &= ~(BIT(GPIO1_DATA) | BIT(GPIO0_DATA)); 281 val &= ~(BIT(GPIO1_DATA) | BIT(GPIO0_DATA));
283 break; 282 break;
284 } 283 }
285 writel (val, &dev->regs->gpioctl); 284 writel(val, &dev->regs->gpioctl);
286} 285}
287 286
288/* indicate power with LED 2 */ 287/* indicate power with LED 2 */
289static inline void net2280_led_active (struct net2280 *dev, int is_active) 288static inline void net2280_led_active(struct net2280 *dev, int is_active)
290{ 289{
291 u32 val = readl (&dev->regs->gpioctl); 290 u32 val = readl(&dev->regs->gpioctl);
292 291
293 // FIXME this LED never seems to turn on. 292 /* FIXME this LED never seems to turn on.*/
294 if (is_active) 293 if (is_active)
295 val |= GPIO2_DATA; 294 val |= GPIO2_DATA;
296 else 295 else
297 val &= ~GPIO2_DATA; 296 val &= ~GPIO2_DATA;
298 writel (val, &dev->regs->gpioctl); 297 writel(val, &dev->regs->gpioctl);
299} 298}
300static inline void net2280_led_shutdown (struct net2280 *dev) 299
300static inline void net2280_led_shutdown(struct net2280 *dev)
301{ 301{
302 /* turn off all four GPIO*_DATA bits */ 302 /* turn off all four GPIO*_DATA bits */
303 writel (readl (&dev->regs->gpioctl) & ~0x0f, 303 writel(readl(&dev->regs->gpioctl) & ~0x0f,
304 &dev->regs->gpioctl); 304 &dev->regs->gpioctl);
305} 305}
306 306
@@ -314,32 +314,32 @@ static inline void net2280_led_shutdown (struct net2280 *dev)
314 314
315/*-------------------------------------------------------------------------*/ 315/*-------------------------------------------------------------------------*/
316 316
317#define xprintk(dev,level,fmt,args...) \ 317#define xprintk(dev, level, fmt, args...) \
318 printk(level "%s %s: " fmt , driver_name , \ 318 printk(level "%s %s: " fmt, driver_name, \
319 pci_name(dev->pdev) , ## args) 319 pci_name(dev->pdev), ## args)
320 320
321#ifdef DEBUG 321#ifdef DEBUG
322#undef DEBUG 322#undef DEBUG
323#define DEBUG(dev,fmt,args...) \ 323#define DEBUG(dev, fmt, args...) \
324 xprintk(dev , KERN_DEBUG , fmt , ## args) 324 xprintk(dev, KERN_DEBUG, fmt, ## args)
325#else 325#else
326#define DEBUG(dev,fmt,args...) \ 326#define DEBUG(dev, fmt, args...) \
327 do { } while (0) 327 do { } while (0)
328#endif /* DEBUG */ 328#endif /* DEBUG*/
329 329
330#ifdef VERBOSE 330#ifdef VERBOSE
331#define VDEBUG DEBUG 331#define VDEBUG DEBUG
332#else 332#else
333#define VDEBUG(dev,fmt,args...) \ 333#define VDEBUG(dev, fmt, args...) \
334 do { } while (0) 334 do { } while (0)
335#endif /* VERBOSE */ 335#endif /* VERBOSE */
336 336
337#define ERROR(dev,fmt,args...) \ 337#define ERROR(dev, fmt, args...) \
338 xprintk(dev , KERN_ERR , fmt , ## args) 338 xprintk(dev, KERN_ERR, fmt, ## args)
339#define WARNING(dev,fmt,args...) \ 339#define WARNING(dev, fmt, args...) \
340 xprintk(dev , KERN_WARNING , fmt , ## args) 340 xprintk(dev, KERN_WARNING, fmt, ## args)
341#define INFO(dev,fmt,args...) \ 341#define INFO(dev, fmt, args...) \
342 xprintk(dev , KERN_INFO , fmt , ## args) 342 xprintk(dev, KERN_INFO, fmt, ## args)
343 343
344/*-------------------------------------------------------------------------*/ 344/*-------------------------------------------------------------------------*/
345 345
@@ -354,36 +354,36 @@ static inline void set_fifo_bytecount(struct net2280_ep *ep, unsigned count)
354 } 354 }
355} 355}
356 356
357static inline void start_out_naking (struct net2280_ep *ep) 357static inline void start_out_naking(struct net2280_ep *ep)
358{ 358{
359 /* NOTE: hardware races lurk here, and PING protocol issues */ 359 /* NOTE: hardware races lurk here, and PING protocol issues */
360 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 360 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
361 /* synch with device */ 361 /* synch with device */
362 readl (&ep->regs->ep_rsp); 362 readl(&ep->regs->ep_rsp);
363} 363}
364 364
365#ifdef DEBUG 365#ifdef DEBUG
366static inline void assert_out_naking (struct net2280_ep *ep, const char *where) 366static inline void assert_out_naking(struct net2280_ep *ep, const char *where)
367{ 367{
368 u32 tmp = readl (&ep->regs->ep_stat); 368 u32 tmp = readl(&ep->regs->ep_stat);
369 369
370 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { 370 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
371 DEBUG (ep->dev, "%s %s %08x !NAK\n", 371 DEBUG(ep->dev, "%s %s %08x !NAK\n",
372 ep->ep.name, where, tmp); 372 ep->ep.name, where, tmp);
373 writel(BIT(SET_NAK_OUT_PACKETS), 373 writel(BIT(SET_NAK_OUT_PACKETS),
374 &ep->regs->ep_rsp); 374 &ep->regs->ep_rsp);
375 } 375 }
376} 376}
377#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep,__func__) 377#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
378#else 378#else
379#define ASSERT_OUT_NAKING(ep) do {} while (0) 379#define ASSERT_OUT_NAKING(ep) do {} while (0)
380#endif 380#endif
381 381
382static inline void stop_out_naking (struct net2280_ep *ep) 382static inline void stop_out_naking(struct net2280_ep *ep)
383{ 383{
384 u32 tmp; 384 u32 tmp;
385 385
386 tmp = readl (&ep->regs->ep_stat); 386 tmp = readl(&ep->regs->ep_stat);
387 if ((tmp & BIT(NAK_OUT_PACKETS)) != 0) 387 if ((tmp & BIT(NAK_OUT_PACKETS)) != 0)
388 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); 388 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
389} 389}