Diffstat (limited to 'drivers/usb/gadget')
-rw-r--r--  drivers/usb/gadget/Kconfig         |   57
-rw-r--r--  drivers/usb/gadget/Makefile        |    1
-rw-r--r--  drivers/usb/gadget/amd5536udc.c    | 3454
-rw-r--r--  drivers/usb/gadget/amd5536udc.h    |  626
-rw-r--r--  drivers/usb/gadget/ether.c         |    4
-rw-r--r--  drivers/usb/gadget/gadget_chips.h  |   10
-rw-r--r--  drivers/usb/gadget/m66592-udc.c    |  255
-rw-r--r--  drivers/usb/gadget/m66592-udc.h    |  610
-rw-r--r--  drivers/usb/gadget/serial.c        |   25
9 files changed, 4577 insertions(+), 465 deletions(-)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 45e01e289455..767aed5b4bea 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -82,6 +82,27 @@ choice
 	Many controller drivers are platform-specific; these
 	often need board-specific hooks.
 
+config USB_GADGET_AMD5536UDC
+	boolean "AMD5536 UDC"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	   The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
+	   It is a USB Highspeed DMA capable USB device controller. Beside ep0
+	   it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+	   The UDC port supports OTG operation, and may be used as a host port
+	   if it's not being used to implement peripheral or OTG roles.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "amd5536udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_AMD5536UDC
+	tristate
+	depends on USB_GADGET_AMD5536UDC
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 config USB_GADGET_FSL_USB2
 	boolean "Freescale Highspeed USB DR Peripheral Controller"
 	depends on MPC834x || PPC_MPC831x
@@ -156,6 +177,24 @@ config USB_PXA2XX_SMALL
 	default y if USB_ETH
 	default y if USB_G_SERIAL
 
+config USB_GADGET_M66592
+	boolean "Renesas M66592 USB Peripheral Controller"
+	select USB_GADGET_DUALSPEED
+	help
+	   M66592 is a discrete USB peripheral controller chip that
+	   supports both full and high speed USB 2.0 data transfers.
+	   It has seven configurable endpoints, and endpoint zero.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "m66592_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_M66592
+	tristate
+	depends on USB_GADGET_M66592
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 config USB_GADGET_GOKU
 	boolean "Toshiba TC86C001 'Goku-S'"
 	depends on PCI
@@ -261,24 +300,6 @@ config USB_AT91
 	depends on USB_GADGET_AT91
 	default USB_GADGET
 
-config USB_GADGET_M66592
-	boolean "M66592 driver"
-	select USB_GADGET_DUALSPEED
-	help
-	   M66592 is a USB 2.0 peripheral controller.
-
-	   It has seven configurable endpoints, and endpoint zero.
-
-	   Say "y" to link the driver statically, or "m" to build a
-	   dynamically linked module called "m66592_udc" and force all
-	   gadget drivers to also be dynamically linked.
-
-config USB_M66592
-	tristate
-	depends on USB_GADGET_M66592
-	default USB_GADGET
-	select USB_GADGET_SELECTED
-
 config USB_GADGET_DUMMY_HCD
 	boolean "Dummy HCD (DEVELOPMENT)"
 	depends on (USB=y || (USB=m && USB_GADGET=m)) && EXPERIMENTAL
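
For orientation only, not part of the patch: with a modular gadget stack, enabling the new controller entry above would typically resolve, through "default USB_GADGET" and the selects, to a .config fragment like

    CONFIG_USB_GADGET=m
    CONFIG_USB_GADGET_DUALSPEED=y
    CONFIG_USB_GADGET_AMD5536UDC=y
    CONFIG_USB_AMD5536UDC=m
    CONFIG_USB_GADGET_SELECTED=y

so the controller driver itself is built as the "amd5536udc" module.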
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 8ae76f738635..1bc0f03550ce 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -7,6 +7,7 @@ endif
 
 obj-$(CONFIG_USB_DUMMY_HCD)	+= dummy_hcd.o
 obj-$(CONFIG_USB_NET2280)	+= net2280.o
+obj-$(CONFIG_USB_AMD5536UDC)	+= amd5536udc.o
 obj-$(CONFIG_USB_PXA2XX)	+= pxa2xx_udc.o
 obj-$(CONFIG_USB_GOKU)	+= goku_udc.o
 obj-$(CONFIG_USB_OMAP)	+= omap_udc.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
new file mode 100644
index 000000000000..714156ca8fe4
--- /dev/null
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -0,0 +1,3454 @@
1/*
 2 * amd5536udc.c -- AMD 5536 UDC high/full speed USB device controller
3 *
4 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
5 * Author: Thomas Dahlmann
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22/*
23 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
24 * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
25 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
26 *
27 * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
28 * be used as host port) and UOC bits PAD_EN and APU are set (should be done
29 * by BIOS init).
30 *
31 * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
32 * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
33 * can be used with gadget ether.
34 */
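/*
 * Load-time example (a sketch only; the parameters and their defaults are
 * defined with the module_param() declarations further down in this file):
 *
 *   modprobe amd5536udc use_dma=0          - PIO mode, e.g. for gadget ether
 *   modprobe amd5536udc use_dma_ppb_du=1   - packet-per-buffer DMA with
 *                                            descriptor update
 */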
35
36/* debug control */
37/* #define UDC_VERBOSE */
38
39/* Driver strings */
40#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
41#define UDC_DRIVER_VERSION_STRING "01.00.0206 - $Revision: #3 $"
42
43/* system */
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/kernel.h>
47#include <linux/version.h>
48#include <linux/delay.h>
49#include <linux/ioport.h>
50#include <linux/sched.h>
51#include <linux/slab.h>
52#include <linux/smp_lock.h>
53#include <linux/errno.h>
54#include <linux/init.h>
55#include <linux/timer.h>
56#include <linux/list.h>
57#include <linux/interrupt.h>
58#include <linux/ioctl.h>
59#include <linux/fs.h>
60#include <linux/dmapool.h>
61#include <linux/moduleparam.h>
62#include <linux/device.h>
63#include <linux/io.h>
64#include <linux/irq.h>
65
66#include <asm/byteorder.h>
67#include <asm/system.h>
68#include <asm/unaligned.h>
69
70/* gadget stack */
71#include <linux/usb/ch9.h>
72#include <linux/usb_gadget.h>
73
74/* udc specific */
75#include "amd5536udc.h"
76
77
78static void udc_tasklet_disconnect(unsigned long);
79static void empty_req_queue(struct udc_ep *);
80static int udc_probe(struct udc *dev);
81static void udc_basic_init(struct udc *dev);
82static void udc_setup_endpoints(struct udc *dev);
83static void udc_soft_reset(struct udc *dev);
84static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
85static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
86static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
87static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
88 unsigned long buf_len, gfp_t gfp_flags);
89static int udc_remote_wakeup(struct udc *dev);
90static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
91static void udc_pci_remove(struct pci_dev *pdev);
92
93/* description */
94static const char mod_desc[] = UDC_MOD_DESCRIPTION;
95static const char name[] = "amd5536udc";
96
97/* structure to hold endpoint function pointers */
98static const struct usb_ep_ops udc_ep_ops;
99
100/* received setup data */
101static union udc_setup_data setup_data;
102
103/* pointer to device object */
104static struct udc *udc;
105
106/* irq spin lock for soft reset */
107static DEFINE_SPINLOCK(udc_irq_spinlock);
108/* stall spin lock */
109static DEFINE_SPINLOCK(udc_stall_spinlock);
110
111/*
112* slave mode: pending bytes in rx fifo after nyet,
113* used if EPIN irq came but no req was available
114*/
115static unsigned int udc_rxfifo_pending;
116
117/* count soft resets after suspend to avoid loop */
118static int soft_reset_occured;
119static int soft_reset_after_usbreset_occured;
120
121/* timer */
122static struct timer_list udc_timer;
123static int stop_timer;
124
125/* set_rde -- Is used to control enabling of RX DMA. Problem is
126 * that UDC has only one bit (RDE) to enable/disable RX DMA for
127 * all OUT endpoints. So we have to handle race conditions like
128 * when OUT data reaches the fifo but no request was queued yet.
 129 * This cannot be solved by leaving the RX DMA disabled until a
130 * request gets queued because there may be other OUT packets
131 * in the FIFO (important for not blocking control traffic).
 132 * The value of set_rde controls the corresponding timer.
 133 *
 134 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
135 * set_rde 0 == do not touch RDE, do no start the RDE timer
136 * set_rde 1 == timer function will look whether FIFO has data
137 * set_rde 2 == set by timer function to enable RX DMA on next call
138 */
139static int set_rde = -1;
140
141static DECLARE_COMPLETION(on_exit);
142static struct timer_list udc_pollstall_timer;
143static int stop_pollstall_timer;
144static DECLARE_COMPLETION(on_pollstall_exit);
145
146/* tasklet for usb disconnect */
147static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
148 (unsigned long) &udc);
149
150
151/* endpoint names used for print */
152static const char ep0_string[] = "ep0in";
153static const char *ep_string[] = {
154 ep0_string,
155 "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
156 "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
157 "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
158 "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
159 "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
160 "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
161 "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
162};
163
164/* DMA usage flag */
165static int use_dma = 1;
166/* packet per buffer dma */
167static int use_dma_ppb = 1;
168/* with per descr. update */
169static int use_dma_ppb_du;
170/* buffer fill mode */
171static int use_dma_bufferfill_mode;
172/* full speed only mode */
173static int use_fullspeed;
174/* tx buffer size for high speed */
175static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
176
177/* module parameters */
178module_param(use_dma, bool, S_IRUGO);
179MODULE_PARM_DESC(use_dma, "true for DMA");
180module_param(use_dma_ppb, bool, S_IRUGO);
181MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
182module_param(use_dma_ppb_du, bool, S_IRUGO);
183MODULE_PARM_DESC(use_dma_ppb_du,
184 "true for DMA in packet per buffer mode with descriptor update");
185module_param(use_fullspeed, bool, S_IRUGO);
186MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
187
188/*---------------------------------------------------------------------------*/
189/* Prints UDC device registers and endpoint irq registers */
190static void print_regs(struct udc *dev)
191{
192 DBG(dev, "------- Device registers -------\n");
193 DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
194 DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
195 DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
196 DBG(dev, "\n");
197 DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
198 DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
199 DBG(dev, "\n");
200 DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
201 DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
202 DBG(dev, "\n");
203 DBG(dev, "USE DMA = %d\n", use_dma);
204 if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
205 DBG(dev, "DMA mode = PPBNDU (packet per buffer "
206 "WITHOUT desc. update)\n");
207 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
 208 } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
209 DBG(dev, "DMA mode = PPBDU (packet per buffer "
210 "WITH desc. update)\n");
211 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
212 }
213 if (use_dma && use_dma_bufferfill_mode) {
214 DBG(dev, "DMA mode = BF (buffer fill mode)\n");
215 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
216 }
217 if (!use_dma) {
218 dev_info(&dev->pdev->dev, "FIFO mode\n");
219 }
220 DBG(dev, "-------------------------------------------------------\n");
221}
222
223/* Masks unused interrupts */
224static int udc_mask_unused_interrupts(struct udc *dev)
225{
226 u32 tmp;
227
228 /* mask all dev interrupts */
229 tmp = AMD_BIT(UDC_DEVINT_SVC) |
230 AMD_BIT(UDC_DEVINT_ENUM) |
231 AMD_BIT(UDC_DEVINT_US) |
232 AMD_BIT(UDC_DEVINT_UR) |
233 AMD_BIT(UDC_DEVINT_ES) |
234 AMD_BIT(UDC_DEVINT_SI) |
235 AMD_BIT(UDC_DEVINT_SOF)|
236 AMD_BIT(UDC_DEVINT_SC);
237 writel(tmp, &dev->regs->irqmsk);
238
239 /* mask all ep interrupts */
240 writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
241
242 return 0;
243}
244
245/* Enables endpoint 0 interrupts */
246static int udc_enable_ep0_interrupts(struct udc *dev)
247{
248 u32 tmp;
249
250 DBG(dev, "udc_enable_ep0_interrupts()\n");
251
252 /* read irq mask */
253 tmp = readl(&dev->regs->ep_irqmsk);
254 /* enable ep0 irq's */
255 tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
256 & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
257 writel(tmp, &dev->regs->ep_irqmsk);
258
259 return 0;
260}
261
262/* Enables device interrupts for SET_INTF and SET_CONFIG */
263static int udc_enable_dev_setup_interrupts(struct udc *dev)
264{
265 u32 tmp;
266
267 DBG(dev, "enable device interrupts for setup data\n");
268
269 /* read irq mask */
270 tmp = readl(&dev->regs->irqmsk);
271
272 /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
273 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
274 & AMD_UNMASK_BIT(UDC_DEVINT_SC)
275 & AMD_UNMASK_BIT(UDC_DEVINT_UR)
276 & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
277 & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
278 writel(tmp, &dev->regs->irqmsk);
279
280 return 0;
281}
282
 283/* Calculates fifo start of endpoint based on preceding endpoints */
284static int udc_set_txfifo_addr(struct udc_ep *ep)
285{
286 struct udc *dev;
287 u32 tmp;
288 int i;
289
290 if (!ep || !(ep->in))
291 return -EINVAL;
292
293 dev = ep->dev;
294 ep->txfifo = dev->txfifo;
295
296 /* traverse ep's */
297 for (i = 0; i < ep->num; i++) {
298 if (dev->ep[i].regs) {
299 /* read fifo size */
300 tmp = readl(&dev->ep[i].regs->bufin_framenum);
301 tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
302 ep->txfifo += tmp;
303 }
304 }
305 return 0;
306}
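/*
 * Worked example (illustrative values only): if ep1in and ep2in were each
 * enabled with a 64-dword TX buffer, the loop above places ep3in at
 * dev->txfifo + 64 + 64, i.e. every IN endpoint's FIFO window starts right
 * behind the buffers of all lower-numbered IN endpoints.
 */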
307
308/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
309static u32 cnak_pending;
310
311static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
312{
313 if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
314 DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
315 cnak_pending |= 1 << (num);
316 ep->naking = 1;
317 } else
318 cnak_pending = cnak_pending & (~(1 << (num)));
319}
320
321
322/* Enables endpoint, is called by gadget driver */
323static int
324udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
325{
326 struct udc_ep *ep;
327 struct udc *dev;
328 u32 tmp;
329 unsigned long iflags;
330 u8 udc_csr_epix;
331
332 if (!usbep
333 || usbep->name == ep0_string
334 || !desc
335 || desc->bDescriptorType != USB_DT_ENDPOINT)
336 return -EINVAL;
337
338 ep = container_of(usbep, struct udc_ep, ep);
339 dev = ep->dev;
340
341 DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
342
343 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
344 return -ESHUTDOWN;
345
346 spin_lock_irqsave(&dev->lock, iflags);
347 ep->desc = desc;
348
349 ep->halted = 0;
350
351 /* set traffic type */
352 tmp = readl(&dev->ep[ep->num].regs->ctl);
353 tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
354 writel(tmp, &dev->ep[ep->num].regs->ctl);
355
356 /* set max packet size */
357 tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
358 tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_EP_MAX_PKT_SIZE);
359 ep->ep.maxpacket = desc->wMaxPacketSize;
360 writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
361
362 /* IN ep */
363 if (ep->in) {
364
365 /* ep ix in UDC CSR register space */
366 udc_csr_epix = ep->num;
367
368 /* set buffer size (tx fifo entries) */
369 tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
370 /* double buffering: fifo size = 2 x max packet size */
371 tmp = AMD_ADDBITS(
372 tmp,
373 desc->wMaxPacketSize * UDC_EPIN_BUFF_SIZE_MULT
374 / UDC_DWORD_BYTES,
375 UDC_EPIN_BUFF_SIZE);
376 writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
377
378 /* calc. tx fifo base addr */
379 udc_set_txfifo_addr(ep);
380
381 /* flush fifo */
382 tmp = readl(&ep->regs->ctl);
383 tmp |= AMD_BIT(UDC_EPCTL_F);
384 writel(tmp, &ep->regs->ctl);
385
386 /* OUT ep */
387 } else {
388 /* ep ix in UDC CSR register space */
389 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
390
391 /* set max packet size UDC CSR */
392 tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
393 tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize,
394 UDC_CSR_NE_MAX_PKT);
395 writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
396
397 if (use_dma && !ep->in) {
398 /* alloc and init BNA dummy request */
399 ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
400 ep->bna_occurred = 0;
401 }
402
403 if (ep->num != UDC_EP0OUT_IX)
404 dev->data_ep_enabled = 1;
405 }
406
407 /* set ep values */
408 tmp = readl(&dev->csr->ne[udc_csr_epix]);
409 /* max packet */
410 tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_CSR_NE_MAX_PKT);
411 /* ep number */
412 tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
413 /* ep direction */
414 tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
415 /* ep type */
416 tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
417 /* ep config */
418 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
419 /* ep interface */
420 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
421 /* ep alt */
422 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
423 /* write reg */
424 writel(tmp, &dev->csr->ne[udc_csr_epix]);
425
426 /* enable ep irq */
427 tmp = readl(&dev->regs->ep_irqmsk);
428 tmp &= AMD_UNMASK_BIT(ep->num);
429 writel(tmp, &dev->regs->ep_irqmsk);
430
431 /*
432 * clear NAK by writing CNAK
433 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
434 */
435 if (!use_dma || ep->in) {
436 tmp = readl(&ep->regs->ctl);
437 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
438 writel(tmp, &ep->regs->ctl);
439 ep->naking = 0;
440 UDC_QUEUE_CNAK(ep, ep->num);
441 }
442 tmp = desc->bEndpointAddress;
443 DBG(dev, "%s enabled\n", usbep->name);
444
445 spin_unlock_irqrestore(&dev->lock, iflags);
446 return 0;
447}
448
449/* Resets endpoint */
450static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
451{
452 u32 tmp;
453
454 VDBG(ep->dev, "ep-%d reset\n", ep->num);
455 ep->desc = NULL;
456 ep->ep.ops = &udc_ep_ops;
457 INIT_LIST_HEAD(&ep->queue);
458
459 ep->ep.maxpacket = (u16) ~0;
460 /* set NAK */
461 tmp = readl(&ep->regs->ctl);
462 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
463 writel(tmp, &ep->regs->ctl);
464 ep->naking = 1;
465
466 /* disable interrupt */
467 tmp = readl(&regs->ep_irqmsk);
468 tmp |= AMD_BIT(ep->num);
469 writel(tmp, &regs->ep_irqmsk);
470
471 if (ep->in) {
472 /* unset P and IN bit of potential former DMA */
473 tmp = readl(&ep->regs->ctl);
474 tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
475 writel(tmp, &ep->regs->ctl);
476
477 tmp = readl(&ep->regs->sts);
478 tmp |= AMD_BIT(UDC_EPSTS_IN);
479 writel(tmp, &ep->regs->sts);
480
481 /* flush the fifo */
482 tmp = readl(&ep->regs->ctl);
483 tmp |= AMD_BIT(UDC_EPCTL_F);
484 writel(tmp, &ep->regs->ctl);
485
486 }
487 /* reset desc pointer */
488 writel(0, &ep->regs->desptr);
489}
490
491/* Disables endpoint, is called by gadget driver */
492static int udc_ep_disable(struct usb_ep *usbep)
493{
494 struct udc_ep *ep = NULL;
495 unsigned long iflags;
496
497 if (!usbep)
498 return -EINVAL;
499
500 ep = container_of(usbep, struct udc_ep, ep);
501 if (usbep->name == ep0_string || !ep->desc)
502 return -EINVAL;
503
504 DBG(ep->dev, "Disable ep-%d\n", ep->num);
505
506 spin_lock_irqsave(&ep->dev->lock, iflags);
507 udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
508 empty_req_queue(ep);
509 ep_init(ep->dev->regs, ep);
510 spin_unlock_irqrestore(&ep->dev->lock, iflags);
511
512 return 0;
513}
514
515/* Allocates request packet, called by gadget driver */
516static struct usb_request *
517udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
518{
519 struct udc_request *req;
520 struct udc_data_dma *dma_desc;
521 struct udc_ep *ep;
522
523 if (!usbep)
524 return NULL;
525
526 ep = container_of(usbep, struct udc_ep, ep);
527
528 VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
529 req = kzalloc(sizeof(struct udc_request), gfp);
530 if (!req)
531 return NULL;
532
533 req->req.dma = DMA_DONT_USE;
534 INIT_LIST_HEAD(&req->queue);
535
536 if (ep->dma) {
537 /* ep0 in requests are allocated from data pool here */
538 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
539 &req->td_phys);
540 if (!dma_desc) {
541 kfree(req);
542 return NULL;
543 }
544
545 VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
546 "td_phys = %lx\n",
547 req, dma_desc,
548 (unsigned long)req->td_phys);
549 /* prevent from using desc. - set HOST BUSY */
550 dma_desc->status = AMD_ADDBITS(dma_desc->status,
551 UDC_DMA_STP_STS_BS_HOST_BUSY,
552 UDC_DMA_STP_STS_BS);
553 dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE);
554 req->td_data = dma_desc;
555 req->td_data_last = NULL;
556 req->chain_len = 1;
557 }
558
559 return &req->req;
560}
561
562/* Frees request packet, called by gadget driver */
563static void
564udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
565{
566 struct udc_ep *ep;
567 struct udc_request *req;
568
569 if (!usbep || !usbreq)
570 return;
571
572 ep = container_of(usbep, struct udc_ep, ep);
573 req = container_of(usbreq, struct udc_request, req);
574 VDBG(ep->dev, "free_req req=%p\n", req);
575 BUG_ON(!list_empty(&req->queue));
576 if (req->td_data) {
577 VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
578
579 /* free dma chain if created */
580 if (req->chain_len > 1) {
581 udc_free_dma_chain(ep->dev, req);
582 }
583
584 pci_pool_free(ep->dev->data_requests, req->td_data,
585 req->td_phys);
586 }
587 kfree(req);
588}
589
590/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
591static void udc_init_bna_dummy(struct udc_request *req)
592{
593 if (req) {
594 /* set last bit */
595 req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
596 /* set next pointer to itself */
597 req->td_data->next = req->td_phys;
598 /* set HOST BUSY */
599 req->td_data->status
600 = AMD_ADDBITS(req->td_data->status,
601 UDC_DMA_STP_STS_BS_DMA_DONE,
602 UDC_DMA_STP_STS_BS);
603#ifdef UDC_VERBOSE
604 pr_debug("bna desc = %p, sts = %08x\n",
605 req->td_data, req->td_data->status);
606#endif
607 }
608}
609
610/* Allocate BNA dummy descriptor */
611static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
612{
613 struct udc_request *req = NULL;
614 struct usb_request *_req = NULL;
615
616 /* alloc the dummy request */
617 _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
618 if (_req) {
619 req = container_of(_req, struct udc_request, req);
620 ep->bna_dummy_req = req;
621 udc_init_bna_dummy(req);
622 }
623 return req;
624}
625
626/* Write data to TX fifo for IN packets */
627static void
628udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
629{
630 u8 *req_buf;
631 u32 *buf;
632 int i, j;
633 unsigned bytes = 0;
634 unsigned remaining = 0;
635
636 if (!req || !ep)
637 return;
638
639 req_buf = req->buf + req->actual;
640 prefetch(req_buf);
641 remaining = req->length - req->actual;
642
643 buf = (u32 *) req_buf;
644
645 bytes = ep->ep.maxpacket;
646 if (bytes > remaining)
647 bytes = remaining;
648
649 /* dwords first */
650 for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
651 writel(*(buf + i), ep->txfifo);
652 }
653
654 /* remaining bytes must be written by byte access */
655 for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
656 writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
657 ep->txfifo);
658 }
659
660 /* dummy write confirm */
661 writel(0, &ep->regs->confirm);
662}
663
664/* Read dwords from RX fifo for OUT transfers */
665static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
666{
667 int i;
668
669 VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
670
671 for (i = 0; i < dwords; i++) {
672 *(buf + i) = readl(dev->rxfifo);
673 }
674 return 0;
675}
676
677/* Read bytes from RX fifo for OUT transfers */
678static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
679{
680 int i, j;
681 u32 tmp;
682
683 VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
684
685 /* dwords first */
686 for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
687 *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
688 }
689
690 /* remaining bytes must be read by byte access */
691 if (bytes % UDC_DWORD_BYTES) {
692 tmp = readl(dev->rxfifo);
693 for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
694 *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
695 tmp = tmp >> UDC_BITS_PER_BYTE;
696 }
697 }
698
699 return 0;
700}
701
702/* Read data from RX fifo for OUT transfers */
703static int
704udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
705{
706 u8 *buf;
707 unsigned buf_space;
708 unsigned bytes = 0;
709 unsigned finished = 0;
710
711 /* received number bytes */
712 bytes = readl(&ep->regs->sts);
713 bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
714
715 buf_space = req->req.length - req->req.actual;
716 buf = req->req.buf + req->req.actual;
717 if (bytes > buf_space) {
718 if ((buf_space % ep->ep.maxpacket) != 0) {
719 DBG(ep->dev,
720 "%s: rx %d bytes, rx-buf space = %d bytesn\n",
721 ep->ep.name, bytes, buf_space);
722 req->req.status = -EOVERFLOW;
723 }
724 bytes = buf_space;
725 }
726 req->req.actual += bytes;
727
728 /* last packet ? */
729 if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
730 || ((req->req.actual == req->req.length) && !req->req.zero))
731 finished = 1;
732
733 /* read rx fifo bytes */
734 VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
735 udc_rxfifo_read_bytes(ep->dev, buf, bytes);
736
737 return finished;
738}
739
740/* create/re-init a DMA descriptor or a DMA descriptor chain */
741static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
742{
743 int retval = 0;
744 u32 tmp;
745
746 VDBG(ep->dev, "prep_dma\n");
747 VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
748 ep->num, req->td_data);
749
750 /* set buffer pointer */
751 req->td_data->bufptr = req->req.dma;
752
753 /* set last bit */
754 req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
755
756 /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
757 if (use_dma_ppb) {
758
759 retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
760 if (retval != 0) {
761 if (retval == -ENOMEM)
762 DBG(ep->dev, "Out of DMA memory\n");
763 return retval;
764 }
765 if (ep->in) {
766 if (req->req.length == ep->ep.maxpacket) {
767 /* write tx bytes */
768 req->td_data->status =
769 AMD_ADDBITS(req->td_data->status,
770 ep->ep.maxpacket,
771 UDC_DMA_IN_STS_TXBYTES);
772
773 }
774 }
775
776 }
777
778 if (ep->in) {
779 VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
780 "maxpacket=%d ep%d\n",
781 use_dma_ppb, req->req.length,
782 ep->ep.maxpacket, ep->num);
783 /*
784 * if bytes < max packet then tx bytes must
785 * be written in packet per buffer mode
786 */
787 if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
788 || ep->num == UDC_EP0OUT_IX
789 || ep->num == UDC_EP0IN_IX) {
790 /* write tx bytes */
791 req->td_data->status =
792 AMD_ADDBITS(req->td_data->status,
793 req->req.length,
794 UDC_DMA_IN_STS_TXBYTES);
795 /* reset frame num */
796 req->td_data->status =
797 AMD_ADDBITS(req->td_data->status,
798 0,
799 UDC_DMA_IN_STS_FRAMENUM);
800 }
801 /* set HOST BUSY */
802 req->td_data->status =
803 AMD_ADDBITS(req->td_data->status,
804 UDC_DMA_STP_STS_BS_HOST_BUSY,
805 UDC_DMA_STP_STS_BS);
806 } else {
807 VDBG(ep->dev, "OUT set host ready\n");
808 /* set HOST READY */
809 req->td_data->status =
810 AMD_ADDBITS(req->td_data->status,
811 UDC_DMA_STP_STS_BS_HOST_READY,
812 UDC_DMA_STP_STS_BS);
813
814
815 /* clear NAK by writing CNAK */
816 if (ep->naking) {
817 tmp = readl(&ep->regs->ctl);
818 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
819 writel(tmp, &ep->regs->ctl);
820 ep->naking = 0;
821 UDC_QUEUE_CNAK(ep, ep->num);
822 }
823
824 }
825
826 return retval;
827}
828
829/* Completes request packet ... caller MUST hold lock */
830static void
831complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
832__releases(ep->dev->lock)
833__acquires(ep->dev->lock)
834{
835 struct udc *dev;
836 unsigned halted;
837
838 VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
839
840 dev = ep->dev;
841 /* unmap DMA */
842 if (req->dma_mapping) {
843 if (ep->in)
844 pci_unmap_single(dev->pdev,
845 req->req.dma,
846 req->req.length,
847 PCI_DMA_TODEVICE);
848 else
849 pci_unmap_single(dev->pdev,
850 req->req.dma,
851 req->req.length,
852 PCI_DMA_FROMDEVICE);
853 req->dma_mapping = 0;
854 req->req.dma = DMA_DONT_USE;
855 }
856
857 halted = ep->halted;
858 ep->halted = 1;
859
860 /* set new status if pending */
861 if (req->req.status == -EINPROGRESS)
862 req->req.status = sts;
863
864 /* remove from ep queue */
865 list_del_init(&req->queue);
866
867 VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
868 &req->req, req->req.length, ep->ep.name, sts);
869
870 spin_unlock(&dev->lock);
871 req->req.complete(&ep->ep, &req->req);
872 spin_lock(&dev->lock);
873 ep->halted = halted;
874}
875
876/* frees pci pool descriptors of a DMA chain */
877static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
878{
879
880 int ret_val = 0;
881 struct udc_data_dma *td;
882 struct udc_data_dma *td_last = NULL;
883 unsigned int i;
884
885 DBG(dev, "free chain req = %p\n", req);
886
887 /* do not free first desc., will be done by free for request */
888 td_last = req->td_data;
889 td = phys_to_virt(td_last->next);
890
891 for (i = 1; i < req->chain_len; i++) {
892
893 pci_pool_free(dev->data_requests, td,
894 (dma_addr_t) td_last->next);
895 td_last = td;
896 td = phys_to_virt(td_last->next);
897 }
898
899 return ret_val;
900}
901
902/* Iterates to the end of a DMA chain and returns last descriptor */
903static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
904{
905 struct udc_data_dma *td;
906
907 td = req->td_data;
908 while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
909 td = phys_to_virt(td->next);
910 }
911
912 return td;
913
914}
915
916/* Iterates to the end of a DMA chain and counts bytes received */
917static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
918{
919 struct udc_data_dma *td;
920 u32 count;
921
922 td = req->td_data;
923 /* received number bytes */
924 count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
925
926 while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
927 td = phys_to_virt(td->next);
928 /* received number bytes */
929 if (td) {
930 count += AMD_GETBITS(td->status,
931 UDC_DMA_OUT_STS_RXBYTES);
932 }
933 }
934
935 return count;
936
937}
938
939/* Creates or re-inits a DMA chain */
940static int udc_create_dma_chain(
941 struct udc_ep *ep,
942 struct udc_request *req,
943 unsigned long buf_len, gfp_t gfp_flags
944)
945{
946 unsigned long bytes = req->req.length;
947 unsigned int i;
948 dma_addr_t dma_addr;
949 struct udc_data_dma *td = NULL;
950 struct udc_data_dma *last = NULL;
951 unsigned long txbytes;
952 unsigned create_new_chain = 0;
953 unsigned len;
954
955 VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
956 bytes, buf_len);
957 dma_addr = DMA_DONT_USE;
958
959 /* unset L bit in first desc for OUT */
960 if (!ep->in) {
961 req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
962 }
963
964 /* alloc only new desc's if not already available */
965 len = req->req.length / ep->ep.maxpacket;
966 if (req->req.length % ep->ep.maxpacket) {
967 len++;
968 }
969
970 if (len > req->chain_len) {
971 /* shorter chain already allocated before */
972 if (req->chain_len > 1) {
973 udc_free_dma_chain(ep->dev, req);
974 }
975 req->chain_len = len;
976 create_new_chain = 1;
977 }
978
979 td = req->td_data;
980 /* gen. required number of descriptors and buffers */
981 for (i = buf_len; i < bytes; i += buf_len) {
982 /* create or determine next desc. */
983 if (create_new_chain) {
984
985 td = pci_pool_alloc(ep->dev->data_requests,
986 gfp_flags, &dma_addr);
987 if (!td)
988 return -ENOMEM;
989
990 td->status = 0;
991 } else if (i == buf_len) {
992 /* first td */
993 td = (struct udc_data_dma *) phys_to_virt(
994 req->td_data->next);
995 td->status = 0;
996 } else {
997 td = (struct udc_data_dma *) phys_to_virt(last->next);
998 td->status = 0;
999 }
1000
1001
1002 if (td)
1003 td->bufptr = req->req.dma + i; /* assign buffer */
1004 else
1005 break;
1006
1007 /* short packet ? */
1008 if ((bytes - i) >= buf_len) {
1009 txbytes = buf_len;
1010 } else {
1011 /* short packet */
1012 txbytes = bytes - i;
1013 }
1014
1015 /* link td and assign tx bytes */
1016 if (i == buf_len) {
1017 if (create_new_chain) {
1018 req->td_data->next = dma_addr;
1019 } else {
1020 /* req->td_data->next = virt_to_phys(td); */
1021 }
1022 /* write tx bytes */
1023 if (ep->in) {
1024 /* first desc */
1025 req->td_data->status =
1026 AMD_ADDBITS(req->td_data->status,
1027 ep->ep.maxpacket,
1028 UDC_DMA_IN_STS_TXBYTES);
1029 /* second desc */
1030 td->status = AMD_ADDBITS(td->status,
1031 txbytes,
1032 UDC_DMA_IN_STS_TXBYTES);
1033 }
1034 } else {
1035 if (create_new_chain) {
1036 last->next = dma_addr;
1037 } else {
1038 /* last->next = virt_to_phys(td); */
1039 }
1040 if (ep->in) {
1041 /* write tx bytes */
1042 td->status = AMD_ADDBITS(td->status,
1043 txbytes,
1044 UDC_DMA_IN_STS_TXBYTES);
1045 }
1046 }
1047 last = td;
1048 }
1049 /* set last bit */
1050 if (td) {
1051 td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
1052 /* last desc. points to itself */
1053 req->td_data_last = td;
1054 }
1055
1056 return 0;
1057}
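/*
 * Rough picture of the chain built above for an OUT request of
 * 2.5 * maxpacket bytes (illustration only):
 *
 *   req->td_data    bufptr = req.dma                  L = 0
 *   2nd descriptor  bufptr = req.dma + maxpacket      L = 0
 *   3rd descriptor  bufptr = req.dma + 2 * maxpacket  L = 1 (last)
 *
 * For IN endpoints the per-descriptor tx byte counts are written in
 * addition (see the ep->in branches above).
 */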
1058
1059/* Enabling RX DMA */
1060static void udc_set_rde(struct udc *dev)
1061{
1062 u32 tmp;
1063
1064 VDBG(dev, "udc_set_rde()\n");
1065 /* stop RDE timer */
1066 if (timer_pending(&udc_timer)) {
1067 set_rde = 0;
1068 mod_timer(&udc_timer, jiffies - 1);
1069 }
1070 /* set RDE */
1071 tmp = readl(&dev->regs->ctl);
1072 tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1073 writel(tmp, &dev->regs->ctl);
1074}
1075
1076/* Queues a request packet, called by gadget driver */
1077static int
1078udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
1079{
1080 int retval = 0;
1081 u8 open_rxfifo = 0;
1082 unsigned long iflags;
1083 struct udc_ep *ep;
1084 struct udc_request *req;
1085 struct udc *dev;
1086 u32 tmp;
1087
1088 /* check the inputs */
1089 req = container_of(usbreq, struct udc_request, req);
1090
1091 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
1092 || !list_empty(&req->queue))
1093 return -EINVAL;
1094
1095 ep = container_of(usbep, struct udc_ep, ep);
1096 if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1097 return -EINVAL;
1098
1099 VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
1100 dev = ep->dev;
1101
1102 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1103 return -ESHUTDOWN;
1104
1105 /* map dma (usually done before) */
1106 if (ep->dma && usbreq->length != 0
1107 && (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
1108 VDBG(dev, "DMA map req %p\n", req);
1109 if (ep->in)
1110 usbreq->dma = pci_map_single(dev->pdev,
1111 usbreq->buf,
1112 usbreq->length,
1113 PCI_DMA_TODEVICE);
1114 else
1115 usbreq->dma = pci_map_single(dev->pdev,
1116 usbreq->buf,
1117 usbreq->length,
1118 PCI_DMA_FROMDEVICE);
1119 req->dma_mapping = 1;
1120 }
1121
1122 VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
1123 usbep->name, usbreq, usbreq->length,
1124 req->td_data, usbreq->buf);
1125
1126 spin_lock_irqsave(&dev->lock, iflags);
1127 usbreq->actual = 0;
1128 usbreq->status = -EINPROGRESS;
1129 req->dma_done = 0;
1130
1131 /* on empty queue just do first transfer */
1132 if (list_empty(&ep->queue)) {
1133 /* zlp */
1134 if (usbreq->length == 0) {
1135 /* IN zlp's are handled by hardware */
1136 complete_req(ep, req, 0);
1137 VDBG(dev, "%s: zlp\n", ep->ep.name);
1138 /*
1139 * if set_config or set_intf is waiting for ack by zlp
1140 * then set CSR_DONE
1141 */
1142 if (dev->set_cfg_not_acked) {
1143 tmp = readl(&dev->regs->ctl);
1144 tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
1145 writel(tmp, &dev->regs->ctl);
1146 dev->set_cfg_not_acked = 0;
1147 }
1148 /* setup command is ACK'ed now by zlp */
1149 if (dev->waiting_zlp_ack_ep0in) {
1150 /* clear NAK by writing CNAK in EP0_IN */
1151 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1152 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1153 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1154 dev->ep[UDC_EP0IN_IX].naking = 0;
1155 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
1156 UDC_EP0IN_IX);
1157 dev->waiting_zlp_ack_ep0in = 0;
1158 }
1159 goto finished;
1160 }
1161 if (ep->dma) {
1162 retval = prep_dma(ep, req, gfp);
1163 if (retval != 0)
1164 goto finished;
1165 /* write desc pointer to enable DMA */
1166 if (ep->in) {
1167 /* set HOST READY */
1168 req->td_data->status =
1169 AMD_ADDBITS(req->td_data->status,
1170 UDC_DMA_IN_STS_BS_HOST_READY,
1171 UDC_DMA_IN_STS_BS);
1172 }
1173
 1174 /* disable rx dma during descriptor update */
1175 if (!ep->in) {
1176 /* stop RDE timer */
1177 if (timer_pending(&udc_timer)) {
1178 set_rde = 0;
1179 mod_timer(&udc_timer, jiffies - 1);
1180 }
1181 /* clear RDE */
1182 tmp = readl(&dev->regs->ctl);
1183 tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1184 writel(tmp, &dev->regs->ctl);
1185 open_rxfifo = 1;
1186
1187 /*
1188 * if BNA occurred then let BNA dummy desc.
1189 * point to current desc.
1190 */
1191 if (ep->bna_occurred) {
1192 VDBG(dev, "copy to BNA dummy desc.\n");
1193 memcpy(ep->bna_dummy_req->td_data,
1194 req->td_data,
1195 sizeof(struct udc_data_dma));
1196 }
1197 }
1198 /* write desc pointer */
1199 writel(req->td_phys, &ep->regs->desptr);
1200
1201 /* clear NAK by writing CNAK */
1202 if (ep->naking) {
1203 tmp = readl(&ep->regs->ctl);
1204 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1205 writel(tmp, &ep->regs->ctl);
1206 ep->naking = 0;
1207 UDC_QUEUE_CNAK(ep, ep->num);
1208 }
1209
1210 if (ep->in) {
1211 /* enable ep irq */
1212 tmp = readl(&dev->regs->ep_irqmsk);
1213 tmp &= AMD_UNMASK_BIT(ep->num);
1214 writel(tmp, &dev->regs->ep_irqmsk);
1215 }
1216 }
1217
1218 } else if (ep->dma) {
1219
1220 /*
1221 * prep_dma not used for OUT ep's, this is not possible
1222 * for PPB modes, because of chain creation reasons
1223 */
1224 if (ep->in) {
1225 retval = prep_dma(ep, req, gfp);
1226 if (retval != 0)
1227 goto finished;
1228 }
1229 }
1230 VDBG(dev, "list_add\n");
1231 /* add request to ep queue */
1232 if (req) {
1233
1234 list_add_tail(&req->queue, &ep->queue);
1235
1236 /* open rxfifo if out data queued */
1237 if (open_rxfifo) {
1238 /* enable DMA */
1239 req->dma_going = 1;
1240 udc_set_rde(dev);
1241 if (ep->num != UDC_EP0OUT_IX)
1242 dev->data_ep_queued = 1;
1243 }
1244 /* stop OUT naking */
1245 if (!ep->in) {
1246 if (!use_dma && udc_rxfifo_pending) {
1247 DBG(dev, "udc_queue(): pending bytes in"
1248 "rxfifo after nyet\n");
1249 /*
 1250 * read pending bytes after nyet:
1251 * referring to isr
1252 */
1253 if (udc_rxfifo_read(ep, req)) {
1254 /* finish */
1255 complete_req(ep, req, 0);
1256 }
1257 udc_rxfifo_pending = 0;
1258
1259 }
1260 }
1261 }
1262
1263finished:
1264 spin_unlock_irqrestore(&dev->lock, iflags);
1265 return retval;
1266}
1267
1268/* Empty request queue of an endpoint; caller holds spinlock */
1269static void empty_req_queue(struct udc_ep *ep)
1270{
1271 struct udc_request *req;
1272
1273 ep->halted = 1;
1274 while (!list_empty(&ep->queue)) {
1275 req = list_entry(ep->queue.next,
1276 struct udc_request,
1277 queue);
1278 complete_req(ep, req, -ESHUTDOWN);
1279 }
1280}
1281
1282/* Dequeues a request packet, called by gadget driver */
1283static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
1284{
1285 struct udc_ep *ep;
1286 struct udc_request *req;
1287 unsigned halted;
1288 unsigned long iflags;
1289
1290 ep = container_of(usbep, struct udc_ep, ep);
1291 if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
1292 && ep->num != UDC_EP0OUT_IX)))
1293 return -EINVAL;
1294
1295 req = container_of(usbreq, struct udc_request, req);
1296
1297 spin_lock_irqsave(&ep->dev->lock, iflags);
1298 halted = ep->halted;
1299 ep->halted = 1;
1300 /* request in processing or next one */
1301 if (ep->queue.next == &req->queue) {
1302 if (ep->dma && req->dma_going) {
1303 if (ep->in)
1304 ep->cancel_transfer = 1;
1305 else {
1306 u32 tmp;
1307 u32 dma_sts;
1308 /* stop potential receive DMA */
1309 tmp = readl(&udc->regs->ctl);
1310 writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
1311 &udc->regs->ctl);
1312 /*
1313 * Cancel transfer later in ISR
1314 * if descriptor was touched.
1315 */
1316 dma_sts = AMD_GETBITS(req->td_data->status,
1317 UDC_DMA_OUT_STS_BS);
1318 if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
1319 ep->cancel_transfer = 1;
1320 else {
1321 udc_init_bna_dummy(ep->req);
1322 writel(ep->bna_dummy_req->td_phys,
1323 &ep->regs->desptr);
1324 }
1325 writel(tmp, &udc->regs->ctl);
1326 }
1327 }
1328 }
1329 complete_req(ep, req, -ECONNRESET);
1330 ep->halted = halted;
1331
1332 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1333 return 0;
1334}
1335
1336/* Halt or clear halt of endpoint */
1337static int
1338udc_set_halt(struct usb_ep *usbep, int halt)
1339{
1340 struct udc_ep *ep;
1341 u32 tmp;
1342 unsigned long iflags;
1343 int retval = 0;
1344
1345 if (!usbep)
1346 return -EINVAL;
1347
1348 pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
1349
1350 ep = container_of(usbep, struct udc_ep, ep);
1351 if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1352 return -EINVAL;
1353 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1354 return -ESHUTDOWN;
1355
1356 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1357 /* halt or clear halt */
1358 if (halt) {
1359 if (ep->num == 0)
1360 ep->dev->stall_ep0in = 1;
1361 else {
1362 /*
1363 * set STALL
 1364 * rxfifo empty not taken into account
1365 */
1366 tmp = readl(&ep->regs->ctl);
1367 tmp |= AMD_BIT(UDC_EPCTL_S);
1368 writel(tmp, &ep->regs->ctl);
1369 ep->halted = 1;
1370
1371 /* setup poll timer */
1372 if (!timer_pending(&udc_pollstall_timer)) {
1373 udc_pollstall_timer.expires = jiffies +
1374 HZ * UDC_POLLSTALL_TIMER_USECONDS
1375 / (1000 * 1000);
1376 if (!stop_pollstall_timer) {
1377 DBG(ep->dev, "start polltimer\n");
1378 add_timer(&udc_pollstall_timer);
1379 }
1380 }
1381 }
1382 } else {
1383 /* ep is halted by set_halt() before */
1384 if (ep->halted) {
1385 tmp = readl(&ep->regs->ctl);
1386 /* clear stall bit */
1387 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
1388 /* clear NAK by writing CNAK */
1389 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1390 writel(tmp, &ep->regs->ctl);
1391 ep->halted = 0;
1392 UDC_QUEUE_CNAK(ep, ep->num);
1393 }
1394 }
1395 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1396 return retval;
1397}
1398
1399/* gadget interface */
1400static const struct usb_ep_ops udc_ep_ops = {
1401 .enable = udc_ep_enable,
1402 .disable = udc_ep_disable,
1403
1404 .alloc_request = udc_alloc_request,
1405 .free_request = udc_free_request,
1406
1407 .queue = udc_queue,
1408 .dequeue = udc_dequeue,
1409
1410 .set_halt = udc_set_halt,
1411 /* fifo ops not implemented */
1412};
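/*
 * Sketch (not part of this driver) of how a gadget driver reaches the ops
 * above through the usb_ep wrappers; "my_ep", "my_buf" and "my_complete"
 * are hypothetical names:
 *
 *   usb_ep_enable(my_ep, &my_ep_desc);              calls udc_ep_enable()
 *   req = usb_ep_alloc_request(my_ep, GFP_KERNEL);  calls udc_alloc_request()
 *   req->buf = my_buf; req->length = my_len; req->complete = my_complete;
 *   usb_ep_queue(my_ep, req, GFP_ATOMIC);           calls udc_queue()
 */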
1413
1414/*-------------------------------------------------------------------------*/
1415
1416/* Get frame counter (not implemented) */
1417static int udc_get_frame(struct usb_gadget *gadget)
1418{
1419 return -EOPNOTSUPP;
1420}
1421
1422/* Remote wakeup gadget interface */
1423static int udc_wakeup(struct usb_gadget *gadget)
1424{
1425 struct udc *dev;
1426
1427 if (!gadget)
1428 return -EINVAL;
1429 dev = container_of(gadget, struct udc, gadget);
1430 udc_remote_wakeup(dev);
1431
1432 return 0;
1433}
1434
1435/* gadget operations */
1436static const struct usb_gadget_ops udc_ops = {
1437 .wakeup = udc_wakeup,
1438 .get_frame = udc_get_frame,
1439};
1440
1441/* Setups endpoint parameters, adds endpoints to linked list */
1442static void make_ep_lists(struct udc *dev)
1443{
1444 /* make gadget ep lists */
1445 INIT_LIST_HEAD(&dev->gadget.ep_list);
1446 list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1447 &dev->gadget.ep_list);
1448 list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1449 &dev->gadget.ep_list);
1450 list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1451 &dev->gadget.ep_list);
1452
1453 /* fifo config */
1454 dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1455 if (dev->gadget.speed == USB_SPEED_FULL)
1456 dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1457 else if (dev->gadget.speed == USB_SPEED_HIGH)
1458 dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1459 dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1460}
1461
1462/* init registers at driver load time */
1463static int startup_registers(struct udc *dev)
1464{
1465 u32 tmp;
1466
1467 /* init controller by soft reset */
1468 udc_soft_reset(dev);
1469
1470 /* mask not needed interrupts */
1471 udc_mask_unused_interrupts(dev);
1472
1473 /* put into initial config */
1474 udc_basic_init(dev);
1475 /* link up all endpoints */
1476 udc_setup_endpoints(dev);
1477
1478 /* program speed */
1479 tmp = readl(&dev->regs->cfg);
1480 if (use_fullspeed) {
1481 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1482 } else {
1483 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1484 }
1485 writel(tmp, &dev->regs->cfg);
1486
1487 return 0;
1488}
1489
1490/* Inits UDC context */
1491static void udc_basic_init(struct udc *dev)
1492{
1493 u32 tmp;
1494
1495 DBG(dev, "udc_basic_init()\n");
1496
1497 dev->gadget.speed = USB_SPEED_UNKNOWN;
1498
1499 /* stop RDE timer */
1500 if (timer_pending(&udc_timer)) {
1501 set_rde = 0;
1502 mod_timer(&udc_timer, jiffies - 1);
1503 }
1504 /* stop poll stall timer */
1505 if (timer_pending(&udc_pollstall_timer)) {
1506 mod_timer(&udc_pollstall_timer, jiffies - 1);
1507 }
1508 /* disable DMA */
1509 tmp = readl(&dev->regs->ctl);
1510 tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1511 tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
1512 writel(tmp, &dev->regs->ctl);
1513
1514 /* enable dynamic CSR programming */
1515 tmp = readl(&dev->regs->cfg);
1516 tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
1517 /* set self powered */
1518 tmp |= AMD_BIT(UDC_DEVCFG_SP);
1519 /* set remote wakeupable */
1520 tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
1521 writel(tmp, &dev->regs->cfg);
1522
1523 make_ep_lists(dev);
1524
1525 dev->data_ep_enabled = 0;
1526 dev->data_ep_queued = 0;
1527}
1528
1529/* Sets initial endpoint parameters */
1530static void udc_setup_endpoints(struct udc *dev)
1531{
1532 struct udc_ep *ep;
1533 u32 tmp;
1534 u32 reg;
1535
1536 DBG(dev, "udc_setup_endpoints()\n");
1537
1538 /* read enum speed */
1539 tmp = readl(&dev->regs->sts);
1540 tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
1541 if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
1542 dev->gadget.speed = USB_SPEED_HIGH;
1543 } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
1544 dev->gadget.speed = USB_SPEED_FULL;
1545 }
1546
1547 /* set basic ep parameters */
1548 for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1549 ep = &dev->ep[tmp];
1550 ep->dev = dev;
1551 ep->ep.name = ep_string[tmp];
1552 ep->num = tmp;
1553 /* txfifo size is calculated at enable time */
1554 ep->txfifo = dev->txfifo;
1555
1556 /* fifo size */
1557 if (tmp < UDC_EPIN_NUM) {
1558 ep->fifo_depth = UDC_TXFIFO_SIZE;
1559 ep->in = 1;
1560 } else {
1561 ep->fifo_depth = UDC_RXFIFO_SIZE;
1562 ep->in = 0;
1563
1564 }
1565 ep->regs = &dev->ep_regs[tmp];
1566 /*
1567 * ep will be reset only if ep was not enabled before to avoid
1568 * disabling ep interrupts when ENUM interrupt occurs but ep is
1569 * not enabled by gadget driver
1570 */
1571 if (!ep->desc) {
1572 ep_init(dev->regs, ep);
1573 }
1574
1575 if (use_dma) {
1576 /*
1577 * ep->dma is not really used, just to indicate that
1578 * DMA is active: remove this
1579 * dma regs = dev control regs
1580 */
1581 ep->dma = &dev->regs->ctl;
1582
1583 /* nak OUT endpoints until enable - not for ep0 */
1584 if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
1585 && tmp > UDC_EPIN_NUM) {
1586 /* set NAK */
1587 reg = readl(&dev->ep[tmp].regs->ctl);
1588 reg |= AMD_BIT(UDC_EPCTL_SNAK);
1589 writel(reg, &dev->ep[tmp].regs->ctl);
1590 dev->ep[tmp].naking = 1;
1591
1592 }
1593 }
1594 }
1595 /* EP0 max packet */
1596 if (dev->gadget.speed == USB_SPEED_FULL) {
1597 dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
1598 dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
1599 UDC_FS_EP0OUT_MAX_PKT_SIZE;
1600 } else if (dev->gadget.speed == USB_SPEED_HIGH) {
1601 dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
1602 dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
1603 }
1604
1605 /*
1606 * with suspend bug workaround, ep0 params for gadget driver
1607 * are set at gadget driver bind() call
1608 */
1609 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
1610 dev->ep[UDC_EP0IN_IX].halted = 0;
1611 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1612
1613 /* init cfg/alt/int */
1614 dev->cur_config = 0;
1615 dev->cur_intf = 0;
1616 dev->cur_alt = 0;
1617}
1618
1619/* Bringup after Connect event, initial bringup to be ready for ep0 events */
1620static void usb_connect(struct udc *dev)
1621{
1622
1623 dev_info(&dev->pdev->dev, "USB Connect\n");
1624
1625 dev->connected = 1;
1626
1627 /* put into initial config */
1628 udc_basic_init(dev);
1629
1630 /* enable device setup interrupts */
1631 udc_enable_dev_setup_interrupts(dev);
1632}
1633
1634/*
1635 * Calls gadget with disconnect event and resets the UDC and makes
1636 * initial bringup to be ready for ep0 events
1637 */
1638static void usb_disconnect(struct udc *dev)
1639{
1640
1641 dev_info(&dev->pdev->dev, "USB Disconnect\n");
1642
1643 dev->connected = 0;
1644
1645 /* mask interrupts */
1646 udc_mask_unused_interrupts(dev);
1647
1648 /* REVISIT there doesn't seem to be a point to having this
1649 * talk to a tasklet ... do it directly, we already hold
1650 * the spinlock needed to process the disconnect.
1651 */
1652
1653 tasklet_schedule(&disconnect_tasklet);
1654}
1655
1656/* Tasklet for disconnect to be outside of interrupt context */
1657static void udc_tasklet_disconnect(unsigned long par)
1658{
1659 struct udc *dev = (struct udc *)(*((struct udc **) par));
1660 u32 tmp;
1661
1662 DBG(dev, "Tasklet disconnect\n");
1663 spin_lock_irq(&dev->lock);
1664
1665 if (dev->driver) {
1666 spin_unlock(&dev->lock);
1667 dev->driver->disconnect(&dev->gadget);
1668 spin_lock(&dev->lock);
1669
1670 /* empty queues */
1671 for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1672 empty_req_queue(&dev->ep[tmp]);
1673 }
1674
1675 }
1676
1677 /* disable ep0 */
1678 ep_init(dev->regs,
1679 &dev->ep[UDC_EP0IN_IX]);
1680
1681
1682 if (!soft_reset_occured) {
1683 /* init controller by soft reset */
1684 udc_soft_reset(dev);
1685 soft_reset_occured++;
1686 }
1687
1688 /* re-enable dev interrupts */
1689 udc_enable_dev_setup_interrupts(dev);
1690 /* back to full speed ? */
1691 if (use_fullspeed) {
1692 tmp = readl(&dev->regs->cfg);
1693 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1694 writel(tmp, &dev->regs->cfg);
1695 }
1696
1697 spin_unlock_irq(&dev->lock);
1698}
1699
1700/* Reset the UDC core */
1701static void udc_soft_reset(struct udc *dev)
1702{
1703 unsigned long flags;
1704
1705 DBG(dev, "Soft reset\n");
1706 /*
1707 * reset possible waiting interrupts, because int.
1708 * status is lost after soft reset,
1709 * ep int. status reset
1710 */
1711 writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1712 /* device int. status reset */
1713 writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1714
1715 spin_lock_irqsave(&udc_irq_spinlock, flags);
1716 writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
1717 readl(&dev->regs->cfg);
1718 spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1719
1720}
1721
1722/* RDE timer callback to set RDE bit */
1723static void udc_timer_function(unsigned long v)
1724{
1725 u32 tmp;
1726
1727 spin_lock_irq(&udc_irq_spinlock);
1728
1729 if (set_rde > 0) {
1730 /*
 1731 * conditionally open the fifo if the fifo was filled
 1732 * on the last timer call
1733 */
1734 if (set_rde > 1) {
1735 /* set RDE to receive setup data */
1736 tmp = readl(&udc->regs->ctl);
1737 tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1738 writel(tmp, &udc->regs->ctl);
1739 set_rde = -1;
1740 } else if (readl(&udc->regs->sts)
1741 & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1742 /*
1743 * if fifo empty setup polling, do not just
1744 * open the fifo
1745 */
1746 udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1747 if (!stop_timer) {
1748 add_timer(&udc_timer);
1749 }
1750 } else {
1751 /*
1752 * fifo contains data now, setup timer for opening
1753 * the fifo when timer expires to be able to receive
 1754 * setup packets; when data packets get queued by the
 1755 * gadget layer, the timer will be forced to expire with
1756 * set_rde=0 (RDE is set in udc_queue())
1757 */
1758 set_rde++;
1759 /* debug: lhadmot_timer_start = 221070 */
1760 udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1761 if (!stop_timer) {
1762 add_timer(&udc_timer);
1763 }
1764 }
1765
1766 } else
1767 set_rde = -1; /* RDE was set by udc_queue() */
1768 spin_unlock_irq(&udc_irq_spinlock);
1769 if (stop_timer)
1770 complete(&on_exit);
1771
1772}
1773
1774/* Handle halt state, used in stall poll timer */
1775static void udc_handle_halt_state(struct udc_ep *ep)
1776{
1777 u32 tmp;
1778 /* set stall as long not halted */
1779 if (ep->halted == 1) {
1780 tmp = readl(&ep->regs->ctl);
1781 /* STALL cleared ? */
1782 if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1783 /*
1784 * FIXME: MSC spec requires that stall remains
 1785 * even on receiving CLEAR_FEATURE HALT. So
1786 * we would set STALL again here to be compliant.
1787 * But with current mass storage drivers this does
1788 * not work (would produce endless host retries).
1789 * So we clear halt on CLEAR_FEATURE.
1790 *
1791 DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1792 tmp |= AMD_BIT(UDC_EPCTL_S);
1793 writel(tmp, &ep->regs->ctl);*/
1794
1795 /* clear NAK by writing CNAK */
1796 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1797 writel(tmp, &ep->regs->ctl);
1798 ep->halted = 0;
1799 UDC_QUEUE_CNAK(ep, ep->num);
1800 }
1801 }
1802}
1803
1804/* Stall timer callback to poll S bit and set it again after */
1805static void udc_pollstall_timer_function(unsigned long v)
1806{
1807 struct udc_ep *ep;
1808 int halted = 0;
1809
1810 spin_lock_irq(&udc_stall_spinlock);
1811 /*
1812 * only one IN and OUT endpoints are handled
1813 * IN poll stall
1814 */
1815 ep = &udc->ep[UDC_EPIN_IX];
1816 udc_handle_halt_state(ep);
1817 if (ep->halted)
1818 halted = 1;
1819 /* OUT poll stall */
1820 ep = &udc->ep[UDC_EPOUT_IX];
1821 udc_handle_halt_state(ep);
1822 if (ep->halted)
1823 halted = 1;
1824
1825 /* setup timer again when still halted */
1826 if (!stop_pollstall_timer && halted) {
1827 udc_pollstall_timer.expires = jiffies +
1828 HZ * UDC_POLLSTALL_TIMER_USECONDS
1829 / (1000 * 1000);
1830 add_timer(&udc_pollstall_timer);
1831 }
1832 spin_unlock_irq(&udc_stall_spinlock);
1833
1834 if (stop_pollstall_timer)
1835 complete(&on_pollstall_exit);
1836}
1837
1838/* Inits endpoint 0 so that SETUP packets are processed */
1839static void activate_control_endpoints(struct udc *dev)
1840{
1841 u32 tmp;
1842
1843 DBG(dev, "activate_control_endpoints\n");
1844
1845 /* flush fifo */
1846 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1847 tmp |= AMD_BIT(UDC_EPCTL_F);
1848 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1849
1850 /* set ep0 directions */
1851 dev->ep[UDC_EP0IN_IX].in = 1;
1852 dev->ep[UDC_EP0OUT_IX].in = 0;
1853
1854 /* set buffer size (tx fifo entries) of EP0_IN */
1855 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1856 if (dev->gadget.speed == USB_SPEED_FULL)
1857 tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1858 UDC_EPIN_BUFF_SIZE);
1859 else if (dev->gadget.speed == USB_SPEED_HIGH)
1860 tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1861 UDC_EPIN_BUFF_SIZE);
1862 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1863
1864 /* set max packet size of EP0_IN */
1865 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1866 if (dev->gadget.speed == USB_SPEED_FULL)
1867 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1868 UDC_EP_MAX_PKT_SIZE);
1869 else if (dev->gadget.speed == USB_SPEED_HIGH)
1870 tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1871 UDC_EP_MAX_PKT_SIZE);
1872 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1873
1874 /* set max packet size of EP0_OUT */
1875 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1876 if (dev->gadget.speed == USB_SPEED_FULL)
1877 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1878 UDC_EP_MAX_PKT_SIZE);
1879 else if (dev->gadget.speed == USB_SPEED_HIGH)
1880 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1881 UDC_EP_MAX_PKT_SIZE);
1882 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1883
1884 /* set max packet size of EP0 in UDC CSR */
1885 tmp = readl(&dev->csr->ne[0]);
1886 if (dev->gadget.speed == USB_SPEED_FULL)
1887 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1888 UDC_CSR_NE_MAX_PKT);
1889 else if (dev->gadget.speed == USB_SPEED_HIGH)
1890 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1891 UDC_CSR_NE_MAX_PKT);
1892 writel(tmp, &dev->csr->ne[0]);
1893
1894 if (use_dma) {
1895 dev->ep[UDC_EP0OUT_IX].td->status |=
1896 AMD_BIT(UDC_DMA_OUT_STS_L);
1897 /* write dma desc address */
1898 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1899 &dev->ep[UDC_EP0OUT_IX].regs->subptr);
1900 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1901 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
1902 /* stop RDE timer */
1903 if (timer_pending(&udc_timer)) {
1904 set_rde = 0;
1905 mod_timer(&udc_timer, jiffies - 1);
1906 }
1907 /* stop pollstall timer */
1908 if (timer_pending(&udc_pollstall_timer)) {
1909 mod_timer(&udc_pollstall_timer, jiffies - 1);
1910 }
1911 /* enable DMA */
1912 tmp = readl(&dev->regs->ctl);
1913 tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1914 | AMD_BIT(UDC_DEVCTL_RDE)
1915 | AMD_BIT(UDC_DEVCTL_TDE);
1916 if (use_dma_bufferfill_mode) {
1917 tmp |= AMD_BIT(UDC_DEVCTL_BF);
1918 } else if (use_dma_ppb_du) {
1919 tmp |= AMD_BIT(UDC_DEVCTL_DU);
1920 }
1921 writel(tmp, &dev->regs->ctl);
1922 }
1923
1924 /* clear NAK by writing CNAK for EP0IN */
1925 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1926 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1927 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1928 dev->ep[UDC_EP0IN_IX].naking = 0;
1929 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1930
1931 /* clear NAK by writing CNAK for EP0OUT */
1932 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1933 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1934 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1935 dev->ep[UDC_EP0OUT_IX].naking = 0;
1936 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1937}
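/*
 * Sketch (assumption, not taken from this patch): the register helpers used
 * above are defined elsewhere in amd5536udc.h.  Given the FIELD_MASK/FIELD_OFS
 * pairs declared in the header, they plausibly expand along these lines:
 *
 *	#define AMD_BIT(bit)		(1 << (bit))
 *	#define AMD_CLEAR_BIT(bit)	(~(1 << (bit)))
 *	// insert 'val' into the bit field named by 'field'
 *	#define AMD_ADDBITS(reg, val, field) \
 *		(((reg) & ~field##_MASK) | \
 *		 (((val) << field##_OFS) & field##_MASK))
 *	// extract the bit field named by 'field'
 *	#define AMD_GETBITS(reg, field) \
 *		(((reg) & field##_MASK) >> field##_OFS)
 *
 * e.g. AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE, UDC_EP_MAX_PKT_SIZE) above
 * writes 64 into bits 15..0 of bufout_maxpkt.
 */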
1938
1939/* Make endpoint 0 ready for control traffic */
1940static int setup_ep0(struct udc *dev)
1941{
1942 activate_control_endpoints(dev);
1943 /* enable ep0 interrupts */
1944 udc_enable_ep0_interrupts(dev);
1945 /* enable device setup interrupts */
1946 udc_enable_dev_setup_interrupts(dev);
1947
1948 return 0;
1949}
1950
1951/* Called by gadget driver to register itself */
1952int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1953{
1954 struct udc *dev = udc;
1955 int retval;
1956 u32 tmp;
1957
1958 if (!driver || !driver->bind || !driver->setup
1959 || driver->speed != USB_SPEED_HIGH)
1960 return -EINVAL;
1961 if (!dev)
1962 return -ENODEV;
1963 if (dev->driver)
1964 return -EBUSY;
1965
1966 driver->driver.bus = NULL;
1967 dev->driver = driver;
1968 dev->gadget.dev.driver = &driver->driver;
1969
1970 retval = driver->bind(&dev->gadget);
1971
1972 /* Some gadget drivers use both ep0 directions.
1973 * NOTE: to gadget driver, ep0 is just one endpoint...
1974 */
1975 dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1976 dev->ep[UDC_EP0IN_IX].ep.driver_data;
1977
1978 if (retval) {
1979 DBG(dev, "binding to %s returning %d\n",
1980 driver->driver.name, retval);
1981 dev->driver = NULL;
1982 dev->gadget.dev.driver = NULL;
1983 return retval;
1984 }
1985
1986 /* get ready for ep0 traffic */
1987 setup_ep0(dev);
1988
1989 /* clear SD */
1990 tmp = readl(&dev->regs->ctl);
1991 tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
1992 writel(tmp, &dev->regs->ctl);
1993
1994 usb_connect(dev);
1995
1996 return 0;
1997}
1998EXPORT_SYMBOL(usb_gadget_register_driver);
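/*
 * Usage sketch (hypothetical, not part of the patch): a gadget driver binds
 * to this controller through the usual gadget API of this kernel generation.
 * Note the checks above: bind(), setup() and speed == USB_SPEED_HIGH are
 * mandatory, otherwise -EINVAL is returned.
 *
 *	static struct usb_gadget_driver my_gadget_driver = {
 *		.speed		= USB_SPEED_HIGH,
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		.driver		= {
 *			.name	= "my_gadget",
 *		},
 *	};
 *
 *	// from the gadget driver's module init:
 *	err = usb_gadget_register_driver(&my_gadget_driver);
 */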
1999
2000/* shutdown requests and disconnect from gadget */
2001static void
2002shutdown(struct udc *dev, struct usb_gadget_driver *driver)
2003__releases(dev->lock)
2004__acquires(dev->lock)
2005{
2006 int tmp;
2007
2008 /* empty queues and init hardware */
2009 udc_basic_init(dev);
2010 for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
2011 empty_req_queue(&dev->ep[tmp]);
2012 }
2013
2014 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
2015 spin_unlock(&dev->lock);
2016 driver->disconnect(&dev->gadget);
2017 spin_lock(&dev->lock);
2018 }
2019 /* init */
2020 udc_setup_endpoints(dev);
2021}
2022
2023/* Called by gadget driver to unregister itself */
2024int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2025{
2026 struct udc *dev = udc;
2027 unsigned long flags;
2028 u32 tmp;
2029
2030 if (!dev)
2031 return -ENODEV;
2032 if (!driver || driver != dev->driver || !driver->unbind)
2033 return -EINVAL;
2034
2035 spin_lock_irqsave(&dev->lock, flags);
2036 udc_mask_unused_interrupts(dev);
2037 shutdown(dev, driver);
2038 spin_unlock_irqrestore(&dev->lock, flags);
2039
2040 driver->unbind(&dev->gadget);
2041 dev->driver = NULL;
2042
2043 /* set SD */
2044 tmp = readl(&dev->regs->ctl);
2045 tmp |= AMD_BIT(UDC_DEVCTL_SD);
2046 writel(tmp, &dev->regs->ctl);
2047
2048
2049 DBG(dev, "%s: unregistered\n", driver->driver.name);
2050
2051 return 0;
2052}
2053EXPORT_SYMBOL(usb_gadget_unregister_driver);
2054
2055
2056/* Clear pending NAK bits */
2057static void udc_process_cnak_queue(struct udc *dev)
2058{
2059 u32 tmp;
2060 u32 reg;
2061
2062 /* check epin's */
2063 DBG(dev, "CNAK pending queue processing\n");
2064 for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2065 if (cnak_pending & (1 << tmp)) {
2066 DBG(dev, "CNAK pending for ep%d\n", tmp);
2067 /* clear NAK by writing CNAK */
2068 reg = readl(&dev->ep[tmp].regs->ctl);
2069 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2070 writel(reg, &dev->ep[tmp].regs->ctl);
2071 dev->ep[tmp].naking = 0;
2072 UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2073 }
2074 }
2075 /* ... and ep0out */
2076 if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2077 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2078 /* clear NAK by writing CNAK */
2079 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2080 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2081 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2082 dev->ep[UDC_EP0OUT_IX].naking = 0;
2083 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2084 dev->ep[UDC_EP0OUT_IX].num);
2085 }
2086}
2087
2088/* Enabling RX DMA after setup packet */
2089static void udc_ep0_set_rde(struct udc *dev)
2090{
2091 if (use_dma) {
2092 /*
2093 * only enable RXDMA when no data endpoint enabled
2094 * or data is queued
2095 */
2096 if (!dev->data_ep_enabled || dev->data_ep_queued) {
2097 udc_set_rde(dev);
2098 } else {
2099 /*
2100 * setup timer for enabling RDE (to not enable
2101			 * RXFIFO DMA for data endpoints too early)
2102 */
2103 if (set_rde != 0 && !timer_pending(&udc_timer)) {
2104 udc_timer.expires =
2105 jiffies + HZ/UDC_RDE_TIMER_DIV;
2106 set_rde = 1;
2107 if (!stop_timer) {
2108 add_timer(&udc_timer);
2109 }
2110 }
2111 }
2112 }
2113}
2114
2115
2116/* Interrupt handler for data OUT traffic */
2117static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2118{
2119 irqreturn_t ret_val = IRQ_NONE;
2120 u32 tmp;
2121 struct udc_ep *ep;
2122 struct udc_request *req;
2123 unsigned int count;
2124 struct udc_data_dma *td = NULL;
2125 unsigned dma_done;
2126
2127 VDBG(dev, "ep%d irq\n", ep_ix);
2128 ep = &dev->ep[ep_ix];
2129
2130 tmp = readl(&ep->regs->sts);
2131 if (use_dma) {
2132 /* BNA event ? */
2133 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2134			DBG(dev, "BNA ep%dout occurred - DESPTR = %x \n",
2135 ep->num, readl(&ep->regs->desptr));
2136 /* clear BNA */
2137 writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2138 if (!ep->cancel_transfer)
2139 ep->bna_occurred = 1;
2140 else
2141 ep->cancel_transfer = 0;
2142 ret_val = IRQ_HANDLED;
2143 goto finished;
2144 }
2145 }
2146 /* HE event ? */
2147 if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2148		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
2149
2150 /* clear HE */
2151 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2152 ret_val = IRQ_HANDLED;
2153 goto finished;
2154 }
2155
2156 if (!list_empty(&ep->queue)) {
2157
2158 /* next request */
2159 req = list_entry(ep->queue.next,
2160 struct udc_request, queue);
2161 } else {
2162 req = NULL;
2163 udc_rxfifo_pending = 1;
2164 }
2165 VDBG(dev, "req = %p\n", req);
2166 /* fifo mode */
2167 if (!use_dma) {
2168
2169 /* read fifo */
2170 if (req && udc_rxfifo_read(ep, req)) {
2171 ret_val = IRQ_HANDLED;
2172
2173 /* finish */
2174 complete_req(ep, req, 0);
2175 /* next request */
2176 if (!list_empty(&ep->queue) && !ep->halted) {
2177 req = list_entry(ep->queue.next,
2178 struct udc_request, queue);
2179 } else
2180 req = NULL;
2181 }
2182
2183 /* DMA */
2184 } else if (!ep->cancel_transfer && req != NULL) {
2185 ret_val = IRQ_HANDLED;
2186
2187 /* check for DMA done */
2188 if (!use_dma_ppb) {
2189 dma_done = AMD_GETBITS(req->td_data->status,
2190 UDC_DMA_OUT_STS_BS);
2191 /* packet per buffer mode - rx bytes */
2192 } else {
2193 /*
2194 * if BNA occurred then recover desc. from
2195 * BNA dummy desc.
2196 */
2197 if (ep->bna_occurred) {
2198 VDBG(dev, "Recover desc. from BNA dummy\n");
2199 memcpy(req->td_data, ep->bna_dummy_req->td_data,
2200 sizeof(struct udc_data_dma));
2201 ep->bna_occurred = 0;
2202 udc_init_bna_dummy(ep->req);
2203 }
2204 td = udc_get_last_dma_desc(req);
2205 dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2206 }
2207 if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2208 /* buffer fill mode - rx bytes */
2209 if (!use_dma_ppb) {
2210 /* received number bytes */
2211 count = AMD_GETBITS(req->td_data->status,
2212 UDC_DMA_OUT_STS_RXBYTES);
2213 VDBG(dev, "rx bytes=%u\n", count);
2214 /* packet per buffer mode - rx bytes */
2215 } else {
2216 VDBG(dev, "req->td_data=%p\n", req->td_data);
2217 VDBG(dev, "last desc = %p\n", td);
2218 /* received number bytes */
2219 if (use_dma_ppb_du) {
2220 /* every desc. counts bytes */
2221 count = udc_get_ppbdu_rxbytes(req);
2222 } else {
2223 /* last desc. counts bytes */
2224 count = AMD_GETBITS(td->status,
2225 UDC_DMA_OUT_STS_RXBYTES);
2226 if (!count && req->req.length
2227 == UDC_DMA_MAXPACKET) {
2228 /*
2229 * on 64k packets the RXBYTES
2230 * field is zero
2231 */
2232 count = UDC_DMA_MAXPACKET;
2233 }
2234 }
2235 VDBG(dev, "last desc rx bytes=%u\n", count);
2236 }
2237
2238 tmp = req->req.length - req->req.actual;
2239 if (count > tmp) {
2240 if ((tmp % ep->ep.maxpacket) != 0) {
2241 DBG(dev, "%s: rx %db, space=%db\n",
2242 ep->ep.name, count, tmp);
2243 req->req.status = -EOVERFLOW;
2244 }
2245 count = tmp;
2246 }
2247 req->req.actual += count;
2248 req->dma_going = 0;
2249 /* complete request */
2250 complete_req(ep, req, 0);
2251
2252 /* next request */
2253 if (!list_empty(&ep->queue) && !ep->halted) {
2254 req = list_entry(ep->queue.next,
2255 struct udc_request,
2256 queue);
2257 /*
2258				 * DMA may already be started by udc_queue(),
2259				 * called from the gadget driver's completion
2260				 * routine. This happens when the queue
2261				 * holds only one request.
2262 */
2263 if (req->dma_going == 0) {
2264 /* next dma */
2265 if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2266 goto finished;
2267 /* write desc pointer */
2268 writel(req->td_phys,
2269 &ep->regs->desptr);
2270 req->dma_going = 1;
2271 /* enable DMA */
2272 udc_set_rde(dev);
2273 }
2274 } else {
2275 /*
2276 * implant BNA dummy descriptor to allow
2277 * RXFIFO opening by RDE
2278 */
2279 if (ep->bna_dummy_req) {
2280 /* write desc pointer */
2281 writel(ep->bna_dummy_req->td_phys,
2282 &ep->regs->desptr);
2283 ep->bna_occurred = 0;
2284 }
2285
2286 /*
2287 * schedule timer for setting RDE if queue
2288				 * remains empty, to allow ep0 packets to pass
2289				 * through
2290 */
2291 if (set_rde != 0
2292 && !timer_pending(&udc_timer)) {
2293 udc_timer.expires =
2294 jiffies
2295 + HZ*UDC_RDE_TIMER_SECONDS;
2296 set_rde = 1;
2297 if (!stop_timer) {
2298 add_timer(&udc_timer);
2299 }
2300 }
2301 if (ep->num != UDC_EP0OUT_IX)
2302 dev->data_ep_queued = 0;
2303 }
2304
2305 } else {
2306 /*
2307 * RX DMA must be reenabled for each desc in PPBDU mode
2308 * and must be enabled for PPBNDU mode in case of BNA
2309 */
2310 udc_set_rde(dev);
2311 }
2312
2313 } else if (ep->cancel_transfer) {
2314 ret_val = IRQ_HANDLED;
2315 ep->cancel_transfer = 0;
2316 }
2317
2318 /* check pending CNAKS */
2319 if (cnak_pending) {
2320		/* CNAK processing only when rxfifo is empty */
2321 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2322 udc_process_cnak_queue(dev);
2323 }
2324 }
2325
2326 /* clear OUT bits in ep status */
2327 writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2328finished:
2329 return ret_val;
2330}
2331
2332/* Interrupt handler for data IN traffic */
2333static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2334{
2335 irqreturn_t ret_val = IRQ_NONE;
2336 u32 tmp;
2337 u32 epsts;
2338 struct udc_ep *ep;
2339 struct udc_request *req;
2340 struct udc_data_dma *td;
2341 unsigned dma_done;
2342 unsigned len;
2343
2344 ep = &dev->ep[ep_ix];
2345
2346 epsts = readl(&ep->regs->sts);
2347 if (use_dma) {
2348 /* BNA ? */
2349 if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2350 dev_err(&dev->pdev->dev,
2351			"BNA ep%din occurred - DESPTR = %08lx \n",
2352 ep->num,
2353 (unsigned long) readl(&ep->regs->desptr));
2354
2355 /* clear BNA */
2356 writel(epsts, &ep->regs->sts);
2357 ret_val = IRQ_HANDLED;
2358 goto finished;
2359 }
2360 }
2361 /* HE event ? */
2362 if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2363 dev_err(&dev->pdev->dev,
2364			"HE ep%din occurred - DESPTR = %08lx \n",
2365 ep->num, (unsigned long) readl(&ep->regs->desptr));
2366
2367 /* clear HE */
2368 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2369 ret_val = IRQ_HANDLED;
2370 goto finished;
2371 }
2372
2373 /* DMA completion */
2374 if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2375 VDBG(dev, "TDC set- completion\n");
2376 ret_val = IRQ_HANDLED;
2377 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2378 req = list_entry(ep->queue.next,
2379 struct udc_request, queue);
2380 if (req) {
2381 /*
2382				 * length bytes transferred
2383 * check dma done of last desc. in PPBDU mode
2384 */
2385 if (use_dma_ppb_du) {
2386 td = udc_get_last_dma_desc(req);
2387 if (td) {
2388 dma_done =
2389 AMD_GETBITS(td->status,
2390 UDC_DMA_IN_STS_BS);
2391 /* don't care DMA done */
2392 req->req.actual =
2393 req->req.length;
2394 }
2395 } else {
2396 /* assume all bytes transferred */
2397 req->req.actual = req->req.length;
2398 }
2399
2400 if (req->req.actual == req->req.length) {
2401 /* complete req */
2402 complete_req(ep, req, 0);
2403 req->dma_going = 0;
2404 /* further request available ? */
2405 if (list_empty(&ep->queue)) {
2406 /* disable interrupt */
2407 tmp = readl(
2408 &dev->regs->ep_irqmsk);
2409 tmp |= AMD_BIT(ep->num);
2410 writel(tmp,
2411 &dev->regs->ep_irqmsk);
2412 }
2413
2414 }
2415 }
2416 }
2417 ep->cancel_transfer = 0;
2418
2419 }
2420 /*
2421	 * status reg has IN bit set and TDC not set; if TDC was handled,
2422	 * IN must not be handled as well (UDC defect?)
2423 */
2424 if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2425 && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2426 ret_val = IRQ_HANDLED;
2427 if (!list_empty(&ep->queue)) {
2428 /* next request */
2429 req = list_entry(ep->queue.next,
2430 struct udc_request, queue);
2431 /* FIFO mode */
2432 if (!use_dma) {
2433 /* write fifo */
2434 udc_txfifo_write(ep, &req->req);
2435 len = req->req.length - req->req.actual;
2436 if (len > ep->ep.maxpacket)
2437 len = ep->ep.maxpacket;
2438 req->req.actual += len;
2439 if (req->req.actual == req->req.length
2440 || (len != ep->ep.maxpacket)) {
2441 /* complete req */
2442 complete_req(ep, req, 0);
2443 }
2444 /* DMA */
2445 } else if (req && !req->dma_going) {
2446 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2447 req, req->td_data);
2448 if (req->td_data) {
2449
2450 req->dma_going = 1;
2451
2452 /*
2453 * unset L bit of first desc.
2454 * for chain
2455 */
2456 if (use_dma_ppb && req->req.length >
2457 ep->ep.maxpacket) {
2458 req->td_data->status &=
2459 AMD_CLEAR_BIT(
2460 UDC_DMA_IN_STS_L);
2461 }
2462
2463 /* write desc pointer */
2464 writel(req->td_phys, &ep->regs->desptr);
2465
2466 /* set HOST READY */
2467 req->td_data->status =
2468 AMD_ADDBITS(
2469 req->td_data->status,
2470 UDC_DMA_IN_STS_BS_HOST_READY,
2471 UDC_DMA_IN_STS_BS);
2472
2473 /* set poll demand bit */
2474 tmp = readl(&ep->regs->ctl);
2475 tmp |= AMD_BIT(UDC_EPCTL_P);
2476 writel(tmp, &ep->regs->ctl);
2477 }
2478 }
2479
2480 }
2481 }
2482 /* clear status bits */
2483 writel(epsts, &ep->regs->sts);
2484
2485finished:
2486 return ret_val;
2487
2488}
2489
2490/* Interrupt handler for Control OUT traffic */
2491static irqreturn_t udc_control_out_isr(struct udc *dev)
2492__releases(dev->lock)
2493__acquires(dev->lock)
2494{
2495 irqreturn_t ret_val = IRQ_NONE;
2496 u32 tmp;
2497 int setup_supported;
2498 u32 count;
2499 int set = 0;
2500 struct udc_ep *ep;
2501 struct udc_ep *ep_tmp;
2502
2503 ep = &dev->ep[UDC_EP0OUT_IX];
2504
2505 /* clear irq */
2506 writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2507
2508 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2509 /* check BNA and clear if set */
2510 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2511 VDBG(dev, "ep0: BNA set\n");
2512 writel(AMD_BIT(UDC_EPSTS_BNA),
2513 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2514 ep->bna_occurred = 1;
2515 ret_val = IRQ_HANDLED;
2516 goto finished;
2517 }
2518
2519 /* type of data: SETUP or DATA 0 bytes */
2520 tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2521 VDBG(dev, "data_typ = %x\n", tmp);
2522
2523 /* setup data */
2524 if (tmp == UDC_EPSTS_OUT_SETUP) {
2525 ret_val = IRQ_HANDLED;
2526
2527 ep->dev->stall_ep0in = 0;
2528 dev->waiting_zlp_ack_ep0in = 0;
2529
2530 /* set NAK for EP0_IN */
2531 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2532 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2533 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2534 dev->ep[UDC_EP0IN_IX].naking = 1;
2535 /* get setup data */
2536 if (use_dma) {
2537
2538 /* clear OUT bits in ep status */
2539 writel(UDC_EPSTS_OUT_CLEAR,
2540 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2541
2542 setup_data.data[0] =
2543 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2544 setup_data.data[1] =
2545 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2546 /* set HOST READY */
2547 dev->ep[UDC_EP0OUT_IX].td_stp->status =
2548 UDC_DMA_STP_STS_BS_HOST_READY;
2549 } else {
2550 /* read fifo */
2551 udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2552 }
2553
2554 /* determine direction of control data */
2555 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2556 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2557 /* enable RDE */
2558 udc_ep0_set_rde(dev);
2559 set = 0;
2560 } else {
2561 dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2562 /*
2563 * implant BNA dummy descriptor to allow RXFIFO opening
2564 * by RDE
2565 */
2566 if (ep->bna_dummy_req) {
2567 /* write desc pointer */
2568 writel(ep->bna_dummy_req->td_phys,
2569 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2570 ep->bna_occurred = 0;
2571 }
2572
2573 set = 1;
2574 dev->ep[UDC_EP0OUT_IX].naking = 1;
2575 /*
2576 * setup timer for enabling RDE (to not enable
2577			 * RXFIFO DMA for data too early)
2578 */
2579 set_rde = 1;
2580 if (!timer_pending(&udc_timer)) {
2581 udc_timer.expires = jiffies +
2582 HZ/UDC_RDE_TIMER_DIV;
2583 if (!stop_timer) {
2584 add_timer(&udc_timer);
2585 }
2586 }
2587 }
2588
2589 /*
2590 * mass storage reset must be processed here because
2591 * next packet may be a CLEAR_FEATURE HALT which would not
2592 * clear the stall bit when no STALL handshake was received
2593 * before (autostall can cause this)
2594 */
2595 if (setup_data.data[0] == UDC_MSCRES_DWORD0
2596 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2597 DBG(dev, "MSC Reset\n");
2598 /*
2599 * clear stall bits
2600			 * only one IN and one OUT endpoint are handled
2601 */
2602 ep_tmp = &udc->ep[UDC_EPIN_IX];
2603 udc_set_halt(&ep_tmp->ep, 0);
2604 ep_tmp = &udc->ep[UDC_EPOUT_IX];
2605 udc_set_halt(&ep_tmp->ep, 0);
2606 }
2607
2608 /* call gadget with setup data received */
2609 spin_unlock(&dev->lock);
2610 setup_supported = dev->driver->setup(&dev->gadget,
2611 &setup_data.request);
2612 spin_lock(&dev->lock);
2613
2614 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2615 /* ep0 in returns data (not zlp) on IN phase */
2616 if (setup_supported >= 0 && setup_supported <
2617 UDC_EP0IN_MAXPACKET) {
2618 /* clear NAK by writing CNAK in EP0_IN */
2619 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2620 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2621 dev->ep[UDC_EP0IN_IX].naking = 0;
2622 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2623
2624 /* if unsupported request then stall */
2625 } else if (setup_supported < 0) {
2626 tmp |= AMD_BIT(UDC_EPCTL_S);
2627 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2628 } else
2629 dev->waiting_zlp_ack_ep0in = 1;
2630
2631
2632 /* clear NAK by writing CNAK in EP0_OUT */
2633 if (!set) {
2634 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2635 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2636 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2637 dev->ep[UDC_EP0OUT_IX].naking = 0;
2638 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2639 }
2640
2641 if (!use_dma) {
2642 /* clear OUT bits in ep status */
2643 writel(UDC_EPSTS_OUT_CLEAR,
2644 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2645 }
2646
2647 /* data packet 0 bytes */
2648 } else if (tmp == UDC_EPSTS_OUT_DATA) {
2649 /* clear OUT bits in ep status */
2650 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2651
2652 /* get setup data: only 0 packet */
2653 if (use_dma) {
2654 /* no req if 0 packet, just reactivate */
2655 if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2656 VDBG(dev, "ZLP\n");
2657
2658 /* set HOST READY */
2659 dev->ep[UDC_EP0OUT_IX].td->status =
2660 AMD_ADDBITS(
2661 dev->ep[UDC_EP0OUT_IX].td->status,
2662 UDC_DMA_OUT_STS_BS_HOST_READY,
2663 UDC_DMA_OUT_STS_BS);
2664 /* enable RDE */
2665 udc_ep0_set_rde(dev);
2666 ret_val = IRQ_HANDLED;
2667
2668 } else {
2669 /* control write */
2670 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2671 /* re-program desc. pointer for possible ZLPs */
2672 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2673 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2674 /* enable RDE */
2675 udc_ep0_set_rde(dev);
2676 }
2677 } else {
2678
2679 /* received number bytes */
2680 count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2681 count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2682 /* out data for fifo mode not working */
2683 count = 0;
2684
2685 /* 0 packet or real data ? */
2686 if (count != 0) {
2687 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2688 } else {
2689 /* dummy read confirm */
2690 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2691 ret_val = IRQ_HANDLED;
2692 }
2693 }
2694 }
2695
2696 /* check pending CNAKS */
2697 if (cnak_pending) {
2698		/* CNAK processing only when rxfifo is empty */
2699 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2700 udc_process_cnak_queue(dev);
2701 }
2702 }
2703
2704finished:
2705 return ret_val;
2706}
2707
2708/* Interrupt handler for Control IN traffic */
2709static irqreturn_t udc_control_in_isr(struct udc *dev)
2710{
2711 irqreturn_t ret_val = IRQ_NONE;
2712 u32 tmp;
2713 struct udc_ep *ep;
2714 struct udc_request *req;
2715 unsigned len;
2716
2717 ep = &dev->ep[UDC_EP0IN_IX];
2718
2719 /* clear irq */
2720 writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2721
2722 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2723 /* DMA completion */
2724 if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2725 VDBG(dev, "isr: TDC clear \n");
2726 ret_val = IRQ_HANDLED;
2727
2728 /* clear TDC bit */
2729 writel(AMD_BIT(UDC_EPSTS_TDC),
2730 &dev->ep[UDC_EP0IN_IX].regs->sts);
2731
2732 /* status reg has IN bit set ? */
2733 } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2734 ret_val = IRQ_HANDLED;
2735
2736 if (ep->dma) {
2737 /* clear IN bit */
2738 writel(AMD_BIT(UDC_EPSTS_IN),
2739 &dev->ep[UDC_EP0IN_IX].regs->sts);
2740 }
2741 if (dev->stall_ep0in) {
2742 DBG(dev, "stall ep0in\n");
2743 /* halt ep0in */
2744 tmp = readl(&ep->regs->ctl);
2745 tmp |= AMD_BIT(UDC_EPCTL_S);
2746 writel(tmp, &ep->regs->ctl);
2747 } else {
2748 if (!list_empty(&ep->queue)) {
2749 /* next request */
2750 req = list_entry(ep->queue.next,
2751 struct udc_request, queue);
2752
2753 if (ep->dma) {
2754 /* write desc pointer */
2755 writel(req->td_phys, &ep->regs->desptr);
2756 /* set HOST READY */
2757 req->td_data->status =
2758 AMD_ADDBITS(
2759 req->td_data->status,
2760 UDC_DMA_STP_STS_BS_HOST_READY,
2761 UDC_DMA_STP_STS_BS);
2762
2763 /* set poll demand bit */
2764 tmp =
2765 readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2766 tmp |= AMD_BIT(UDC_EPCTL_P);
2767 writel(tmp,
2768 &dev->ep[UDC_EP0IN_IX].regs->ctl);
2769
2770 /* all bytes will be transferred */
2771 req->req.actual = req->req.length;
2772
2773 /* complete req */
2774 complete_req(ep, req, 0);
2775
2776 } else {
2777 /* write fifo */
2778 udc_txfifo_write(ep, &req->req);
2779
2780				/* length bytes transferred */
2781 len = req->req.length - req->req.actual;
2782 if (len > ep->ep.maxpacket)
2783 len = ep->ep.maxpacket;
2784
2785 req->req.actual += len;
2786 if (req->req.actual == req->req.length
2787 || (len != ep->ep.maxpacket)) {
2788 /* complete req */
2789 complete_req(ep, req, 0);
2790 }
2791 }
2792
2793 }
2794 }
2795 ep->halted = 0;
2796 dev->stall_ep0in = 0;
2797 if (!ep->dma) {
2798 /* clear IN bit */
2799 writel(AMD_BIT(UDC_EPSTS_IN),
2800 &dev->ep[UDC_EP0IN_IX].regs->sts);
2801 }
2802 }
2803
2804 return ret_val;
2805}
2806
2807
2808/* Interrupt handler for global device events */
2809static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2810__releases(dev->lock)
2811__acquires(dev->lock)
2812{
2813 irqreturn_t ret_val = IRQ_NONE;
2814 u32 tmp;
2815 u32 cfg;
2816 struct udc_ep *ep;
2817 u16 i;
2818 u8 udc_csr_epix;
2819
2820 /* SET_CONFIG irq ? */
2821 if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2822 ret_val = IRQ_HANDLED;
2823
2824 /* read config value */
2825 tmp = readl(&dev->regs->sts);
2826 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2827 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2828 dev->cur_config = cfg;
2829 dev->set_cfg_not_acked = 1;
2830
2831 /* make usb request for gadget driver */
2832 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2833 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2834 setup_data.request.wValue = dev->cur_config;
2835
2836		/* program the NE registers */
2837 for (i = 0; i < UDC_EP_NUM; i++) {
2838 ep = &dev->ep[i];
2839 if (ep->in) {
2840
2841 /* ep ix in UDC CSR register space */
2842 udc_csr_epix = ep->num;
2843
2844
2845 /* OUT ep */
2846 } else {
2847 /* ep ix in UDC CSR register space */
2848 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2849 }
2850
2851 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2852 /* ep cfg */
2853 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2854 UDC_CSR_NE_CFG);
2855 /* write reg */
2856 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2857
2858 /* clear stall bits */
2859 ep->halted = 0;
2860 tmp = readl(&ep->regs->ctl);
2861 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2862 writel(tmp, &ep->regs->ctl);
2863 }
2864 /* call gadget zero with setup data received */
2865 spin_unlock(&dev->lock);
2866 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2867 spin_lock(&dev->lock);
2868
2869 } /* SET_INTERFACE ? */
2870 if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2871 ret_val = IRQ_HANDLED;
2872
2873 dev->set_cfg_not_acked = 1;
2874 /* read interface and alt setting values */
2875 tmp = readl(&dev->regs->sts);
2876 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2877 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2878
2879 /* make usb request for gadget driver */
2880 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2881 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2882 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2883 setup_data.request.wValue = dev->cur_alt;
2884 setup_data.request.wIndex = dev->cur_intf;
2885
2886 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2887 dev->cur_alt, dev->cur_intf);
2888
2889		/* program the NE registers */
2890 for (i = 0; i < UDC_EP_NUM; i++) {
2891 ep = &dev->ep[i];
2892 if (ep->in) {
2893
2894 /* ep ix in UDC CSR register space */
2895 udc_csr_epix = ep->num;
2896
2897
2898 /* OUT ep */
2899 } else {
2900 /* ep ix in UDC CSR register space */
2901 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2902 }
2903
2904 /* UDC CSR reg */
2905 /* set ep values */
2906 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2907 /* ep interface */
2908 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2909 UDC_CSR_NE_INTF);
2910 /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2911 /* ep alt */
2912 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2913 UDC_CSR_NE_ALT);
2914 /* write reg */
2915 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2916
2917 /* clear stall bits */
2918 ep->halted = 0;
2919 tmp = readl(&ep->regs->ctl);
2920 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2921 writel(tmp, &ep->regs->ctl);
2922 }
2923
2924 /* call gadget zero with setup data received */
2925 spin_unlock(&dev->lock);
2926 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2927 spin_lock(&dev->lock);
2928
2929 } /* USB reset */
2930 if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2931 DBG(dev, "USB Reset interrupt\n");
2932 ret_val = IRQ_HANDLED;
2933
2934 /* allow soft reset when suspend occurs */
2935 soft_reset_occured = 0;
2936
2937 dev->waiting_zlp_ack_ep0in = 0;
2938 dev->set_cfg_not_acked = 0;
2939
2940 /* mask not needed interrupts */
2941 udc_mask_unused_interrupts(dev);
2942
2943 /* call gadget to resume and reset configs etc. */
2944 spin_unlock(&dev->lock);
2945 if (dev->sys_suspended && dev->driver->resume) {
2946 dev->driver->resume(&dev->gadget);
2947 dev->sys_suspended = 0;
2948 }
2949 dev->driver->disconnect(&dev->gadget);
2950 spin_lock(&dev->lock);
2951
2952 /* disable ep0 to empty req queue */
2953 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2954 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2955
2956 /* soft reset when rxfifo not empty */
2957 tmp = readl(&dev->regs->sts);
2958 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2959 && !soft_reset_after_usbreset_occured) {
2960 udc_soft_reset(dev);
2961 soft_reset_after_usbreset_occured++;
2962 }
2963
2964 /*
2965 * DMA reset to kill potential old DMA hw hang,
2966 * POLL bit is already reset by ep_init() through
2967 * disconnect()
2968 */
2969 DBG(dev, "DMA machine reset\n");
2970 tmp = readl(&dev->regs->cfg);
2971 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2972 writel(tmp, &dev->regs->cfg);
2973
2974 /* put into initial config */
2975 udc_basic_init(dev);
2976
2977 /* enable device setup interrupts */
2978 udc_enable_dev_setup_interrupts(dev);
2979
2980 /* enable suspend interrupt */
2981 tmp = readl(&dev->regs->irqmsk);
2982 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2983 writel(tmp, &dev->regs->irqmsk);
2984
2985 } /* USB suspend */
2986 if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2987 DBG(dev, "USB Suspend interrupt\n");
2988 ret_val = IRQ_HANDLED;
2989 if (dev->driver->suspend) {
2990 spin_unlock(&dev->lock);
2991 dev->sys_suspended = 1;
2992 dev->driver->suspend(&dev->gadget);
2993 spin_lock(&dev->lock);
2994 }
2995 } /* new speed ? */
2996 if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2997 DBG(dev, "ENUM interrupt\n");
2998 ret_val = IRQ_HANDLED;
2999 soft_reset_after_usbreset_occured = 0;
3000
3001 /* disable ep0 to empty req queue */
3002 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
3003 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3004
3005 /* link up all endpoints */
3006 udc_setup_endpoints(dev);
3007 if (dev->gadget.speed == USB_SPEED_HIGH) {
3008 dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
3009 "high");
3010 } else if (dev->gadget.speed == USB_SPEED_FULL) {
3011 dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
3012 "full");
3013 }
3014
3015 /* init ep 0 */
3016 activate_control_endpoints(dev);
3017
3018 /* enable ep0 interrupts */
3019 udc_enable_ep0_interrupts(dev);
3020 }
3021 /* session valid change interrupt */
3022 if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
3023 DBG(dev, "USB SVC interrupt\n");
3024 ret_val = IRQ_HANDLED;
3025
3026 /* check that session is not valid to detect disconnect */
3027 tmp = readl(&dev->regs->sts);
3028 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
3029 /* disable suspend interrupt */
3030 tmp = readl(&dev->regs->irqmsk);
3031 tmp |= AMD_BIT(UDC_DEVINT_US);
3032 writel(tmp, &dev->regs->irqmsk);
3033 DBG(dev, "USB Disconnect (session valid low)\n");
3034 /* cleanup on disconnect */
3035 usb_disconnect(udc);
3036 }
3037
3038 }
3039
3040 return ret_val;
3041}
3042
3043/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3044static irqreturn_t udc_irq(int irq, void *pdev)
3045{
3046 struct udc *dev = pdev;
3047 u32 reg;
3048 u16 i;
3049 u32 ep_irq;
3050 irqreturn_t ret_val = IRQ_NONE;
3051
3052 spin_lock(&dev->lock);
3053
3054 /* check for ep irq */
3055 reg = readl(&dev->regs->ep_irqsts);
3056 if (reg) {
3057 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3058 ret_val |= udc_control_out_isr(dev);
3059 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3060 ret_val |= udc_control_in_isr(dev);
3061
3062 /*
3063 * data endpoint
3064 * iterate ep's
3065 */
3066 for (i = 1; i < UDC_EP_NUM; i++) {
3067 ep_irq = 1 << i;
3068 if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3069 continue;
3070
3071 /* clear irq status */
3072 writel(ep_irq, &dev->regs->ep_irqsts);
3073
3074 /* irq for out ep ? */
3075 if (i > UDC_EPIN_NUM)
3076 ret_val |= udc_data_out_isr(dev, i);
3077 else
3078 ret_val |= udc_data_in_isr(dev, i);
3079 }
3080
3081 }
3082
3083
3084 /* check for dev irq */
3085 reg = readl(&dev->regs->irqsts);
3086 if (reg) {
3087 /* clear irq */
3088 writel(reg, &dev->regs->irqsts);
3089 ret_val |= udc_dev_isr(dev, reg);
3090 }
3091
3092
3093 spin_unlock(&dev->lock);
3094 return ret_val;
3095}
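/*
 * Note (descriptive): in the endpoint interrupt status register the IN
 * endpoints occupy bits 0..15 and the OUT endpoints bits 16..31 (cf.
 * UDC_EPINT_IN_EP0 = 0 and UDC_EPINT_OUT_EP0 = 16 in amd5536udc.h), which is
 * why loop indices above UDC_EPIN_NUM are dispatched to udc_data_out_isr()
 * and the lower ones to udc_data_in_isr().
 */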
3096
3097/* Tears down device */
3098static void gadget_release(struct device *pdev)
3099{
3100 struct amd5536udc *dev = dev_get_drvdata(pdev);
3101 kfree(dev);
3102}
3103
3104/* Cleanup on device remove */
3105static void udc_remove(struct udc *dev)
3106{
3107 /* remove timer */
3108 stop_timer++;
3109 if (timer_pending(&udc_timer))
3110 wait_for_completion(&on_exit);
3111 if (udc_timer.data)
3112 del_timer_sync(&udc_timer);
3113 /* remove pollstall timer */
3114 stop_pollstall_timer++;
3115 if (timer_pending(&udc_pollstall_timer))
3116 wait_for_completion(&on_pollstall_exit);
3117 if (udc_pollstall_timer.data)
3118 del_timer_sync(&udc_pollstall_timer);
3119 udc = NULL;
3120}
3121
3122/* Reset all pci context */
3123static void udc_pci_remove(struct pci_dev *pdev)
3124{
3125 struct udc *dev;
3126
3127 dev = pci_get_drvdata(pdev);
3128
3129 /* gadget driver must not be registered */
3130 BUG_ON(dev->driver != NULL);
3131
3132 /* dma pool cleanup */
3133 if (dev->data_requests)
3134 pci_pool_destroy(dev->data_requests);
3135
3136 if (dev->stp_requests) {
3137 /* cleanup DMA desc's for ep0in */
3138 pci_pool_free(dev->stp_requests,
3139 dev->ep[UDC_EP0OUT_IX].td_stp,
3140 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3141 pci_pool_free(dev->stp_requests,
3142 dev->ep[UDC_EP0OUT_IX].td,
3143 dev->ep[UDC_EP0OUT_IX].td_phys);
3144
3145 pci_pool_destroy(dev->stp_requests);
3146 }
3147
3148 /* reset controller */
3149 writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3150 if (dev->irq_registered)
3151 free_irq(pdev->irq, dev);
3152 if (dev->regs)
3153 iounmap(dev->regs);
3154 if (dev->mem_region)
3155 release_mem_region(pci_resource_start(pdev, 0),
3156 pci_resource_len(pdev, 0));
3157 if (dev->active)
3158 pci_disable_device(pdev);
3159
3160 device_unregister(&dev->gadget.dev);
3161 pci_set_drvdata(pdev, NULL);
3162
3163 udc_remove(dev);
3164}
3165
3166/* create dma pools on init */
3167static int init_dma_pools(struct udc *dev)
3168{
3169 struct udc_stp_dma *td_stp;
3170 struct udc_data_dma *td_data;
3171 int retval;
3172
3173 /* consistent DMA mode setting ? */
3174 if (use_dma_ppb) {
3175 use_dma_bufferfill_mode = 0;
3176 } else {
3177 use_dma_ppb_du = 0;
3178 use_dma_bufferfill_mode = 1;
3179 }
3180
3181 /* DMA setup */
3182 dev->data_requests = dma_pool_create("data_requests", NULL,
3183 sizeof(struct udc_data_dma), 0, 0);
3184 if (!dev->data_requests) {
3185 DBG(dev, "can't get request data pool\n");
3186 retval = -ENOMEM;
3187 goto finished;
3188 }
3189
3190 /* EP0 in dma regs = dev control regs */
3191 dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3192
3193 /* dma desc for setup data */
3194 dev->stp_requests = dma_pool_create("setup requests", NULL,
3195 sizeof(struct udc_stp_dma), 0, 0);
3196 if (!dev->stp_requests) {
3197 DBG(dev, "can't get stp request pool\n");
3198 retval = -ENOMEM;
3199 goto finished;
3200 }
3201 /* setup */
3202 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3203 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3204 if (td_stp == NULL) {
3205 retval = -ENOMEM;
3206 goto finished;
3207 }
3208 dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3209
3210 /* data: 0 packets !? */
3211 td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3212 &dev->ep[UDC_EP0OUT_IX].td_phys);
3213 if (td_data == NULL) {
3214 retval = -ENOMEM;
3215 goto finished;
3216 }
3217 dev->ep[UDC_EP0OUT_IX].td = td_data;
3218 return 0;
3219
3220finished:
3221 return retval;
3222}
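/*
 * Note (descriptive): dma_pool_alloc() returns the CPU virtual address of a
 * descriptor and stores the matching DMA/bus address through its third
 * argument; this is why the endpoint keeps both td_stp/td (virtual) and
 * td_stp_dma/td_phys (DMA).  The virtual pointers are used by the driver,
 * while the DMA addresses are written into the subptr/desptr registers.
 */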
3223
3224/* Called by pci bus driver to init pci context */
3225static int udc_pci_probe(
3226 struct pci_dev *pdev,
3227 const struct pci_device_id *id
3228)
3229{
3230 struct udc *dev;
3231 unsigned long resource;
3232 unsigned long len;
3233 int retval = 0;
3234
3235 /* one udc only */
3236 if (udc) {
3237 dev_dbg(&pdev->dev, "already probed\n");
3238 return -EBUSY;
3239 }
3240
3241 /* init */
3242 dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3243 if (!dev) {
3244 retval = -ENOMEM;
3245 goto finished;
3246 }
3247 memset(dev, 0, sizeof(struct udc));
3248
3249 /* pci setup */
3250 if (pci_enable_device(pdev) < 0) {
3251 retval = -ENODEV;
3252 goto finished;
3253 }
3254 dev->active = 1;
3255
3256 /* PCI resource allocation */
3257 resource = pci_resource_start(pdev, 0);
3258 len = pci_resource_len(pdev, 0);
3259
3260 if (!request_mem_region(resource, len, name)) {
3261 dev_dbg(&pdev->dev, "pci device used already\n");
3262 retval = -EBUSY;
3263 goto finished;
3264 }
3265 dev->mem_region = 1;
3266
3267 dev->virt_addr = ioremap_nocache(resource, len);
3268 if (dev->virt_addr == NULL) {
3269 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3270 retval = -EFAULT;
3271 goto finished;
3272 }
3273
3274 if (!pdev->irq) {
3275 dev_err(&dev->pdev->dev, "irq not set\n");
3276 retval = -ENODEV;
3277 goto finished;
3278 }
3279
3280 if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3281 dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3282 retval = -EBUSY;
3283 goto finished;
3284 }
3285 dev->irq_registered = 1;
3286
3287 pci_set_drvdata(pdev, dev);
3288
3289 /* chip revision */
3290 dev->chiprev = 0;
3291
3292 pci_set_master(pdev);
3293 pci_set_mwi(pdev);
3294
3295 /* chip rev for Hs AMD5536 */
3296 pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) &dev->chiprev);
3297 /* init dma pools */
3298 if (use_dma) {
3299 retval = init_dma_pools(dev);
3300 if (retval != 0)
3301 goto finished;
3302 }
3303
3304 dev->phys_addr = resource;
3305 dev->irq = pdev->irq;
3306 dev->pdev = pdev;
3307 dev->gadget.dev.parent = &pdev->dev;
3308 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3309
3310 /* general probing */
3311 if (udc_probe(dev) == 0)
3312 return 0;
3313
3314finished:
3315 if (dev)
3316 udc_pci_remove(pdev);
3317 return retval;
3318}
3319
3320/* general probe */
3321static int udc_probe(struct udc *dev)
3322{
3323 char tmp[128];
3324 u32 reg;
3325 int retval;
3326
3327 /* mark timer as not initialized */
3328 udc_timer.data = 0;
3329 udc_pollstall_timer.data = 0;
3330
3331 /* device struct setup */
3332 spin_lock_init(&dev->lock);
3333 dev->gadget.ops = &udc_ops;
3334
3335 strcpy(dev->gadget.dev.bus_id, "gadget");
3336 dev->gadget.dev.release = gadget_release;
3337	dev->gadget.name = name;
3339 dev->gadget.is_dualspeed = 1;
3340
3341 /* udc csr registers base */
3342 dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3343 /* dev registers base */
3344 dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3345 /* ep registers base */
3346 dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3347 /* fifo's base */
3348 dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3349 dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3350
3351 /* init registers, interrupts, ... */
3352 startup_registers(dev);
3353
3354 dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3355
3356 snprintf(tmp, sizeof tmp, "%d", dev->irq);
3357 dev_info(&dev->pdev->dev,
3358 "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3359 tmp, dev->phys_addr, dev->chiprev,
3360 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3361 strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3362 if (dev->chiprev == UDC_HSA0_REV) {
3363 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3364 retval = -ENODEV;
3365 goto finished;
3366 }
3367 dev_info(&dev->pdev->dev,
3368 "driver version: %s(for Geode5536 B1)\n", tmp);
3369 udc = dev;
3370
3371 retval = device_register(&dev->gadget.dev);
3372 if (retval)
3373 goto finished;
3374
3375 /* timer init */
3376 init_timer(&udc_timer);
3377 udc_timer.function = udc_timer_function;
3378 udc_timer.data = 1;
3379 /* timer pollstall init */
3380 init_timer(&udc_pollstall_timer);
3381 udc_pollstall_timer.function = udc_pollstall_timer_function;
3382 udc_pollstall_timer.data = 1;
3383
3384 /* set SD */
3385 reg = readl(&dev->regs->ctl);
3386 reg |= AMD_BIT(UDC_DEVCTL_SD);
3387 writel(reg, &dev->regs->ctl);
3388
3389 /* print dev register info */
3390 print_regs(dev);
3391
3392 return 0;
3393
3394finished:
3395 return retval;
3396}
3397
3398/* Initiates a remote wakeup */
3399static int udc_remote_wakeup(struct udc *dev)
3400{
3401 unsigned long flags;
3402 u32 tmp;
3403
3404 DBG(dev, "UDC initiates remote wakeup\n");
3405
3406 spin_lock_irqsave(&dev->lock, flags);
3407
3408 tmp = readl(&dev->regs->ctl);
3409 tmp |= AMD_BIT(UDC_DEVCTL_RES);
3410 writel(tmp, &dev->regs->ctl);
3411 tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
3412 writel(tmp, &dev->regs->ctl);
3413
3414 spin_unlock_irqrestore(&dev->lock, flags);
3415 return 0;
3416}
3417
3418/* PCI device parameters */
3419static const struct pci_device_id pci_id[] = {
3420 {
3421 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
3422 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3423 .class_mask = 0xffffffff,
3424 },
3425 {},
3426};
3427MODULE_DEVICE_TABLE(pci, pci_id);
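/*
 * Note (descriptive): the class match (PCI_CLASS_SERIAL_USB << 8) | 0xfe
 * with a full class_mask selects programming interface 0xfe, i.e. the
 * "USB device" (peripheral) function, so the driver does not bind to the
 * host-controller functions of the same chip.
 */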
3428
3429/* PCI functions */
3430static struct pci_driver udc_pci_driver = {
3431 .name = (char *) name,
3432 .id_table = pci_id,
3433 .probe = udc_pci_probe,
3434 .remove = udc_pci_remove,
3435};
3436
3437/* Inits driver */
3438static int __init init(void)
3439{
3440 return pci_register_driver(&udc_pci_driver);
3441}
3442module_init(init);
3443
3444/* Cleans driver */
3445static void __exit cleanup(void)
3446{
3447 pci_unregister_driver(&udc_pci_driver);
3448}
3449module_exit(cleanup);
3450
3451MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3452MODULE_AUTHOR("Thomas Dahlmann");
3453MODULE_LICENSE("GPL");
3454
diff --git a/drivers/usb/gadget/amd5536udc.h b/drivers/usb/gadget/amd5536udc.h
new file mode 100644
index 000000000000..4bbabbbfc93f
--- /dev/null
+++ b/drivers/usb/gadget/amd5536udc.h
@@ -0,0 +1,626 @@
1/*
2 * amd5536udc.h -- header for AMD 5536 UDC high/full speed USB device controller
3 *
4 * Copyright (C) 2007 AMD (http://www.amd.com)
5 * Author: Thomas Dahlmann
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef AMD5536UDC_H
23#define AMD5536UDC_H
24
25/* various constants */
26#define UDC_RDE_TIMER_SECONDS 1
27#define UDC_RDE_TIMER_DIV 10
28#define UDC_POLLSTALL_TIMER_USECONDS 500
29
30/* Hs AMD5536 chip rev. */
31#define UDC_HSA0_REV 1
32#define UDC_HSB1_REV 2
33
34/*
35 * SETUP usb commands
36 * needed, because some SETUP's are handled in hw, but must be passed to
37 * gadget driver above
38 * SET_CONFIG
39 */
40#define UDC_SETCONFIG_DWORD0 0x00000900
41#define UDC_SETCONFIG_DWORD0_VALUE_MASK 0xffff0000
42#define UDC_SETCONFIG_DWORD0_VALUE_OFS 16
43
44#define UDC_SETCONFIG_DWORD1 0x00000000
45
46/* SET_INTERFACE */
47#define UDC_SETINTF_DWORD0 0x00000b00
48#define UDC_SETINTF_DWORD0_ALT_MASK 0xffff0000
49#define UDC_SETINTF_DWORD0_ALT_OFS 16
50
51#define UDC_SETINTF_DWORD1 0x00000000
52#define UDC_SETINTF_DWORD1_INTF_MASK 0x0000ffff
53#define UDC_SETINTF_DWORD1_INTF_OFS 0
54
55/* Mass storage reset */
56#define UDC_MSCRES_DWORD0 0x0000ff21
57#define UDC_MSCRES_DWORD1 0x00000000
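/*
 * Note (descriptive): the DWORD pairs above are the first and second 32-bit
 * words of a SETUP packet in little-endian byte order
 * (bmRequestType | bRequest << 8 | wValue << 16, and wIndex | wLength << 16).
 * 0x00000900 thus matches bRequest 0x09 = SET_CONFIGURATION, 0x00000b00
 * matches bRequest 0x0b = SET_INTERFACE, and 0x0000ff21 matches
 * bmRequestType 0x21 / bRequest 0xff, the Bulk-Only Mass Storage Reset.
 */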
58
59/* Global CSR's -------------------------------------------------------------*/
60#define UDC_CSR_ADDR 0x500
61
62/* EP NE bits */
63/* EP number */
64#define UDC_CSR_NE_NUM_MASK 0x0000000f
65#define UDC_CSR_NE_NUM_OFS 0
66/* EP direction */
67#define UDC_CSR_NE_DIR_MASK 0x00000010
68#define UDC_CSR_NE_DIR_OFS 4
69/* EP type */
70#define UDC_CSR_NE_TYPE_MASK 0x00000060
71#define UDC_CSR_NE_TYPE_OFS 5
72/* EP config number */
73#define UDC_CSR_NE_CFG_MASK 0x00000780
74#define UDC_CSR_NE_CFG_OFS 7
75/* EP interface number */
76#define UDC_CSR_NE_INTF_MASK 0x00007800
77#define UDC_CSR_NE_INTF_OFS 11
78/* EP alt setting */
79#define UDC_CSR_NE_ALT_MASK 0x00078000
80#define UDC_CSR_NE_ALT_OFS 15
81
82/* max pkt */
83#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
84#define UDC_CSR_NE_MAX_PKT_OFS 19
85
86/* Device Config Register ---------------------------------------------------*/
87#define UDC_DEVCFG_ADDR 0x400
88
89#define UDC_DEVCFG_SOFTRESET 31
90#define UDC_DEVCFG_HNPSFEN 30
91#define UDC_DEVCFG_DMARST 29
92#define UDC_DEVCFG_SET_DESC 18
93#define UDC_DEVCFG_CSR_PRG 17
94#define UDC_DEVCFG_STATUS 7
95#define UDC_DEVCFG_DIR 6
96#define UDC_DEVCFG_PI 5
97#define UDC_DEVCFG_SS 4
98#define UDC_DEVCFG_SP 3
99#define UDC_DEVCFG_RWKP 2
100
101#define UDC_DEVCFG_SPD_MASK 0x3
102#define UDC_DEVCFG_SPD_OFS 0
103#define UDC_DEVCFG_SPD_HS 0x0
104#define UDC_DEVCFG_SPD_FS 0x1
105#define UDC_DEVCFG_SPD_LS 0x2
106/*#define UDC_DEVCFG_SPD_FS 0x3*/
107
108
109/* Device Control Register --------------------------------------------------*/
110#define UDC_DEVCTL_ADDR 0x404
111
112#define UDC_DEVCTL_THLEN_MASK 0xff000000
113#define UDC_DEVCTL_THLEN_OFS 24
114
115#define UDC_DEVCTL_BRLEN_MASK 0x00ff0000
116#define UDC_DEVCTL_BRLEN_OFS 16
117
118#define UDC_DEVCTL_CSR_DONE 13
119#define UDC_DEVCTL_DEVNAK 12
120#define UDC_DEVCTL_SD 10
121#define UDC_DEVCTL_MODE 9
122#define UDC_DEVCTL_BREN 8
123#define UDC_DEVCTL_THE 7
124#define UDC_DEVCTL_BF 6
125#define UDC_DEVCTL_BE 5
126#define UDC_DEVCTL_DU 4
127#define UDC_DEVCTL_TDE 3
128#define UDC_DEVCTL_RDE 2
129#define UDC_DEVCTL_RES 0
130
131
132/* Device Status Register ---------------------------------------------------*/
133#define UDC_DEVSTS_ADDR 0x408
134
135#define UDC_DEVSTS_TS_MASK 0xfffc0000
136#define UDC_DEVSTS_TS_OFS 18
137
138#define UDC_DEVSTS_SESSVLD 17
139#define UDC_DEVSTS_PHY_ERROR 16
140#define UDC_DEVSTS_RXFIFO_EMPTY 15
141
142#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
143#define UDC_DEVSTS_ENUM_SPEED_OFS 13
144#define UDC_DEVSTS_ENUM_SPEED_FULL 1
145#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
146
147#define UDC_DEVSTS_SUSP 12
148
149#define UDC_DEVSTS_ALT_MASK 0x00000f00
150#define UDC_DEVSTS_ALT_OFS 8
151
152#define UDC_DEVSTS_INTF_MASK 0x000000f0
153#define UDC_DEVSTS_INTF_OFS 4
154
155#define UDC_DEVSTS_CFG_MASK 0x0000000f
156#define UDC_DEVSTS_CFG_OFS 0
157
158
159/* Device Interrupt Register ------------------------------------------------*/
160#define UDC_DEVINT_ADDR 0x40c
161
162#define UDC_DEVINT_SVC 7
163#define UDC_DEVINT_ENUM 6
164#define UDC_DEVINT_SOF 5
165#define UDC_DEVINT_US 4
166#define UDC_DEVINT_UR 3
167#define UDC_DEVINT_ES 2
168#define UDC_DEVINT_SI 1
169#define UDC_DEVINT_SC 0
170
171/* Device Interrupt Mask Register -------------------------------------------*/
172#define UDC_DEVINT_MSK_ADDR 0x410
173
174#define UDC_DEVINT_MSK 0x7f
175
176/* Endpoint Interrupt Register ----------------------------------------------*/
177#define UDC_EPINT_ADDR 0x414
178
179#define UDC_EPINT_OUT_MASK 0xffff0000
180#define UDC_EPINT_OUT_OFS 16
181#define UDC_EPINT_IN_MASK 0x0000ffff
182#define UDC_EPINT_IN_OFS 0
183
184#define UDC_EPINT_IN_EP0 0
185#define UDC_EPINT_IN_EP1 1
186#define UDC_EPINT_IN_EP2 2
187#define UDC_EPINT_IN_EP3 3
188#define UDC_EPINT_OUT_EP0 16
189#define UDC_EPINT_OUT_EP1 17
190#define UDC_EPINT_OUT_EP2 18
191#define UDC_EPINT_OUT_EP3 19
192
193#define UDC_EPINT_EP0_ENABLE_MSK 0x001e001e
194
195/* Endpoint Interrupt Mask Register -----------------------------------------*/
196#define UDC_EPINT_MSK_ADDR 0x418
197
198#define UDC_EPINT_OUT_MSK_MASK 0xffff0000
199#define UDC_EPINT_OUT_MSK_OFS 16
200#define UDC_EPINT_IN_MSK_MASK 0x0000ffff
201#define UDC_EPINT_IN_MSK_OFS 0
202
203#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
204/* mask non-EP0 endpoints */
205#define UDC_EPDATAINT_MSK_DISABLE 0xfffefffe
206/* mask all dev interrupts */
207#define UDC_DEV_MSK_DISABLE 0x7f
208
209/* Endpoint-specific CSR's --------------------------------------------------*/
210#define UDC_EPREGS_ADDR 0x0
211#define UDC_EPIN_REGS_ADDR 0x0
212#define UDC_EPOUT_REGS_ADDR 0x200
213
214#define UDC_EPCTL_ADDR 0x0
215
216#define UDC_EPCTL_RRDY 9
217#define UDC_EPCTL_CNAK 8
218#define UDC_EPCTL_SNAK 7
219#define UDC_EPCTL_NAK 6
220
221#define UDC_EPCTL_ET_MASK 0x00000030
222#define UDC_EPCTL_ET_OFS 4
223#define UDC_EPCTL_ET_CONTROL 0
224#define UDC_EPCTL_ET_ISO 1
225#define UDC_EPCTL_ET_BULK 2
226#define UDC_EPCTL_ET_INTERRUPT 3
227
228#define UDC_EPCTL_P 3
229#define UDC_EPCTL_SN 2
230#define UDC_EPCTL_F 1
231#define UDC_EPCTL_S 0
232
233/* Endpoint Status Registers ------------------------------------------------*/
234#define UDC_EPSTS_ADDR 0x4
235
236#define UDC_EPSTS_RX_PKT_SIZE_MASK 0x007ff800
237#define UDC_EPSTS_RX_PKT_SIZE_OFS 11
238
239#define UDC_EPSTS_TDC 10
240#define UDC_EPSTS_HE 9
241#define UDC_EPSTS_BNA 7
242#define UDC_EPSTS_IN 6
243
244#define UDC_EPSTS_OUT_MASK 0x00000030
245#define UDC_EPSTS_OUT_OFS 4
246#define UDC_EPSTS_OUT_DATA 1
247#define UDC_EPSTS_OUT_DATA_CLEAR 0x10
248#define UDC_EPSTS_OUT_SETUP 2
249#define UDC_EPSTS_OUT_SETUP_CLEAR 0x20
250#define UDC_EPSTS_OUT_CLEAR 0x30
251
252/* Endpoint Buffer Size IN/ Receive Packet Frame Number OUT Registers ------*/
253#define UDC_EPIN_BUFF_SIZE_ADDR 0x8
254#define UDC_EPOUT_FRAME_NUMBER_ADDR 0x8
255
256#define UDC_EPIN_BUFF_SIZE_MASK 0x0000ffff
257#define UDC_EPIN_BUFF_SIZE_OFS 0
258/* EP0in txfifo = 128 bytes*/
259#define UDC_EPIN0_BUFF_SIZE 32
260/* EP0in fullspeed txfifo = 128 bytes*/
261#define UDC_FS_EPIN0_BUFF_SIZE 32
262
263/* fifo size mult = fifo size / max packet */
264#define UDC_EPIN_BUFF_SIZE_MULT 2
265
266/* EPin data fifo size = 1024 bytes DOUBLE BUFFERING */
267#define UDC_EPIN_BUFF_SIZE 256
268/* EPin small INT data fifo size = 128 bytes */
269#define UDC_EPIN_SMALLINT_BUFF_SIZE 32
270
271/* EPin fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */
272#define UDC_FS_EPIN_BUFF_SIZE 32
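/*
 * Note (assumption, consistent with the comments above): the IN buffer sizes
 * are programmed in 32-bit words, so a value of 32 corresponds to the
 * "128 bytes" noted for EP0 and 256 to the 1024-byte double-buffered data
 * fifo; UDC_EPIN_BUFF_SIZE_MULT = 2 then matches 1024 / 512, i.e. fifo size
 * divided by the high-speed bulk max packet size.
 */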
273
274#define UDC_EPOUT_FRAME_NUMBER_MASK 0x0000ffff
275#define UDC_EPOUT_FRAME_NUMBER_OFS 0
276
277/* Endpoint Buffer Size OUT/Max Packet Size Registers -----------------------*/
278#define UDC_EPOUT_BUFF_SIZE_ADDR 0x0c
279#define UDC_EP_MAX_PKT_SIZE_ADDR 0x0c
280
281#define UDC_EPOUT_BUFF_SIZE_MASK 0xffff0000
282#define UDC_EPOUT_BUFF_SIZE_OFS 16
283#define UDC_EP_MAX_PKT_SIZE_MASK 0x0000ffff
284#define UDC_EP_MAX_PKT_SIZE_OFS 0
285/* EP0in max packet size = 64 bytes */
286#define UDC_EP0IN_MAX_PKT_SIZE 64
287/* EP0out max packet size = 64 bytes */
288#define UDC_EP0OUT_MAX_PKT_SIZE 64
289/* EP0in fullspeed max packet size = 64 bytes */
290#define UDC_FS_EP0IN_MAX_PKT_SIZE 64
291/* EP0out fullspeed max packet size = 64 bytes */
292#define UDC_FS_EP0OUT_MAX_PKT_SIZE 64
293
294/*
295 * Endpoint dma descriptors ------------------------------------------------
296 *
297 * Setup data, Status dword
298 */
299#define UDC_DMA_STP_STS_CFG_MASK 0x0fff0000
300#define UDC_DMA_STP_STS_CFG_OFS 16
301#define UDC_DMA_STP_STS_CFG_ALT_MASK 0x000f0000
302#define UDC_DMA_STP_STS_CFG_ALT_OFS 16
303#define UDC_DMA_STP_STS_CFG_INTF_MASK 0x00f00000
304#define UDC_DMA_STP_STS_CFG_INTF_OFS 20
305#define UDC_DMA_STP_STS_CFG_NUM_MASK 0x0f000000
306#define UDC_DMA_STP_STS_CFG_NUM_OFS 24
307#define UDC_DMA_STP_STS_RX_MASK 0x30000000
308#define UDC_DMA_STP_STS_RX_OFS 28
309#define UDC_DMA_STP_STS_BS_MASK 0xc0000000
310#define UDC_DMA_STP_STS_BS_OFS 30
311#define UDC_DMA_STP_STS_BS_HOST_READY 0
312#define UDC_DMA_STP_STS_BS_DMA_BUSY 1
313#define UDC_DMA_STP_STS_BS_DMA_DONE 2
314#define UDC_DMA_STP_STS_BS_HOST_BUSY 3
315/* IN data, Status dword */
316#define UDC_DMA_IN_STS_TXBYTES_MASK 0x0000ffff
317#define UDC_DMA_IN_STS_TXBYTES_OFS 0
318#define UDC_DMA_IN_STS_FRAMENUM_MASK 0x07ff0000
319#define UDC_DMA_IN_STS_FRAMENUM_OFS 0
320#define UDC_DMA_IN_STS_L 27
321#define UDC_DMA_IN_STS_TX_MASK 0x30000000
322#define UDC_DMA_IN_STS_TX_OFS 28
323#define UDC_DMA_IN_STS_BS_MASK 0xc0000000
324#define UDC_DMA_IN_STS_BS_OFS 30
325#define UDC_DMA_IN_STS_BS_HOST_READY 0
326#define UDC_DMA_IN_STS_BS_DMA_BUSY 1
327#define UDC_DMA_IN_STS_BS_DMA_DONE 2
328#define UDC_DMA_IN_STS_BS_HOST_BUSY 3
329/* OUT data, Status dword */
330#define UDC_DMA_OUT_STS_RXBYTES_MASK 0x0000ffff
331#define UDC_DMA_OUT_STS_RXBYTES_OFS 0
332#define UDC_DMA_OUT_STS_FRAMENUM_MASK 0x07ff0000
333#define UDC_DMA_OUT_STS_FRAMENUM_OFS 0
334#define UDC_DMA_OUT_STS_L 27
335#define UDC_DMA_OUT_STS_RX_MASK 0x30000000
336#define UDC_DMA_OUT_STS_RX_OFS 28
337#define UDC_DMA_OUT_STS_BS_MASK 0xc0000000
338#define UDC_DMA_OUT_STS_BS_OFS 30
339#define UDC_DMA_OUT_STS_BS_HOST_READY 0
340#define UDC_DMA_OUT_STS_BS_DMA_BUSY 1
341#define UDC_DMA_OUT_STS_BS_DMA_DONE 2
342#define UDC_DMA_OUT_STS_BS_HOST_BUSY 3
343/* max ep0in packet */
344#define UDC_EP0IN_MAXPACKET 1000
345/* max dma packet */
346#define UDC_DMA_MAXPACKET 65536
347
348/* un-usable DMA address */
349#define DMA_DONT_USE (~(dma_addr_t) 0 )
350
351/* other Endpoint register addresses and values-----------------------------*/
352#define UDC_EP_SUBPTR_ADDR 0x10
353#define UDC_EP_DESPTR_ADDR 0x14
354#define UDC_EP_WRITE_CONFIRM_ADDR 0x1c
355
356/* EP number as laid out in AHB space */
357#define UDC_EP_NUM 32
358#define UDC_EPIN_NUM 16
359#define UDC_EPIN_NUM_USED 5
360#define UDC_EPOUT_NUM 16
361/* EP number of EP's really used = EP0 + 8 data EP's */
362#define UDC_USED_EP_NUM 9
363/* UDC CSR regs are aligned but AHB regs not - offset for OUT EP's */
364#define UDC_CSR_EP_OUT_IX_OFS 12
365
366#define UDC_EP0OUT_IX 16
367#define UDC_EP0IN_IX 0
368
369/* Rx fifo address and size = 1k -------------------------------------------*/
370#define UDC_RXFIFO_ADDR 0x800
371#define UDC_RXFIFO_SIZE 0x400
372
373/* Tx fifo address and size = 1.5k -----------------------------------------*/
374#define UDC_TXFIFO_ADDR 0xc00
375#define UDC_TXFIFO_SIZE 0x600
376
377/* default data endpoints --------------------------------------------------*/
378#define UDC_EPIN_STATUS_IX 1
379#define UDC_EPIN_IX 2
380#define UDC_EPOUT_IX 18
381
382/* general constants -------------------------------------------------------*/
383#define UDC_DWORD_BYTES 4
384#define UDC_BITS_PER_BYTE_SHIFT 3
385#define UDC_BYTE_MASK 0xff
386#define UDC_BITS_PER_BYTE 8
387
388/*---------------------------------------------------------------------------*/
389/* UDC CSR's */
390struct udc_csrs {
391
392 /* sca - setup command address */
393 u32 sca;
394
395 /* ep ne's */
396 u32 ne[UDC_USED_EP_NUM];
397} __attribute__ ((packed));
398
399/* AHB subsystem CSR registers */
400struct udc_regs {
401
402 /* device configuration */
403 u32 cfg;
404
405 /* device control */
406 u32 ctl;
407
408 /* device status */
409 u32 sts;
410
411 /* device interrupt */
412 u32 irqsts;
413
414 /* device interrupt mask */
415 u32 irqmsk;
416
417 /* endpoint interrupt */
418 u32 ep_irqsts;
419
420 /* endpoint interrupt mask */
421 u32 ep_irqmsk;
422} __attribute__ ((packed));
423
424/* endpoint specific registers */
425struct udc_ep_regs {
426
427 /* endpoint control */
428 u32 ctl;
429
430 /* endpoint status */
431 u32 sts;
432
433	/* endpoint buffer size (in) / receive packet frame number (out) */
434 u32 bufin_framenum;
435
436	/* endpoint buffer size (out) / max packet size */
437 u32 bufout_maxpkt;
438
439 /* endpoint setup buffer pointer */
440 u32 subptr;
441
442 /* endpoint data descriptor pointer */
443 u32 desptr;
444
445	/* reserved */
446 u32 reserved;
447
448 /* write/read confirmation */
449 u32 confirm;
450
451} __attribute__ ((packed));
452
453/* control data DMA desc */
454struct udc_stp_dma {
455 /* status quadlet */
456 u32 status;
457 /* reserved */
458 u32 _reserved;
459 /* first setup word */
460 u32 data12;
461 /* second setup word */
462 u32 data34;
463} __attribute__ ((aligned (16)));
464
465/* normal data DMA desc */
466struct udc_data_dma {
467 /* status quadlet */
468 u32 status;
469 /* reserved */
470 u32 _reserved;
471 /* buffer pointer */
472 u32 bufptr;
473 /* next descriptor pointer */
474 u32 next;
475} __attribute__ ((aligned (16)));
476
477/* request packet */
478struct udc_request {
479	/* embedded gadget request */
480 struct usb_request req;
481
482 /* flags */
483 unsigned dma_going : 1,
484 dma_mapping : 1,
485 dma_done : 1;
486 /* phys. address */
487 dma_addr_t td_phys;
488 /* first dma desc. of chain */
489 struct udc_data_dma *td_data;
490 /* last dma desc. of chain */
491 struct udc_data_dma *td_data_last;
492 struct list_head queue;
493
494 /* chain length */
495 unsigned chain_len;
496
497};
498
499/* UDC specific endpoint parameters */
500struct udc_ep {
501 struct usb_ep ep;
502 struct udc_ep_regs __iomem *regs;
503 u32 __iomem *txfifo;
504 u32 __iomem *dma;
505 dma_addr_t td_phys;
506 dma_addr_t td_stp_dma;
507 struct udc_stp_dma *td_stp;
508 struct udc_data_dma *td;
509 /* temp request */
510 struct udc_request *req;
511 unsigned req_used;
512 unsigned req_completed;
513	/* dummy DMA desc used for BNA (buffer not available) handling */
514 struct udc_request *bna_dummy_req;
515 unsigned bna_occurred;
516
517 /* NAK state */
518 unsigned naking;
519
520 struct udc *dev;
521
522 /* queue for requests */
523 struct list_head queue;
524 const struct usb_endpoint_descriptor *desc;
525 unsigned halted;
526 unsigned cancel_transfer;
527 unsigned num : 5,
528 fifo_depth : 14,
529 in : 1;
530};
531
532/* device struct */
533struct udc {
534 struct usb_gadget gadget;
535 spinlock_t lock; /* protects all state */
536 /* all endpoints */
537 struct udc_ep ep[UDC_EP_NUM];
538 struct usb_gadget_driver *driver;
539 /* operational flags */
540 unsigned active : 1,
541 stall_ep0in : 1,
542 waiting_zlp_ack_ep0in : 1,
543 set_cfg_not_acked : 1,
544 irq_registered : 1,
545 data_ep_enabled : 1,
546 data_ep_queued : 1,
547 mem_region : 1,
548 sys_suspended : 1,
549 connected;
550
551 u16 chiprev;
552
553 /* registers */
554 struct pci_dev *pdev;
555 struct udc_csrs __iomem *csr;
556 struct udc_regs __iomem *regs;
557 struct udc_ep_regs __iomem *ep_regs;
558 u32 __iomem *rxfifo;
559 u32 __iomem *txfifo;
560
561 /* DMA desc pools */
562 struct pci_pool *data_requests;
563 struct pci_pool *stp_requests;
564
565 /* device data */
566 unsigned long phys_addr;
567 void __iomem *virt_addr;
568 unsigned irq;
569
570 /* states */
571 u16 cur_config;
572 u16 cur_intf;
573 u16 cur_alt;
574};
575
576/* setup request data */
577union udc_setup_data {
578 u32 data[2];
579 struct usb_ctrlrequest request;
580};
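
As an aside, a minimal sketch (not part of the patch; the helper name is hypothetical) of how the two setup words delivered in a udc_stp_dma descriptor can be viewed through this union. The wValue/wIndex/wLength fields of usb_ctrlrequest stay in little-endian wire order:

	static void udc_read_setup(struct udc_ep *ep, struct usb_ctrlrequest *ctrl)
	{
		union udc_setup_data sd;

		sd.data[0] = ep->td_stp->data12;	/* bRequestType, bRequest, wValue */
		sd.data[1] = ep->td_stp->data34;	/* wIndex, wLength */
		*ctrl = sd.request;			/* still little-endian on the wire */
	}
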
581
582/*
583 *---------------------------------------------------------------------------
584 * SET and GET bitfields in u32 values
585 * via constants for mask/offset:
586 * <bit_field_stub_name> is the text between
587 * UDC_ and _MASK|_OFS of the appropriate
588 * constant
589 *
590 * set bitfield value in u32 u32Val
591 */
592#define AMD_ADDBITS(u32Val, bitfield_val, bitfield_stub_name) \
593 (((u32Val) & (((u32) ~((u32) bitfield_stub_name##_MASK)))) \
594 | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \
595 & ((u32) bitfield_stub_name##_MASK)))
596
597/*
598 * set bitfield value in zero-initialized u32 u32Val
599 * => bitfield bits in u32Val are all zero
600 */
601#define AMD_INIT_SETBITS(u32Val, bitfield_val, bitfield_stub_name) \
602 ((u32Val) \
603 | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \
604 & ((u32) bitfield_stub_name##_MASK)))
605
606/* get bitfield value from u32 u32Val */
607#define AMD_GETBITS(u32Val, bitfield_stub_name) \
608 ((u32Val & ((u32) bitfield_stub_name##_MASK)) \
609 >> ((u32) bitfield_stub_name##_OFS))
610
611/* SET and GET bits in u32 values ------------------------------------------*/
612#define AMD_BIT(bit_stub_name) (1 << bit_stub_name)
613#define AMD_UNMASK_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
614#define AMD_CLEAR_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
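
For illustration, a hedged sketch of how these mask/offset macros combine with the DMA status constants defined above (the helper names are hypothetical, not part of the driver):

	/* build an IN descriptor status word: byte count + buffer-state field */
	static inline u32 udc_build_in_status(u32 len)
	{
		u32 sts = 0;

		sts = AMD_ADDBITS(sts, len, UDC_DMA_IN_STS_TXBYTES);
		sts = AMD_ADDBITS(sts, UDC_DMA_IN_STS_BS_HOST_READY,
				UDC_DMA_IN_STS_BS);	/* hand descriptor to the DMA engine */
		return sts;
	}

	/* pull the configuration number out of a setup status dword */
	static inline u32 udc_setup_cfg_num(u32 setup_status)
	{
		return AMD_GETBITS(setup_status, UDC_DMA_STP_STS_CFG_NUM);
	}
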
615
616/* debug macros ------------------------------------------------------------*/
617
618#define DBG(udc , args...) dev_dbg(&(udc)->pdev->dev, args)
619
620#ifdef UDC_VERBOSE
621#define VDBG DBG
622#else
623#define VDBG(udc , args...) do {} while (0)
624#endif
625
626#endif /* #ifdef AMD5536UDC_H */
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index dbaf867436df..a3376739a81b 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -305,6 +305,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
305#define DEV_CONFIG_CDC 305#define DEV_CONFIG_CDC
306#endif 306#endif
307 307
308#ifdef CONFIG_USB_GADGET_AMD5536UDC
309#define DEV_CONFIG_CDC
310#endif
311
308 312
309/*-------------------------------------------------------------------------*/ 313/*-------------------------------------------------------------------------*/
310 314
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 53e9139ba388..f7f159c1002b 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -17,6 +17,12 @@
17#define gadget_is_net2280(g) 0 17#define gadget_is_net2280(g) 0
18#endif 18#endif
19 19
20#ifdef CONFIG_USB_GADGET_AMD5536UDC
21#define gadget_is_amd5536udc(g) !strcmp("amd5536udc", (g)->name)
22#else
23#define gadget_is_amd5536udc(g) 0
24#endif
25
20#ifdef CONFIG_USB_GADGET_DUMMY_HCD 26#ifdef CONFIG_USB_GADGET_DUMMY_HCD
21#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name) 27#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name)
22#else 28#else
@@ -202,7 +208,9 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
202 return 0x18; 208 return 0x18;
203 else if (gadget_is_fsl_usb2(gadget)) 209 else if (gadget_is_fsl_usb2(gadget))
204 return 0x19; 210 return 0x19;
205 else if (gadget_is_m66592(gadget)) 211 else if (gadget_is_amd5536udc(gadget))
206 return 0x20; 212 return 0x20;
213 else if (gadget_is_m66592(gadget))
214 return 0x21;
207 return -ENOENT; 215 return -ENOENT;
208} 216}
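
For context, gadget drivers typically fold this controller number into bcdDevice so host-side quirks can tell controllers apart. A hedged sketch of that use (the device_desc parameter and the 0x0200 base are illustrative, not from this patch):

	static void set_bcd_device(struct usb_gadget *gadget,
			struct usb_device_descriptor *device_desc)
	{
		int gcnum = usb_gadget_controller_number(gadget);

		if (gcnum >= 0)		/* e.g. 0x0200 + 0x20 = 0x0220 on amd5536udc */
			device_desc->bcdDevice = cpu_to_le16(0x0200 + gcnum);
		else
			printk(KERN_WARNING "controller '%s' not recognized\n",
					gadget->name);
	}
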
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 0174a322e007..700dda8a9157 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -21,26 +21,18 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/sched.h>
26#include <linux/smp_lock.h>
27#include <linux/errno.h>
28#include <linux/init.h>
29#include <linux/timer.h>
30#include <linux/delay.h>
31#include <linux/list.h>
32#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h>
26#include <linux/io.h>
33#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28
34#include <linux/usb/ch9.h> 29#include <linux/usb/ch9.h>
35#include <linux/usb_gadget.h> 30#include <linux/usb_gadget.h>
36 31
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/system.h>
40
41#include "m66592-udc.h" 32#include "m66592-udc.h"
42 33
43MODULE_DESCRIPTION("M66592 USB gadget driiver"); 34
35MODULE_DESCRIPTION("M66592 USB gadget driver");
44MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
45MODULE_AUTHOR("Yoshihiro Shimoda"); 37MODULE_AUTHOR("Yoshihiro Shimoda");
46 38
@@ -49,16 +41,21 @@ MODULE_AUTHOR("Yoshihiro Shimoda");
49/* module parameters */ 41/* module parameters */
50static unsigned short clock = M66592_XTAL24; 42static unsigned short clock = M66592_XTAL24;
51module_param(clock, ushort, 0644); 43module_param(clock, ushort, 0644);
52MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0(default=16384)"); 44MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
45 "(default=16384)");
46
53static unsigned short vif = M66592_LDRV; 47static unsigned short vif = M66592_LDRV;
54module_param(vif, ushort, 0644); 48module_param(vif, ushort, 0644);
55MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)"); 49MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0 (default=32768)");
56static unsigned short endian = 0; 50
51static unsigned short endian;
57module_param(endian, ushort, 0644); 52module_param(endian, ushort, 0644);
58MODULE_PARM_DESC(endian, "data endian: big=256, little=0(default=0)"); 53MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
54
59static unsigned short irq_sense = M66592_INTL; 55static unsigned short irq_sense = M66592_INTL;
60module_param(irq_sense, ushort, 0644); 56module_param(irq_sense, ushort, 0644);
61MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0(default=2)"); 57MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0 "
58 "(default=2)");
62 59
63static const char udc_name[] = "m66592_udc"; 60static const char udc_name[] = "m66592_udc";
64static const char *m66592_ep_name[] = { 61static const char *m66592_ep_name[] = {
@@ -72,8 +69,8 @@ static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
72 gfp_t gfp_flags); 69 gfp_t gfp_flags);
73 70
74static void transfer_complete(struct m66592_ep *ep, 71static void transfer_complete(struct m66592_ep *ep,
75 struct m66592_request *req, 72 struct m66592_request *req, int status);
76 int status); 73
77/*-------------------------------------------------------------------------*/ 74/*-------------------------------------------------------------------------*/
78static inline u16 get_usb_speed(struct m66592 *m66592) 75static inline u16 get_usb_speed(struct m66592 *m66592)
79{ 76{
@@ -81,25 +78,25 @@ static inline u16 get_usb_speed(struct m66592 *m66592)
81} 78}
82 79
83static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum, 80static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum,
84 unsigned long reg) 81 unsigned long reg)
85{ 82{
86 u16 tmp; 83 u16 tmp;
87 84
88 tmp = m66592_read(m66592, M66592_INTENB0); 85 tmp = m66592_read(m66592, M66592_INTENB0);
89 m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, 86 m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
90 M66592_INTENB0); 87 M66592_INTENB0);
91 m66592_bset(m66592, (1 << pipenum), reg); 88 m66592_bset(m66592, (1 << pipenum), reg);
92 m66592_write(m66592, tmp, M66592_INTENB0); 89 m66592_write(m66592, tmp, M66592_INTENB0);
93} 90}
94 91
95static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum, 92static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum,
96 unsigned long reg) 93 unsigned long reg)
97{ 94{
98 u16 tmp; 95 u16 tmp;
99 96
100 tmp = m66592_read(m66592, M66592_INTENB0); 97 tmp = m66592_read(m66592, M66592_INTENB0);
101 m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, 98 m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
102 M66592_INTENB0); 99 M66592_INTENB0);
103 m66592_bclr(m66592, (1 << pipenum), reg); 100 m66592_bclr(m66592, (1 << pipenum), reg);
104 m66592_write(m66592, tmp, M66592_INTENB0); 101 m66592_write(m66592, tmp, M66592_INTENB0);
105} 102}
@@ -108,17 +105,19 @@ static void m66592_usb_connect(struct m66592 *m66592)
108{ 105{
109 m66592_bset(m66592, M66592_CTRE, M66592_INTENB0); 106 m66592_bset(m66592, M66592_CTRE, M66592_INTENB0);
110 m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, 107 m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
111 M66592_INTENB0); 108 M66592_INTENB0);
112 m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); 109 m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
113 110
114 m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG); 111 m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
115} 112}
116 113
117static void m66592_usb_disconnect(struct m66592 *m66592) 114static void m66592_usb_disconnect(struct m66592 *m66592)
115__releases(m66592->lock)
116__acquires(m66592->lock)
118{ 117{
119 m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0); 118 m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0);
120 m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, 119 m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
121 M66592_INTENB0); 120 M66592_INTENB0);
122 m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); 121 m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
123 m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); 122 m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
124 123
@@ -148,7 +147,7 @@ static inline u16 control_reg_get_pid(struct m66592 *m66592, u16 pipenum)
148} 147}
149 148
150static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum, 149static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum,
151 u16 pid) 150 u16 pid)
152{ 151{
153 unsigned long offset; 152 unsigned long offset;
154 153
@@ -250,7 +249,7 @@ static inline void pipe_change(struct m66592 *m66592, u16 pipenum)
250} 249}
251 250
252static int pipe_buffer_setting(struct m66592 *m66592, 251static int pipe_buffer_setting(struct m66592 *m66592,
253 struct m66592_pipe_info *info) 252 struct m66592_pipe_info *info)
254{ 253{
255 u16 bufnum = 0, buf_bsize = 0; 254 u16 bufnum = 0, buf_bsize = 0;
256 u16 pipecfg = 0; 255 u16 pipecfg = 0;
@@ -287,7 +286,7 @@ static int pipe_buffer_setting(struct m66592 *m66592,
287 } 286 }
288 if (m66592->bi_bufnum > M66592_MAX_BUFNUM) { 287 if (m66592->bi_bufnum > M66592_MAX_BUFNUM) {
289 printk(KERN_ERR "m66592 pipe memory is insufficient(%d)\n", 288 printk(KERN_ERR "m66592 pipe memory is insufficient(%d)\n",
290 m66592->bi_bufnum); 289 m66592->bi_bufnum);
291 return -ENOMEM; 290 return -ENOMEM;
292 } 291 }
293 292
@@ -328,7 +327,7 @@ static void pipe_buffer_release(struct m66592 *m66592,
328 m66592->bulk--; 327 m66592->bulk--;
329 } else 328 } else
330 printk(KERN_ERR "ep_release: unexpect pipenum (%d)\n", 329 printk(KERN_ERR "ep_release: unexpect pipenum (%d)\n",
331 info->pipe); 330 info->pipe);
332} 331}
333 332
334static void pipe_initialize(struct m66592_ep *ep) 333static void pipe_initialize(struct m66592_ep *ep)
@@ -350,8 +349,8 @@ static void pipe_initialize(struct m66592_ep *ep)
350} 349}
351 350
352static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, 351static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep,
353 const struct usb_endpoint_descriptor *desc, 352 const struct usb_endpoint_descriptor *desc,
354 u16 pipenum, int dma) 353 u16 pipenum, int dma)
355{ 354{
356 if ((pipenum != 0) && dma) { 355 if ((pipenum != 0) && dma) {
357 if (m66592->num_dma == 0) { 356 if (m66592->num_dma == 0) {
@@ -385,7 +384,7 @@ static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep,
385 384
386 ep->pipectr = get_pipectr_addr(pipenum); 385 ep->pipectr = get_pipectr_addr(pipenum);
387 ep->pipenum = pipenum; 386 ep->pipenum = pipenum;
388 ep->ep.maxpacket = desc->wMaxPacketSize; 387 ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
389 m66592->pipenum2ep[pipenum] = ep; 388 m66592->pipenum2ep[pipenum] = ep;
390 m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep; 389 m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep;
391 INIT_LIST_HEAD(&ep->queue); 390 INIT_LIST_HEAD(&ep->queue);
@@ -407,7 +406,7 @@ static void m66592_ep_release(struct m66592_ep *ep)
407} 406}
408 407
409static int alloc_pipe_config(struct m66592_ep *ep, 408static int alloc_pipe_config(struct m66592_ep *ep,
410 const struct usb_endpoint_descriptor *desc) 409 const struct usb_endpoint_descriptor *desc)
411{ 410{
412 struct m66592 *m66592 = ep->m66592; 411 struct m66592 *m66592 = ep->m66592;
413 struct m66592_pipe_info info; 412 struct m66592_pipe_info info;
@@ -419,15 +418,15 @@ static int alloc_pipe_config(struct m66592_ep *ep,
419 418
420 BUG_ON(ep->pipenum); 419 BUG_ON(ep->pipenum);
421 420
422 switch(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 421 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
423 case USB_ENDPOINT_XFER_BULK: 422 case USB_ENDPOINT_XFER_BULK:
424 if (m66592->bulk >= M66592_MAX_NUM_BULK) { 423 if (m66592->bulk >= M66592_MAX_NUM_BULK) {
425 if (m66592->isochronous >= M66592_MAX_NUM_ISOC) { 424 if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
426 printk(KERN_ERR "bulk pipe is insufficient\n"); 425 printk(KERN_ERR "bulk pipe is insufficient\n");
427 return -ENODEV; 426 return -ENODEV;
428 } else { 427 } else {
429 info.pipe = M66592_BASE_PIPENUM_ISOC + 428 info.pipe = M66592_BASE_PIPENUM_ISOC
430 m66592->isochronous; 429 + m66592->isochronous;
431 counter = &m66592->isochronous; 430 counter = &m66592->isochronous;
432 } 431 }
433 } else { 432 } else {
@@ -462,7 +461,7 @@ static int alloc_pipe_config(struct m66592_ep *ep,
462 ep->type = info.type; 461 ep->type = info.type;
463 462
464 info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 463 info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
465 info.maxpacket = desc->wMaxPacketSize; 464 info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
466 info.interval = desc->bInterval; 465 info.interval = desc->bInterval;
467 if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) 466 if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
468 info.dir_in = 1; 467 info.dir_in = 1;
@@ -525,8 +524,8 @@ static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
525 524
526 pipe_change(m66592, ep->pipenum); 525 pipe_change(m66592, ep->pipenum);
527 m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0, 526 m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0,
528 (M66592_ISEL | M66592_CURPIPE), 527 (M66592_ISEL | M66592_CURPIPE),
529 M66592_CFIFOSEL); 528 M66592_CFIFOSEL);
530 m66592_write(m66592, M66592_BCLR, ep->fifoctr); 529 m66592_write(m66592, M66592_BCLR, ep->fifoctr);
531 if (req->req.length == 0) { 530 if (req->req.length == 0) {
532 m66592_bset(m66592, M66592_BVAL, ep->fifoctr); 531 m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
@@ -561,8 +560,8 @@ static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req)
561 560
562 if (ep->pipenum == 0) { 561 if (ep->pipenum == 0) {
563 m66592_mdfy(m66592, M66592_PIPE0, 562 m66592_mdfy(m66592, M66592_PIPE0,
564 (M66592_ISEL | M66592_CURPIPE), 563 (M66592_ISEL | M66592_CURPIPE),
565 M66592_CFIFOSEL); 564 M66592_CFIFOSEL);
566 m66592_write(m66592, M66592_BCLR, ep->fifoctr); 565 m66592_write(m66592, M66592_BCLR, ep->fifoctr);
567 pipe_start(m66592, pipenum); 566 pipe_start(m66592, pipenum);
568 pipe_irq_enable(m66592, pipenum); 567 pipe_irq_enable(m66592, pipenum);
@@ -572,8 +571,9 @@ static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req)
572 pipe_change(m66592, pipenum); 571 pipe_change(m66592, pipenum);
573 m66592_bset(m66592, M66592_TRENB, ep->fifosel); 572 m66592_bset(m66592, M66592_TRENB, ep->fifosel);
574 m66592_write(m66592, 573 m66592_write(m66592,
575 (req->req.length + ep->ep.maxpacket - 1) / 574 (req->req.length + ep->ep.maxpacket - 1)
576 ep->ep.maxpacket, ep->fifotrn); 575 / ep->ep.maxpacket,
576 ep->fifotrn);
577 } 577 }
578 pipe_start(m66592, pipenum); /* trigger once */ 578 pipe_start(m66592, pipenum); /* trigger once */
579 pipe_irq_enable(m66592, pipenum); 579 pipe_irq_enable(m66592, pipenum);
@@ -614,7 +614,7 @@ static void start_ep0(struct m66592_ep *ep, struct m66592_request *req)
614static void init_controller(struct m66592 *m66592) 614static void init_controller(struct m66592 *m66592)
615{ 615{
616 m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND), 616 m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND),
617 M66592_PINCFG); 617 M66592_PINCFG);
618 m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ 618 m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */
619 m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG); 619 m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG);
620 620
@@ -634,7 +634,7 @@ static void init_controller(struct m66592 *m66592)
634 634
635 m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); 635 m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1);
636 m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, 636 m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR,
637 M66592_DMA0CFG); 637 M66592_DMA0CFG);
638} 638}
639 639
640static void disable_controller(struct m66592 *m66592) 640static void disable_controller(struct m66592 *m66592)
@@ -659,8 +659,9 @@ static void m66592_start_xclock(struct m66592 *m66592)
659 659
660/*-------------------------------------------------------------------------*/ 660/*-------------------------------------------------------------------------*/
661static void transfer_complete(struct m66592_ep *ep, 661static void transfer_complete(struct m66592_ep *ep,
662 struct m66592_request *req, 662 struct m66592_request *req, int status)
663 int status) 663__releases(m66592->lock)
664__acquires(m66592->lock)
664{ 665{
665 int restart = 0; 666 int restart = 0;
666 667
@@ -680,8 +681,9 @@ static void transfer_complete(struct m66592_ep *ep,
680 if (!list_empty(&ep->queue)) 681 if (!list_empty(&ep->queue))
681 restart = 1; 682 restart = 1;
682 683
683 if (likely(req->req.complete)) 684 spin_unlock(&ep->m66592->lock);
684 req->req.complete(&ep->ep, &req->req); 685 req->req.complete(&ep->ep, &req->req);
686 spin_lock(&ep->m66592->lock);
685 687
686 if (restart) { 688 if (restart) {
687 req = list_entry(ep->queue.next, struct m66592_request, queue); 689 req = list_entry(ep->queue.next, struct m66592_request, queue);
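
The unlock/complete/lock sequence added above is the usual giveback convention: the controller lock is dropped across the gadget driver's completion callback because that callback may queue another request and re-take the lock. A condensed sketch of the pattern (names hypothetical):

	static void giveback(struct m66592_ep *ep, struct m66592_request *req, int status)
	{
		list_del_init(&req->queue);
		req->req.status = status;

		spin_unlock(&ep->m66592->lock);		/* callback may call m66592_queue() */
		req->req.complete(&ep->ep, &req->req);
		spin_lock(&ep->m66592->lock);
	}
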
@@ -693,7 +695,7 @@ static void transfer_complete(struct m66592_ep *ep,
693static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req) 695static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
694{ 696{
695 int i; 697 int i;
696 volatile u16 tmp; 698 u16 tmp;
697 unsigned bufsize; 699 unsigned bufsize;
698 size_t size; 700 size_t size;
699 void *buf; 701 void *buf;
@@ -731,8 +733,9 @@ static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
731 req->req.actual += size; 733 req->req.actual += size;
732 734
733 /* check transfer finish */ 735 /* check transfer finish */
734 if ((!req->req.zero && (req->req.actual == req->req.length)) || 736 if ((!req->req.zero && (req->req.actual == req->req.length))
735 (size % ep->ep.maxpacket) || (size == 0)) { 737 || (size % ep->ep.maxpacket)
738 || (size == 0)) {
736 disable_irq_ready(m66592, pipenum); 739 disable_irq_ready(m66592, pipenum);
737 disable_irq_empty(m66592, pipenum); 740 disable_irq_empty(m66592, pipenum);
738 } else { 741 } else {
@@ -768,16 +771,19 @@ static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req)
768 /* write fifo */ 771 /* write fifo */
769 if (req->req.buf) { 772 if (req->req.buf) {
770 m66592_write_fifo(m66592, ep->fifoaddr, buf, size); 773 m66592_write_fifo(m66592, ep->fifoaddr, buf, size);
771 if ((size == 0) || ((size % ep->ep.maxpacket) != 0) || 774 if ((size == 0)
772 ((bufsize != ep->ep.maxpacket) && (bufsize > size))) 775 || ((size % ep->ep.maxpacket) != 0)
776 || ((bufsize != ep->ep.maxpacket)
777 && (bufsize > size)))
773 m66592_bset(m66592, M66592_BVAL, ep->fifoctr); 778 m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
774 } 779 }
775 780
776 /* update parameters */ 781 /* update parameters */
777 req->req.actual += size; 782 req->req.actual += size;
778 /* check transfer finish */ 783 /* check transfer finish */
779 if ((!req->req.zero && (req->req.actual == req->req.length)) || 784 if ((!req->req.zero && (req->req.actual == req->req.length))
780 (size % ep->ep.maxpacket) || (size == 0)) { 785 || (size % ep->ep.maxpacket)
786 || (size == 0)) {
781 disable_irq_ready(m66592, pipenum); 787 disable_irq_ready(m66592, pipenum);
782 enable_irq_empty(m66592, pipenum); 788 enable_irq_empty(m66592, pipenum);
783 } else { 789 } else {
@@ -821,8 +827,9 @@ static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req)
821 req->req.actual += size; 827 req->req.actual += size;
822 828
823 /* check transfer finish */ 829 /* check transfer finish */
824 if ((!req->req.zero && (req->req.actual == req->req.length)) || 830 if ((!req->req.zero && (req->req.actual == req->req.length))
825 (size % ep->ep.maxpacket) || (size == 0)) { 831 || (size % ep->ep.maxpacket)
832 || (size == 0)) {
826 pipe_stop(m66592, pipenum); 833 pipe_stop(m66592, pipenum);
827 pipe_irq_disable(m66592, pipenum); 834 pipe_irq_disable(m66592, pipenum);
828 finish = 1; 835 finish = 1;
@@ -850,7 +857,7 @@ static void irq_pipe_ready(struct m66592 *m66592, u16 status, u16 enb)
850 if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) { 857 if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) {
851 m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS); 858 m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS);
852 m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE, 859 m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE,
853 M66592_CFIFOSEL); 860 M66592_CFIFOSEL);
854 861
855 ep = &m66592->ep[0]; 862 ep = &m66592->ep[0];
856 req = list_entry(ep->queue.next, struct m66592_request, queue); 863 req = list_entry(ep->queue.next, struct m66592_request, queue);
@@ -909,23 +916,26 @@ static void irq_pipe_empty(struct m66592 *m66592, u16 status, u16 enb)
909} 916}
910 917
911static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) 918static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
919__releases(m66592->lock)
920__acquires(m66592->lock)
912{ 921{
913 struct m66592_ep *ep; 922 struct m66592_ep *ep;
914 u16 pid; 923 u16 pid;
915 u16 status = 0; 924 u16 status = 0;
925 u16 w_index = le16_to_cpu(ctrl->wIndex);
916 926
917 switch (ctrl->bRequestType & USB_RECIP_MASK) { 927 switch (ctrl->bRequestType & USB_RECIP_MASK) {
918 case USB_RECIP_DEVICE: 928 case USB_RECIP_DEVICE:
919 status = 1; /* selfpower */ 929 status = 1 << USB_DEVICE_SELF_POWERED;
920 break; 930 break;
921 case USB_RECIP_INTERFACE: 931 case USB_RECIP_INTERFACE:
922 status = 0; 932 status = 0;
923 break; 933 break;
924 case USB_RECIP_ENDPOINT: 934 case USB_RECIP_ENDPOINT:
925 ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; 935 ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
926 pid = control_reg_get_pid(m66592, ep->pipenum); 936 pid = control_reg_get_pid(m66592, ep->pipenum);
927 if (pid == M66592_PID_STALL) 937 if (pid == M66592_PID_STALL)
928 status = 1; 938 status = 1 << USB_ENDPOINT_HALT;
929 else 939 else
930 status = 0; 940 status = 0;
931 break; 941 break;
@@ -934,11 +944,13 @@ static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
934 return; /* exit */ 944 return; /* exit */
935 } 945 }
936 946
937 *m66592->ep0_buf = status; 947 m66592->ep0_data = cpu_to_le16(status);
938 m66592->ep0_req->buf = m66592->ep0_buf; 948 m66592->ep0_req->buf = &m66592->ep0_data;
939 m66592->ep0_req->length = 2; 949 m66592->ep0_req->length = 2;
940 /* AV: what happens if we get called again before that gets through? */ 950 /* AV: what happens if we get called again before that gets through? */
951 spin_unlock(&m66592->lock);
941 m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL); 952 m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL);
953 spin_lock(&m66592->lock);
942} 954}
943 955
944static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) 956static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
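
Regarding the get_status() changes above: the reply is two bytes in little-endian wire order, with bit 0 meaning self-powered (device recipient) or halted (endpoint recipient), which is why the magic 1 values became explicit bit positions and the buffer is now built with cpu_to_le16(). A hedged sketch of the same packing (helper name hypothetical):

	static __le16 pack_device_status(int self_powered)
	{
		u16 status = 0;

		if (self_powered)
			status |= 1 << USB_DEVICE_SELF_POWERED;	/* bit 0 of the reply */
		return cpu_to_le16(status);			/* wire order is little-endian */
	}
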
@@ -953,8 +965,9 @@ static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
953 case USB_RECIP_ENDPOINT: { 965 case USB_RECIP_ENDPOINT: {
954 struct m66592_ep *ep; 966 struct m66592_ep *ep;
955 struct m66592_request *req; 967 struct m66592_request *req;
968 u16 w_index = le16_to_cpu(ctrl->wIndex);
956 969
957 ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; 970 ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
958 pipe_stop(m66592, ep->pipenum); 971 pipe_stop(m66592, ep->pipenum);
959 control_reg_sqclr(m66592, ep->pipenum); 972 control_reg_sqclr(m66592, ep->pipenum);
960 973
@@ -989,8 +1002,9 @@ static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
989 break; 1002 break;
990 case USB_RECIP_ENDPOINT: { 1003 case USB_RECIP_ENDPOINT: {
991 struct m66592_ep *ep; 1004 struct m66592_ep *ep;
1005 u16 w_index = le16_to_cpu(ctrl->wIndex);
992 1006
993 ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; 1007 ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
994 pipe_stall(m66592, ep->pipenum); 1008 pipe_stall(m66592, ep->pipenum);
995 1009
996 control_end(m66592, 1); 1010 control_end(m66592, 1);
@@ -1066,14 +1080,16 @@ static void irq_device_state(struct m66592 *m66592)
1066 } 1080 }
1067 if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG) 1081 if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG)
1068 m66592_update_usb_speed(m66592); 1082 m66592_update_usb_speed(m66592);
1069 if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS) && 1083 if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS)
1070 m66592->gadget.speed == USB_SPEED_UNKNOWN) 1084 && m66592->gadget.speed == USB_SPEED_UNKNOWN)
1071 m66592_update_usb_speed(m66592); 1085 m66592_update_usb_speed(m66592);
1072 1086
1073 m66592->old_dvsq = dvsq; 1087 m66592->old_dvsq = dvsq;
1074} 1088}
1075 1089
1076static void irq_control_stage(struct m66592 *m66592) 1090static void irq_control_stage(struct m66592 *m66592)
1091__releases(m66592->lock)
1092__acquires(m66592->lock)
1077{ 1093{
1078 struct usb_ctrlrequest ctrl; 1094 struct usb_ctrlrequest ctrl;
1079 u16 ctsq; 1095 u16 ctsq;
@@ -1095,8 +1111,10 @@ static void irq_control_stage(struct m66592 *m66592)
1095 case M66592_CS_WRDS: 1111 case M66592_CS_WRDS:
1096 case M66592_CS_WRND: 1112 case M66592_CS_WRND:
1097 if (setup_packet(m66592, &ctrl)) { 1113 if (setup_packet(m66592, &ctrl)) {
1114 spin_unlock(&m66592->lock);
1098 if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0) 1115 if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0)
1099 pipe_stall(m66592, 0); 1116 pipe_stall(m66592, 0);
1117 spin_lock(&m66592->lock);
1100 } 1118 }
1101 break; 1119 break;
1102 case M66592_CS_RDSS: 1120 case M66592_CS_RDSS:
@@ -1119,6 +1137,8 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
1119 u16 savepipe; 1137 u16 savepipe;
1120 u16 mask0; 1138 u16 mask0;
1121 1139
1140 spin_lock(&m66592->lock);
1141
1122 intsts0 = m66592_read(m66592, M66592_INTSTS0); 1142 intsts0 = m66592_read(m66592, M66592_INTSTS0);
1123 intenb0 = m66592_read(m66592, M66592_INTENB0); 1143 intenb0 = m66592_read(m66592, M66592_INTENB0);
1124 1144
@@ -1134,27 +1154,27 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
1134 bempenb = m66592_read(m66592, M66592_BEMPENB); 1154 bempenb = m66592_read(m66592, M66592_BEMPENB);
1135 1155
1136 if (mask0 & M66592_VBINT) { 1156 if (mask0 & M66592_VBINT) {
1137 m66592_write(m66592, (u16)~M66592_VBINT, 1157 m66592_write(m66592, 0xffff & ~M66592_VBINT,
1138 M66592_INTSTS0); 1158 M66592_INTSTS0);
1139 m66592_start_xclock(m66592); 1159 m66592_start_xclock(m66592);
1140 1160
1141 /* start vbus sampling */ 1161 /* start vbus sampling */
1142 m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0) 1162 m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0)
1143 & M66592_VBSTS; 1163 & M66592_VBSTS;
1144 m66592->scount = M66592_MAX_SAMPLING; 1164 m66592->scount = M66592_MAX_SAMPLING;
1145 1165
1146 mod_timer(&m66592->timer, 1166 mod_timer(&m66592->timer,
1147 jiffies + msecs_to_jiffies(50)); 1167 jiffies + msecs_to_jiffies(50));
1148 } 1168 }
1149 if (intsts0 & M66592_DVSQ) 1169 if (intsts0 & M66592_DVSQ)
1150 irq_device_state(m66592); 1170 irq_device_state(m66592);
1151 1171
1152 if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE) && 1172 if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE)
1153 (brdysts & brdyenb)) { 1173 && (brdysts & brdyenb)) {
1154 irq_pipe_ready(m66592, brdysts, brdyenb); 1174 irq_pipe_ready(m66592, brdysts, brdyenb);
1155 } 1175 }
1156 if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE) && 1176 if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE)
1157 (bempsts & bempenb)) { 1177 && (bempsts & bempenb)) {
1158 irq_pipe_empty(m66592, bempsts, bempenb); 1178 irq_pipe_empty(m66592, bempsts, bempenb);
1159 } 1179 }
1160 1180
@@ -1164,6 +1184,7 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
1164 1184
1165 m66592_write(m66592, savepipe, M66592_CFIFOSEL); 1185 m66592_write(m66592, savepipe, M66592_CFIFOSEL);
1166 1186
1187 spin_unlock(&m66592->lock);
1167 return IRQ_HANDLED; 1188 return IRQ_HANDLED;
1168} 1189}
1169 1190
@@ -1191,13 +1212,13 @@ static void m66592_timer(unsigned long _m66592)
1191 m66592_usb_disconnect(m66592); 1212 m66592_usb_disconnect(m66592);
1192 } else { 1213 } else {
1193 mod_timer(&m66592->timer, 1214 mod_timer(&m66592->timer,
1194 jiffies + msecs_to_jiffies(50)); 1215 jiffies + msecs_to_jiffies(50));
1195 } 1216 }
1196 } else { 1217 } else {
1197 m66592->scount = M66592_MAX_SAMPLING; 1218 m66592->scount = M66592_MAX_SAMPLING;
1198 m66592->old_vbus = tmp; 1219 m66592->old_vbus = tmp;
1199 mod_timer(&m66592->timer, 1220 mod_timer(&m66592->timer,
1200 jiffies + msecs_to_jiffies(50)); 1221 jiffies + msecs_to_jiffies(50));
1201 } 1222 }
1202 } 1223 }
1203 spin_unlock_irqrestore(&m66592->lock, flags); 1224 spin_unlock_irqrestore(&m66592->lock, flags);
@@ -1335,11 +1356,6 @@ out:
1335 return ret; 1356 return ret;
1336} 1357}
1337 1358
1338static int m66592_fifo_status(struct usb_ep *_ep)
1339{
1340 return -EOPNOTSUPP;
1341}
1342
1343static void m66592_fifo_flush(struct usb_ep *_ep) 1359static void m66592_fifo_flush(struct usb_ep *_ep)
1344{ 1360{
1345 struct m66592_ep *ep; 1361 struct m66592_ep *ep;
@@ -1365,7 +1381,6 @@ static struct usb_ep_ops m66592_ep_ops = {
1365 .dequeue = m66592_dequeue, 1381 .dequeue = m66592_dequeue,
1366 1382
1367 .set_halt = m66592_set_halt, 1383 .set_halt = m66592_set_halt,
1368 .fifo_status = m66592_fifo_status,
1369 .fifo_flush = m66592_fifo_flush, 1384 .fifo_flush = m66592_fifo_flush,
1370}; 1385};
1371 1386
@@ -1377,11 +1392,10 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1377 struct m66592 *m66592 = the_controller; 1392 struct m66592 *m66592 = the_controller;
1378 int retval; 1393 int retval;
1379 1394
1380 if (!driver || 1395 if (!driver
1381 driver->speed != USB_SPEED_HIGH || 1396 || driver->speed != USB_SPEED_HIGH
1382 !driver->bind || 1397 || !driver->bind
1383 !driver->unbind || 1398 || !driver->setup)
1384 !driver->setup)
1385 return -EINVAL; 1399 return -EINVAL;
1386 if (!m66592) 1400 if (!m66592)
1387 return -ENODEV; 1401 return -ENODEV;
@@ -1413,8 +1427,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1413 m66592->old_vbus = m66592_read(m66592, 1427 m66592->old_vbus = m66592_read(m66592,
1414 M66592_INTSTS0) & M66592_VBSTS; 1428 M66592_INTSTS0) & M66592_VBSTS;
1415 m66592->scount = M66592_MAX_SAMPLING; 1429 m66592->scount = M66592_MAX_SAMPLING;
1416 mod_timer(&m66592->timer, 1430 mod_timer(&m66592->timer, jiffies + msecs_to_jiffies(50));
1417 jiffies + msecs_to_jiffies(50));
1418 } 1431 }
1419 1432
1420 return 0; 1433 return 0;
@@ -1432,6 +1445,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1432 struct m66592 *m66592 = the_controller; 1445 struct m66592 *m66592 = the_controller;
1433 unsigned long flags; 1446 unsigned long flags;
1434 1447
1448 if (driver != m66592->driver || !driver->unbind)
1449 return -EINVAL;
1450
1435 spin_lock_irqsave(&m66592->lock, flags); 1451 spin_lock_irqsave(&m66592->lock, flags);
1436 if (m66592->gadget.speed != USB_SPEED_UNKNOWN) 1452 if (m66592->gadget.speed != USB_SPEED_UNKNOWN)
1437 m66592_usb_disconnect(m66592); 1453 m66592_usb_disconnect(m66592);
@@ -1461,46 +1477,35 @@ static struct usb_gadget_ops m66592_gadget_ops = {
1461 .get_frame = m66592_get_frame, 1477 .get_frame = m66592_get_frame,
1462}; 1478};
1463 1479
1464#if defined(CONFIG_PM) 1480static int __exit m66592_remove(struct platform_device *pdev)
1465static int m66592_suspend(struct platform_device *pdev, pm_message_t state)
1466{
1467 pdev->dev.power.power_state = state;
1468 return 0;
1469}
1470
1471static int m66592_resume(struct platform_device *pdev)
1472{
1473 pdev->dev.power.power_state = PMSG_ON;
1474 return 0;
1475}
1476#else /* if defined(CONFIG_PM) */
1477#define m66592_suspend NULL
1478#define m66592_resume NULL
1479#endif
1480
1481static int __init_or_module m66592_remove(struct platform_device *pdev)
1482{ 1481{
1483 struct m66592 *m66592 = dev_get_drvdata(&pdev->dev); 1482 struct m66592 *m66592 = dev_get_drvdata(&pdev->dev);
1484 1483
1485 del_timer_sync(&m66592->timer); 1484 del_timer_sync(&m66592->timer);
1486 iounmap(m66592->reg); 1485 iounmap(m66592->reg);
1487 free_irq(platform_get_irq(pdev, 0), m66592); 1486 free_irq(platform_get_irq(pdev, 0), m66592);
1487 m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
1488 kfree(m66592); 1488 kfree(m66592);
1489 return 0; 1489 return 0;
1490} 1490}
1491 1491
1492static void nop_completion(struct usb_ep *ep, struct usb_request *r)
1493{
1494}
1495
1492#define resource_len(r) (((r)->end - (r)->start) + 1) 1496#define resource_len(r) (((r)->end - (r)->start) + 1)
1497
1493static int __init m66592_probe(struct platform_device *pdev) 1498static int __init m66592_probe(struct platform_device *pdev)
1494{ 1499{
1495 struct resource *res = NULL; 1500 struct resource *res;
1496 int irq = -1; 1501 int irq;
1497 void __iomem *reg = NULL; 1502 void __iomem *reg = NULL;
1498 struct m66592 *m66592 = NULL; 1503 struct m66592 *m66592 = NULL;
1499 int ret = 0; 1504 int ret = 0;
1500 int i; 1505 int i;
1501 1506
1502 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 1507 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1503 (char *)udc_name); 1508 (char *)udc_name);
1504 if (!res) { 1509 if (!res) {
1505 ret = -ENODEV; 1510 ret = -ENODEV;
1506 printk(KERN_ERR "platform_get_resource_byname error.\n"); 1511 printk(KERN_ERR "platform_get_resource_byname error.\n");
@@ -1548,7 +1553,7 @@ static int __init m66592_probe(struct platform_device *pdev)
1548 m66592->bi_bufnum = M66592_BASE_BUFNUM; 1553 m66592->bi_bufnum = M66592_BASE_BUFNUM;
1549 1554
1550 ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED, 1555 ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED,
1551 udc_name, m66592); 1556 udc_name, m66592);
1552 if (ret < 0) { 1557 if (ret < 0) {
1553 printk(KERN_ERR "request_irq error (%d)\n", ret); 1558 printk(KERN_ERR "request_irq error (%d)\n", ret);
1554 goto clean_up; 1559 goto clean_up;
@@ -1563,7 +1568,7 @@ static int __init m66592_probe(struct platform_device *pdev)
1563 if (i != 0) { 1568 if (i != 0) {
1564 INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list); 1569 INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list);
1565 list_add_tail(&m66592->ep[i].ep.ep_list, 1570 list_add_tail(&m66592->ep[i].ep.ep_list,
1566 &m66592->gadget.ep_list); 1571 &m66592->gadget.ep_list);
1567 } 1572 }
1568 ep->m66592 = m66592; 1573 ep->m66592 = m66592;
1569 INIT_LIST_HEAD(&ep->queue); 1574 INIT_LIST_HEAD(&ep->queue);
@@ -1583,20 +1588,18 @@ static int __init m66592_probe(struct platform_device *pdev)
1583 1588
1584 the_controller = m66592; 1589 the_controller = m66592;
1585 1590
1586 /* AV: leaks */
1587 m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); 1591 m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
1588 if (m66592->ep0_req == NULL) 1592 if (m66592->ep0_req == NULL)
1589 goto clean_up; 1593 goto clean_up2;
1590 /* AV: leaks, and do we really need it separately allocated? */ 1594 m66592->ep0_req->complete = nop_completion;
1591 m66592->ep0_buf = kzalloc(2, GFP_KERNEL);
1592 if (m66592->ep0_buf == NULL)
1593 goto clean_up;
1594 1595
1595 init_controller(m66592); 1596 init_controller(m66592);
1596 1597
1597 printk("driver %s, %s\n", udc_name, DRIVER_VERSION); 1598 dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
1598 return 0; 1599 return 0;
1599 1600
1601clean_up2:
1602 free_irq(irq, m66592);
1600clean_up: 1603clean_up:
1601 if (m66592) { 1604 if (m66592) {
1602 if (m66592->ep0_req) 1605 if (m66592->ep0_req)
@@ -1611,10 +1614,7 @@ clean_up:
1611 1614
1612/*-------------------------------------------------------------------------*/ 1615/*-------------------------------------------------------------------------*/
1613static struct platform_driver m66592_driver = { 1616static struct platform_driver m66592_driver = {
1614 .probe = m66592_probe, 1617 .remove = __exit_p(m66592_remove),
1615 .remove = m66592_remove,
1616 .suspend = m66592_suspend,
1617 .resume = m66592_resume,
1618 .driver = { 1618 .driver = {
1619 .name = (char *) udc_name, 1619 .name = (char *) udc_name,
1620 }, 1620 },
@@ -1622,7 +1622,7 @@ static struct platform_driver m66592_driver = {
1622 1622
1623static int __init m66592_udc_init(void) 1623static int __init m66592_udc_init(void)
1624{ 1624{
1625 return platform_driver_register(&m66592_driver); 1625 return platform_driver_probe(&m66592_driver, m66592_probe);
1626} 1626}
1627module_init(m66592_udc_init); 1627module_init(m66592_udc_init);
1628 1628
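
The move from platform_driver_register() to platform_driver_probe() is what lets m66592_probe() remain __init (and the remove hook be wrapped in __exit_p): the probe routine is referenced only at registration time, so its text can be discarded afterwards, and the driver will not bind to devices that appear later. An outline of that pattern, with hypothetical example_* names:

	static int __init example_probe(struct platform_device *pdev) { return 0; }
	static int __exit example_remove(struct platform_device *pdev) { return 0; }

	static struct platform_driver example_driver = {
		.remove	= __exit_p(example_remove),	/* no .probe here */
		.driver	= {
			.name = "example",
		},
	};

	static int __init example_init(void)
	{
		/* probe is passed once, at registration, and may live in .init.text */
		return platform_driver_probe(&example_driver, example_probe);
	}
	module_init(example_init);
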
@@ -1631,4 +1631,3 @@ static void __exit m66592_udc_cleanup(void)
1631 platform_driver_unregister(&m66592_driver); 1631 platform_driver_unregister(&m66592_driver);
1632} 1632}
1633module_exit(m66592_udc_cleanup); 1633module_exit(m66592_udc_cleanup);
1634
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h
index 26b54f8b8945..bfa0c645f229 100644
--- a/drivers/usb/gadget/m66592-udc.h
+++ b/drivers/usb/gadget/m66592-udc.h
@@ -24,73 +24,73 @@
24#define __M66592_UDC_H__ 24#define __M66592_UDC_H__
25 25
26#define M66592_SYSCFG 0x00 26#define M66592_SYSCFG 0x00
27#define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ 27#define M66592_XTAL 0xC000 /* b15-14: Crystal selection */
28#define M66592_XTAL48 0x8000 /* 48MHz */ 28#define M66592_XTAL48 0x8000 /* 48MHz */
29#define M66592_XTAL24 0x4000 /* 24MHz */ 29#define M66592_XTAL24 0x4000 /* 24MHz */
30#define M66592_XTAL12 0x0000 /* 12MHz */ 30#define M66592_XTAL12 0x0000 /* 12MHz */
31#define M66592_XCKE 0x2000 /* b13: External clock enable */ 31#define M66592_XCKE 0x2000 /* b13: External clock enable */
32#define M66592_RCKE 0x1000 /* b12: Register clock enable */ 32#define M66592_RCKE 0x1000 /* b12: Register clock enable */
33#define M66592_PLLC 0x0800 /* b11: PLL control */ 33#define M66592_PLLC 0x0800 /* b11: PLL control */
34#define M66592_SCKE 0x0400 /* b10: USB clock enable */ 34#define M66592_SCKE 0x0400 /* b10: USB clock enable */
35#define M66592_ATCKM 0x0100 /* b8: Automatic supply functional enable */ 35#define M66592_ATCKM 0x0100 /* b8: Automatic clock supply */
36#define M66592_HSE 0x0080 /* b7: Hi-speed enable */ 36#define M66592_HSE 0x0080 /* b7: Hi-speed enable */
37#define M66592_DCFM 0x0040 /* b6: Controller function select */ 37#define M66592_DCFM 0x0040 /* b6: Controller function select */
38#define M66592_DMRPD 0x0020 /* b5: D- pull down control */ 38#define M66592_DMRPD 0x0020 /* b5: D- pull down control */
39#define M66592_DPRPU 0x0010 /* b4: D+ pull up control */ 39#define M66592_DPRPU 0x0010 /* b4: D+ pull up control */
40#define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */ 40#define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */
41#define M66592_PCUT 0x0002 /* b1: Low power sleep enable */ 41#define M66592_PCUT 0x0002 /* b1: Low power sleep enable */
42#define M66592_USBE 0x0001 /* b0: USB module operation enable */ 42#define M66592_USBE 0x0001 /* b0: USB module operation enable */
43 43
44#define M66592_SYSSTS 0x02 44#define M66592_SYSSTS 0x02
45#define M66592_LNST 0x0003 /* b1-0: D+, D- line status */ 45#define M66592_LNST 0x0003 /* b1-0: D+, D- line status */
46#define M66592_SE1 0x0003 /* SE1 */ 46#define M66592_SE1 0x0003 /* SE1 */
47#define M66592_KSTS 0x0002 /* K State */ 47#define M66592_KSTS 0x0002 /* K State */
48#define M66592_JSTS 0x0001 /* J State */ 48#define M66592_JSTS 0x0001 /* J State */
49#define M66592_SE0 0x0000 /* SE0 */ 49#define M66592_SE0 0x0000 /* SE0 */
50 50
51#define M66592_DVSTCTR 0x04 51#define M66592_DVSTCTR 0x04
52#define M66592_WKUP 0x0100 /* b8: Remote wakeup */ 52#define M66592_WKUP 0x0100 /* b8: Remote wakeup */
53#define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */ 53#define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */
54#define M66592_USBRST 0x0040 /* b6: USB reset enable */ 54#define M66592_USBRST 0x0040 /* b6: USB reset enable */
55#define M66592_RESUME 0x0020 /* b5: Resume enable */ 55#define M66592_RESUME 0x0020 /* b5: Resume enable */
56#define M66592_UACT 0x0010 /* b4: USB bus enable */ 56#define M66592_UACT 0x0010 /* b4: USB bus enable */
57#define M66592_RHST 0x0003 /* b1-0: Reset handshake status */ 57#define M66592_RHST 0x0003 /* b1-0: Reset handshake status */
58#define M66592_HSMODE 0x0003 /* Hi-Speed mode */ 58#define M66592_HSMODE 0x0003 /* Hi-Speed mode */
59#define M66592_FSMODE 0x0002 /* Full-Speed mode */ 59#define M66592_FSMODE 0x0002 /* Full-Speed mode */
60#define M66592_HSPROC 0x0001 /* HS handshake is processing */ 60#define M66592_HSPROC 0x0001 /* HS handshake is processing */
61 61
62#define M66592_TESTMODE 0x06 62#define M66592_TESTMODE 0x06
63#define M66592_UTST 0x000F /* b4-0: Test select */ 63#define M66592_UTST 0x000F /* b4-0: Test select */
64#define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */ 64#define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */
65#define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ 65#define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */
66#define M66592_H_TST_K 0x000A /* HOST TEST K */ 66#define M66592_H_TST_K 0x000A /* HOST TEST K */
67#define M66592_H_TST_J 0x0009 /* HOST TEST J */ 67#define M66592_H_TST_J 0x0009 /* HOST TEST J */
68#define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */ 68#define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */
69#define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */ 69#define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */
70#define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ 70#define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */
71#define M66592_P_TST_K 0x0002 /* PERI TEST K */ 71#define M66592_P_TST_K 0x0002 /* PERI TEST K */
72#define M66592_P_TST_J 0x0001 /* PERI TEST J */ 72#define M66592_P_TST_J 0x0001 /* PERI TEST J */
73#define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */ 73#define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */
74 74
75#define M66592_PINCFG 0x0A 75#define M66592_PINCFG 0x0A
76#define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */ 76#define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */
77#define M66592_BIGEND 0x0100 /* b8: Big endian mode */ 77#define M66592_BIGEND 0x0100 /* b8: Big endian mode */
78 78
79#define M66592_DMA0CFG 0x0C 79#define M66592_DMA0CFG 0x0C
80#define M66592_DMA1CFG 0x0E 80#define M66592_DMA1CFG 0x0E
81#define M66592_DREQA 0x4000 /* b14: Dreq active select */ 81#define M66592_DREQA 0x4000 /* b14: Dreq active select */
82#define M66592_BURST 0x2000 /* b13: Burst mode */ 82#define M66592_BURST 0x2000 /* b13: Burst mode */
83#define M66592_DACKA 0x0400 /* b10: Dack active select */ 83#define M66592_DACKA 0x0400 /* b10: Dack active select */
84#define M66592_DFORM 0x0380 /* b9-7: DMA mode select */ 84#define M66592_DFORM 0x0380 /* b9-7: DMA mode select */
85#define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ 85#define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */
86#define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ 86#define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */
87#define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ 87#define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */
88#define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ 88#define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */
89#define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */ 89#define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */
90#define M66592_DENDA 0x0040 /* b6: Dend active select */ 90#define M66592_DENDA 0x0040 /* b6: Dend active select */
91#define M66592_PKTM 0x0020 /* b5: Packet mode */ 91#define M66592_PKTM 0x0020 /* b5: Packet mode */
92#define M66592_DENDE 0x0010 /* b4: Dend enable */ 92#define M66592_DENDE 0x0010 /* b4: Dend enable */
93#define M66592_OBUS 0x0004 /* b2: OUTbus mode */ 93#define M66592_OBUS 0x0004 /* b2: OUTbus mode */
94 94
95#define M66592_CFIFO 0x10 95#define M66592_CFIFO 0x10
96#define M66592_D0FIFO 0x14 96#define M66592_D0FIFO 0x14
@@ -99,300 +99,300 @@
99#define M66592_CFIFOSEL 0x1E 99#define M66592_CFIFOSEL 0x1E
100#define M66592_D0FIFOSEL 0x24 100#define M66592_D0FIFOSEL 0x24
101#define M66592_D1FIFOSEL 0x2A 101#define M66592_D1FIFOSEL 0x2A
102#define M66592_RCNT 0x8000 /* b15: Read count mode */ 102#define M66592_RCNT 0x8000 /* b15: Read count mode */
103#define M66592_REW 0x4000 /* b14: Buffer rewind */ 103#define M66592_REW 0x4000 /* b14: Buffer rewind */
104#define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */ 104#define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */
105#define M66592_DREQE 0x1000 /* b12: DREQ output enable */ 105#define M66592_DREQE 0x1000 /* b12: DREQ output enable */
106#define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO access */ 106#define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO */
107#define M66592_MBW_8 0x0000 /* 8bit */ 107#define M66592_MBW_8 0x0000 /* 8bit */
108#define M66592_MBW_16 0x0400 /* 16bit */ 108#define M66592_MBW_16 0x0400 /* 16bit */
109#define M66592_TRENB 0x0200 /* b9: Transaction counter enable */ 109#define M66592_TRENB 0x0200 /* b9: Transaction counter enable */
110#define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */ 110#define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */
111#define M66592_DEZPM 0x0080 /* b7: Zero-length packet additional mode */ 111#define M66592_DEZPM 0x0080 /* b7: Zero-length packet mode */
112#define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */ 112#define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */
113#define M66592_CURPIPE 0x0007 /* b2-0: PIPE select */ 113#define M66592_CURPIPE 0x0007 /* b2-0: PIPE select */
114 114
115#define M66592_CFIFOCTR 0x20 115#define M66592_CFIFOCTR 0x20
116#define M66592_D0FIFOCTR 0x26 116#define M66592_D0FIFOCTR 0x26
117#define M66592_D1FIFOCTR 0x2c 117#define M66592_D1FIFOCTR 0x2c
118#define M66592_BVAL 0x8000 /* b15: Buffer valid flag */ 118#define M66592_BVAL 0x8000 /* b15: Buffer valid flag */
119#define M66592_BCLR 0x4000 /* b14: Buffer clear */ 119#define M66592_BCLR 0x4000 /* b14: Buffer clear */
120#define M66592_FRDY 0x2000 /* b13: FIFO ready */ 120#define M66592_FRDY 0x2000 /* b13: FIFO ready */
121#define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */ 121#define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */
122 122
123#define M66592_CFIFOSIE 0x22 123#define M66592_CFIFOSIE 0x22
124#define M66592_TGL 0x8000 /* b15: Buffer toggle */ 124#define M66592_TGL 0x8000 /* b15: Buffer toggle */
125#define M66592_SCLR 0x4000 /* b14: Buffer clear */ 125#define M66592_SCLR 0x4000 /* b14: Buffer clear */
126#define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */ 126#define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */
127 127
128#define M66592_D0FIFOTRN 0x28 128#define M66592_D0FIFOTRN 0x28
129#define M66592_D1FIFOTRN 0x2E 129#define M66592_D1FIFOTRN 0x2E
130#define M66592_TRNCNT 0xFFFF /* b15-0: Transaction counter */ 130#define M66592_TRNCNT 0xFFFF /* b15-0: Transaction counter */
131 131
132#define M66592_INTENB0 0x30 132#define M66592_INTENB0 0x30
133#define M66592_VBSE 0x8000 /* b15: VBUS interrupt */ 133#define M66592_VBSE 0x8000 /* b15: VBUS interrupt */
134#define M66592_RSME 0x4000 /* b14: Resume interrupt */ 134#define M66592_RSME 0x4000 /* b14: Resume interrupt */
135#define M66592_SOFE 0x2000 /* b13: Frame update interrupt */ 135#define M66592_SOFE 0x2000 /* b13: Frame update interrupt */
136#define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */ 136#define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */
137#define M66592_CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ 137#define M66592_CTRE 0x0800 /* b11: Control transfer stage transition irq */
138#define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */ 138#define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */
139#define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */ 139#define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */
140#define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */ 140#define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */
141#define M66592_URST 0x0080 /* b7: USB reset detected interrupt */ 141#define M66592_URST 0x0080 /* b7: USB reset detected interrupt */
142#define M66592_SADR 0x0040 /* b6: Set address executed interrupt */ 142#define M66592_SADR 0x0040 /* b6: Set address executed interrupt */
143#define M66592_SCFG 0x0020 /* b5: Set configuration executed interrupt */ 143#define M66592_SCFG 0x0020 /* b5: Set configuration executed interrupt */
144#define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */ 144#define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */
145#define M66592_WDST 0x0008 /* b3: Control write data stage completed interrupt */ 145#define M66592_WDST 0x0008 /* b3: Control write data stage completed irq */
146#define M66592_RDST 0x0004 /* b2: Control read data stage completed interrupt */ 146#define M66592_RDST 0x0004 /* b2: Control read data stage completed irq */
147#define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */ 147#define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */
148#define M66592_SERR 0x0001 /* b0: Sequence error interrupt */ 148#define M66592_SERR 0x0001 /* b0: Sequence error interrupt */
149 149
150#define M66592_INTENB1 0x32 150#define M66592_INTENB1 0x32
151#define M66592_BCHGE 0x4000 /* b14: USB us chenge interrupt */ 151#define M66592_BCHGE 0x4000 /* b14: USB us chenge interrupt */
152#define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */ 152#define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */
153#define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ 153#define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */
154#define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */ 154#define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */
155#define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */ 155#define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */
156#define M66592_INTL 0x0002 /* b1: Interrupt sense select */ 156#define M66592_INTL 0x0002 /* b1: Interrupt sense select */
157#define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */ 157#define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */
158 158
159#define M66592_BRDYENB 0x36 159#define M66592_BRDYENB 0x36
160#define M66592_BRDYSTS 0x46 160#define M66592_BRDYSTS 0x46
161#define M66592_BRDY7 0x0080 /* b7: PIPE7 */ 161#define M66592_BRDY7 0x0080 /* b7: PIPE7 */
162#define M66592_BRDY6 0x0040 /* b6: PIPE6 */ 162#define M66592_BRDY6 0x0040 /* b6: PIPE6 */
163#define M66592_BRDY5 0x0020 /* b5: PIPE5 */ 163#define M66592_BRDY5 0x0020 /* b5: PIPE5 */
164#define M66592_BRDY4 0x0010 /* b4: PIPE4 */ 164#define M66592_BRDY4 0x0010 /* b4: PIPE4 */
165#define M66592_BRDY3 0x0008 /* b3: PIPE3 */ 165#define M66592_BRDY3 0x0008 /* b3: PIPE3 */
166#define M66592_BRDY2 0x0004 /* b2: PIPE2 */ 166#define M66592_BRDY2 0x0004 /* b2: PIPE2 */
167#define M66592_BRDY1 0x0002 /* b1: PIPE1 */ 167#define M66592_BRDY1 0x0002 /* b1: PIPE1 */
168#define M66592_BRDY0 0x0001 /* b1: PIPE0 */ 168#define M66592_BRDY0 0x0001 /* b1: PIPE0 */
169 169
170#define M66592_NRDYENB 0x38 170#define M66592_NRDYENB 0x38
171#define M66592_NRDYSTS 0x48 171#define M66592_NRDYSTS 0x48
172#define M66592_NRDY7 0x0080 /* b7: PIPE7 */ 172#define M66592_NRDY7 0x0080 /* b7: PIPE7 */
173#define M66592_NRDY6 0x0040 /* b6: PIPE6 */ 173#define M66592_NRDY6 0x0040 /* b6: PIPE6 */
174#define M66592_NRDY5 0x0020 /* b5: PIPE5 */ 174#define M66592_NRDY5 0x0020 /* b5: PIPE5 */
175#define M66592_NRDY4 0x0010 /* b4: PIPE4 */ 175#define M66592_NRDY4 0x0010 /* b4: PIPE4 */
176#define M66592_NRDY3 0x0008 /* b3: PIPE3 */ 176#define M66592_NRDY3 0x0008 /* b3: PIPE3 */
177#define M66592_NRDY2 0x0004 /* b2: PIPE2 */ 177#define M66592_NRDY2 0x0004 /* b2: PIPE2 */
178#define M66592_NRDY1 0x0002 /* b1: PIPE1 */ 178#define M66592_NRDY1 0x0002 /* b1: PIPE1 */
179#define M66592_NRDY0 0x0001 /* b0: PIPE0 */ 179#define M66592_NRDY0 0x0001 /* b0: PIPE0 */
180 180
181#define M66592_BEMPENB 0x3A 181#define M66592_BEMPENB 0x3A
182#define M66592_BEMPSTS 0x4A 182#define M66592_BEMPSTS 0x4A
183#define M66592_BEMP7 0x0080 /* b7: PIPE7 */ 183#define M66592_BEMP7 0x0080 /* b7: PIPE7 */
184#define M66592_BEMP6 0x0040 /* b6: PIPE6 */ 184#define M66592_BEMP6 0x0040 /* b6: PIPE6 */
185#define M66592_BEMP5 0x0020 /* b5: PIPE5 */ 185#define M66592_BEMP5 0x0020 /* b5: PIPE5 */
186#define M66592_BEMP4 0x0010 /* b4: PIPE4 */ 186#define M66592_BEMP4 0x0010 /* b4: PIPE4 */
187#define M66592_BEMP3 0x0008 /* b3: PIPE3 */ 187#define M66592_BEMP3 0x0008 /* b3: PIPE3 */
188#define M66592_BEMP2 0x0004 /* b2: PIPE2 */ 188#define M66592_BEMP2 0x0004 /* b2: PIPE2 */
189#define M66592_BEMP1 0x0002 /* b1: PIPE1 */ 189#define M66592_BEMP1 0x0002 /* b1: PIPE1 */
190#define M66592_BEMP0 0x0001 /* b0: PIPE0 */ 190#define M66592_BEMP0 0x0001 /* b0: PIPE0 */
191 191
192#define M66592_SOFCFG 0x3C 192#define M66592_SOFCFG 0x3C
193#define M66592_SOFM 0x000C /* b3-2: SOF pulse mode */ 193#define M66592_SOFM 0x000C /* b3-2: SOF pulse mode */
194#define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */ 194#define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */
195#define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ 195#define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */
196#define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */ 196#define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */
197 197
198#define M66592_INTSTS0 0x40 198#define M66592_INTSTS0 0x40
199#define M66592_VBINT 0x8000 /* b15: VBUS interrupt */ 199#define M66592_VBINT 0x8000 /* b15: VBUS interrupt */
200#define M66592_RESM 0x4000 /* b14: Resume interrupt */ 200#define M66592_RESM 0x4000 /* b14: Resume interrupt */
201#define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */ 201#define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */
202#define M66592_DVST 0x1000 /* b12: Device state transition interrupt */ 202#define M66592_DVST 0x1000 /* b12: Device state transition */
203#define M66592_CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ 203#define M66592_CTRT 0x0800 /* b11: Control stage transition */
204#define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */ 204#define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */
205#define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */ 205#define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */
206#define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */ 206#define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */
207#define M66592_VBSTS 0x0080 /* b7: VBUS input port */ 207#define M66592_VBSTS 0x0080 /* b7: VBUS input port */
208#define M66592_DVSQ 0x0070 /* b6-4: Device state */ 208#define M66592_DVSQ 0x0070 /* b6-4: Device state */
209#define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */ 209#define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */
210#define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */ 210#define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */
211#define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */ 211#define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */
212#define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */ 212#define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */
213#define M66592_DS_SUSP 0x0040 /* Suspend */ 213#define M66592_DS_SUSP 0x0040 /* Suspend */
214#define M66592_DS_CNFG 0x0030 /* Configured */ 214#define M66592_DS_CNFG 0x0030 /* Configured */
215#define M66592_DS_ADDS 0x0020 /* Address */ 215#define M66592_DS_ADDS 0x0020 /* Address */
216#define M66592_DS_DFLT 0x0010 /* Default */ 216#define M66592_DS_DFLT 0x0010 /* Default */
217#define M66592_DS_POWR 0x0000 /* Powered */ 217#define M66592_DS_POWR 0x0000 /* Powered */
218#define M66592_DVSQS 0x0030 /* b5-4: Device state */ 218#define M66592_DVSQS 0x0030 /* b5-4: Device state */
219#define M66592_VALID 0x0008 /* b3: Setup packet detected flag */ 219#define M66592_VALID 0x0008 /* b3: Setup packet detected flag */
220#define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */ 220#define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */
221#define M66592_CS_SQER 0x0006 /* Sequence error */ 221#define M66592_CS_SQER 0x0006 /* Sequence error */
222#define M66592_CS_WRND 0x0005 /* Control write nodata status stage */ 222#define M66592_CS_WRND 0x0005 /* Control write nodata status */
223#define M66592_CS_WRSS 0x0004 /* Control write status stage */ 223#define M66592_CS_WRSS 0x0004 /* Control write status stage */
224#define M66592_CS_WRDS 0x0003 /* Control write data stage */ 224#define M66592_CS_WRDS 0x0003 /* Control write data stage */
225#define M66592_CS_RDSS 0x0002 /* Control read status stage */ 225#define M66592_CS_RDSS 0x0002 /* Control read status stage */
226#define M66592_CS_RDDS 0x0001 /* Control read data stage */ 226#define M66592_CS_RDDS 0x0001 /* Control read data stage */
227#define M66592_CS_IDST 0x0000 /* Idle or setup stage */ 227#define M66592_CS_IDST 0x0000 /* Idle or setup stage */
228 228
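The DVSQ and CTSQ fields above pack the device state and the control transfer stage into INTSTS0. As a minimal sketch of reading the device state (the helper name is illustrative, and it assumes the m66592_read() accessor defined later in this header):

static inline u16 m66592_device_state(struct m66592 *m66592)
{
	u16 intsts0 = m66592_read(m66592, M66592_INTSTS0);

	return intsts0 & M66592_DVSQ;	/* e.g. M66592_DS_CNFG when configured */
}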
229#define M66592_INTSTS1 0x42 229#define M66592_INTSTS1 0x42
230#define M66592_BCHG 0x4000 /* b14: USB bus change interrupt */ 230#define M66592_BCHG 0x4000 /* b14: USB bus change interrupt */
231#define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */ 231#define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */
232#define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */ 232#define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */
233#define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */ 233#define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */
234 234
235#define M66592_FRMNUM 0x4C 235#define M66592_FRMNUM 0x4C
236#define M66592_OVRN 0x8000 /* b15: Overrun error */ 236#define M66592_OVRN 0x8000 /* b15: Overrun error */
237#define M66592_CRCE 0x4000 /* b14: Received data error */ 237#define M66592_CRCE 0x4000 /* b14: Received data error */
238#define M66592_SOFRM 0x0800 /* b11: SOF output mode */ 238#define M66592_SOFRM 0x0800 /* b11: SOF output mode */
239#define M66592_FRNM 0x07FF /* b10-0: Frame number */ 239#define M66592_FRNM 0x07FF /* b10-0: Frame number */
240 240
241#define M66592_UFRMNUM 0x4E 241#define M66592_UFRMNUM 0x4E
242#define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */ 242#define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */
243 243
244#define M66592_RECOVER 0x50 244#define M66592_RECOVER 0x50
245#define M66592_STSRECOV 0x0700 /* Status recovery */ 245#define M66592_STSRECOV 0x0700 /* Status recovery */
246#define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */ 246#define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */
247#define M66592_STSR_DEFAULT 0x0100 /* Default state */ 247#define M66592_STSR_DEFAULT 0x0100 /* Default state */
248#define M66592_STSR_ADDRESS 0x0200 /* Address state */ 248#define M66592_STSR_ADDRESS 0x0200 /* Address state */
249#define M66592_STSR_CONFIG 0x0300 /* Configured state */ 249#define M66592_STSR_CONFIG 0x0300 /* Configured state */
250#define M66592_USBADDR 0x007F /* b6-0: USB address */ 250#define M66592_USBADDR 0x007F /* b6-0: USB address */
251 251
252#define M66592_USBREQ 0x54 252#define M66592_USBREQ 0x54
253#define M66592_bRequest 0xFF00 /* b15-8: bRequest */ 253#define M66592_bRequest 0xFF00 /* b15-8: bRequest */
254#define M66592_GET_STATUS 0x0000 254#define M66592_GET_STATUS 0x0000
255#define M66592_CLEAR_FEATURE 0x0100 255#define M66592_CLEAR_FEATURE 0x0100
256#define M66592_ReqRESERVED 0x0200 256#define M66592_ReqRESERVED 0x0200
257#define M66592_SET_FEATURE 0x0300 257#define M66592_SET_FEATURE 0x0300
258#define M66592_ReqRESERVED1 0x0400 258#define M66592_ReqRESERVED1 0x0400
259#define M66592_SET_ADDRESS 0x0500 259#define M66592_SET_ADDRESS 0x0500
260#define M66592_GET_DESCRIPTOR 0x0600 260#define M66592_GET_DESCRIPTOR 0x0600
261#define M66592_SET_DESCRIPTOR 0x0700 261#define M66592_SET_DESCRIPTOR 0x0700
262#define M66592_GET_CONFIGURATION 0x0800 262#define M66592_GET_CONFIGURATION 0x0800
263#define M66592_SET_CONFIGURATION 0x0900 263#define M66592_SET_CONFIGURATION 0x0900
264#define M66592_GET_INTERFACE 0x0A00 264#define M66592_GET_INTERFACE 0x0A00
265#define M66592_SET_INTERFACE 0x0B00 265#define M66592_SET_INTERFACE 0x0B00
266#define M66592_SYNCH_FRAME 0x0C00 266#define M66592_SYNCH_FRAME 0x0C00
267#define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */ 267#define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */
268#define M66592_bmRequestTypeDir 0x0080 /* b7 : Data transfer direction */ 268#define M66592_bmRequestTypeDir 0x0080 /* b7 : Data direction */
269#define M66592_HOST_TO_DEVICE 0x0000 269#define M66592_HOST_TO_DEVICE 0x0000
270#define M66592_DEVICE_TO_HOST 0x0080 270#define M66592_DEVICE_TO_HOST 0x0080
271#define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */ 271#define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */
272#define M66592_STANDARD 0x0000 272#define M66592_STANDARD 0x0000
273#define M66592_CLASS 0x0020 273#define M66592_CLASS 0x0020
274#define M66592_VENDOR 0x0040 274#define M66592_VENDOR 0x0040
275#define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */ 275#define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */
276#define M66592_DEVICE 0x0000 276#define M66592_DEVICE 0x0000
277#define M66592_INTERFACE 0x0001 277#define M66592_INTERFACE 0x0001
278#define M66592_ENDPOINT 0x0002 278#define M66592_ENDPOINT 0x0002
279 279
280#define M66592_USBVAL 0x56 280#define M66592_USBVAL 0x56
281#define M66592_wValue 0xFFFF /* b15-0: wValue */ 281#define M66592_wValue 0xFFFF /* b15-0: wValue */
282/* Standard Feature Selector */ 282/* Standard Feature Selector */
283#define M66592_ENDPOINT_HALT 0x0000 283#define M66592_ENDPOINT_HALT 0x0000
284#define M66592_DEVICE_REMOTE_WAKEUP 0x0001 284#define M66592_DEVICE_REMOTE_WAKEUP 0x0001
285#define M66592_TEST_MODE 0x0002 285#define M66592_TEST_MODE 0x0002
286/* Descriptor Types */ 286/* Descriptor Types */
287#define M66592_DT_TYPE 0xFF00 287#define M66592_DT_TYPE 0xFF00
288#define M66592_GET_DT_TYPE(v) (((v) & M66592_DT_TYPE) >> 8) 288#define M66592_GET_DT_TYPE(v) (((v) & M66592_DT_TYPE) >> 8)
289#define M66592_DT_DEVICE 0x01 289#define M66592_DT_DEVICE 0x01
290#define M66592_DT_CONFIGURATION 0x02 290#define M66592_DT_CONFIGURATION 0x02
291#define M66592_DT_STRING 0x03 291#define M66592_DT_STRING 0x03
292#define M66592_DT_INTERFACE 0x04 292#define M66592_DT_INTERFACE 0x04
293#define M66592_DT_ENDPOINT 0x05 293#define M66592_DT_ENDPOINT 0x05
294#define M66592_DT_DEVICE_QUALIFIER 0x06 294#define M66592_DT_DEVICE_QUALIFIER 0x06
295#define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07 295#define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07
296#define M66592_DT_INTERFACE_POWER 0x08 296#define M66592_DT_INTERFACE_POWER 0x08
297#define M66592_DT_INDEX 0x00FF 297#define M66592_DT_INDEX 0x00FF
298#define M66592_CONF_NUM 0x00FF 298#define M66592_CONF_NUM 0x00FF
299#define M66592_ALT_SET 0x00FF 299#define M66592_ALT_SET 0x00FF
300 300
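For GET_DESCRIPTOR requests, the wValue latched in USBVAL splits into a descriptor type (high byte) and a descriptor index (low byte). A minimal sketch of that decoding, using the masks above; the helper name is hypothetical:

static inline void m66592_decode_descriptor_req(u16 wvalue, u8 *type, u8 *index)
{
	*type  = M66592_GET_DT_TYPE(wvalue);	/* e.g. M66592_DT_DEVICE */
	*index = wvalue & M66592_DT_INDEX;	/* descriptor index */
}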
301#define M66592_USBINDEX 0x58 301#define M66592_USBINDEX 0x58
302#define M66592_wIndex 0xFFFF /* b15-0: wIndex */ 302#define M66592_wIndex 0xFFFF /* b15-0: wIndex */
303#define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode Selectors */ 303#define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode */
304#define M66592_TEST_J 0x0100 /* Test_J */ 304#define M66592_TEST_J 0x0100 /* Test_J */
305#define M66592_TEST_K 0x0200 /* Test_K */ 305#define M66592_TEST_K 0x0200 /* Test_K */
306#define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */ 306#define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */
307#define M66592_TEST_PACKET 0x0400 /* Test_Packet */ 307#define M66592_TEST_PACKET 0x0400 /* Test_Packet */
308#define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */ 308#define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */
309#define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */ 309#define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */
310#define M66592_TEST_Reserved 0x4000 /* Reserved */ 310#define M66592_TEST_Reserved 0x4000 /* Reserved */
311#define M66592_TEST_VSTModes 0xC000 /* Vendor-specific test modes */ 311#define M66592_TEST_VSTModes 0xC000 /* Vendor-specific tests */
312#define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */ 312#define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */
313#define M66592_EP_DIR_IN 0x0080 313#define M66592_EP_DIR_IN 0x0080
314#define M66592_EP_DIR_OUT 0x0000 314#define M66592_EP_DIR_OUT 0x0000
315 315
316#define M66592_USBLENG 0x5A 316#define M66592_USBLENG 0x5A
317#define M66592_wLength 0xFFFF /* b15-0: wLength */ 317#define M66592_wLength 0xFFFF /* b15-0: wLength */
318 318
319#define M66592_DCPCFG 0x5C 319#define M66592_DCPCFG 0x5C
320#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode select */ 320#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */
321#define M66592_DIR 0x0010 /* b4: Control transfer DIR select */ 321#define M66592_DIR 0x0010 /* b4: Control transfer DIR select */
322 322
323#define M66592_DCPMAXP 0x5E 323#define M66592_DCPMAXP 0x5E
324#define M66592_DEVSEL 0xC000 /* b15-14: Device address select */ 324#define M66592_DEVSEL 0xC000 /* b15-14: Device address select */
325#define M66592_DEVICE_0 0x0000 /* Device address 0 */ 325#define M66592_DEVICE_0 0x0000 /* Device address 0 */
326#define M66592_DEVICE_1 0x4000 /* Device address 1 */ 326#define M66592_DEVICE_1 0x4000 /* Device address 1 */
327#define M66592_DEVICE_2 0x8000 /* Device address 2 */ 327#define M66592_DEVICE_2 0x8000 /* Device address 2 */
328#define M66592_DEVICE_3 0xC000 /* Device address 3 */ 328#define M66592_DEVICE_3 0xC000 /* Device address 3 */
329#define M66592_MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ 329#define M66592_MAXP 0x007F /* b6-0: Maxpacket size of ep0 */
330 330
331#define M66592_DCPCTR 0x60 331#define M66592_DCPCTR 0x60
332#define M66592_BSTS 0x8000 /* b15: Buffer status */ 332#define M66592_BSTS 0x8000 /* b15: Buffer status */
333#define M66592_SUREQ 0x4000 /* b14: Send USB request */ 333#define M66592_SUREQ 0x4000 /* b14: Send USB request */
334#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ 334#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */
335#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ 335#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */
336#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ 336#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */
337#define M66592_CCPL 0x0004 /* b2: Enable control transfer complete */ 337#define M66592_CCPL 0x0004 /* b2: control transfer complete */
338#define M66592_PID 0x0003 /* b1-0: Response PID */ 338#define M66592_PID 0x0003 /* b1-0: Response PID */
339#define M66592_PID_STALL 0x0002 /* STALL */ 339#define M66592_PID_STALL 0x0002 /* STALL */
340#define M66592_PID_BUF 0x0001 /* BUF */ 340#define M66592_PID_BUF 0x0001 /* BUF */
341#define M66592_PID_NAK 0x0000 /* NAK */ 341#define M66592_PID_NAK 0x0000 /* NAK */
342 342
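The PID field of DCPCTR selects the handshake the core returns on the default control pipe (NAK, BUF, or STALL). A minimal sketch of forcing a STALL, assuming the m66592_read()/m66592_write() accessors defined later in this header; the function name is illustrative only:

static inline void m66592_ep0_stall(struct m66592 *m66592)
{
	u16 tmp = m66592_read(m66592, M66592_DCPCTR);

	tmp &= ~M66592_PID;		/* clear the response PID field */
	tmp |= M66592_PID_STALL;	/* answer the next token with STALL */
	m66592_write(m66592, tmp, M66592_DCPCTR);
}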
343#define M66592_PIPESEL 0x64 343#define M66592_PIPESEL 0x64
344#define M66592_PIPENM 0x0007 /* b2-0: Pipe select */ 344#define M66592_PIPENM 0x0007 /* b2-0: Pipe select */
345#define M66592_PIPE0 0x0000 /* PIPE 0 */ 345#define M66592_PIPE0 0x0000 /* PIPE 0 */
346#define M66592_PIPE1 0x0001 /* PIPE 1 */ 346#define M66592_PIPE1 0x0001 /* PIPE 1 */
347#define M66592_PIPE2 0x0002 /* PIPE 2 */ 347#define M66592_PIPE2 0x0002 /* PIPE 2 */
348#define M66592_PIPE3 0x0003 /* PIPE 3 */ 348#define M66592_PIPE3 0x0003 /* PIPE 3 */
349#define M66592_PIPE4 0x0004 /* PIPE 4 */ 349#define M66592_PIPE4 0x0004 /* PIPE 4 */
350#define M66592_PIPE5 0x0005 /* PIPE 5 */ 350#define M66592_PIPE5 0x0005 /* PIPE 5 */
351#define M66592_PIPE6 0x0006 /* PIPE 6 */ 351#define M66592_PIPE6 0x0006 /* PIPE 6 */
352#define M66592_PIPE7 0x0007 /* PIPE 7 */ 352#define M66592_PIPE7 0x0007 /* PIPE 7 */
353 353
354#define M66592_PIPECFG 0x66 354#define M66592_PIPECFG 0x66
355#define M66592_TYP 0xC000 /* b15-14: Transfer type */ 355#define M66592_TYP 0xC000 /* b15-14: Transfer type */
356#define M66592_ISO 0xC000 /* Isochronous */ 356#define M66592_ISO 0xC000 /* Isochronous */
357#define M66592_INT 0x8000 /* Interrupt */ 357#define M66592_INT 0x8000 /* Interrupt */
358#define M66592_BULK 0x4000 /* Bulk */ 358#define M66592_BULK 0x4000 /* Bulk */
359#define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ 359#define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode */
360#define M66592_DBLB 0x0200 /* b9: Double buffer mode select */ 360#define M66592_DBLB 0x0200 /* b9: Double buffer mode select */
361#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode select */ 361#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */
362#define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */ 362#define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */
363#define M66592_DIR 0x0010 /* b4: Transfer direction select */ 363#define M66592_DIR 0x0010 /* b4: Transfer direction select */
364#define M66592_DIR_H_OUT 0x0010 /* HOST OUT */ 364#define M66592_DIR_H_OUT 0x0010 /* HOST OUT */
365#define M66592_DIR_P_IN 0x0010 /* PERI IN */ 365#define M66592_DIR_P_IN 0x0010 /* PERI IN */
366#define M66592_DIR_H_IN 0x0000 /* HOST IN */ 366#define M66592_DIR_H_IN 0x0000 /* HOST IN */
367#define M66592_DIR_P_OUT 0x0000 /* PERI OUT */ 367#define M66592_DIR_P_OUT 0x0000 /* PERI OUT */
368#define M66592_EPNUM 0x000F /* b3-0: Endpoint number select */ 368#define M66592_EPNUM 0x000F /* b3-0: Endpoint number select */
369#define M66592_EP1 0x0001 369#define M66592_EP1 0x0001
370#define M66592_EP2 0x0002 370#define M66592_EP2 0x0002
371#define M66592_EP3 0x0003 371#define M66592_EP3 0x0003
372#define M66592_EP4 0x0004 372#define M66592_EP4 0x0004
373#define M66592_EP5 0x0005 373#define M66592_EP5 0x0005
374#define M66592_EP6 0x0006 374#define M66592_EP6 0x0006
375#define M66592_EP7 0x0007 375#define M66592_EP7 0x0007
376#define M66592_EP8 0x0008 376#define M66592_EP8 0x0008
377#define M66592_EP9 0x0009 377#define M66592_EP9 0x0009
378#define M66592_EP10 0x000A 378#define M66592_EP10 0x000A
379#define M66592_EP11 0x000B 379#define M66592_EP11 0x000B
380#define M66592_EP12 0x000C 380#define M66592_EP12 0x000C
381#define M66592_EP13 0x000D 381#define M66592_EP13 0x000D
382#define M66592_EP14 0x000E 382#define M66592_EP14 0x000E
383#define M66592_EP15 0x000F 383#define M66592_EP15 0x000F
384 384
385#define M66592_PIPEBUF 0x68 385#define M66592_PIPEBUF 0x68
386#define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ 386#define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */
387#define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10) 387#define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10)
388#define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */ 388#define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */
389 389
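M66592_BUF_SIZE() encodes the pipe buffer size in 64-byte units minus one, shifted into b14-10 of PIPEBUF, while the low byte selects the starting buffer number. A minimal sketch of composing a PIPEBUF value (the helper name and the 512-byte/buffer-8 example are assumptions, not driver code):

static inline u16 m66592_pipebuf_val(unsigned int size, unsigned int bufnmb)
{
	/* e.g. size = 512, bufnmb = 8 gives 0x1c08 */
	return M66592_BUF_SIZE(size) | (bufnmb & M66592_BUFNMB);
}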
390#define M66592_PIPEMAXP 0x6A 390#define M66592_PIPEMAXP 0x6A
391#define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */ 391#define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */
392 392
393#define M66592_PIPEPERI 0x6C 393#define M66592_PIPEPERI 0x6C
394#define M66592_IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ 394#define M66592_IFIS 0x1000 /* b12: ISO in-buffer flush mode */
395#define M66592_IITV 0x0007 /* b2-0: Isochronous interval */ 395#define M66592_IITV 0x0007 /* b2-0: ISO interval */
396 396
397#define M66592_PIPE1CTR 0x70 397#define M66592_PIPE1CTR 0x70
398#define M66592_PIPE2CTR 0x72 398#define M66592_PIPE2CTR 0x72
@@ -401,19 +401,17 @@
401#define M66592_PIPE5CTR 0x78 401#define M66592_PIPE5CTR 0x78
402#define M66592_PIPE6CTR 0x7A 402#define M66592_PIPE6CTR 0x7A
403#define M66592_PIPE7CTR 0x7C 403#define M66592_PIPE7CTR 0x7C
404#define M66592_BSTS 0x8000 /* b15: Buffer status */ 404#define M66592_BSTS 0x8000 /* b15: Buffer status */
405#define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ 405#define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (PIPE 1-5) */
406#define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */ 406#define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */
407#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ 407#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */
408#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ 408#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */
409#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ 409#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */
410#define M66592_PID 0x0003 /* b1-0: Response PID */ 410#define M66592_PID 0x0003 /* b1-0: Response PID */
411 411
412#define M66592_INVALID_REG 0x7E 412#define M66592_INVALID_REG 0x7E
413 413
414 414
415#define __iomem
416
417#define get_pipectr_addr(pipenum) (M66592_PIPE1CTR + (pipenum - 1) * 2) 415#define get_pipectr_addr(pipenum) (M66592_PIPE1CTR + (pipenum - 1) * 2)
418 416
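get_pipectr_addr() maps pipe numbers 1-7 onto the PIPE1CTR-PIPE7CTR offsets above (pipe 1 -> 0x70, pipe 7 -> 0x7C). A minimal usage sketch, assuming the m66592_read() accessor defined later in this header; the helper name is hypothetical:

static inline u16 m66592_read_pipectr(struct m66592 *m66592, u16 pipenum)
{
	return m66592_read(m66592, get_pipectr_addr(pipenum));
}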
419#define M66592_MAX_SAMPLING 10 417#define M66592_MAX_SAMPLING 10
@@ -449,7 +447,7 @@ struct m66592_ep {
449 struct m66592 *m66592; 447 struct m66592 *m66592;
450 448
451 struct list_head queue; 449 struct list_head queue;
452 unsigned busy:1; 450 unsigned busy:1;
453 unsigned internal_ccpl:1; /* used only for control transfers */ 451 unsigned internal_ccpl:1; /* used only for control transfers */
454 452
455 /* this member is valid only after m66592_enable */ 453 /* this member is valid only after m66592_enable */
@@ -477,7 +475,7 @@ struct m66592 {
477 struct m66592_ep *epaddr2ep[16]; 475 struct m66592_ep *epaddr2ep[16];
478 476
479 struct usb_request *ep0_req; /* for internal request */ 477 struct usb_request *ep0_req; /* for internal request */
480 u16 *ep0_buf; /* for internal request */ 478 u16 ep0_data; /* for internal request */
481 479
482 struct timer_list timer; 480 struct timer_list timer;
483 481
@@ -527,8 +525,8 @@ static inline u16 m66592_read(struct m66592 *m66592, unsigned long offset)
527} 525}
528 526
529static inline void m66592_read_fifo(struct m66592 *m66592, 527static inline void m66592_read_fifo(struct m66592 *m66592,
530 unsigned long offset, 528 unsigned long offset,
531 void *buf, unsigned long len) 529 void *buf, unsigned long len)
532{ 530{
533 unsigned long fifoaddr = (unsigned long)m66592->reg + offset; 531 unsigned long fifoaddr = (unsigned long)m66592->reg + offset;
534 532
@@ -543,8 +541,8 @@ static inline void m66592_write(struct m66592 *m66592, u16 val,
543} 541}
544 542
545static inline void m66592_write_fifo(struct m66592 *m66592, 543static inline void m66592_write_fifo(struct m66592 *m66592,
546 unsigned long offset, 544 unsigned long offset,
547 void *buf, unsigned long len) 545 void *buf, unsigned long len)
548{ 546{
549 unsigned long fifoaddr = (unsigned long)m66592->reg + offset; 547 unsigned long fifoaddr = (unsigned long)m66592->reg + offset;
550 unsigned long odd = len & 0x0001; 548 unsigned long odd = len & 0x0001;
@@ -558,7 +556,7 @@ static inline void m66592_write_fifo(struct m66592 *m66592,
558} 556}
559 557
560static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, 558static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat,
561 unsigned long offset) 559 unsigned long offset)
562{ 560{
563 u16 tmp; 561 u16 tmp;
564 tmp = m66592_read(m66592, offset); 562 tmp = m66592_read(m66592, offset);
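The hunk above ends inside m66592_mdfy(); the remainder of the function is a plain read-modify-write of a 16-bit register. A hedged sketch of that pattern (an illustration under that assumption, not the driver's exact body):

static inline void mdfy_sketch(struct m66592 *m66592, u16 val, u16 pat,
		unsigned long offset)
{
	u16 tmp = m66592_read(m66592, offset);

	tmp &= ~pat;			/* clear the field selected by pat */
	tmp |= val;			/* insert the new value */
	m66592_write(m66592, tmp, offset);
}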
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 38138bb9ddb0..9cd98e73dc1d 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -33,6 +33,7 @@
33#include <linux/device.h> 33#include <linux/device.h>
34#include <linux/tty.h> 34#include <linux/tty.h>
35#include <linux/tty_flip.h> 35#include <linux/tty_flip.h>
36#include <linux/mutex.h>
36 37
37#include <asm/byteorder.h> 38#include <asm/byteorder.h>
38#include <asm/io.h> 39#include <asm/io.h>
@@ -258,7 +259,7 @@ static const char *EP_IN_NAME;
258static const char *EP_OUT_NAME; 259static const char *EP_OUT_NAME;
259static const char *EP_NOTIFY_NAME; 260static const char *EP_NOTIFY_NAME;
260 261
261static struct semaphore gs_open_close_sem[GS_NUM_PORTS]; 262static struct mutex gs_open_close_lock[GS_NUM_PORTS];
262 263
263static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE; 264static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
264static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE; 265static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
@@ -595,7 +596,7 @@ static int __init gs_module_init(void)
595 tty_set_operations(gs_tty_driver, &gs_tty_ops); 596 tty_set_operations(gs_tty_driver, &gs_tty_ops);
596 597
597 for (i=0; i < GS_NUM_PORTS; i++) 598 for (i=0; i < GS_NUM_PORTS; i++)
598 sema_init(&gs_open_close_sem[i], 1); 599 mutex_init(&gs_open_close_lock[i]);
599 600
600 retval = tty_register_driver(gs_tty_driver); 601 retval = tty_register_driver(gs_tty_driver);
601 if (retval) { 602 if (retval) {
@@ -635,7 +636,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
635 struct gs_port *port; 636 struct gs_port *port;
636 struct gs_dev *dev; 637 struct gs_dev *dev;
637 struct gs_buf *buf; 638 struct gs_buf *buf;
638 struct semaphore *sem; 639 struct mutex *mtx;
639 int ret; 640 int ret;
640 641
641 port_num = tty->index; 642 port_num = tty->index;
@@ -656,10 +657,10 @@ static int gs_open(struct tty_struct *tty, struct file *file)
656 return -ENODEV; 657 return -ENODEV;
657 } 658 }
658 659
659 sem = &gs_open_close_sem[port_num]; 660 mtx = &gs_open_close_lock[port_num];
660 if (down_interruptible(sem)) { 661 if (mutex_lock_interruptible(mtx)) {
661 printk(KERN_ERR 662 printk(KERN_ERR
662 "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n", 663 "gs_open: (%d,%p,%p) interrupted waiting for mutex\n",
663 port_num, tty, file); 664 port_num, tty, file);
664 return -ERESTARTSYS; 665 return -ERESTARTSYS;
665 } 666 }
@@ -754,12 +755,12 @@ static int gs_open(struct tty_struct *tty, struct file *file)
754 755
755exit_unlock_port: 756exit_unlock_port:
756 spin_unlock_irqrestore(&port->port_lock, flags); 757 spin_unlock_irqrestore(&port->port_lock, flags);
757 up(sem); 758 mutex_unlock(mtx);
758 return ret; 759 return ret;
759 760
760exit_unlock_dev: 761exit_unlock_dev:
761 spin_unlock_irqrestore(&dev->dev_lock, flags); 762 spin_unlock_irqrestore(&dev->dev_lock, flags);
762 up(sem); 763 mutex_unlock(mtx);
763 return ret; 764 return ret;
764 765
765} 766}
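The gs_open()/gs_close() changes replace a per-port semaphore that was only ever used for mutual exclusion with a struct mutex, keeping the interruptible acquire in the open path. A minimal sketch of the same conversion pattern outside this driver (all names below are illustrative):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_open_close_lock);

static int example_open(void)
{
	if (mutex_lock_interruptible(&example_open_close_lock))
		return -ERESTARTSYS;	/* a signal arrived while sleeping */

	/* ... open/close bookkeeping protected by the mutex ... */

	mutex_unlock(&example_open_close_lock);
	return 0;
}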
@@ -781,7 +782,7 @@ exit_unlock_dev:
781static void gs_close(struct tty_struct *tty, struct file *file) 782static void gs_close(struct tty_struct *tty, struct file *file)
782{ 783{
783 struct gs_port *port = tty->driver_data; 784 struct gs_port *port = tty->driver_data;
784 struct semaphore *sem; 785 struct mutex *mtx;
785 786
786 if (port == NULL) { 787 if (port == NULL) {
787 printk(KERN_ERR "gs_close: NULL port pointer\n"); 788 printk(KERN_ERR "gs_close: NULL port pointer\n");
@@ -790,8 +791,8 @@ static void gs_close(struct tty_struct *tty, struct file *file)
790 791
791 gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file); 792 gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file);
792 793
793 sem = &gs_open_close_sem[port->port_num]; 794 mtx = &gs_open_close_lock[port->port_num];
794 down(sem); 795 mutex_lock(mtx);
795 796
796 spin_lock_irq(&port->port_lock); 797 spin_lock_irq(&port->port_lock);
797 798
@@ -846,7 +847,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
846 847
847exit: 848exit:
848 spin_unlock_irq(&port->port_lock); 849 spin_unlock_irq(&port->port_lock);
849 up(sem); 850 mutex_unlock(mtx);
850} 851}
851 852
852/* 853/*