author     Haavard Skinnemoen <hskinnemoen@atmel.com>    2007-10-10 05:29:43 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>           2007-10-12 17:55:33 -0400
commit     914a3f3b375493eb44ad652a431939258cf34f71 (patch)
tree       83f603584d81e5fe2f8800ed56c2410448111f5c /drivers/usb/gadget/atmel_usba_udc.c
parent     c604e851486eabcbeb73e984279d436ce121fd5d (diff)
USB: add atmel_usba_udc driver
This is a driver for the Atmel USBA UDC which can be found integrated
on AT32AP700x AVR32 processors. For hardware documentation, please see
the AT32AP7000 data sheet:
http://www.atmel.com/dyn/resources/prod_documents/doc32003.pdf
This is a dual-speed controller (it connects at high or full speed).
The driver supports up to 7 endpoints (control, bulk, interrupt and
isochronous), subject to some hardware constraints. Bulk, interrupt and
isochronous transfers are driven by DMA.
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/gadget/atmel_usba_udc.c')
-rw-r--r--   drivers/usb/gadget/atmel_usba_udc.c   2038
1 files changed, 2038 insertions, 0 deletions
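For orientation before the patch body: this controller is driven through the
standard Linux USB gadget API rather than called directly. The sketch below is
illustrative only (the descriptor, buffer and function names are assumptions,
not part of this patch); it shows how a gadget driver of that era would claim
the "ep1in-bulk" endpoint listed in the new file and queue a transfer, which
lands in usba_ep_enable(), usba_ep_alloc_request() and usba_ep_queue(). The
struct usb_ep pointer itself would normally be picked off gadget->ep_list
during the gadget driver's bind().

	#include <linux/usb/ch9.h>
	#include <linux/usb/gadget.h>

	/* Illustrative gadget-side usage; all names here are assumptions. */
	static struct usb_endpoint_descriptor example_bulk_in_desc = {
		.bLength		= USB_DT_ENDPOINT_SIZE,
		.bDescriptorType	= USB_DT_ENDPOINT,
		.bEndpointAddress	= USB_DIR_IN | 1,	/* "ep1in-bulk" */
		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
		.wMaxPacketSize		= __constant_cpu_to_le16(512),
	};

	static void example_complete(struct usb_ep *ep, struct usb_request *req)
	{
		/* The gadget driver picks up the finished transfer here. */
	}

	static int example_queue_bulk_in(struct usb_ep *ep, void *buf, unsigned len)
	{
		struct usb_request *req;
		int ret;

		/* Configure the endpoint; this ends up in usba_ep_enable(). */
		ret = usb_ep_enable(ep, &example_bulk_in_desc);
		if (ret)
			return ret;

		/* Allocate and fill a request; maps to usba_ep_alloc_request(). */
		req = usb_ep_alloc_request(ep, GFP_KERNEL);
		if (!req)
			return -ENOMEM;
		req->buf = buf;
		req->length = len;
		req->complete = example_complete;

		/* Hand it to the UDC; maps to usba_ep_queue(), DMA-driven here. */
		return usb_ep_queue(ep, req, GFP_KERNEL);
	}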
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
new file mode 100644
index 000000000000..2bb28a583937
--- /dev/null
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -0,0 +1,2038 @@
1 | /* | ||
2 | * Driver for the Atmel USBA high speed USB device controller | ||
3 | * | ||
4 | * Copyright (C) 2005-2007 Atmel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/clk.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/usb/ch9.h> | ||
20 | #include <linux/usb/gadget.h> | ||
21 | #include <linux/delay.h> | ||
22 | |||
23 | #include <asm/gpio.h> | ||
24 | #include <asm/arch/board.h> | ||
25 | |||
26 | #include "atmel_usba_udc.h" | ||
27 | |||
28 | |||
29 | static struct usba_udc the_udc; | ||
30 | |||
31 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | ||
32 | #include <linux/debugfs.h> | ||
33 | #include <linux/uaccess.h> | ||
34 | |||
35 | static int queue_dbg_open(struct inode *inode, struct file *file) | ||
36 | { | ||
37 | struct usba_ep *ep = inode->i_private; | ||
38 | struct usba_request *req, *req_copy; | ||
39 | struct list_head *queue_data; | ||
40 | |||
41 | queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL); | ||
42 | if (!queue_data) | ||
43 | return -ENOMEM; | ||
44 | INIT_LIST_HEAD(queue_data); | ||
45 | |||
46 | spin_lock_irq(&ep->udc->lock); | ||
47 | list_for_each_entry(req, &ep->queue, queue) { | ||
48 | req_copy = kmalloc(sizeof(*req_copy), GFP_ATOMIC); | ||
49 | if (!req_copy) | ||
50 | goto fail; | ||
51 | memcpy(req_copy, req, sizeof(*req_copy)); | ||
52 | list_add_tail(&req_copy->queue, queue_data); | ||
53 | } | ||
54 | spin_unlock_irq(&ep->udc->lock); | ||
55 | |||
56 | file->private_data = queue_data; | ||
57 | return 0; | ||
58 | |||
59 | fail: | ||
60 | spin_unlock_irq(&ep->udc->lock); | ||
61 | list_for_each_entry_safe(req, req_copy, queue_data, queue) { | ||
62 | list_del(&req->queue); | ||
63 | kfree(req); | ||
64 | } | ||
65 | kfree(queue_data); | ||
66 | return -ENOMEM; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0 | ||
71 | * | ||
72 | * b: buffer address | ||
73 | * l: buffer length | ||
74 | * I/i: interrupt/no interrupt | ||
75 | * Z/z: zero/no zero | ||
76 | * S/s: short ok/short not ok | ||
77 | * s: status | ||
78 | * n: nr_packets | ||
79 | * F/f: submitted/not submitted to FIFO | ||
80 | * D/d: using/not using DMA | ||
81 | * L/l: last transaction/not last transaction | ||
82 | */ | ||
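/*
 * Example line (illustrative values only, not taken from a real trace):
 *   c3a41000 00000200 IzS  -115 FDl
 * i.e. a 512-byte request with interrupt-on-completion enabled, no
 * zero-length terminator, short packets allowed, status -EINPROGRESS,
 * already submitted to the controller, using DMA, not the last transaction.
 */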
83 | static ssize_t queue_dbg_read(struct file *file, char __user *buf, | ||
84 | size_t nbytes, loff_t *ppos) | ||
85 | { | ||
86 | struct list_head *queue = file->private_data; | ||
87 | struct usba_request *req, *tmp_req; | ||
88 | size_t len, remaining, actual = 0; | ||
89 | char tmpbuf[38]; | ||
90 | |||
91 | if (!access_ok(VERIFY_WRITE, buf, nbytes)) | ||
92 | return -EFAULT; | ||
93 | |||
94 | mutex_lock(&file->f_dentry->d_inode->i_mutex); | ||
95 | list_for_each_entry_safe(req, tmp_req, queue, queue) { | ||
96 | len = snprintf(tmpbuf, sizeof(tmpbuf), | ||
97 | "%8p %08x %c%c%c %5d %c%c%c\n", | ||
98 | req->req.buf, req->req.length, | ||
99 | req->req.no_interrupt ? 'i' : 'I', | ||
100 | req->req.zero ? 'Z' : 'z', | ||
101 | req->req.short_not_ok ? 's' : 'S', | ||
102 | req->req.status, | ||
103 | req->submitted ? 'F' : 'f', | ||
104 | req->using_dma ? 'D' : 'd', | ||
105 | req->last_transaction ? 'L' : 'l'); | ||
106 | len = min(len, sizeof(tmpbuf)); | ||
107 | if (len > nbytes) | ||
108 | break; | ||
109 | |||
110 | list_del(&req->queue); | ||
111 | kfree(req); | ||
112 | |||
113 | remaining = __copy_to_user(buf, tmpbuf, len); | ||
114 | actual += len - remaining; | ||
115 | if (remaining) | ||
116 | break; | ||
117 | |||
118 | nbytes -= len; | ||
119 | buf += len; | ||
120 | } | ||
121 | mutex_unlock(&file->f_dentry->d_inode->i_mutex); | ||
122 | |||
123 | return actual; | ||
124 | } | ||
125 | |||
126 | static int queue_dbg_release(struct inode *inode, struct file *file) | ||
127 | { | ||
128 | struct list_head *queue_data = file->private_data; | ||
129 | struct usba_request *req, *tmp_req; | ||
130 | |||
131 | list_for_each_entry_safe(req, tmp_req, queue_data, queue) { | ||
132 | list_del(&req->queue); | ||
133 | kfree(req); | ||
134 | } | ||
135 | kfree(queue_data); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int regs_dbg_open(struct inode *inode, struct file *file) | ||
140 | { | ||
141 | struct usba_udc *udc; | ||
142 | unsigned int i; | ||
143 | u32 *data; | ||
144 | int ret = -ENOMEM; | ||
145 | |||
146 | mutex_lock(&inode->i_mutex); | ||
147 | udc = inode->i_private; | ||
148 | data = kmalloc(inode->i_size, GFP_KERNEL); | ||
149 | if (!data) | ||
150 | goto out; | ||
151 | |||
152 | spin_lock_irq(&udc->lock); | ||
153 | for (i = 0; i < inode->i_size / 4; i++) | ||
154 | data[i] = __raw_readl(udc->regs + i * 4); | ||
155 | spin_unlock_irq(&udc->lock); | ||
156 | |||
157 | file->private_data = data; | ||
158 | ret = 0; | ||
159 | |||
160 | out: | ||
161 | mutex_unlock(&inode->i_mutex); | ||
162 | |||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | static ssize_t regs_dbg_read(struct file *file, char __user *buf, | ||
167 | size_t nbytes, loff_t *ppos) | ||
168 | { | ||
169 | struct inode *inode = file->f_dentry->d_inode; | ||
170 | int ret; | ||
171 | |||
172 | mutex_lock(&inode->i_mutex); | ||
173 | ret = simple_read_from_buffer(buf, nbytes, ppos, | ||
174 | file->private_data, | ||
175 | file->f_dentry->d_inode->i_size); | ||
176 | mutex_unlock(&inode->i_mutex); | ||
177 | |||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | static int regs_dbg_release(struct inode *inode, struct file *file) | ||
182 | { | ||
183 | kfree(file->private_data); | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | const struct file_operations queue_dbg_fops = { | ||
188 | .owner = THIS_MODULE, | ||
189 | .open = queue_dbg_open, | ||
190 | .llseek = no_llseek, | ||
191 | .read = queue_dbg_read, | ||
192 | .release = queue_dbg_release, | ||
193 | }; | ||
194 | |||
195 | const struct file_operations regs_dbg_fops = { | ||
196 | .owner = THIS_MODULE, | ||
197 | .open = regs_dbg_open, | ||
198 | .llseek = generic_file_llseek, | ||
199 | .read = regs_dbg_read, | ||
200 | .release = regs_dbg_release, | ||
201 | }; | ||
202 | |||
203 | static void usba_ep_init_debugfs(struct usba_udc *udc, | ||
204 | struct usba_ep *ep) | ||
205 | { | ||
206 | struct dentry *ep_root; | ||
207 | |||
208 | ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root); | ||
209 | if (!ep_root) | ||
210 | goto err_root; | ||
211 | ep->debugfs_dir = ep_root; | ||
212 | |||
213 | ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root, | ||
214 | ep, &queue_dbg_fops); | ||
215 | if (!ep->debugfs_queue) | ||
216 | goto err_queue; | ||
217 | |||
218 | if (ep->can_dma) { | ||
219 | ep->debugfs_dma_status | ||
220 | = debugfs_create_u32("dma_status", 0400, ep_root, | ||
221 | &ep->last_dma_status); | ||
222 | if (!ep->debugfs_dma_status) | ||
223 | goto err_dma_status; | ||
224 | } | ||
225 | if (ep_is_control(ep)) { | ||
226 | ep->debugfs_state | ||
227 | = debugfs_create_u32("state", 0400, ep_root, | ||
228 | &ep->state); | ||
229 | if (!ep->debugfs_state) | ||
230 | goto err_state; | ||
231 | } | ||
232 | |||
233 | return; | ||
234 | |||
235 | err_state: | ||
236 | if (ep->can_dma) | ||
237 | debugfs_remove(ep->debugfs_dma_status); | ||
238 | err_dma_status: | ||
239 | debugfs_remove(ep->debugfs_queue); | ||
240 | err_queue: | ||
241 | debugfs_remove(ep_root); | ||
242 | err_root: | ||
243 | dev_err(&ep->udc->pdev->dev, | ||
244 | "failed to create debugfs directory for %s\n", ep->ep.name); | ||
245 | } | ||
246 | |||
247 | static void usba_ep_cleanup_debugfs(struct usba_ep *ep) | ||
248 | { | ||
249 | debugfs_remove(ep->debugfs_queue); | ||
250 | debugfs_remove(ep->debugfs_dma_status); | ||
251 | debugfs_remove(ep->debugfs_state); | ||
252 | debugfs_remove(ep->debugfs_dir); | ||
253 | ep->debugfs_dma_status = NULL; | ||
254 | ep->debugfs_dir = NULL; | ||
255 | } | ||
256 | |||
257 | static void usba_init_debugfs(struct usba_udc *udc) | ||
258 | { | ||
259 | struct dentry *root, *regs; | ||
260 | struct resource *regs_resource; | ||
261 | |||
262 | root = debugfs_create_dir(udc->gadget.name, NULL); | ||
263 | if (IS_ERR(root) || !root) | ||
264 | goto err_root; | ||
265 | udc->debugfs_root = root; | ||
266 | |||
267 | regs = debugfs_create_file("regs", 0400, root, udc, ®s_dbg_fops); | ||
268 | if (!regs) | ||
269 | goto err_regs; | ||
270 | |||
271 | regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM, | ||
272 | CTRL_IOMEM_ID); | ||
273 | regs->d_inode->i_size = regs_resource->end - regs_resource->start + 1; | ||
274 | udc->debugfs_regs = regs; | ||
275 | |||
276 | usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0)); | ||
277 | |||
278 | return; | ||
279 | |||
280 | err_regs: | ||
281 | debugfs_remove(root); | ||
282 | err_root: | ||
283 | udc->debugfs_root = NULL; | ||
284 | dev_err(&udc->pdev->dev, "debugfs is not available\n"); | ||
285 | } | ||
286 | |||
287 | static void usba_cleanup_debugfs(struct usba_udc *udc) | ||
288 | { | ||
289 | usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0)); | ||
290 | debugfs_remove(udc->debugfs_regs); | ||
291 | debugfs_remove(udc->debugfs_root); | ||
292 | udc->debugfs_regs = NULL; | ||
293 | udc->debugfs_root = NULL; | ||
294 | } | ||
295 | #else | ||
296 | static inline void usba_ep_init_debugfs(struct usba_udc *udc, | ||
297 | struct usba_ep *ep) | ||
298 | { | ||
299 | |||
300 | } | ||
301 | |||
302 | static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep) | ||
303 | { | ||
304 | |||
305 | } | ||
306 | |||
307 | static inline void usba_init_debugfs(struct usba_udc *udc) | ||
308 | { | ||
309 | |||
310 | } | ||
311 | |||
312 | static inline void usba_cleanup_debugfs(struct usba_udc *udc) | ||
313 | { | ||
314 | |||
315 | } | ||
316 | #endif | ||
317 | |||
318 | static int vbus_is_present(struct usba_udc *udc) | ||
319 | { | ||
320 | if (udc->vbus_pin != -1) | ||
321 | return gpio_get_value(udc->vbus_pin); | ||
322 | |||
323 | /* No Vbus detection: Assume always present */ | ||
324 | return 1; | ||
325 | } | ||
326 | |||
327 | static void copy_to_fifo(void __iomem *fifo, const void *buf, int len) | ||
328 | { | ||
329 | unsigned long tmp; | ||
330 | |||
331 | DBG(DBG_FIFO, "copy to FIFO (len %d):\n", len); | ||
332 | for (; len > 0; len -= 4, buf += 4, fifo += 4) { | ||
333 | tmp = *(unsigned long *)buf; | ||
334 | if (len >= 4) { | ||
335 | DBG(DBG_FIFO, " -> %08lx\n", tmp); | ||
336 | __raw_writel(tmp, fifo); | ||
337 | } else { | ||
338 | do { | ||
339 | DBG(DBG_FIFO, " -> %02lx\n", tmp >> 24); | ||
340 | __raw_writeb(tmp >> 24, fifo); | ||
341 | fifo++; | ||
342 | tmp <<= 8; | ||
343 | } while (--len); | ||
344 | break; | ||
345 | } | ||
346 | } | ||
347 | } | ||
348 | |||
349 | static void copy_from_fifo(void *buf, void __iomem *fifo, int len) | ||
350 | { | ||
351 | union { | ||
352 | unsigned long *w; | ||
353 | unsigned char *b; | ||
354 | } p; | ||
355 | unsigned long tmp; | ||
356 | |||
357 | DBG(DBG_FIFO, "copy from FIFO (len %d):\n", len); | ||
358 | for (p.w = buf; len > 0; len -= 4, p.w++, fifo += 4) { | ||
359 | if (len >= 4) { | ||
360 | tmp = __raw_readl(fifo); | ||
361 | *p.w = tmp; | ||
362 | DBG(DBG_FIFO, " -> %08lx\n", tmp); | ||
363 | } else { | ||
364 | do { | ||
365 | tmp = __raw_readb(fifo); | ||
366 | *p.b = tmp; | ||
367 | DBG(DBG_FIFO, " -> %02lx\n", tmp); | ||
368 | fifo++, p.b++; | ||
369 | } while (--len); | ||
370 | } | ||
371 | } | ||
372 | } | ||
373 | |||
374 | static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req) | ||
375 | { | ||
376 | unsigned int transaction_len; | ||
377 | |||
378 | transaction_len = req->req.length - req->req.actual; | ||
379 | req->last_transaction = 1; | ||
380 | if (transaction_len > ep->ep.maxpacket) { | ||
381 | transaction_len = ep->ep.maxpacket; | ||
382 | req->last_transaction = 0; | ||
383 | } else if (transaction_len == ep->ep.maxpacket && req->req.zero) | ||
384 | req->last_transaction = 0; | ||
385 | |||
386 | DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n", | ||
387 | ep->ep.name, req, transaction_len, | ||
388 | req->last_transaction ? ", done" : ""); | ||
389 | |||
390 | copy_to_fifo(ep->fifo, req->req.buf + req->req.actual, transaction_len); | ||
391 | usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); | ||
392 | req->req.actual += transaction_len; | ||
393 | } | ||
394 | |||
395 | static void submit_request(struct usba_ep *ep, struct usba_request *req) | ||
396 | { | ||
397 | DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n", | ||
398 | ep->ep.name, req, req->req.length); | ||
399 | |||
400 | req->req.actual = 0; | ||
401 | req->submitted = 1; | ||
402 | |||
403 | if (req->using_dma) { | ||
404 | if (req->req.length == 0) { | ||
405 | usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); | ||
406 | return; | ||
407 | } | ||
408 | |||
409 | if (req->req.zero) | ||
410 | usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET); | ||
411 | else | ||
412 | usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET); | ||
413 | |||
414 | usba_dma_writel(ep, ADDRESS, req->req.dma); | ||
415 | usba_dma_writel(ep, CONTROL, req->ctrl); | ||
416 | } else { | ||
417 | next_fifo_transaction(ep, req); | ||
418 | if (req->last_transaction) { | ||
419 | usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); | ||
420 | usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); | ||
421 | } else { | ||
422 | usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); | ||
423 | usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); | ||
424 | } | ||
425 | } | ||
426 | } | ||
427 | |||
428 | static void submit_next_request(struct usba_ep *ep) | ||
429 | { | ||
430 | struct usba_request *req; | ||
431 | |||
432 | if (list_empty(&ep->queue)) { | ||
433 | usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY); | ||
434 | return; | ||
435 | } | ||
436 | |||
437 | req = list_entry(ep->queue.next, struct usba_request, queue); | ||
438 | if (!req->submitted) | ||
439 | submit_request(ep, req); | ||
440 | } | ||
441 | |||
442 | static void send_status(struct usba_udc *udc, struct usba_ep *ep) | ||
443 | { | ||
444 | ep->state = STATUS_STAGE_IN; | ||
445 | usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); | ||
446 | usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); | ||
447 | } | ||
448 | |||
449 | static void receive_data(struct usba_ep *ep) | ||
450 | { | ||
451 | struct usba_udc *udc = ep->udc; | ||
452 | struct usba_request *req; | ||
453 | unsigned long status; | ||
454 | unsigned int bytecount, nr_busy; | ||
455 | int is_complete = 0; | ||
456 | |||
457 | status = usba_ep_readl(ep, STA); | ||
458 | nr_busy = USBA_BFEXT(BUSY_BANKS, status); | ||
459 | |||
460 | DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy); | ||
461 | |||
462 | while (nr_busy > 0) { | ||
463 | if (list_empty(&ep->queue)) { | ||
464 | usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); | ||
465 | break; | ||
466 | } | ||
467 | req = list_entry(ep->queue.next, | ||
468 | struct usba_request, queue); | ||
469 | |||
470 | bytecount = USBA_BFEXT(BYTE_COUNT, status); | ||
471 | |||
472 | if (status & (1 << 31)) | ||
473 | is_complete = 1; | ||
474 | if (req->req.actual + bytecount >= req->req.length) { | ||
475 | is_complete = 1; | ||
476 | bytecount = req->req.length - req->req.actual; | ||
477 | } | ||
478 | |||
479 | copy_from_fifo(req->req.buf + req->req.actual, | ||
480 | ep->fifo, bytecount); | ||
481 | req->req.actual += bytecount; | ||
482 | |||
483 | usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); | ||
484 | |||
485 | if (is_complete) { | ||
486 | DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name); | ||
487 | req->req.status = 0; | ||
488 | list_del_init(&req->queue); | ||
489 | usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); | ||
490 | spin_unlock(&udc->lock); | ||
491 | req->req.complete(&ep->ep, &req->req); | ||
492 | spin_lock(&udc->lock); | ||
493 | } | ||
494 | |||
495 | status = usba_ep_readl(ep, STA); | ||
496 | nr_busy = USBA_BFEXT(BUSY_BANKS, status); | ||
497 | |||
498 | if (is_complete && ep_is_control(ep)) { | ||
499 | send_status(udc, ep); | ||
500 | break; | ||
501 | } | ||
502 | } | ||
503 | } | ||
504 | |||
505 | static void | ||
506 | request_complete(struct usba_ep *ep, struct usba_request *req, int status) | ||
507 | { | ||
508 | struct usba_udc *udc = ep->udc; | ||
509 | |||
510 | WARN_ON(!list_empty(&req->queue)); | ||
511 | |||
512 | if (req->req.status == -EINPROGRESS) | ||
513 | req->req.status = status; | ||
514 | |||
515 | if (req->mapped) { | ||
516 | dma_unmap_single( | ||
517 | &udc->pdev->dev, req->req.dma, req->req.length, | ||
518 | ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
519 | req->req.dma = DMA_ADDR_INVALID; | ||
520 | req->mapped = 0; | ||
521 | } | ||
522 | |||
523 | DBG(DBG_GADGET | DBG_REQ, | ||
524 | "%s: req %p complete: status %d, actual %u\n", | ||
525 | ep->ep.name, req, req->req.status, req->req.actual); | ||
526 | |||
527 | spin_unlock(&udc->lock); | ||
528 | req->req.complete(&ep->ep, &req->req); | ||
529 | spin_lock(&udc->lock); | ||
530 | } | ||
531 | |||
532 | static void | ||
533 | request_complete_list(struct usba_ep *ep, struct list_head *list, int status) | ||
534 | { | ||
535 | struct usba_request *req, *tmp_req; | ||
536 | |||
537 | list_for_each_entry_safe(req, tmp_req, list, queue) { | ||
538 | list_del_init(&req->queue); | ||
539 | request_complete(ep, req, status); | ||
540 | } | ||
541 | } | ||
542 | |||
543 | static int | ||
544 | usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | ||
545 | { | ||
546 | struct usba_ep *ep = to_usba_ep(_ep); | ||
547 | struct usba_udc *udc = ep->udc; | ||
548 | unsigned long flags, ept_cfg, maxpacket; | ||
549 | unsigned int nr_trans; | ||
550 | |||
551 | DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); | ||
552 | |||
553 | maxpacket = le16_to_cpu(desc->wMaxPacketSize) & 0x7ff; | ||
554 | |||
555 | if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index) | ||
556 | || ep->index == 0 | ||
557 | || desc->bDescriptorType != USB_DT_ENDPOINT | ||
558 | || maxpacket == 0 | ||
559 | || maxpacket > ep->fifo_size) { | ||
560 | DBG(DBG_ERR, "ep_enable: Invalid argument"); | ||
561 | return -EINVAL; | ||
562 | } | ||
563 | |||
564 | ep->is_isoc = 0; | ||
565 | ep->is_in = 0; | ||
566 | |||
567 | if (maxpacket <= 8) | ||
568 | ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8); | ||
569 | else | ||
570 | /* LSB is bit 1, not 0 */ | ||
571 | ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3); | ||
572 | |||
573 | DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n", | ||
574 | ep->ep.name, ept_cfg, maxpacket); | ||
575 | |||
576 | if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) { | ||
577 | ep->is_in = 1; | ||
578 | ept_cfg |= USBA_EPT_DIR_IN; | ||
579 | } | ||
580 | |||
581 | switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { | ||
582 | case USB_ENDPOINT_XFER_CONTROL: | ||
583 | ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL); | ||
584 | ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE); | ||
585 | break; | ||
586 | case USB_ENDPOINT_XFER_ISOC: | ||
587 | if (!ep->can_isoc) { | ||
588 | DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n", | ||
589 | ep->ep.name); | ||
590 | return -EINVAL; | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * Bits 11:12 specify number of _additional_ | ||
595 | * transactions per microframe. | ||
596 | */ | ||
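/*
 * Worked example (hypothetical descriptor value): wMaxPacketSize = 0x1400
 * encodes a 1024-byte payload in bits 10:0 and 2 additional transactions
 * in bits 12:11, so nr_trans computed below becomes 3.
 */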
597 | nr_trans = ((le16_to_cpu(desc->wMaxPacketSize) >> 11) & 3) + 1; | ||
598 | if (nr_trans > 3) | ||
599 | return -EINVAL; | ||
600 | |||
601 | ep->is_isoc = 1; | ||
602 | ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO); | ||
603 | |||
604 | /* | ||
605 | * Do triple-buffering on high-bandwidth iso endpoints. | ||
606 | */ | ||
607 | if (nr_trans > 1 && ep->nr_banks == 3) | ||
608 | ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE); | ||
609 | else | ||
610 | ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE); | ||
611 | ept_cfg |= USBA_BF(NB_TRANS, nr_trans); | ||
612 | break; | ||
613 | case USB_ENDPOINT_XFER_BULK: | ||
614 | ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK); | ||
615 | ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE); | ||
616 | break; | ||
617 | case USB_ENDPOINT_XFER_INT: | ||
618 | ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT); | ||
619 | ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE); | ||
620 | break; | ||
621 | } | ||
622 | |||
623 | spin_lock_irqsave(&ep->udc->lock, flags); | ||
624 | |||
625 | if (ep->desc) { | ||
626 | spin_unlock_irqrestore(&ep->udc->lock, flags); | ||
627 | DBG(DBG_ERR, "ep%d already enabled\n", ep->index); | ||
628 | return -EBUSY; | ||
629 | } | ||
630 | |||
631 | ep->desc = desc; | ||
632 | ep->ep.maxpacket = maxpacket; | ||
633 | |||
634 | usba_ep_writel(ep, CFG, ept_cfg); | ||
635 | usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); | ||
636 | |||
637 | if (ep->can_dma) { | ||
638 | u32 ctrl; | ||
639 | |||
640 | usba_writel(udc, INT_ENB, | ||
641 | (usba_readl(udc, INT_ENB) | ||
642 | | USBA_BF(EPT_INT, 1 << ep->index) | ||
643 | | USBA_BF(DMA_INT, 1 << ep->index))); | ||
644 | ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA; | ||
645 | usba_ep_writel(ep, CTL_ENB, ctrl); | ||
646 | } else { | ||
647 | usba_writel(udc, INT_ENB, | ||
648 | (usba_readl(udc, INT_ENB) | ||
649 | | USBA_BF(EPT_INT, 1 << ep->index))); | ||
650 | } | ||
651 | |||
652 | spin_unlock_irqrestore(&udc->lock, flags); | ||
653 | |||
654 | DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index, | ||
655 | (unsigned long)usba_ep_readl(ep, CFG)); | ||
656 | DBG(DBG_HW, "INT_ENB after init: %#08lx\n", | ||
657 | (unsigned long)usba_readl(udc, INT_ENB)); | ||
658 | |||
659 | return 0; | ||
660 | } | ||
661 | |||
662 | static int usba_ep_disable(struct usb_ep *_ep) | ||
663 | { | ||
664 | struct usba_ep *ep = to_usba_ep(_ep); | ||
665 | struct usba_udc *udc = ep->udc; | ||
666 | LIST_HEAD(req_list); | ||
667 | unsigned long flags; | ||
668 | |||
669 | DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name); | ||
670 | |||
671 | spin_lock_irqsave(&udc->lock, flags); | ||
672 | |||
673 | if (!ep->desc) { | ||
674 | spin_unlock_irqrestore(&udc->lock, flags); | ||
675 | DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name); | ||
676 | return -EINVAL; | ||
677 | } | ||
678 | ep->desc = NULL; | ||
679 | |||
680 | list_splice_init(&ep->queue, &req_list); | ||
681 | if (ep->can_dma) { | ||
682 | usba_dma_writel(ep, CONTROL, 0); | ||
683 | usba_dma_writel(ep, ADDRESS, 0); | ||
684 | usba_dma_readl(ep, STATUS); | ||
685 | } | ||
686 | usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE); | ||
687 | usba_writel(udc, INT_ENB, | ||
688 | usba_readl(udc, INT_ENB) | ||
689 | & ~USBA_BF(EPT_INT, 1 << ep->index)); | ||
690 | |||
691 | request_complete_list(ep, &req_list, -ESHUTDOWN); | ||
692 | |||
693 | spin_unlock_irqrestore(&udc->lock, flags); | ||
694 | |||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | static struct usb_request * | ||
699 | usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) | ||
700 | { | ||
701 | struct usba_request *req; | ||
702 | |||
703 | DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags); | ||
704 | |||
705 | req = kzalloc(sizeof(*req), gfp_flags); | ||
706 | if (!req) | ||
707 | return NULL; | ||
708 | |||
709 | INIT_LIST_HEAD(&req->queue); | ||
710 | req->req.dma = DMA_ADDR_INVALID; | ||
711 | |||
712 | return &req->req; | ||
713 | } | ||
714 | |||
715 | static void | ||
716 | usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) | ||
717 | { | ||
718 | struct usba_request *req = to_usba_req(_req); | ||
719 | |||
720 | DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req); | ||
721 | |||
722 | kfree(req); | ||
723 | } | ||
724 | |||
725 | static int queue_dma(struct usba_udc *udc, struct usba_ep *ep, | ||
726 | struct usba_request *req, gfp_t gfp_flags) | ||
727 | { | ||
728 | unsigned long flags; | ||
729 | int ret; | ||
730 | |||
731 | DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n", | ||
732 | ep->ep.name, req->req.length, req->req.dma, | ||
733 | req->req.zero ? 'Z' : 'z', | ||
734 | req->req.short_not_ok ? 'S' : 's', | ||
735 | req->req.no_interrupt ? 'I' : 'i'); | ||
736 | |||
737 | if (req->req.length > 0x10000) { | ||
738 | /* Lengths from 0 to 65536 (inclusive) are supported */ | ||
739 | DBG(DBG_ERR, "invalid request length %u\n", req->req.length); | ||
740 | return -EINVAL; | ||
741 | } | ||
742 | |||
743 | req->using_dma = 1; | ||
744 | |||
745 | if (req->req.dma == DMA_ADDR_INVALID) { | ||
746 | req->req.dma = dma_map_single( | ||
747 | &udc->pdev->dev, req->req.buf, req->req.length, | ||
748 | ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
749 | req->mapped = 1; | ||
750 | } else { | ||
751 | dma_sync_single_for_device( | ||
752 | &udc->pdev->dev, req->req.dma, req->req.length, | ||
753 | ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
754 | req->mapped = 0; | ||
755 | } | ||
756 | |||
757 | req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) | ||
758 | | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE | ||
759 | | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; | ||
760 | |||
761 | if (ep->is_in) | ||
762 | req->ctrl |= USBA_DMA_END_BUF_EN; | ||
763 | |||
764 | /* | ||
765 | * Add this request to the queue and submit for DMA if | ||
766 | * possible. Check if we're still alive first -- we may have | ||
767 | * received a reset since last time we checked. | ||
768 | */ | ||
769 | ret = -ESHUTDOWN; | ||
770 | spin_lock_irqsave(&udc->lock, flags); | ||
771 | if (ep->desc) { | ||
772 | if (list_empty(&ep->queue)) | ||
773 | submit_request(ep, req); | ||
774 | |||
775 | list_add_tail(&req->queue, &ep->queue); | ||
776 | ret = 0; | ||
777 | } | ||
778 | spin_unlock_irqrestore(&udc->lock, flags); | ||
779 | |||
780 | return ret; | ||
781 | } | ||
782 | |||
783 | static int | ||
784 | usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) | ||
785 | { | ||
786 | struct usba_request *req = to_usba_req(_req); | ||
787 | struct usba_ep *ep = to_usba_ep(_ep); | ||
788 | struct usba_udc *udc = ep->udc; | ||
789 | unsigned long flags; | ||
790 | int ret; | ||
791 | |||
792 | DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n", | ||
793 | ep->ep.name, req, _req->length); | ||
794 | |||
795 | if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || !ep->desc) | ||
796 | return -ESHUTDOWN; | ||
797 | |||
798 | req->submitted = 0; | ||
799 | req->using_dma = 0; | ||
800 | req->last_transaction = 0; | ||
801 | |||
802 | _req->status = -EINPROGRESS; | ||
803 | _req->actual = 0; | ||
804 | |||
805 | if (ep->can_dma) | ||
806 | return queue_dma(udc, ep, req, gfp_flags); | ||
807 | |||
808 | /* May have received a reset since last time we checked */ | ||
809 | ret = -ESHUTDOWN; | ||
810 | spin_lock_irqsave(&udc->lock, flags); | ||
811 | if (ep->desc) { | ||
812 | list_add_tail(&req->queue, &ep->queue); | ||
813 | |||
814 | if (ep->is_in || (ep_is_control(ep) | ||
815 | && (ep->state == DATA_STAGE_IN | ||
816 | || ep->state == STATUS_STAGE_IN))) | ||
817 | usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); | ||
818 | else | ||
819 | usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY); | ||
820 | ret = 0; | ||
821 | } | ||
822 | spin_unlock_irqrestore(&udc->lock, flags); | ||
823 | |||
824 | return ret; | ||
825 | } | ||
826 | |||
827 | static void | ||
828 | usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status) | ||
829 | { | ||
830 | req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status); | ||
831 | } | ||
832 | |||
833 | static int stop_dma(struct usba_ep *ep, u32 *pstatus) | ||
834 | { | ||
835 | unsigned int timeout; | ||
836 | u32 status; | ||
837 | |||
838 | /* | ||
839 | * Stop the DMA controller. When writing both CH_EN | ||
840 | * and LINK to 0, the other bits are not affected. | ||
841 | */ | ||
842 | usba_dma_writel(ep, CONTROL, 0); | ||
843 | |||
844 | /* Wait for the FIFO to empty */ | ||
845 | for (timeout = 40; timeout; --timeout) { | ||
846 | status = usba_dma_readl(ep, STATUS); | ||
847 | if (!(status & USBA_DMA_CH_EN)) | ||
848 | break; | ||
849 | udelay(1); | ||
850 | } | ||
851 | |||
852 | if (pstatus) | ||
853 | *pstatus = status; | ||
854 | |||
855 | if (timeout == 0) { | ||
856 | dev_err(&ep->udc->pdev->dev, | ||
857 | "%s: timed out waiting for DMA FIFO to empty\n", | ||
858 | ep->ep.name); | ||
859 | return -ETIMEDOUT; | ||
860 | } | ||
861 | |||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) | ||
866 | { | ||
867 | struct usba_ep *ep = to_usba_ep(_ep); | ||
868 | struct usba_udc *udc = ep->udc; | ||
869 | struct usba_request *req = to_usba_req(_req); | ||
870 | unsigned long flags; | ||
871 | u32 status; | ||
872 | |||
873 | DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", | ||
874 | ep->ep.name, req); | ||
875 | |||
876 | spin_lock_irqsave(&udc->lock, flags); | ||
877 | |||
878 | if (req->using_dma) { | ||
879 | /* | ||
880 | * If this request is currently being transferred, | ||
881 | * stop the DMA controller and reset the FIFO. | ||
882 | */ | ||
883 | if (ep->queue.next == &req->queue) { | ||
884 | status = usba_dma_readl(ep, STATUS); | ||
885 | if (status & USBA_DMA_CH_EN) | ||
886 | stop_dma(ep, &status); | ||
887 | |||
888 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | ||
889 | ep->last_dma_status = status; | ||
890 | #endif | ||
891 | |||
892 | usba_writel(udc, EPT_RST, 1 << ep->index); | ||
893 | |||
894 | usba_update_req(ep, req, status); | ||
895 | } | ||
896 | } | ||
897 | |||
898 | /* | ||
899 | * Errors should stop the queue from advancing until the | ||
900 | * completion function returns. | ||
901 | */ | ||
902 | list_del_init(&req->queue); | ||
903 | |||
904 | request_complete(ep, req, -ECONNRESET); | ||
905 | |||
906 | /* Process the next request if any */ | ||
907 | submit_next_request(ep); | ||
908 | spin_unlock_irqrestore(&udc->lock, flags); | ||
909 | |||
910 | return 0; | ||
911 | } | ||
912 | |||
913 | static int usba_ep_set_halt(struct usb_ep *_ep, int value) | ||
914 | { | ||
915 | struct usba_ep *ep = to_usba_ep(_ep); | ||
916 | struct usba_udc *udc = ep->udc; | ||
917 | unsigned long flags; | ||
918 | int ret = 0; | ||
919 | |||
920 | DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name, | ||
921 | value ? "set" : "clear"); | ||
922 | |||
923 | if (!ep->desc) { | ||
924 | DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n", | ||
925 | ep->ep.name); | ||
926 | return -ENODEV; | ||
927 | } | ||
928 | if (ep->is_isoc) { | ||
929 | DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n", | ||
930 | ep->ep.name); | ||
931 | return -ENOTTY; | ||
932 | } | ||
933 | |||
934 | spin_lock_irqsave(&udc->lock, flags); | ||
935 | |||
936 | /* | ||
937 | * We can't halt IN endpoints while there are still data to be | ||
938 | * transferred | ||
939 | */ | ||
940 | if (!list_empty(&ep->queue) | ||
941 | || ((value && ep->is_in && (usba_ep_readl(ep, STA) | ||
942 | & USBA_BF(BUSY_BANKS, -1L))))) { | ||
943 | ret = -EAGAIN; | ||
944 | } else { | ||
945 | if (value) | ||
946 | usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); | ||
947 | else | ||
948 | usba_ep_writel(ep, CLR_STA, | ||
949 | USBA_FORCE_STALL | USBA_TOGGLE_CLR); | ||
950 | usba_ep_readl(ep, STA); | ||
951 | } | ||
952 | |||
953 | spin_unlock_irqrestore(&udc->lock, flags); | ||
954 | |||
955 | return ret; | ||
956 | } | ||
957 | |||
958 | static int usba_ep_fifo_status(struct usb_ep *_ep) | ||
959 | { | ||
960 | struct usba_ep *ep = to_usba_ep(_ep); | ||
961 | |||
962 | return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); | ||
963 | } | ||
964 | |||
965 | static void usba_ep_fifo_flush(struct usb_ep *_ep) | ||
966 | { | ||
967 | struct usba_ep *ep = to_usba_ep(_ep); | ||
968 | struct usba_udc *udc = ep->udc; | ||
969 | |||
970 | usba_writel(udc, EPT_RST, 1 << ep->index); | ||
971 | } | ||
972 | |||
973 | static const struct usb_ep_ops usba_ep_ops = { | ||
974 | .enable = usba_ep_enable, | ||
975 | .disable = usba_ep_disable, | ||
976 | .alloc_request = usba_ep_alloc_request, | ||
977 | .free_request = usba_ep_free_request, | ||
978 | .queue = usba_ep_queue, | ||
979 | .dequeue = usba_ep_dequeue, | ||
980 | .set_halt = usba_ep_set_halt, | ||
981 | .fifo_status = usba_ep_fifo_status, | ||
982 | .fifo_flush = usba_ep_fifo_flush, | ||
983 | }; | ||
984 | |||
985 | static int usba_udc_get_frame(struct usb_gadget *gadget) | ||
986 | { | ||
987 | struct usba_udc *udc = to_usba_udc(gadget); | ||
988 | |||
989 | return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM)); | ||
990 | } | ||
991 | |||
992 | static const struct usb_gadget_ops usba_udc_ops = { | ||
993 | .get_frame = usba_udc_get_frame, | ||
994 | }; | ||
995 | |||
996 | #define EP(nam, idx, maxpkt, maxbk, dma, isoc) \ | ||
997 | { \ | ||
998 | .ep = { \ | ||
999 | .ops = &usba_ep_ops, \ | ||
1000 | .name = nam, \ | ||
1001 | .maxpacket = maxpkt, \ | ||
1002 | }, \ | ||
1003 | .udc = &the_udc, \ | ||
1004 | .queue = LIST_HEAD_INIT(usba_ep[idx].queue), \ | ||
1005 | .fifo_size = maxpkt, \ | ||
1006 | .nr_banks = maxbk, \ | ||
1007 | .index = idx, \ | ||
1008 | .can_dma = dma, \ | ||
1009 | .can_isoc = isoc, \ | ||
1010 | } | ||
1011 | |||
1012 | static struct usba_ep usba_ep[] = { | ||
1013 | EP("ep0", 0, 64, 1, 0, 0), | ||
1014 | EP("ep1in-bulk", 1, 512, 2, 1, 1), | ||
1015 | EP("ep2out-bulk", 2, 512, 2, 1, 1), | ||
1016 | EP("ep3in-int", 3, 64, 3, 1, 0), | ||
1017 | EP("ep4out-int", 4, 64, 3, 1, 0), | ||
1018 | EP("ep5in-iso", 5, 1024, 3, 1, 1), | ||
1019 | EP("ep6out-iso", 6, 1024, 3, 1, 1), | ||
1020 | }; | ||
1021 | #undef EP | ||
1022 | |||
1023 | static struct usb_endpoint_descriptor usba_ep0_desc = { | ||
1024 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
1025 | .bDescriptorType = USB_DT_ENDPOINT, | ||
1026 | .bEndpointAddress = 0, | ||
1027 | .bmAttributes = USB_ENDPOINT_XFER_CONTROL, | ||
1028 | .wMaxPacketSize = __constant_cpu_to_le16(64), | ||
1029 | /* FIXME: I have no idea what to put here */ | ||
1030 | .bInterval = 1, | ||
1031 | }; | ||
1032 | |||
1033 | static void nop_release(struct device *dev) | ||
1034 | { | ||
1035 | |||
1036 | } | ||
1037 | |||
1038 | static struct usba_udc the_udc = { | ||
1039 | .gadget = { | ||
1040 | .ops = &usba_udc_ops, | ||
1041 | .ep0 = &usba_ep[0].ep, | ||
1042 | .ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list), | ||
1043 | .is_dualspeed = 1, | ||
1044 | .name = "atmel_usba_udc", | ||
1045 | .dev = { | ||
1046 | .bus_id = "gadget", | ||
1047 | .release = nop_release, | ||
1048 | }, | ||
1049 | }, | ||
1050 | |||
1051 | .lock = SPIN_LOCK_UNLOCKED, | ||
1052 | }; | ||
1053 | |||
1054 | /* | ||
1055 | * Called with interrupts disabled and udc->lock held. | ||
1056 | */ | ||
1057 | static void reset_all_endpoints(struct usba_udc *udc) | ||
1058 | { | ||
1059 | struct usba_ep *ep; | ||
1060 | struct usba_request *req, *tmp_req; | ||
1061 | |||
1062 | usba_writel(udc, EPT_RST, ~0UL); | ||
1063 | |||
1064 | ep = to_usba_ep(udc->gadget.ep0); | ||
1065 | list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) { | ||
1066 | list_del_init(&req->queue); | ||
1067 | request_complete(ep, req, -ECONNRESET); | ||
1068 | } | ||
1069 | |||
1070 | list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { | ||
1071 | if (ep->desc) | ||
1072 | usba_ep_disable(&ep->ep); | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex) | ||
1077 | { | ||
1078 | struct usba_ep *ep; | ||
1079 | |||
1080 | if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) | ||
1081 | return to_usba_ep(udc->gadget.ep0); | ||
1082 | |||
1083 | list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) { | ||
1084 | u8 bEndpointAddress; | ||
1085 | |||
1086 | if (!ep->desc) | ||
1087 | continue; | ||
1088 | bEndpointAddress = ep->desc->bEndpointAddress; | ||
1089 | if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) | ||
1090 | continue; | ||
1091 | if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) | ||
1092 | == (wIndex & USB_ENDPOINT_NUMBER_MASK)) | ||
1093 | return ep; | ||
1094 | } | ||
1095 | |||
1096 | return NULL; | ||
1097 | } | ||
1098 | |||
1099 | /* Called with interrupts disabled and udc->lock held */ | ||
1100 | static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep) | ||
1101 | { | ||
1102 | usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); | ||
1103 | ep->state = WAIT_FOR_SETUP; | ||
1104 | } | ||
1105 | |||
1106 | static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep) | ||
1107 | { | ||
1108 | if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL) | ||
1109 | return 1; | ||
1110 | return 0; | ||
1111 | } | ||
1112 | |||
1113 | static inline void set_address(struct usba_udc *udc, unsigned int addr) | ||
1114 | { | ||
1115 | u32 regval; | ||
1116 | |||
1117 | DBG(DBG_BUS, "setting address %u...\n", addr); | ||
1118 | regval = usba_readl(udc, CTRL); | ||
1119 | regval = USBA_BFINS(DEV_ADDR, addr, regval); | ||
1120 | usba_writel(udc, CTRL, regval); | ||
1121 | } | ||
1122 | |||
1123 | static int do_test_mode(struct usba_udc *udc) | ||
1124 | { | ||
1125 | static const char test_packet_buffer[] = { | ||
1126 | /* JKJKJKJK * 9 */ | ||
1127 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
1128 | /* JJKKJJKK * 8 */ | ||
1129 | 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, | ||
1130 | /* JJJJKKKK * 8 */ | ||
1131 | 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, | ||
1132 | /* JJJJJJJKKKKKKK * 8 */ | ||
1133 | 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
1134 | 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, | ||
1135 | /* JJJJJJJK * 8 */ | ||
1136 | 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, | ||
1137 | /* {JKKKKKKK * 10}, JK */ | ||
1138 | 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E | ||
1139 | }; | ||
1140 | struct usba_ep *ep; | ||
1141 | struct device *dev = &udc->pdev->dev; | ||
1142 | int test_mode; | ||
1143 | |||
1144 | test_mode = udc->test_mode; | ||
1145 | |||
1146 | /* Start from a clean slate */ | ||
1147 | reset_all_endpoints(udc); | ||
1148 | |||
1149 | switch (test_mode) { | ||
1150 | case 0x0100: | ||
1151 | /* Test_J */ | ||
1152 | usba_writel(udc, TST, USBA_TST_J_MODE); | ||
1153 | dev_info(dev, "Entering Test_J mode...\n"); | ||
1154 | break; | ||
1155 | case 0x0200: | ||
1156 | /* Test_K */ | ||
1157 | usba_writel(udc, TST, USBA_TST_K_MODE); | ||
1158 | dev_info(dev, "Entering Test_K mode...\n"); | ||
1159 | break; | ||
1160 | case 0x0300: | ||
1161 | /* | ||
1162 | * Test_SE0_NAK: Force high-speed mode and set up ep0 | ||
1163 | * for Bulk IN transfers | ||
1164 | */ | ||
1165 | ep = &usba_ep[0]; | ||
1166 | usba_writel(udc, TST, | ||
1167 | USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH)); | ||
1168 | usba_ep_writel(ep, CFG, | ||
1169 | USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64) | ||
1170 | | USBA_EPT_DIR_IN | ||
1171 | | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK) | ||
1172 | | USBA_BF(BK_NUMBER, 1)); | ||
1173 | if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) { | ||
1174 | set_protocol_stall(udc, ep); | ||
1175 | dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n"); | ||
1176 | } else { | ||
1177 | usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); | ||
1178 | dev_info(dev, "Entering Test_SE0_NAK mode...\n"); | ||
1179 | } | ||
1180 | break; | ||
1181 | case 0x0400: | ||
1182 | /* Test_Packet */ | ||
1183 | ep = &usba_ep[0]; | ||
1184 | usba_ep_writel(ep, CFG, | ||
1185 | USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64) | ||
1186 | | USBA_EPT_DIR_IN | ||
1187 | | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK) | ||
1188 | | USBA_BF(BK_NUMBER, 1)); | ||
1189 | if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) { | ||
1190 | set_protocol_stall(udc, ep); | ||
1191 | dev_err(dev, "Test_Packet: ep0 not mapped\n"); | ||
1192 | } else { | ||
1193 | usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); | ||
1194 | usba_writel(udc, TST, USBA_TST_PKT_MODE); | ||
1195 | copy_to_fifo(ep->fifo, test_packet_buffer, | ||
1196 | sizeof(test_packet_buffer)); | ||
1197 | usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); | ||
1198 | dev_info(dev, "Entering Test_Packet mode...\n"); | ||
1199 | } | ||
1200 | break; | ||
1201 | default: | ||
1202 | dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode); | ||
1203 | return -EINVAL; | ||
1204 | } | ||
1205 | |||
1206 | return 0; | ||
1207 | } | ||
1208 | |||
1209 | /* Avoid overly long expressions */ | ||
1210 | static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq) | ||
1211 | { | ||
1212 | if (crq->wValue == __constant_cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP)) | ||
1213 | return true; | ||
1214 | return false; | ||
1215 | } | ||
1216 | |||
1217 | static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq) | ||
1218 | { | ||
1219 | if (crq->wValue == __constant_cpu_to_le16(USB_DEVICE_TEST_MODE)) | ||
1220 | return true; | ||
1221 | return false; | ||
1222 | } | ||
1223 | |||
1224 | static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq) | ||
1225 | { | ||
1226 | if (crq->wValue == __constant_cpu_to_le16(USB_ENDPOINT_HALT)) | ||
1227 | return true; | ||
1228 | return false; | ||
1229 | } | ||
1230 | |||
1231 | static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep, | ||
1232 | struct usb_ctrlrequest *crq) | ||
1233 | { | ||
1234 | int retval = 0; | ||
1235 | |||
1236 | switch (crq->bRequest) { | ||
1237 | case USB_REQ_GET_STATUS: { | ||
1238 | u16 status; | ||
1239 | |||
1240 | if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) { | ||
1241 | /* Self-powered, no remote wakeup */ | ||
1242 | status = __constant_cpu_to_le16(1 << 0); | ||
1243 | } else if (crq->bRequestType | ||
1244 | == (USB_DIR_IN | USB_RECIP_INTERFACE)) { | ||
1245 | status = __constant_cpu_to_le16(0); | ||
1246 | } else if (crq->bRequestType | ||
1247 | == (USB_DIR_IN | USB_RECIP_ENDPOINT)) { | ||
1248 | struct usba_ep *target; | ||
1249 | |||
1250 | target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex)); | ||
1251 | if (!target) | ||
1252 | goto stall; | ||
1253 | |||
1254 | status = 0; | ||
1255 | if (is_stalled(udc, target)) | ||
1256 | status |= __constant_cpu_to_le16(1); | ||
1257 | } else | ||
1258 | goto delegate; | ||
1259 | |||
1260 | /* Write directly to the FIFO. No queueing is done. */ | ||
1261 | if (crq->wLength != __constant_cpu_to_le16(sizeof(status))) | ||
1262 | goto stall; | ||
1263 | ep->state = DATA_STAGE_IN; | ||
1264 | __raw_writew(status, ep->fifo); | ||
1265 | usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); | ||
1266 | break; | ||
1267 | } | ||
1268 | |||
1269 | case USB_REQ_CLEAR_FEATURE: { | ||
1270 | if (crq->bRequestType == USB_RECIP_DEVICE) { | ||
1271 | if (feature_is_dev_remote_wakeup(crq)) { | ||
1272 | /* TODO: Handle REMOTE_WAKEUP */ | ||
1273 | } else { | ||
1274 | /* Can't CLEAR_FEATURE TEST_MODE */ | ||
1275 | goto stall; | ||
1276 | } | ||
1277 | } else if (crq->bRequestType == USB_RECIP_ENDPOINT) { | ||
1278 | struct usba_ep *target; | ||
1279 | |||
1280 | if (crq->wLength != __constant_cpu_to_le16(0) | ||
1281 | || !feature_is_ep_halt(crq)) | ||
1282 | goto stall; | ||
1283 | target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex)); | ||
1284 | if (!target) | ||
1285 | goto stall; | ||
1286 | |||
1287 | usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL); | ||
1288 | if (target->index != 0) | ||
1289 | usba_ep_writel(target, CLR_STA, | ||
1290 | USBA_TOGGLE_CLR); | ||
1291 | } else { | ||
1292 | goto delegate; | ||
1293 | } | ||
1294 | |||
1295 | send_status(udc, ep); | ||
1296 | break; | ||
1297 | } | ||
1298 | |||
1299 | case USB_REQ_SET_FEATURE: { | ||
1300 | if (crq->bRequestType == USB_RECIP_DEVICE) { | ||
1301 | if (feature_is_dev_test_mode(crq)) { | ||
1302 | send_status(udc, ep); | ||
1303 | ep->state = STATUS_STAGE_TEST; | ||
1304 | udc->test_mode = le16_to_cpu(crq->wIndex); | ||
1305 | return 0; | ||
1306 | } else if (feature_is_dev_remote_wakeup(crq)) { | ||
1307 | /* TODO: Handle REMOTE_WAKEUP */ | ||
1308 | } else { | ||
1309 | goto stall; | ||
1310 | } | ||
1311 | } else if (crq->bRequestType == USB_RECIP_ENDPOINT) { | ||
1312 | struct usba_ep *target; | ||
1313 | |||
1314 | if (crq->wLength != __constant_cpu_to_le16(0) | ||
1315 | || !feature_is_ep_halt(crq)) | ||
1316 | goto stall; | ||
1317 | |||
1318 | target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex)); | ||
1319 | if (!target) | ||
1320 | goto stall; | ||
1321 | |||
1322 | usba_ep_writel(target, SET_STA, USBA_FORCE_STALL); | ||
1323 | } else | ||
1324 | goto delegate; | ||
1325 | |||
1326 | send_status(udc, ep); | ||
1327 | break; | ||
1328 | } | ||
1329 | |||
1330 | case USB_REQ_SET_ADDRESS: | ||
1331 | if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) | ||
1332 | goto delegate; | ||
1333 | |||
1334 | set_address(udc, le16_to_cpu(crq->wValue)); | ||
1335 | send_status(udc, ep); | ||
1336 | ep->state = STATUS_STAGE_ADDR; | ||
1337 | break; | ||
1338 | |||
1339 | default: | ||
1340 | delegate: | ||
1341 | spin_unlock(&udc->lock); | ||
1342 | retval = udc->driver->setup(&udc->gadget, crq); | ||
1343 | spin_lock(&udc->lock); | ||
1344 | } | ||
1345 | |||
1346 | return retval; | ||
1347 | |||
1348 | stall: | ||
1349 | printk(KERN_ERR | ||
1350 | "udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, " | ||
1351 | "halting endpoint...\n", | ||
1352 | ep->ep.name, crq->bRequestType, crq->bRequest, | ||
1353 | le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex), | ||
1354 | le16_to_cpu(crq->wLength)); | ||
1355 | set_protocol_stall(udc, ep); | ||
1356 | return -1; | ||
1357 | } | ||
1358 | |||
1359 | static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep) | ||
1360 | { | ||
1361 | struct usba_request *req; | ||
1362 | u32 epstatus; | ||
1363 | u32 epctrl; | ||
1364 | |||
1365 | restart: | ||
1366 | epstatus = usba_ep_readl(ep, STA); | ||
1367 | epctrl = usba_ep_readl(ep, CTL); | ||
1368 | |||
1369 | DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n", | ||
1370 | ep->ep.name, ep->state, epstatus, epctrl); | ||
1371 | |||
1372 | req = NULL; | ||
1373 | if (!list_empty(&ep->queue)) | ||
1374 | req = list_entry(ep->queue.next, | ||
1375 | struct usba_request, queue); | ||
1376 | |||
1377 | if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) { | ||
1378 | if (req->submitted) | ||
1379 | next_fifo_transaction(ep, req); | ||
1380 | else | ||
1381 | submit_request(ep, req); | ||
1382 | |||
1383 | if (req->last_transaction) { | ||
1384 | usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); | ||
1385 | usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); | ||
1386 | } | ||
1387 | goto restart; | ||
1388 | } | ||
1389 | if ((epstatus & epctrl) & USBA_TX_COMPLETE) { | ||
1390 | usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE); | ||
1391 | |||
1392 | switch (ep->state) { | ||
1393 | case DATA_STAGE_IN: | ||
1394 | usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY); | ||
1395 | usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); | ||
1396 | ep->state = STATUS_STAGE_OUT; | ||
1397 | break; | ||
1398 | case STATUS_STAGE_ADDR: | ||
1399 | /* Activate our new address */ | ||
1400 | usba_writel(udc, CTRL, (usba_readl(udc, CTRL) | ||
1401 | | USBA_FADDR_EN)); | ||
1402 | usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); | ||
1403 | ep->state = WAIT_FOR_SETUP; | ||
1404 | break; | ||
1405 | case STATUS_STAGE_IN: | ||
1406 | if (req) { | ||
1407 | list_del_init(&req->queue); | ||
1408 | request_complete(ep, req, 0); | ||
1409 | submit_next_request(ep); | ||
1410 | } | ||
1411 | usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); | ||
1412 | ep->state = WAIT_FOR_SETUP; | ||
1413 | break; | ||
1414 | case STATUS_STAGE_TEST: | ||
1415 | usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); | ||
1416 | ep->state = WAIT_FOR_SETUP; | ||
1417 | if (do_test_mode(udc)) | ||
1418 | set_protocol_stall(udc, ep); | ||
1419 | break; | ||
1420 | default: | ||
1421 | printk(KERN_ERR | ||
1422 | "udc: %s: TXCOMP: Invalid endpoint state %d, " | ||
1423 | "halting endpoint...\n", | ||
1424 | ep->ep.name, ep->state); | ||
1425 | set_protocol_stall(udc, ep); | ||
1426 | break; | ||
1427 | } | ||
1428 | |||
1429 | goto restart; | ||
1430 | } | ||
1431 | if ((epstatus & epctrl) & USBA_RX_BK_RDY) { | ||
1432 | switch (ep->state) { | ||
1433 | case STATUS_STAGE_OUT: | ||
1434 | usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); | ||
1435 | usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); | ||
1436 | |||
1437 | if (req) { | ||
1438 | list_del_init(&req->queue); | ||
1439 | request_complete(ep, req, 0); | ||
1440 | } | ||
1441 | ep->state = WAIT_FOR_SETUP; | ||
1442 | break; | ||
1443 | |||
1444 | case DATA_STAGE_OUT: | ||
1445 | receive_data(ep); | ||
1446 | break; | ||
1447 | |||
1448 | default: | ||
1449 | usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); | ||
1450 | usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); | ||
1451 | printk(KERN_ERR | ||
1452 | "udc: %s: RXRDY: Invalid endpoint state %d, " | ||
1453 | "halting endpoint...\n", | ||
1454 | ep->ep.name, ep->state); | ||
1455 | set_protocol_stall(udc, ep); | ||
1456 | break; | ||
1457 | } | ||
1458 | |||
1459 | goto restart; | ||
1460 | } | ||
1461 | if (epstatus & USBA_RX_SETUP) { | ||
1462 | union { | ||
1463 | struct usb_ctrlrequest crq; | ||
1464 | unsigned long data[2]; | ||
1465 | } crq; | ||
1466 | unsigned int pkt_len; | ||
1467 | int ret; | ||
1468 | |||
1469 | if (ep->state != WAIT_FOR_SETUP) { | ||
1470 | /* | ||
1471 | * Didn't expect a SETUP packet at this | ||
1472 | * point. Clean up any pending requests (which | ||
1473 | * may be successful). | ||
1474 | */ | ||
1475 | int status = -EPROTO; | ||
1476 | |||
1477 | /* | ||
1478 | * RXRDY and TXCOMP are dropped when SETUP | ||
1479 | * packets arrive. Just pretend we received | ||
1480 | * the status packet. | ||
1481 | */ | ||
1482 | if (ep->state == STATUS_STAGE_OUT | ||
1483 | || ep->state == STATUS_STAGE_IN) { | ||
1484 | usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); | ||
1485 | status = 0; | ||
1486 | } | ||
1487 | |||
1488 | if (req) { | ||
1489 | list_del_init(&req->queue); | ||
1490 | request_complete(ep, req, status); | ||
1491 | } | ||
1492 | } | ||
1493 | |||
1494 | pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); | ||
1495 | DBG(DBG_HW, "Packet length: %u\n", pkt_len); | ||
1496 | if (pkt_len != sizeof(crq)) { | ||
1497 | printk(KERN_WARNING "udc: Invalid packet length %u " | ||
1498 | "(expected %lu)\n", pkt_len, sizeof(crq)); | ||
1499 | set_protocol_stall(udc, ep); | ||
1500 | return; | ||
1501 | } | ||
1502 | |||
1503 | DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo); | ||
1504 | copy_from_fifo(crq.data, ep->fifo, sizeof(crq)); | ||
1505 | |||
1506 | /* Free up one bank in the FIFO so that we can | ||
1507 | * generate or receive a reply right away. */ | ||
1508 | usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP); | ||
1509 | |||
1510 | /* printk(KERN_DEBUG "setup: %d: %02x.%02x\n", | ||
1511 | ep->state, crq.crq.bRequestType, | ||
1512 | crq.crq.bRequest); */ | ||
1513 | |||
1514 | if (crq.crq.bRequestType & USB_DIR_IN) { | ||
1515 | /* | ||
1516 | * The USB 2.0 spec states that "if wLength is | ||
1517 | * zero, there is no data transfer phase." | ||
1518 | * However, testusb #14 seems to actually | ||
1519 | * expect a data phase even if wLength = 0... | ||
1520 | */ | ||
1521 | ep->state = DATA_STAGE_IN; | ||
1522 | } else { | ||
1523 | if (crq.crq.wLength != __constant_cpu_to_le16(0)) | ||
1524 | ep->state = DATA_STAGE_OUT; | ||
1525 | else | ||
1526 | ep->state = STATUS_STAGE_IN; | ||
1527 | } | ||
1528 | |||
1529 | ret = -1; | ||
1530 | if (ep->index == 0) | ||
1531 | ret = handle_ep0_setup(udc, ep, &crq.crq); | ||
1532 | else { | ||
1533 | spin_unlock(&udc->lock); | ||
1534 | ret = udc->driver->setup(&udc->gadget, &crq.crq); | ||
1535 | spin_lock(&udc->lock); | ||
1536 | } | ||
1537 | |||
1538 | DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n", | ||
1539 | crq.crq.bRequestType, crq.crq.bRequest, | ||
1540 | le16_to_cpu(crq.crq.wLength), ep->state, ret); | ||
1541 | |||
1542 | if (ret < 0) { | ||
1543 | /* Let the host know that we failed */ | ||
1544 | set_protocol_stall(udc, ep); | ||
1545 | } | ||
1546 | } | ||
1547 | } | ||
1548 | |||
1549 | static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep) | ||
1550 | { | ||
1551 | struct usba_request *req; | ||
1552 | u32 epstatus; | ||
1553 | u32 epctrl; | ||
1554 | |||
1555 | epstatus = usba_ep_readl(ep, STA); | ||
1556 | epctrl = usba_ep_readl(ep, CTL); | ||
1557 | |||
1558 | DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus); | ||
1559 | |||
1560 | while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) { | ||
1561 | DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name); | ||
1562 | |||
1563 | if (list_empty(&ep->queue)) { | ||
1564 | dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n"); | ||
1565 | usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); | ||
1566 | return; | ||
1567 | } | ||
1568 | |||
1569 | req = list_entry(ep->queue.next, struct usba_request, queue); | ||
1570 | |||
1571 | if (req->using_dma) { | ||
1572 | /* Send a zero-length packet */ | ||
1573 | usba_ep_writel(ep, SET_STA, | ||
1574 | USBA_TX_PK_RDY); | ||
1575 | usba_ep_writel(ep, CTL_DIS, | ||
1576 | USBA_TX_PK_RDY); | ||
1577 | list_del_init(&req->queue); | ||
1578 | submit_next_request(ep); | ||
1579 | request_complete(ep, req, 0); | ||
1580 | } else { | ||
1581 | if (req->submitted) | ||
1582 | next_fifo_transaction(ep, req); | ||
1583 | else | ||
1584 | submit_request(ep, req); | ||
1585 | |||
1586 | if (req->last_transaction) { | ||
1587 | list_del_init(&req->queue); | ||
1588 | submit_next_request(ep); | ||
1589 | request_complete(ep, req, 0); | ||
1590 | } | ||
1591 | } | ||
1592 | |||
1593 | epstatus = usba_ep_readl(ep, STA); | ||
1594 | epctrl = usba_ep_readl(ep, CTL); | ||
1595 | } | ||
1596 | if ((epstatus & epctrl) & USBA_RX_BK_RDY) { | ||
1597 | DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name); | ||
1598 | receive_data(ep); | ||
1599 | usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); | ||
1600 | } | ||
1601 | } | ||
1602 | |||
1603 | static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep) | ||
1604 | { | ||
1605 | struct usba_request *req; | ||
1606 | u32 status, control, pending; | ||
1607 | |||
1608 | status = usba_dma_readl(ep, STATUS); | ||
1609 | control = usba_dma_readl(ep, CONTROL); | ||
1610 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | ||
1611 | ep->last_dma_status = status; | ||
1612 | #endif | ||
1613 | pending = status & control; | ||
1614 | DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control); | ||
1615 | |||
1616 | if (status & USBA_DMA_CH_EN) { | ||
1617 | dev_err(&udc->pdev->dev, | ||
1618 | "DMA_CH_EN is set after transfer is finished!\n"); | ||
1619 | dev_err(&udc->pdev->dev, | ||
1620 | "status=%#08x, pending=%#08x, control=%#08x\n", | ||
1621 | status, pending, control); | ||
1622 | |||
1623 | /* | ||
1624 | * try to pretend nothing happened. We might have to | ||
1625 | * do something here... | ||
1626 | */ | ||
1627 | } | ||
1628 | |||
1629 | if (list_empty(&ep->queue)) | ||
1630 | /* Might happen if a reset comes along at the right moment */ | ||
1631 | return; | ||
1632 | |||
1633 | if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) { | ||
1634 | req = list_entry(ep->queue.next, struct usba_request, queue); | ||
1635 | usba_update_req(ep, req, status); | ||
1636 | |||
1637 | list_del_init(&req->queue); | ||
1638 | submit_next_request(ep); | ||
1639 | request_complete(ep, req, 0); | ||
1640 | } | ||
1641 | } | ||
1642 | |||
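/*
 * Top-level device interrupt handler.  Bus events (suspend, wakeup, end
 * of resume, end of reset) are handled first, with the lock dropped
 * around the gadget driver's suspend()/resume() callbacks.  The DMA and
 * endpoint interrupt bitfields are then decoded and dispatched to
 * usba_dma_irq(), usba_control_irq() or usba_ep_irq().  After an
 * end-of-reset event the bus speed is latched and endpoint 0 is
 * reconfigured as a control endpoint and re-enabled.
 */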
1643 | static irqreturn_t usba_udc_irq(int irq, void *devid) | ||
1644 | { | ||
1645 | struct usba_udc *udc = devid; | ||
1646 | u32 status; | ||
1647 | u32 dma_status; | ||
1648 | u32 ep_status; | ||
1649 | |||
1650 | spin_lock(&udc->lock); | ||
1651 | |||
1652 | status = usba_readl(udc, INT_STA); | ||
1653 | DBG(DBG_INT, "irq, status=%#08x\n", status); | ||
1654 | |||
1655 | if (status & USBA_DET_SUSPEND) { | ||
1656 | usba_writel(udc, INT_CLR, USBA_DET_SUSPEND); | ||
1657 | DBG(DBG_BUS, "Suspend detected\n"); | ||
1658 | if (udc->gadget.speed != USB_SPEED_UNKNOWN | ||
1659 | && udc->driver && udc->driver->suspend) { | ||
1660 | spin_unlock(&udc->lock); | ||
1661 | udc->driver->suspend(&udc->gadget); | ||
1662 | spin_lock(&udc->lock); | ||
1663 | } | ||
1664 | } | ||
1665 | |||
1666 | if (status & USBA_WAKE_UP) { | ||
1667 | usba_writel(udc, INT_CLR, USBA_WAKE_UP); | ||
1668 | DBG(DBG_BUS, "Wake Up CPU detected\n"); | ||
1669 | } | ||
1670 | |||
1671 | if (status & USBA_END_OF_RESUME) { | ||
1672 | usba_writel(udc, INT_CLR, USBA_END_OF_RESUME); | ||
1673 | DBG(DBG_BUS, "Resume detected\n"); | ||
1674 | if (udc->gadget.speed != USB_SPEED_UNKNOWN | ||
1675 | && udc->driver && udc->driver->resume) { | ||
1676 | spin_unlock(&udc->lock); | ||
1677 | udc->driver->resume(&udc->gadget); | ||
1678 | spin_lock(&udc->lock); | ||
1679 | } | ||
1680 | } | ||
1681 | |||
1682 | dma_status = USBA_BFEXT(DMA_INT, status); | ||
1683 | if (dma_status) { | ||
1684 | int i; | ||
1685 | |||
1686 | for (i = 1; i < USBA_NR_ENDPOINTS; i++) | ||
1687 | if (dma_status & (1 << i)) | ||
1688 | usba_dma_irq(udc, &usba_ep[i]); | ||
1689 | } | ||
1690 | |||
1691 | ep_status = USBA_BFEXT(EPT_INT, status); | ||
1692 | if (ep_status) { | ||
1693 | int i; | ||
1694 | |||
1695 | for (i = 0; i < USBA_NR_ENDPOINTS; i++) | ||
1696 | if (ep_status & (1 << i)) { | ||
1697 | if (ep_is_control(&usba_ep[i])) | ||
1698 | usba_control_irq(udc, &usba_ep[i]); | ||
1699 | else | ||
1700 | usba_ep_irq(udc, &usba_ep[i]); | ||
1701 | } | ||
1702 | } | ||
1703 | |||
1704 | if (status & USBA_END_OF_RESET) { | ||
1705 | struct usba_ep *ep0; | ||
1706 | |||
1707 | usba_writel(udc, INT_CLR, USBA_END_OF_RESET); | ||
1708 | reset_all_endpoints(udc); | ||
1709 | |||
1710 | if (status & USBA_HIGH_SPEED) { | ||
1711 | DBG(DBG_BUS, "High-speed bus reset detected\n"); | ||
1712 | udc->gadget.speed = USB_SPEED_HIGH; | ||
1713 | } else { | ||
1714 | DBG(DBG_BUS, "Full-speed bus reset detected\n"); | ||
1715 | udc->gadget.speed = USB_SPEED_FULL; | ||
1716 | } | ||
1717 | |||
1718 | ep0 = &usba_ep[0]; | ||
1719 | ep0->desc = &usba_ep0_desc; | ||
1720 | ep0->state = WAIT_FOR_SETUP; | ||
1721 | usba_ep_writel(ep0, CFG, | ||
1722 | (USBA_BF(EPT_SIZE, EP0_EPT_SIZE) | ||
1723 | | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL) | ||
1724 | | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE))); | ||
1725 | usba_ep_writel(ep0, CTL_ENB, | ||
1726 | USBA_EPT_ENABLE | USBA_RX_SETUP); | ||
1727 | usba_writel(udc, INT_ENB, | ||
1728 | (usba_readl(udc, INT_ENB) | ||
1729 | | USBA_BF(EPT_INT, 1) | ||
1730 | | USBA_DET_SUSPEND | ||
1731 | | USBA_END_OF_RESUME)); | ||
1732 | |||
1733 | if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED)) | ||
1734 | dev_warn(&udc->pdev->dev, | ||
1735 | "WARNING: EP0 configuration is invalid!\n"); | ||
1736 | } | ||
1737 | |||
1738 | spin_unlock(&udc->lock); | ||
1739 | |||
1740 | return IRQ_HANDLED; | ||
1741 | } | ||
1742 | |||
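/*
 * Vbus sense GPIO interrupt.  After a short debounce delay, a rising
 * Vbus enables the controller and arms the end-of-reset interrupt; a
 * falling Vbus resets all endpoints, disables the controller and calls
 * the gadget driver's disconnect() callback with the lock dropped.
 */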
1743 | static irqreturn_t usba_vbus_irq(int irq, void *devid) | ||
1744 | { | ||
1745 | struct usba_udc *udc = devid; | ||
1746 | int vbus; | ||
1747 | |||
1748 | /* debounce */ | ||
1749 | udelay(10); | ||
1750 | |||
1751 | spin_lock(&udc->lock); | ||
1752 | |||
1753 | /* May happen if Vbus pin toggles during probe() */ | ||
1754 | if (!udc->driver) | ||
1755 | goto out; | ||
1756 | |||
1757 | vbus = gpio_get_value(udc->vbus_pin); | ||
1758 | if (vbus != udc->vbus_prev) { | ||
1759 | if (vbus) { | ||
1760 | usba_writel(udc, CTRL, USBA_EN_USBA); | ||
1761 | usba_writel(udc, INT_ENB, USBA_END_OF_RESET); | ||
1762 | } else { | ||
1763 | udc->gadget.speed = USB_SPEED_UNKNOWN; | ||
1764 | reset_all_endpoints(udc); | ||
1765 | usba_writel(udc, CTRL, 0); | ||
1766 | spin_unlock(&udc->lock); | ||
1767 | udc->driver->disconnect(&udc->gadget); | ||
1768 | spin_lock(&udc->lock); | ||
1769 | } | ||
1770 | udc->vbus_prev = vbus; | ||
1771 | } | ||
1772 | |||
1773 | out: | ||
1774 | spin_unlock(&udc->lock); | ||
1775 | |||
1776 | return IRQ_HANDLED; | ||
1777 | } | ||
1778 | |||
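/*
 * Gadget driver registration.  This UDC supports a single gadget driver
 * at a time; the peripheral clocks are only enabled while a driver is
 * bound, and the Vbus GPIO interrupt (if any) is enabled once bind()
 * has succeeded so that connect/disconnect events reach the driver.
 */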
1779 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | ||
1780 | { | ||
1781 | struct usba_udc *udc = &the_udc; | ||
1782 | unsigned long flags; | ||
1783 | int ret; | ||
1784 | |||
1785 | if (!udc->pdev) | ||
1786 | return -ENODEV; | ||
1787 | |||
1788 | spin_lock_irqsave(&udc->lock, flags); | ||
1789 | if (udc->driver) { | ||
1790 | spin_unlock_irqrestore(&udc->lock, flags); | ||
1791 | return -EBUSY; | ||
1792 | } | ||
1793 | |||
1794 | udc->driver = driver; | ||
1795 | udc->gadget.dev.driver = &driver->driver; | ||
1796 | spin_unlock_irqrestore(&udc->lock, flags); | ||
1797 | |||
1798 | clk_enable(udc->pclk); | ||
1799 | clk_enable(udc->hclk); | ||
1800 | |||
1801 | ret = driver->bind(&udc->gadget); | ||
1802 | if (ret) { | ||
1803 | DBG(DBG_ERR, "Could not bind to driver %s: error %d\n", | ||
1804 | driver->driver.name, ret); | ||
1805 | goto err_driver_bind; | ||
1806 | } | ||
1807 | |||
1808 | DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name); | ||
1809 | |||
1810 | udc->vbus_prev = 0; | ||
1811 | if (udc->vbus_pin != -1) | ||
1812 | enable_irq(gpio_to_irq(udc->vbus_pin)); | ||
1813 | |||
1814 | /* If Vbus is present, enable the controller and wait for reset */ | ||
1815 | spin_lock_irqsave(&udc->lock, flags); | ||
1816 | if (vbus_is_present(udc) && udc->vbus_prev == 0) { | ||
1817 | usba_writel(udc, CTRL, USBA_EN_USBA); | ||
1818 | usba_writel(udc, INT_ENB, USBA_END_OF_RESET); | ||
1819 | } | ||
1820 | spin_unlock_irqrestore(&udc->lock, flags); | ||
1821 | |||
1822 | return 0; | ||
1823 | |||
1824 | err_driver_bind: | ||
1825 | udc->driver = NULL; | ||
1826 | udc->gadget.dev.driver = NULL; | ||
1827 | return ret; | ||
1828 | } | ||
1829 | EXPORT_SYMBOL(usb_gadget_register_driver); | ||
1830 | |||
1831 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | ||
1832 | { | ||
1833 | struct usba_udc *udc = &the_udc; | ||
1834 | unsigned long flags; | ||
1835 | |||
1836 | if (!udc->pdev) | ||
1837 | return -ENODEV; | ||
1838 | if (driver != udc->driver) | ||
1839 | return -EINVAL; | ||
1840 | |||
1841 | if (udc->vbus_pin != -1) | ||
1842 | disable_irq(gpio_to_irq(udc->vbus_pin)); | ||
1843 | |||
1844 | spin_lock_irqsave(&udc->lock, flags); | ||
1845 | udc->gadget.speed = USB_SPEED_UNKNOWN; | ||
1846 | reset_all_endpoints(udc); | ||
1847 | spin_unlock_irqrestore(&udc->lock, flags); | ||
1848 | |||
1849 | /* This will also disable the DP pullup */ | ||
1850 | usba_writel(udc, CTRL, 0); | ||
1851 | |||
1852 | driver->unbind(&udc->gadget); | ||
1853 | udc->gadget.dev.driver = NULL; | ||
1854 | udc->driver = NULL; | ||
1855 | |||
1856 | clk_disable(udc->hclk); | ||
1857 | clk_disable(udc->pclk); | ||
1858 | |||
1859 | DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name); | ||
1860 | |||
1861 | return 0; | ||
1862 | } | ||
1863 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | ||
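
/*
 * A minimal sketch of how a gadget (function) driver would attach to
 * this UDC through the usb_gadget_register_driver() /
 * usb_gadget_unregister_driver() pair above.  The my_gadget_* names are
 * placeholders and the callbacks are stubs; a real driver also needs
 * <linux/module.h>, <linux/init.h>, <linux/usb/ch9.h> and
 * <linux/usb/gadget.h>, and must implement real setup() handling.
 */
static int my_gadget_bind(struct usb_gadget *gadget)
{
	/* claim endpoints from gadget->ep_list, allocate requests, ... */
	return 0;
}

static void my_gadget_unbind(struct usb_gadget *gadget)
{
	/* release everything acquired in bind() */
}

static int my_gadget_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* handle the control requests the UDC delegates to the driver;
	 * a negative return makes the UDC stall ep0 */
	return -EOPNOTSUPP;
}

static void my_gadget_disconnect(struct usb_gadget *gadget)
{
	/* the host or Vbus went away; quiesce the function */
}

static struct usb_gadget_driver my_gadget_driver = {
	.function	= "my gadget",
	.speed		= USB_SPEED_HIGH,
	.bind		= my_gadget_bind,
	.unbind		= my_gadget_unbind,
	.setup		= my_gadget_setup,
	.disconnect	= my_gadget_disconnect,
	.driver		= {
		.name	= "my_gadget",
		.owner	= THIS_MODULE,
	},
};

static int __init my_gadget_init(void)
{
	return usb_gadget_register_driver(&my_gadget_driver);
}
module_init(my_gadget_init);

static void __exit my_gadget_exit(void)
{
	usb_gadget_unregister_driver(&my_gadget_driver);
}
module_exit(my_gadget_exit);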
1864 | |||
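/*
 * Platform device probe.  Maps the control registers and FIFO window,
 * takes references to the "pclk" and "hclk" clocks, points each
 * endpoint at its register, DMA and FIFO regions, requests the device
 * interrupt, adds the gadget device, optionally claims the Vbus GPIO
 * and its interrupt (left disabled until a gadget driver binds), and
 * creates the debugfs files.
 */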
1865 | static int __init usba_udc_probe(struct platform_device *pdev) | ||
1866 | { | ||
1867 | struct usba_platform_data *pdata = pdev->dev.platform_data; | ||
1868 | struct resource *regs, *fifo; | ||
1869 | struct clk *pclk, *hclk; | ||
1870 | struct usba_udc *udc = &the_udc; | ||
1871 | int irq, ret, i; | ||
1872 | |||
1873 | regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID); | ||
1874 | fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID); | ||
1875 | if (!regs || !fifo) | ||
1876 | return -ENXIO; | ||
1877 | |||
1878 | irq = platform_get_irq(pdev, 0); | ||
1879 | if (irq < 0) | ||
1880 | return irq; | ||
1881 | |||
1882 | pclk = clk_get(&pdev->dev, "pclk"); | ||
1883 | if (IS_ERR(pclk)) | ||
1884 | return PTR_ERR(pclk); | ||
1885 | hclk = clk_get(&pdev->dev, "hclk"); | ||
1886 | if (IS_ERR(hclk)) { | ||
1887 | ret = PTR_ERR(hclk); | ||
1888 | goto err_get_hclk; | ||
1889 | } | ||
1890 | |||
1891 | udc->pdev = pdev; | ||
1892 | udc->pclk = pclk; | ||
1893 | udc->hclk = hclk; | ||
1894 | udc->vbus_pin = -1; | ||
1895 | |||
1896 | ret = -ENOMEM; | ||
1897 | udc->regs = ioremap(regs->start, regs->end - regs->start + 1); | ||
1898 | if (!udc->regs) { | ||
1899 | dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n"); | ||
1900 | goto err_map_regs; | ||
1901 | } | ||
1902 | dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n", | ||
1903 | (unsigned long)regs->start, udc->regs); | ||
1904 | udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1); | ||
1905 | if (!udc->fifo) { | ||
1906 | dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n"); | ||
1907 | goto err_map_fifo; | ||
1908 | } | ||
1909 | dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n", | ||
1910 | (unsigned long)fifo->start, udc->fifo); | ||
1911 | |||
1912 | device_initialize(&udc->gadget.dev); | ||
1913 | udc->gadget.dev.parent = &pdev->dev; | ||
1914 | udc->gadget.dev.dma_mask = pdev->dev.dma_mask; | ||
1915 | |||
1916 | platform_set_drvdata(pdev, udc); | ||
1917 | |||
1918 | /* Make sure we start from a clean slate */ | ||
1919 | clk_enable(pclk); | ||
1920 | usba_writel(udc, CTRL, 0); | ||
1921 | clk_disable(pclk); | ||
1922 | |||
1923 | INIT_LIST_HEAD(&usba_ep[0].ep.ep_list); | ||
1924 | usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0); | ||
1925 | usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0); | ||
1926 | usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0); | ||
1927 | for (i = 1; i < ARRAY_SIZE(usba_ep); i++) { | ||
1928 | struct usba_ep *ep = &usba_ep[i]; | ||
1929 | |||
1930 | ep->ep_regs = udc->regs + USBA_EPT_BASE(i); | ||
1931 | ep->dma_regs = udc->regs + USBA_DMA_BASE(i); | ||
1932 | ep->fifo = udc->fifo + USBA_FIFO_BASE(i); | ||
1933 | |||
1934 | list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); | ||
1935 | } | ||
1936 | |||
1937 | ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc); | ||
1938 | if (ret) { | ||
1939 | dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n", | ||
1940 | irq, ret); | ||
1941 | goto err_request_irq; | ||
1942 | } | ||
1943 | udc->irq = irq; | ||
1944 | |||
1945 | ret = device_add(&udc->gadget.dev); | ||
1946 | if (ret) { | ||
1947 | dev_dbg(&pdev->dev, "Could not add gadget: %d\n", ret); | ||
1948 | goto err_device_add; | ||
1949 | } | ||
1950 | |||
1951 | if (pdata && pdata->vbus_pin != GPIO_PIN_NONE) { | ||
1952 | if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) { | ||
1953 | udc->vbus_pin = pdata->vbus_pin; | ||
1954 | |||
1955 | ret = request_irq(gpio_to_irq(udc->vbus_pin), | ||
1956 | usba_vbus_irq, 0, | ||
1957 | "atmel_usba_udc", udc); | ||
1958 | if (ret) { | ||
1959 | gpio_free(udc->vbus_pin); | ||
1960 | udc->vbus_pin = -1; | ||
1961 | dev_warn(&udc->pdev->dev, | ||
1962 | "failed to request vbus irq; " | ||
1963 | "assuming always on\n"); | ||
1964 | } else { | ||
1965 | disable_irq(gpio_to_irq(udc->vbus_pin)); | ||
1966 | } | ||
1967 | } | ||
1968 | } | ||
1969 | |||
1970 | usba_init_debugfs(udc); | ||
1971 | for (i = 1; i < ARRAY_SIZE(usba_ep); i++) | ||
1972 | usba_ep_init_debugfs(udc, &usba_ep[i]); | ||
1973 | |||
1974 | return 0; | ||
1975 | |||
1976 | err_device_add: | ||
1977 | free_irq(irq, udc); | ||
1978 | err_request_irq: | ||
1979 | iounmap(udc->fifo); | ||
1980 | err_map_fifo: | ||
1981 | iounmap(udc->regs); | ||
1982 | err_map_regs: | ||
1983 | clk_put(hclk); | ||
1984 | err_get_hclk: | ||
1985 | clk_put(pclk); | ||
1986 | |||
1987 | platform_set_drvdata(pdev, NULL); | ||
1988 | |||
1989 | return ret; | ||
1990 | } | ||
1991 | |||
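/*
 * Tear down everything set up by usba_udc_probe(), in reverse order.
 */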
1992 | static int __exit usba_udc_remove(struct platform_device *pdev) | ||
1993 | { | ||
1994 | struct usba_udc *udc; | ||
1995 | int i; | ||
1996 | |||
1997 | udc = platform_get_drvdata(pdev); | ||
1998 | |||
1999 | for (i = 1; i < ARRAY_SIZE(usba_ep); i++) | ||
2000 | usba_ep_cleanup_debugfs(&usba_ep[i]); | ||
2001 | usba_cleanup_debugfs(udc); | ||
2002 | |||
2003 | if (udc->vbus_pin != -1) | ||
2004 | gpio_free(udc->vbus_pin); | ||
2005 | |||
2006 | free_irq(udc->irq, udc); | ||
2007 | iounmap(udc->fifo); | ||
2008 | iounmap(udc->regs); | ||
2009 | clk_put(udc->hclk); | ||
2010 | clk_put(udc->pclk); | ||
2011 | |||
2012 | device_unregister(&udc->gadget.dev); | ||
2013 | |||
2014 | return 0; | ||
2015 | } | ||
2016 | |||
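/*
 * No .probe method here: the driver is registered through
 * platform_driver_probe() below, which lets usba_udc_probe() stay in
 * the __init section and be discarded once the kernel has booted.
 */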
2017 | static struct platform_driver udc_driver = { | ||
2018 | .remove = __exit_p(usba_udc_remove), | ||
2019 | .driver = { | ||
2020 | .name = "atmel_usba_udc", | ||
2021 | }, | ||
2022 | }; | ||
2023 | |||
2024 | static int __init udc_init(void) | ||
2025 | { | ||
2026 | return platform_driver_probe(&udc_driver, usba_udc_probe); | ||
2027 | } | ||
2028 | module_init(udc_init); | ||
2029 | |||
2030 | static void __exit udc_exit(void) | ||
2031 | { | ||
2032 | platform_driver_unregister(&udc_driver); | ||
2033 | } | ||
2034 | module_exit(udc_exit); | ||
2035 | |||
2036 | MODULE_DESCRIPTION("Atmel USBA UDC driver"); | ||
2037 | MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); | ||
2038 | MODULE_LICENSE("GPL"); | ||
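
/*
 * Board code is expected to register a platform device named
 * "atmel_usba_udc" carrying two memory resources (the control registers
 * and the FIFO window, at the indices the driver probes as CTRL_IOMEM_ID
 * and FIFO_IOMEM_ID), one interrupt resource, and a struct
 * usba_platform_data whose vbus_pin member names the Vbus sense GPIO,
 * or GPIO_PIN_NONE when Vbus is treated as always present.  A rough
 * sketch only: the addresses, IRQ number and masks below are
 * placeholders, not values taken from this commit or a data sheet.
 */
static u64 usba_dma_mask = 0xffffffff;

static struct usba_platform_data usba_data = {
	.vbus_pin	= GPIO_PIN_NONE,	/* or the board's Vbus GPIO */
};

static struct resource usba_resources[] = {
	{
		.start	= 0xff300000,		/* placeholder: register base */
		.end	= 0xff300000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0xff200000,		/* placeholder: FIFO base */
		.end	= 0xff200000 + 0x100000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 31,			/* placeholder IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device usba_device = {
	.name		= "atmel_usba_udc",
	.id		= 0,
	.resource	= usba_resources,
	.num_resources	= ARRAY_SIZE(usba_resources),
	.dev		= {
		.dma_mask		= &usba_dma_mask,
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &usba_data,
	},
};

/* registered from the board's init code with platform_device_register() */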