path: root/drivers/usb/renesas_usbhs/mod_gadget.c
Diffstat (limited to 'drivers/usb/renesas_usbhs/mod_gadget.c')
-rw-r--r--	drivers/usb/renesas_usbhs/mod_gadget.c	| 840
1 file changed, 215 insertions(+), 625 deletions(-)
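
For orientation, the patch below drops the driver-local request list and trylock-based queue handler in favour of the shared usbhs_pkt queue, moves DMA mapping into a usbhs_pkt map/unmap callback, and registers the controller with udc-core. A rough sketch of the call path a gadget function driver would exercise through the reworked usb_ep_ops follows; the function names and buffer handling here are illustrative only and are not taken from the patch:

#include <linux/usb/gadget.h>

/* Illustrative caller only: usb_ep_alloc_request() lands in
 * usbhsg_ep_alloc_request() (which now initializes the embedded
 * usbhs_pkt), usb_ep_queue() lands in usbhsg_ep_queue() (which hands
 * the request to usbhs_pkt_push()), and usbhsg_queue_done() ->
 * usbhsg_queue_pop() eventually invokes the completion callback.
 */
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual were filled in by the UDC driver */
}

static int example_queue_one(struct usb_ep *ep, void *buf, unsigned int len)
{
	struct usb_request *req;
	int ret;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf	= buf;
	req->length	= len;
	req->complete	= example_complete;

	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (ret)
		usb_ep_free_request(ep, req);

	return ret;
}
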
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 547486ccd059..ba79dbf5adbc 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -26,26 +26,25 @@
  */
 struct usbhsg_request {
 	struct usb_request	req;
-	struct list_head	node;
+	struct usbhs_pkt	pkt;
 };
 
 #define EP_NAME_SIZE 8
 struct usbhsg_gpriv;
-struct usbhsg_pipe_handle;
 struct usbhsg_uep {
 	struct usb_ep		 ep;
 	struct usbhs_pipe	*pipe;
-	struct list_head	 list;
 
 	char ep_name[EP_NAME_SIZE];
 
 	struct usbhsg_gpriv *gpriv;
-	struct usbhsg_pipe_handle	*handler;
+	struct usbhs_pkt_handle		*handler;
 };
 
 struct usbhsg_gpriv {
 	struct usb_gadget	 gadget;
 	struct usbhs_mod	 mod;
+	struct list_head	 link;
 
 	struct usbhsg_uep	*uep;
 	int			 uep_size;
@@ -58,12 +57,6 @@ struct usbhsg_gpriv {
 #define USBHSG_STATUS_WEDGE	(1 << 2)
 };
 
-struct usbhsg_pipe_handle {
-	int (*prepare)(struct usbhsg_uep *uep, struct usbhsg_request *ureq);
-	int (*try_run)(struct usbhsg_uep *uep, struct usbhsg_request *ureq);
-	void (*irq_mask)(struct usbhsg_uep *uep, int enable);
-};
-
 struct usbhsg_recip_handle {
 	char *name;
 	int (*device)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
@@ -100,7 +93,6 @@ struct usbhsg_recip_handle {
 		container_of(r, struct usbhsg_request, req)
 
 #define usbhsg_ep_to_uep(e)		container_of(e, struct usbhsg_uep, ep)
-#define usbhsg_gpriv_to_lock(gp)	usbhs_priv_to_lock((gp)->mod.priv)
 #define usbhsg_gpriv_to_dev(gp)		usbhs_priv_to_dev((gp)->mod.priv)
 #define usbhsg_gpriv_to_priv(gp)	((gp)->mod.priv)
 #define usbhsg_gpriv_to_dcp(gp)		((gp)->uep)
@@ -110,6 +102,10 @@ struct usbhsg_recip_handle {
 #define usbhsg_pipe_to_uep(p)		((p)->mod_private)
 #define usbhsg_is_dcp(u)		((u) == usbhsg_gpriv_to_dcp((u)->gpriv))
 
+#define usbhsg_ureq_to_pkt(u)		(&(u)->pkt)
+#define usbhsg_pkt_to_ureq(i)	\
+		container_of(i, struct usbhsg_request, pkt)
+
 #define usbhsg_is_not_connected(gp) ((gp)->gadget.speed == USB_SPEED_UNKNOWN)
 
 /* status */
@@ -118,37 +114,18 @@ struct usbhsg_recip_handle {
 #define usbhsg_status_clr(gp, b) (gp->status &= ~b)
 #define usbhsg_status_has(gp, b) (gp->status &  b)
 
-/*
- * usbhsg_trylock
- *
- * This driver don't use spin_try_lock
- * to avoid warning of CONFIG_DEBUG_SPINLOCK
- */
-static spinlock_t *usbhsg_trylock(struct usbhsg_gpriv *gpriv,
-				  unsigned long *flags)
-{
-	spinlock_t *lock = usbhsg_gpriv_to_lock(gpriv);
-
-	/* check spin lock status
-	 * to avoid deadlock/nest */
-	if (spin_is_locked(lock))
-		return NULL;
-
-	spin_lock_irqsave(lock, *flags);
-
-	return lock;
-}
-
-static void usbhsg_unlock(spinlock_t *lock, unsigned long *flags)
-{
-	if (!lock)
-		return;
-
-	spin_unlock_irqrestore(lock, *flags);
-}
+/* controller */
+LIST_HEAD(the_controller_link);
+
+#define usbhsg_for_each_controller(gpriv)\
+		list_for_each_entry(gpriv, &the_controller_link, link)
+#define usbhsg_controller_register(gpriv)\
+		list_add_tail(&(gpriv)->link, &the_controller_link)
+#define usbhsg_controller_unregister(gpriv)\
+		list_del_init(&(gpriv)->link)
 
 /*
- * list push/pop
+ * queue push/pop
  */
 static void usbhsg_queue_push(struct usbhsg_uep *uep,
 			      struct usbhsg_request *ureq)
@@ -156,79 +133,17 @@ static void usbhsg_queue_push(struct usbhsg_uep *uep,
 	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
 	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
 	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+	struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
+	struct usb_request *req = &ureq->req;
 
-	/*
-	 ********* assume under spin lock *********
-	 */
-	list_del_init(&ureq->node);
-	list_add_tail(&ureq->node, &uep->list);
-	ureq->req.actual = 0;
-	ureq->req.status = -EINPROGRESS;
+	req->actual = 0;
+	req->status = -EINPROGRESS;
+	usbhs_pkt_push(pipe, pkt, uep->handler,
+		       req->buf, req->length, req->zero);
 
 	dev_dbg(dev, "pipe %d : queue push (%d)\n",
 		usbhs_pipe_number(pipe),
-		ureq->req.length);
-}
-
-static struct usbhsg_request *usbhsg_queue_get(struct usbhsg_uep *uep)
-{
-	/*
-	 ********* assume under spin lock *********
-	 */
-	if (list_empty(&uep->list))
-		return NULL;
-
-	return list_entry(uep->list.next, struct usbhsg_request, node);
-}
-
-#define usbhsg_queue_prepare(uep) __usbhsg_queue_handler(uep, 1);
-#define usbhsg_queue_handle(uep) __usbhsg_queue_handler(uep, 0);
-static int __usbhsg_queue_handler(struct usbhsg_uep *uep, int prepare)
-{
-	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
-	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-	struct usbhsg_request *ureq;
-	spinlock_t *lock;
-	unsigned long flags;
-	int ret = 0;
-
-	if (!uep->handler) {
-		dev_err(dev, "no handler function\n");
-		return -EIO;
-	}
-
-	/*
-	 * CAUTION [*queue handler*]
-	 *
-	 * This function will be called for start/restart queue operation.
-	 * OTOH the most much worry for USB driver is spinlock nest.
-	 * Specially it are
-	 * - usb_ep_ops :: queue
-	 * - usb_request :: complete
-	 *
-	 * But the caller of this function need not care about spinlock.
-	 * This function is using usbhsg_trylock for it.
-	 * if "is_locked" is 1, this mean this function lock it.
-	 * but if it is 0, this mean it is already under spin lock.
-	 * see also
-	 *	CAUTION [*endpoint queue*]
-	 *	CAUTION [*request complete*]
-	 */
-
-	/******************  spin try lock *******************/
-	lock = usbhsg_trylock(gpriv, &flags);
-
-	ureq = usbhsg_queue_get(uep);
-	if (ureq) {
-		if (prepare)
-			ret = uep->handler->prepare(uep, ureq);
-		else
-			ret = uep->handler->try_run(uep, ureq);
-	}
-	usbhsg_unlock(lock, &flags);
-	/********************  spin unlock ******************/
-
-	return ret;
 }
 
 static void usbhsg_queue_pop(struct usbhsg_uep *uep,
@@ -239,289 +154,91 @@ static void usbhsg_queue_pop(struct usbhsg_uep *uep,
 	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
 	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
 
-	/*
-	 ********* assume under spin lock *********
-	 */
-
-	/*
-	 * CAUTION [*request complete*]
-	 *
-	 * There is a possibility not to be called in correct order
-	 * if "complete" is called without spinlock.
-	 *
-	 * So, this function assume it is under spinlock,
-	 * and call usb_request :: complete.
-	 *
-	 * But this "complete" will push next usb_request.
-	 * It mean "usb_ep_ops :: queue" which is using spinlock is called
-	 * under spinlock.
-	 *
-	 * To avoid dead-lock, this driver is using usbhsg_trylock.
-	 *	CAUTION [*endpoint queue*]
-	 *	CAUTION [*queue handler*]
-	 */
-
 	dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
 
-	list_del_init(&ureq->node);
-
 	ureq->req.status = status;
 	ureq->req.complete(&uep->ep, &ureq->req);
-
-	/* more request ? */
-	if (0 == status)
-		usbhsg_queue_prepare(uep);
 }
 
-/*
- * irq enable/disable function
- */
-#define usbhsg_irq_callback_ctrl(uep, status, enable)			\
-	({								\
-		struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);	\
-		struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);	\
-		struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);	\
-		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
-		if (!mod)						\
-			return;						\
-		if (enable)						\
-			mod->irq_##status |= (1 << usbhs_pipe_number(pipe));	\
-		else							\
-			mod->irq_##status &= ~(1 << usbhs_pipe_number(pipe));	\
-		usbhs_irq_callback_update(priv, mod);			\
-	})
-
-static void usbhsg_irq_empty_ctrl(struct usbhsg_uep *uep, int enable)
+static void usbhsg_queue_done(struct usbhs_pkt *pkt)
 {
-	usbhsg_irq_callback_ctrl(uep, bempsts, enable);
-}
+	struct usbhs_pipe *pipe = pkt->pipe;
+	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
+	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
 
-static void usbhsg_irq_ready_ctrl(struct usbhsg_uep *uep, int enable)
-{
-	usbhsg_irq_callback_ctrl(uep, brdysts, enable);
-}
-
-/*
- * handler function
- */
-static int usbhsg_try_run_ctrl_stage_end(struct usbhsg_uep *uep,
-					 struct usbhsg_request *ureq)
-{
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
-
-	/*
-	 ********* assume under spin lock *********
-	 */
+	ureq->req.actual = pkt->actual;
 
-	usbhs_dcp_control_transfer_done(pipe);
 	usbhsg_queue_pop(uep, ureq, 0);
-
-	return 0;
 }
 
-static int usbhsg_try_run_send_packet(struct usbhsg_uep *uep,
-				      struct usbhsg_request *ureq)
+/*
+ * dma map/unmap
+ */
+static int usbhsg_dma_map(struct device *dev,
+			  struct usbhs_pkt *pkt,
+			  enum dma_data_direction dir)
 {
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
 	struct usb_request *req = &ureq->req;
-	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
-	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-	void *buf;
-	int remainder, send;
-	int is_done = 0;
-	int enable;
-	int maxp;
 
-	/*
-	 ********* assume under spin lock *********
-	 */
-
-	maxp = usbhs_pipe_get_maxpacket(pipe);
-	buf = req->buf + req->actual;
-	remainder = req->length - req->actual;
-
-	send = usbhs_fifo_write(pipe, buf, remainder);
-
-	/*
-	 * send < 0 : pipe busy
-	 * send = 0 : send zero packet
-	 * send > 0 : send data
-	 *
-	 * send <= max_packet
-	 */
-	if (send > 0)
-		req->actual += send;
-
-	/* send all packet ? */
-	if (send < remainder)
-		is_done = 0;		/* there are remainder data */
-	else if (send < maxp)
-		is_done = 1;		/* short packet */
-	else
-		is_done = !req->zero;	/* send zero packet ? */
-
-	dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
-		usbhs_pipe_number(pipe),
-		remainder, send, is_done, req->zero);
-
-	/*
-	 * enable interrupt and send again in irq handler
-	 * if it still have remainder data which should be sent.
-	 */
-	enable = !is_done;
-	uep->handler->irq_mask(uep, enable);
-
-	/*
-	 * usbhs_fifo_enable execute
-	 *  - after callback_update,
-	 *  - before queue_pop / stage_end
-	 */
-	usbhs_fifo_enable(pipe);
-
-	/*
-	 * all data were sent ?
-	 */
-	if (is_done) {
-		/* it care below call in
-		   "function mode" */
-		if (usbhsg_is_dcp(uep))
-			usbhs_dcp_control_transfer_done(pipe);
-
-		usbhsg_queue_pop(uep, ureq, 0);
+	if (pkt->dma != DMA_ADDR_INVALID) {
+		dev_err(dev, "dma is already mapped\n");
+		return -EIO;
 	}
 
-	return 0;
-}
-
-static int usbhsg_prepare_send_packet(struct usbhsg_uep *uep,
-				      struct usbhsg_request *ureq)
-{
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
-
-	/*
-	 ********* assume under spin lock *********
-	 */
+	if (req->dma == DMA_ADDR_INVALID) {
+		pkt->dma = dma_map_single(dev, pkt->buf, pkt->length, dir);
+	} else {
+		dma_sync_single_for_device(dev, req->dma, req->length, dir);
+		pkt->dma = req->dma;
+	}
 
-	usbhs_fifo_prepare_write(pipe);
-	usbhsg_try_run_send_packet(uep, ureq);
+	if (dma_mapping_error(dev, pkt->dma)) {
+		dev_err(dev, "dma mapping error %x\n", pkt->dma);
+		return -EIO;
+	}
 
 	return 0;
 }
 
-static int usbhsg_try_run_receive_packet(struct usbhsg_uep *uep,
-					 struct usbhsg_request *ureq)
+static int usbhsg_dma_unmap(struct device *dev,
+			    struct usbhs_pkt *pkt,
+			    enum dma_data_direction dir)
 {
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
 	struct usb_request *req = &ureq->req;
-	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
-	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-	void *buf;
-	int maxp;
-	int remainder, recv;
-	int is_done = 0;
-
-	/*
-	 ********* assume under spin lock *********
-	 */
-
-	maxp = usbhs_pipe_get_maxpacket(pipe);
-	buf = req->buf + req->actual;
-	remainder = req->length - req->actual;
-
-	recv = usbhs_fifo_read(pipe, buf, remainder);
-	/*
-	 * recv < 0 : pipe busy
-	 * recv >= 0 : receive data
-	 *
-	 * recv <= max_packet
-	 */
-	if (recv < 0)
-		return -EBUSY;
-
-	/* update parameters */
-	req->actual += recv;
-
-	if ((recv == remainder) ||	/* receive all data */
-	    (recv < maxp))		/* short packet */
-		is_done = 1;
 
-	dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
-		usbhs_pipe_number(pipe),
-		remainder, recv, is_done, req->zero);
+	if (pkt->dma == DMA_ADDR_INVALID) {
+		dev_err(dev, "dma is not mapped\n");
+		return -EIO;
+	}
 
-	/* read all data ? */
-	if (is_done) {
-		int disable = 0;
+	if (req->dma == DMA_ADDR_INVALID)
+		dma_unmap_single(dev, pkt->dma, pkt->length, dir);
+	else
+		dma_sync_single_for_cpu(dev, req->dma, req->length, dir);
 
-		uep->handler->irq_mask(uep, disable);
-		usbhs_fifo_disable(pipe);
-		usbhsg_queue_pop(uep, ureq, 0);
-	}
+	pkt->dma = DMA_ADDR_INVALID;
 
 	return 0;
 }
 
-static int usbhsg_prepare_receive_packet(struct usbhsg_uep *uep,
-					 struct usbhsg_request *ureq)
+static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
 {
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
-	int enable = 1;
-	int ret;
-
-	/*
-	 ********* assume under spin lock *********
-	 */
-
-	ret = usbhs_fifo_prepare_read(pipe);
-	if (ret < 0)
-		return ret;
+	struct usbhs_pipe *pipe = pkt->pipe;
+	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
+	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+	enum dma_data_direction dir;
 
-	/*
-	 * data will be read in interrupt handler
-	 */
-	uep->handler->irq_mask(uep, enable);
+	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 
-	return ret;
+	if (map)
+		return usbhsg_dma_map(dev, pkt, dir);
+	else
+		return usbhsg_dma_unmap(dev, pkt, dir);
 }
 
-static struct usbhsg_pipe_handle usbhsg_handler_send_by_empty = {
-	.prepare	= usbhsg_prepare_send_packet,
-	.try_run	= usbhsg_try_run_send_packet,
-	.irq_mask	= usbhsg_irq_empty_ctrl,
-};
-
-static struct usbhsg_pipe_handle usbhsg_handler_send_by_ready = {
-	.prepare	= usbhsg_prepare_send_packet,
-	.try_run	= usbhsg_try_run_send_packet,
-	.irq_mask	= usbhsg_irq_ready_ctrl,
-};
-
-static struct usbhsg_pipe_handle usbhsg_handler_recv_by_ready = {
-	.prepare	= usbhsg_prepare_receive_packet,
-	.try_run	= usbhsg_try_run_receive_packet,
-	.irq_mask	= usbhsg_irq_ready_ctrl,
-};
-
-static struct usbhsg_pipe_handle usbhsg_handler_ctrl_stage_end = {
-	.prepare	= usbhsg_try_run_ctrl_stage_end,
-	.try_run	= usbhsg_try_run_ctrl_stage_end,
-};
-
-/*
- * DCP pipe can NOT use "ready interrupt" for "send"
- * it should use "empty" interrupt.
- * see
- *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
- *
- * on the other hand, normal pipe can use "ready interrupt" for "send"
- * even though it is single/double buffer
- */
-#define usbhsg_handler_send_ctrl	usbhsg_handler_send_by_empty
-#define usbhsg_handler_recv_ctrl	usbhsg_handler_recv_by_ready
-
-#define usbhsg_handler_send_packet	usbhsg_handler_send_by_ready
-#define usbhsg_handler_recv_packet	usbhsg_handler_recv_by_ready
-
 /*
  * USB_TYPE_STANDARD / clear feature functions
  */
@@ -546,15 +263,13 @@ static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
 	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
 
 	if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) {
-		usbhs_fifo_disable(pipe);
+		usbhs_pipe_disable(pipe);
 		usbhs_pipe_clear_sequence(pipe);
-		usbhs_fifo_enable(pipe);
+		usbhs_pipe_enable(pipe);
 	}
 
 	usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
 
-	usbhsg_queue_prepare(uep);
-
 	return 0;
 }
 
@@ -575,6 +290,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
 	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
 	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
 	struct usbhsg_uep *uep;
+	struct usbhs_pipe *pipe;
 	int recip = ctrl->bRequestType & USB_RECIP_MASK;
 	int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
 	int ret;
@@ -583,9 +299,11 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
 	char *msg;
 
 	uep = usbhsg_gpriv_to_nth_uep(gpriv, nth);
-	if (!usbhsg_uep_to_pipe(uep)) {
+	pipe = usbhsg_uep_to_pipe(uep);
+	if (!pipe) {
 		dev_err(dev, "wrong recip request\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto usbhsg_recip_run_handle_end;
 	}
 
 	switch (recip) {
@@ -608,10 +326,20 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
 	}
 
 	if (func) {
+		unsigned long flags;
+
 		dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg);
+
+		/********************  spin lock ********************/
+		usbhs_lock(priv, flags);
 		ret = func(priv, uep, ctrl);
+		usbhs_unlock(priv, flags);
+		/********************  spin unlock ******************/
 	}
 
+usbhsg_recip_run_handle_end:
+	usbhs_pkt_start(pipe);
+
 	return ret;
 }
 
@@ -660,13 +388,13 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
 
 	switch (stage) {
 	case READ_DATA_STAGE:
-		dcp->handler = &usbhsg_handler_send_ctrl;
+		dcp->handler = &usbhs_fifo_pio_push_handler;
 		break;
 	case WRITE_DATA_STAGE:
-		dcp->handler = &usbhsg_handler_recv_ctrl;
+		dcp->handler = &usbhs_fifo_pio_pop_handler;
 		break;
 	case NODATA_STATUS_STAGE:
-		dcp->handler = &usbhsg_handler_ctrl_stage_end;
+		dcp->handler = &usbhs_ctrl_stage_end_handler;
 		break;
 	default:
 		return ret;
@@ -695,128 +423,27 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
 	ret = gpriv->driver->setup(&gpriv->gadget, &ctrl);
 
 	if (ret < 0)
-		usbhs_fifo_stall(pipe);
+		usbhs_pipe_stall(pipe);
 
 	return ret;
 }
 
-static int usbhsg_irq_empty(struct usbhs_priv *priv,
-			    struct usbhs_irq_state *irq_state)
-{
-	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
-	struct usbhsg_uep *uep;
-	struct usbhs_pipe *pipe;
-	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-	int i, ret;
-
-	if (!irq_state->bempsts) {
-		dev_err(dev, "debug %s !!\n", __func__);
-		return -EIO;
-	}
-
-	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
-
-	/*
-	 * search interrupted "pipe"
-	 * not "uep".
-	 */
-	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
-		if (!(irq_state->bempsts & (1 << i)))
-			continue;
-
-		uep = usbhsg_pipe_to_uep(pipe);
-		ret = usbhsg_queue_handle(uep);
-		if (ret < 0)
-			dev_err(dev, "send error %d : %d\n", i, ret);
-	}
-
-	return 0;
-}
-
-static int usbhsg_irq_ready(struct usbhs_priv *priv,
-			    struct usbhs_irq_state *irq_state)
-{
-	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
-	struct usbhsg_uep *uep;
-	struct usbhs_pipe *pipe;
-	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-	int i, ret;
-
-	if (!irq_state->brdysts) {
-		dev_err(dev, "debug %s !!\n", __func__);
-		return -EIO;
-	}
-
-	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
-
-	/*
-	 * search interrupted "pipe"
-	 * not "uep".
-	 */
-	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
-		if (!(irq_state->brdysts & (1 << i)))
-			continue;
-
-		uep = usbhsg_pipe_to_uep(pipe);
-		ret = usbhsg_queue_handle(uep);
-		if (ret < 0)
-			dev_err(dev, "receive error %d : %d\n", i, ret);
-	}
-
-	return 0;
-}
-
 /*
  *
  *		usb_dcp_ops
  *
  */
-static int usbhsg_dcp_enable(struct usbhsg_uep *uep)
-{
-	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
-	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
-	struct usbhs_pipe *pipe;
-
-	/*
-	 ********* assume under spin lock *********
-	 */
-
-	pipe = usbhs_dcp_malloc(priv);
-	if (!pipe)
-		return -EIO;
-
-	uep->pipe		= pipe;
-	uep->pipe->mod_private	= uep;
-	INIT_LIST_HEAD(&uep->list);
-
-	return 0;
-}
-
-#define usbhsg_dcp_disable usbhsg_pipe_disable
 static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
 {
 	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
-	struct usbhsg_request *ureq;
-	int disable = 0;
-
-	/*
-	 ********* assume under spin lock *********
-	 */
+	struct usbhs_pkt *pkt;
 
-	usbhs_fifo_disable(pipe);
-
-	/*
-	 * disable pipe irq
-	 */
-	usbhsg_irq_empty_ctrl(uep, disable);
-	usbhsg_irq_ready_ctrl(uep, disable);
+	usbhs_pipe_disable(pipe);
 
 	while (1) {
-		ureq = usbhsg_queue_get(uep);
-		if (!ureq)
+		pkt = usbhs_pkt_pop(pipe, NULL);
+		if (!pkt)
 			break;
-
-		usbhsg_queue_pop(uep, ureq, -ECONNRESET);
 	}
 
 	return 0;
@@ -843,57 +470,44 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
 	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 	struct usbhs_pipe *pipe;
-	spinlock_t *lock;
-	unsigned long flags;
 	int ret = -EIO;
 
 	/*
 	 * if it already have pipe,
 	 * nothing to do
 	 */
-	if (uep->pipe)
+	if (uep->pipe) {
+		usbhs_pipe_clear(uep->pipe);
+		usbhs_pipe_clear_sequence(uep->pipe);
 		return 0;
-
-	/********************  spin lock ********************/
-	lock = usbhsg_trylock(gpriv, &flags);
+	}
 
 	pipe = usbhs_pipe_malloc(priv, desc);
 	if (pipe) {
 		uep->pipe		= pipe;
 		pipe->mod_private	= uep;
-		INIT_LIST_HEAD(&uep->list);
 
+		/*
+		 * usbhs_fifo_dma_push/pop_handler try to
+		 * use dmaengine if possible.
+		 * It will use pio handler if impossible.
+		 */
 		if (usb_endpoint_dir_in(desc))
-			uep->handler = &usbhsg_handler_send_packet;
+			uep->handler = &usbhs_fifo_dma_push_handler;
 		else
-			uep->handler = &usbhsg_handler_recv_packet;
+			uep->handler = &usbhs_fifo_dma_pop_handler;
 
 		ret = 0;
 	}
 
-	usbhsg_unlock(lock, &flags);
-	/********************  spin unlock ******************/
-
 	return ret;
 }
 
 static int usbhsg_ep_disable(struct usb_ep *ep)
 {
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
-	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
-	spinlock_t *lock;
-	unsigned long flags;
-	int ret;
-
-	/********************  spin lock ********************/
-	lock = usbhsg_trylock(gpriv, &flags);
 
-	ret = usbhsg_pipe_disable(uep);
-
-	usbhsg_unlock(lock, &flags);
-	/********************  spin unlock ******************/
-
-	return ret;
+	return usbhsg_pipe_disable(uep);
 }
 
 static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
@@ -905,7 +519,10 @@ static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
 	if (!ureq)
 		return NULL;
 
-	INIT_LIST_HEAD(&ureq->node);
+	usbhs_pkt_init(usbhsg_ureq_to_pkt(ureq));
+
+	ureq->req.dma = DMA_ADDR_INVALID;
+
 	return &ureq->req;
 }
 
@@ -914,7 +531,7 @@ static void usbhsg_ep_free_request(struct usb_ep *ep,
 {
 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
 
-	WARN_ON(!list_empty(&ureq->node));
+	WARN_ON(!list_empty(&ureq->pkt.node));
 	kfree(ureq);
 }
 
@@ -925,69 +542,27 @@ static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
 	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
 	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
-	spinlock_t *lock;
-	unsigned long flags;
-	int ret = 0;
-
-	/*
-	 * CAUTION [*endpoint queue*]
-	 *
-	 * This function will be called from usb_request :: complete
-	 * or usb driver timing.
-	 * If this function is called from usb_request :: complete,
-	 * it is already under spinlock on this driver.
-	 * but it is called frm usb driver, this function should call spinlock.
-	 *
-	 * This function is using usbshg_trylock to solve this issue.
-	 * if "is_locked" is 1, this mean this function lock it.
-	 * but if it is 0, this mean it is already under spin lock.
-	 * see also
-	 *	CAUTION [*queue handler*]
-	 *	CAUTION [*request complete*]
-	 */
-
-	/********************  spin lock ********************/
-	lock = usbhsg_trylock(gpriv, &flags);
 
 	/* param check */
 	if (usbhsg_is_not_connected(gpriv) ||
 	    unlikely(!gpriv->driver) ||
 	    unlikely(!pipe))
-		ret = -ESHUTDOWN;
-	else
-		usbhsg_queue_push(uep, ureq);
-
-	usbhsg_unlock(lock, &flags);
-	/********************  spin unlock ******************/
+		return -ESHUTDOWN;
 
-	usbhsg_queue_prepare(uep);
+	usbhsg_queue_push(uep, ureq);
 
-	return ret;
+	return 0;
 }
 
 static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 {
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
-	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
-	spinlock_t *lock;
-	unsigned long flags;
-
-	/*
-	 * see
-	 *	CAUTION [*queue handler*]
-	 *	CAUTION [*endpoint queue*]
-	 *	CAUTION [*request complete*]
-	 */
-
-	/********************  spin lock ********************/
-	lock = usbhsg_trylock(gpriv, &flags);
+	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
 
+	usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
 	usbhsg_queue_pop(uep, ureq, -ECONNRESET);
 
-	usbhsg_unlock(lock, &flags);
-	/********************  spin unlock ******************/
-
 	return 0;
 }
 
@@ -996,42 +571,32 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
 	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
 	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-	spinlock_t *lock;
 	unsigned long flags;
-	int ret = -EAGAIN;
 
-	/*
-	 * see
-	 *	CAUTION [*queue handler*]
-	 *	CAUTION [*endpoint queue*]
-	 *	CAUTION [*request complete*]
-	 */
+	usbhsg_pipe_disable(uep);
 
-	/********************  spin lock ********************/
-	lock = usbhsg_trylock(gpriv, &flags);
-	if (!usbhsg_queue_get(uep)) {
+	dev_dbg(dev, "set halt %d (pipe %d)\n",
+		halt, usbhs_pipe_number(pipe));
 
-		dev_dbg(dev, "set halt %d (pipe %d)\n",
-			halt, usbhs_pipe_number(pipe));
-
-		if (halt)
-			usbhs_fifo_stall(pipe);
-		else
-			usbhs_fifo_disable(pipe);
+	/********************  spin lock ********************/
+	usbhs_lock(priv, flags);
 
-		if (halt && wedge)
-			usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
-		else
-			usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
+	if (halt)
+		usbhs_pipe_stall(pipe);
+	else
+		usbhs_pipe_disable(pipe);
 
-		ret = 0;
-	}
+	if (halt && wedge)
+		usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
+	else
+		usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
 
-	usbhsg_unlock(lock, &flags);
+	usbhs_unlock(priv, flags);
 	/********************  spin unlock ******************/
 
-	return ret;
+	return 0;
 }
 
 static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
@@ -1067,28 +632,40 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
 	struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
 	struct device *dev = usbhs_priv_to_dev(priv);
-	spinlock_t *lock;
 	unsigned long flags;
+	int ret = 0;
 
 	/********************  spin lock ********************/
-	lock = usbhsg_trylock(gpriv, &flags);
+	usbhs_lock(priv, flags);
 
-	/*
-	 * enable interrupt and systems if ready
-	 */
 	usbhsg_status_set(gpriv, status);
 	if (!(usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) &&
 	      usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD)))
-		goto usbhsg_try_start_unlock;
+		ret = -1; /* not ready */
+
+	usbhs_unlock(priv, flags);
+	/********************  spin unlock ********************/
+
+	if (ret < 0)
+		return 0; /* not ready is not error */
 
+	/*
+	 * enable interrupt and systems if ready
+	 */
 	dev_dbg(dev, "start gadget\n");
 
 	/*
 	 * pipe initialize and enable DCP
 	 */
-	usbhs_pipe_init(priv);
+	usbhs_pipe_init(priv,
+			usbhsg_queue_done,
+			usbhsg_dma_map_ctrl);
+	usbhs_fifo_init(priv);
 	usbhsg_uep_init(gpriv);
-	usbhsg_dcp_enable(dcp);
+
+	/* dcp init */
+	dcp->pipe		= usbhs_dcp_malloc(priv);
+	dcp->pipe->mod_private	= dcp;
 
 	/*
 	 * system config enble
@@ -1105,16 +682,8 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
 	 */
 	mod->irq_dev_state	= usbhsg_irq_dev_state;
 	mod->irq_ctrl_stage	= usbhsg_irq_ctrl_stage;
-	mod->irq_empty		= usbhsg_irq_empty;
-	mod->irq_ready		= usbhsg_irq_ready;
-	mod->irq_bempsts	= 0;
-	mod->irq_brdysts	= 0;
 	usbhs_irq_callback_update(priv, mod);
 
-usbhsg_try_start_unlock:
-	usbhsg_unlock(lock, &flags);
-	/********************  spin unlock ********************/
-
 	return 0;
 }
 
@@ -1124,31 +693,33 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
 	struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
 	struct device *dev = usbhs_priv_to_dev(priv);
-	spinlock_t *lock;
 	unsigned long flags;
+	int ret = 0;
 
 	/********************  spin lock ********************/
-	lock = usbhsg_trylock(gpriv, &flags);
+	usbhs_lock(priv, flags);
 
-	/*
-	 * disable interrupt and systems if 1st try
-	 */
 	usbhsg_status_clr(gpriv, status);
 	if (!usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) &&
 	    !usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD))
-		goto usbhsg_try_stop_unlock;
+		ret = -1; /* already done */
+
+	usbhs_unlock(priv, flags);
+	/********************  spin unlock ********************/
+
+	if (ret < 0)
+		return 0; /* already done is not error */
+
+	/*
+	 * disable interrupt and systems if 1st try
+	 */
+	usbhs_fifo_quit(priv);
 
 	/* disable all irq */
 	mod->irq_dev_state	= NULL;
 	mod->irq_ctrl_stage	= NULL;
-	mod->irq_empty		= NULL;
-	mod->irq_ready		= NULL;
-	mod->irq_bempsts	= 0;
-	mod->irq_brdysts	= 0;
 	usbhs_irq_callback_update(priv, mod);
 
-	usbhsg_dcp_disable(dcp);
-
 	gpriv->gadget.speed = USB_SPEED_UNKNOWN;
 
 	/* disable sys */
@@ -1156,8 +727,7 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
 	usbhs_sys_function_ctrl(priv, 0);
 	usbhs_sys_usb_ctrl(priv, 0);
 
-	usbhsg_unlock(lock, &flags);
-	/********************  spin unlock ********************/
+	usbhsg_pipe_disable(dcp);
 
 	if (gpriv->driver &&
 	    gpriv->driver->disconnect)
@@ -1166,11 +736,6 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
 	dev_dbg(dev, "stop gadget\n");
 
 	return 0;
-
-usbhsg_try_stop_unlock:
-	usbhsg_unlock(lock, &flags);
-
-	return 0;
 }
 
 /*
@@ -1178,11 +743,10 @@ usbhsg_try_stop_unlock:
  *		linux usb function
  *
  */
-struct usbhsg_gpriv *the_controller;
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int usbhsg_gadget_start(struct usb_gadget_driver *driver,
 			    int (*bind)(struct usb_gadget *))
 {
-	struct usbhsg_gpriv *gpriv = the_controller;
+	struct usbhsg_gpriv *gpriv;
 	struct usbhs_priv *priv;
 	struct device *dev;
 	int ret;
@@ -1192,10 +756,17 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 	    !driver->setup ||
 	    driver->speed != USB_SPEED_HIGH)
 		return -EINVAL;
-	if (!gpriv)
-		return -ENODEV;
-	if (gpriv->driver)
-		return -EBUSY;
+
+	/*
+	 * find unused controller
+	 */
+	usbhsg_for_each_controller(gpriv) {
+		if (!gpriv->driver)
+			goto find_unused_controller;
+	}
+	return -ENODEV;
+
+find_unused_controller:
 
 	dev = usbhsg_gpriv_to_dev(gpriv);
 	priv = usbhsg_gpriv_to_priv(gpriv);
@@ -1229,22 +800,28 @@ add_fail:
 
 	return ret;
 }
-EXPORT_SYMBOL(usb_gadget_probe_driver);
 
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int usbhsg_gadget_stop(struct usb_gadget_driver *driver)
 {
-	struct usbhsg_gpriv *gpriv = the_controller;
+	struct usbhsg_gpriv *gpriv;
 	struct usbhs_priv *priv;
-	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-
-	if (!gpriv)
-		return -ENODEV;
+	struct device *dev;
 
 	if (!driver ||
-	    !driver->unbind ||
-	    driver != gpriv->driver)
+	    !driver->unbind)
 		return -EINVAL;
 
+	/*
+	 * find controller
+	 */
+	usbhsg_for_each_controller(gpriv) {
+		if (gpriv->driver == driver)
+			goto find_matching_controller;
+	}
+	return -ENODEV;
+
+find_matching_controller:
+
 	dev = usbhsg_gpriv_to_dev(gpriv);
 	priv = usbhsg_gpriv_to_priv(gpriv);
 
@@ -1260,7 +837,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 
 	return 0;
 }
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
 
 /*
  *		usb gadget ops
@@ -1275,6 +851,8 @@ static int usbhsg_get_frame(struct usb_gadget *gadget)
 
 static struct usb_gadget_ops usbhsg_gadget_ops = {
 	.get_frame		= usbhsg_get_frame,
+	.start			= usbhsg_gadget_start,
+	.stop			= usbhsg_gadget_stop,
 };
 
 static int usbhsg_start(struct usbhs_priv *priv)
@@ -1294,6 +872,7 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 	struct device *dev = usbhs_priv_to_dev(priv);
 	int pipe_size = usbhs_get_dparam(priv, pipe_size);
 	int i;
+	int ret;
 
 	gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL);
 	if (!gpriv) {
@@ -1304,6 +883,7 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 	uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL);
 	if (!uep) {
 		dev_err(dev, "Could not allocate ep\n");
+		ret = -ENOMEM;
 		goto usbhs_mod_gadget_probe_err_gpriv;
 	}
 
@@ -1350,7 +930,6 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 		uep->ep.name		= uep->ep_name;
 		uep->ep.ops		= &usbhsg_ep_ops;
 		INIT_LIST_HEAD(&uep->ep.ep_list);
-		INIT_LIST_HEAD(&uep->list);
 
 		/* init DCP */
 		if (usbhsg_is_dcp(uep)) {
@@ -1364,22 +943,33 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 		}
 	}
 
-	the_controller = gpriv;
+	usbhsg_controller_register(gpriv);
+
+	ret = usb_add_gadget_udc(dev, &gpriv->gadget);
+	if (ret)
+		goto err_add_udc;
+
 
 	dev_info(dev, "gadget probed\n");
 
 	return 0;
+err_add_udc:
+	kfree(gpriv->uep);
 
 usbhs_mod_gadget_probe_err_gpriv:
 	kfree(gpriv);
 
-	return -ENOMEM;
+	return ret;
 }
 
 void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
 {
 	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
 
+	usb_del_gadget_udc(&gpriv->gadget);
+
+	usbhsg_controller_unregister(gpriv);
+
 	kfree(gpriv->uep);
 	kfree(gpriv);
 }
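
The closing hunks replace the exported usb_gadget_probe_driver()/usb_gadget_unregister_driver() pair and the the_controller singleton with udc-core registration: probe calls usb_add_gadget_udc(), remove calls usb_del_gadget_udc(), and binding reaches the driver through usb_gadget_ops .start/.stop. A minimal sketch of that shape, using the start/stop signatures of this kernel era as they appear in the patch; the my_udc_* names are illustrative, not the driver's actual code:

#include <linux/usb/gadget.h>

/* Sketch only: mirrors the registration pattern added above. */
static int my_udc_start(struct usb_gadget_driver *driver,
			int (*bind)(struct usb_gadget *))
{
	/* remember the gadget driver, run its bind(), enable the hardware */
	return 0;
}

static int my_udc_stop(struct usb_gadget_driver *driver)
{
	/* disable the hardware and forget the gadget driver */
	return 0;
}

static const struct usb_gadget_ops my_udc_ops = {
	.start	= my_udc_start,
	.stop	= my_udc_stop,
};

static int my_udc_register(struct device *parent, struct usb_gadget *gadget)
{
	gadget->ops = &my_udc_ops;
	return usb_add_gadget_udc(parent, gadget);	/* pairs with usb_del_gadget_udc() */
}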