author    David Brownell <dbrownell@users.sourceforge.net>  2008-06-19 21:19:28 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>  2008-07-21 18:16:12 -0400
commit    2b3d942c4878084a37991a65e66512c02b8fa2ad (patch)
tree      d7e70b94b002a08d5a31b56d88dd62b63b686d6a /drivers/usb/gadget/u_ether.c
parent    15b2d2b529d11449910ac86f6093124bce8f6103 (diff)
usb ethernet gadget: split out network core
Abstract the peripheral side Ethernet-over-USB link layer code from
the all-in-one Ethernet gadget driver into a component that can be
called by various functions, so the various flavors can be split
apart and selectively reused.

A notable difference from the approach taken with the serial link
layer code (beyond talking to NET not TTY) is that, because of the
initialization requirements, this only supports one network link
(and one set of Ethernet link addresses).  That is, each configuration
may have only one instance of a network function.  This doesn't change
behavior; the current code has that same restriction.  If you want
multiple logical links, that can easily be done using network layer
tools.

Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
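For orientation, here is a rough sketch of how a function driver might
consume this component.  The my_* names are hypothetical placeholders,
not part of this patch; only gether_setup(), gether_connect(),
gether_disconnect(), and gether_cleanup() come from u_ether.c below.

    /* Hedged sketch: wiring a gadget function to the u_ether link layer.
     * struct my_func, my_bind(), my_set_alt(), and my_disable() are
     * invented for illustration; struct gether and the gether_*() calls
     * are the real API added by this patch.
     */
    #include "u_ether.h"

    struct my_func {
            struct gether   port;           /* endpoints filled in at bind time */
            u8              ethaddr[ETH_ALEN];
    };

    static int my_bind(struct my_func *f, struct usb_gadget *g)
    {
            /* once per gadget: registers the "usb%d" net_device */
            return gether_setup(g, f->ethaddr);
    }

    static int my_set_alt(struct my_func *f)
    {
            struct net_device *net = gether_connect(&f->port);

            /* carrier goes "on" and I/O starts on success */
            return IS_ERR(net) ? PTR_ERR(net) : 0;
    }

    static void my_disable(struct my_func *f)
    {
            /* carrier "off"; endpoints disabled, requests freed */
            gether_disconnect(&f->port);
    }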
Diffstat (limited to 'drivers/usb/gadget/u_ether.c')
-rw-r--r--  drivers/usb/gadget/u_ether.c | 967
1 file changed, 967 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
new file mode 100644
index 000000000000..5458f43a8668
--- /dev/null
+++ b/drivers/usb/gadget/u_ether.c
@@ -0,0 +1,967 @@
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/utsname.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet, "CDC Subset", or RNDIS.
 * That includes all descriptor and endpoint management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define DRIVER_VERSION  "29-May-2008"

struct eth_dev {
        /* lock is held while accessing port_usb
         * or updating its backlink port_usb->ioport
         */
        spinlock_t              lock;
        struct gether           *port_usb;

        struct net_device       *net;
        struct usb_gadget       *gadget;

        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
        atomic_t                tx_qlen;

        unsigned                header_len;
        struct sk_buff          *(*wrap)(struct sk_buff *skb);
        int                     (*unwrap)(struct sk_buff *skb);

        struct work_struct      work;

        unsigned long           todo;
#define WORK_RX_MEMORY          0

        bool                    zlp;
        u8                      host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA        20      /* bytes guarding against rx overflows */

#define DEFAULT_QLEN    2       /* double buffering by default */


#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high speed");

#else   /* full speed (low speed doesn't do bulk) */
#define qmult           1
#endif

/* for dual-speed hardware, use deeper queues at highspeed */
static inline int qlen(struct usb_gadget *gadget)
{
        if (gadget_is_dualspeed(gadget) && gadget->speed == USB_SPEED_HIGH)
                return qmult * DEFAULT_QLEN;
        else
                return DEFAULT_QLEN;
}
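
/* Worked example (illustrative): with CONFIG_USB_GADGET_DUALSPEED and the
 * default qmult of 5, a link enumerated at high speed queues
 * 5 * DEFAULT_QLEN = 10 requests per direction; at full speed, or on
 * single-speed hardware, it stays at DEFAULT_QLEN = 2, i.e. simple
 * double buffering.
 */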

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef WARN
#undef INFO

#define xprintk(d, level, fmt, args...) \
        printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
        xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
        do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG    DBG
#else
#define VDBG(dev, fmt, args...) \
        do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
        xprintk(dev , KERN_ERR , fmt , ## args)
#define WARN(dev, fmt, args...) \
        xprintk(dev , KERN_WARNING , fmt , ## args)
#define INFO(dev, fmt, args...) \
        xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int eth_change_mtu(struct net_device *net, int new_mtu)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;
        int             status = 0;

        /* don't change MTU on "live" link (peer won't know) */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                status = -EBUSY;
        else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
                status = -ERANGE;
        else
                net->mtu = new_mtu;
        spin_unlock_irqrestore(&dev->lock, flags);

        return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
        struct eth_dev  *dev = netdev_priv(net);

        strlcpy(p->driver, "g_ether", sizeof p->driver);
        strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
        strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
        strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

static u32 eth_get_link(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        return dev->gadget->speed != USB_SPEED_UNKNOWN;
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static struct ethtool_ops ops = {
        .get_drvinfo = eth_get_drvinfo,
        .get_link = eth_get_link
};
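
/* Illustrative usage (assuming the link enumerated as usb0 on the device
 * side): "ethtool -i usb0" reports the fields filled by eth_get_drvinfo(),
 * i.e. driver "g_ether" and the controller name as firmware-version; and
 * "ethtool usb0" reports link detected iff the UDC has a known speed, per
 * eth_get_link().
 */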

static void defer_kevent(struct eth_dev *dev, int flag)
{
        if (test_and_set_bit(flag, &dev->todo))
                return;
        if (!schedule_work(&dev->work))
                ERROR(dev, "kevent %d may have been dropped\n", flag);
        else
                DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
        struct sk_buff  *skb;
        int             retval = -ENOMEM;
        size_t          size = 0;
        struct usb_ep   *out;
        unsigned long   flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                out = dev->port_usb->out_ep;
        else
                out = NULL;
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!out)
                return -ENOTCONN;

        /* Padding up to RX_EXTRA handles minor disagreements with host.
         * Normally we use the USB "terminate on short read" convention;
         * so allow up to (N*maxpacket), since that memory is normally
         * already allocated.  Some hardware doesn't deal well with short
         * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
         * byte off the end (to force hardware errors on overflow).
         *
         * RNDIS uses internal framing, and explicitly allows senders to
         * pad to end-of-packet.  That's potentially nice for speed, but
         * means receivers can't recover lost synch on their own (because
         * new packets don't only start after a short RX).
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
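        /* Worked example (illustrative): with mtu 1500 on a high speed
         * bulk endpoint (maxpacket 512) and an RNDIS-style 44 byte
         * framing header, 14 + 1500 + 20 + 44 = 1578 bytes, rounded up
         * to N*maxpacket = 2048.  With no framing header it is 1534,
         * rounded up to 1536.
         */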

        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
                goto enomem;
        }

        /* Some platforms perform better when IP packets are aligned,
         * but on at least one, checksumming fails otherwise.  Note:
         * RNDIS headers involve variable numbers of LE32 values.
         */
        skb_reserve(skb, NET_IP_ALIGN);

        req->buf = skb->data;
        req->length = size;
        req->complete = rx_complete;
        req->context = skb;

        retval = usb_ep_queue(out, req, gfp_flags);
        if (retval == -ENOMEM)
enomem:
                defer_kevent(dev, WORK_RX_MEMORY);
        if (retval) {
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
                spin_lock_irqsave(&dev->req_lock, flags);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;

        switch (status) {

        /* normal completion */
        case 0:
                skb_put(skb, req->actual);
                if (dev->unwrap)
                        status = dev->unwrap(skb);
                if (status < 0
                                || ETH_HLEN > skb->len
                                || skb->len > ETH_FRAME_LEN) {
                        dev->net->stats.rx_errors++;
                        dev->net->stats.rx_length_errors++;
                        DBG(dev, "rx length %d\n", skb->len);
                        break;
                }

                skb->protocol = eth_type_trans(skb, dev->net);
                dev->net->stats.rx_packets++;
                dev->net->stats.rx_bytes += skb->len;

                /* no buffer copies needed, unless hardware can't
                 * use skb buffers.
                 */
                status = netif_rx(skb);
                skb = NULL;
                break;

        /* software-driven interface shutdown */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                VDBG(dev, "rx shutdown, code %d\n", status);
                goto quiesce;

        /* for hardware automagic (such as pxa) */
        case -ECONNABORTED:             /* endpoint reset */
                DBG(dev, "rx %s reset\n", ep->name);
                defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
                dev_kfree_skb_any(skb);
                goto clean;

        /* data overrun */
        case -EOVERFLOW:
                dev->net->stats.rx_over_errors++;
                /* FALLTHROUGH */

        default:
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }

        if (skb)
                dev_kfree_skb_any(skb);
        if (!netif_running(dev->net)) {
clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
                req = NULL;
        }
        if (req)
                rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
        unsigned                i;
        struct usb_request      *req;

        if (!n)
                return -ENOMEM;

        /* queue/recycle up to N requests */
        i = n;
        list_for_each_entry(req, list, list) {
                if (i-- == 0)
                        goto extra;
        }
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
                list_add(&req->list, list);
        }
        return 0;

extra:
        /* free extras */
        for (;;) {
                struct list_head        *next;

                next = req->list.next;
                list_del(&req->list);
                usb_ep_free_request(ep, req);

                if (next == list)
                        break;

                req = container_of(next, struct usb_request, list);
        }
        return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
        int     status;

        spin_lock(&dev->req_lock);
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
fail:
        DBG(dev, "can't alloc requests\n");
done:
        spin_unlock(&dev->req_lock);
        return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
        struct usb_request      *req;
        unsigned long           flags;

        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);

                if (rx_submit(dev, req, gfp_flags) < 0) {
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }

                spin_lock_irqsave(&dev->req_lock, flags);
        }
        spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);

        if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running(dev->net))
                        rx_fill(dev, GFP_KERNEL);
        }

        if (dev->todo)
                DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;

        switch (req->status) {
        default:
                dev->net->stats.tx_errors++;
                VDBG(dev, "tx err %d\n", req->status);
                /* FALLTHROUGH */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
                dev->net->stats.tx_bytes += skb->len;
        }
        dev->net->stats.tx_packets++;

        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock(&dev->req_lock);
        dev_kfree_skb_any(skb);

        atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct eth_dev          *dev = netdev_priv(net);
        int                     length = skb->len;
        int                     retval;
        struct usb_request      *req = NULL;
        unsigned long           flags;
        struct usb_ep           *in;
        u16                     cdc_filter;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
        } else {
                in = NULL;
                cdc_filter = 0;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!in) {
                dev_kfree_skb_any(skb);
                return 0;
        }

        /* apply outgoing CDC or RNDIS filters */
        if (!is_promisc(cdc_filter)) {
                u8              *dest = skb->data;

                if (is_multicast_ether_addr(dest)) {
                        u16     type;

                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
                        if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
                                return 0;
                        }
                }
                /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
        }

        spin_lock_irqsave(&dev->req_lock, flags);
        /*
         * this freelist can be empty if an interrupt triggered disconnect()
         * and reconfigured the gadget (shutting down this queue) after the
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
                spin_unlock_irqrestore(&dev->req_lock, flags);
                return 1;
        }

        req = container_of(dev->tx_reqs.next, struct usb_request, list);
        list_del(&req->list);

        /* temporarily stop TX queue when the freelist empties */
        if (list_empty(&dev->tx_reqs))
                netif_stop_queue(net);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        /* no buffer copies needed, unless the network stack did it,
         * or the hardware can't use skb buffers,
         * or there's not enough space for extra headers we need
         */
        if (dev->wrap) {
                struct sk_buff  *skb_new;

                skb_new = dev->wrap(skb);
                if (!skb_new)
                        goto drop;

                dev_kfree_skb_any(skb);
                skb = skb_new;
                length = skb->len;
        }
        req->buf = skb->data;
        req->context = skb;
        req->complete = tx_complete;

        /* use zlp framing on tx for strict CDC-Ether conformance,
         * though any robust network rx path ignores extra padding,
         * and some hardware doesn't like to write zlps.
         */
        req->zero = 1;
        if (!dev->zlp && (length % in->maxpacket) == 0)
                length++;

        req->length = length;
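        /* Illustrative: a 1024-byte frame on a 512-byte maxpacket bulk
         * endpoint would end exactly on a packet boundary.  If the UDC
         * can't write zlps (dev->zlp clear), queueing 1025 bytes appends
         * one pad byte, so the host's short read still terminates the
         * transfer; a robust rx path ignores the extra byte.
         */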

        /* throttle highspeed IRQ rate back slightly */
        if (gadget_is_dualspeed(dev->gadget))
                req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
                        ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
                        : 0;

        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
                net->trans_start = jiffies;
                atomic_inc(&dev->tx_qlen);
        }

        if (retval) {
drop:
                dev->net->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return 0;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
        DBG(dev, "%s\n", __func__);

        /* fill the rx queue */
        rx_fill(dev, gfp_flags);

        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        struct gether   *link;

        DBG(dev, "%s\n", __func__);
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);

        spin_lock_irq(&dev->lock);
        link = dev->port_usb;
        if (link && link->open)
                link->open(link);
        spin_unlock_irq(&dev->lock);

        return 0;
}

static int eth_stop(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;

        VDBG(dev, "%s\n", __func__);
        netif_stop_queue(net);

        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
                dev->net->stats.rx_packets, dev->net->stats.tx_packets,
                dev->net->stats.rx_errors, dev->net->stats.tx_errors
                );

        /* ensure there are no more active requests */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether   *link = dev->port_usb;

                if (link->close)
                        link->close(link);

                /* NOTE:  we have no abort-queue primitive we could use
                 * to cancel all pending I/O.  Instead, we disable then
                 * reenable the endpoints ... this idiom may leave toggle
                 * wrong, but that's a self-correcting error.
                 *
                 * REVISIT:  we *COULD* just let the transfers complete at
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
                        usb_ep_enable(link->in_ep, link->in);
                        usb_ep_enable(link->out_ep, link->out);
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
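
/* Illustrative usage (assuming the legacy g_ether gadget module and
 * made-up locally administered addresses):
 *
 *      modprobe g_ether dev_addr=26:01:02:03:04:05 host_addr=26:01:02:03:04:06
 *
 * Each parameter takes six hex byte pairs, optionally separated by ':'
 * or '.' (see get_ether_addr() below); anything that doesn't parse to a
 * valid unicast address falls back to a random address, with a warning.
 */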


static u8 __init nibble(unsigned char c)
{
        if (isdigit(c))
                return c - '0';
        c = toupper(c);
        if (isxdigit(c))
                return 10 + c - 'A';
        return 0;
}

static int __init get_ether_addr(const char *str, u8 *dev_addr)
{
        if (str) {
                unsigned        i;

                for (i = 0; i < 6; i++) {
                        unsigned char num;

                        if ((*str == '.') || (*str == ':'))
                                str++;
                        num = nibble(*str++) << 4;
                        num |= (nibble(*str++));
                        dev_addr[i] = num;
                }
                if (is_valid_ether_addr(dev_addr))
                        return 0;
        }
        random_ether_addr(dev_addr);
        return 1;
}

static struct eth_dev   *the_dev;


/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *      host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
        struct eth_dev          *dev;
        struct net_device       *net;
        int                     status;

        if (the_dev)
                return -EBUSY;

        net = alloc_etherdev(sizeof *dev);
        if (!net)
                return -ENOMEM;

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        /* network device setup */
        dev->net = net;
        strcpy(net->name, "usb%d");

        if (get_ether_addr(dev_addr, net->dev_addr))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "host");

        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);

        net->change_mtu = eth_change_mtu;
        net->hard_start_xmit = eth_start_xmit;
        net->open = eth_open;
        net->stop = eth_stop;
        /* watchdog_timeo, tx_timeout ... */
        /* set_multicast_list */
        SET_ETHTOOL_OPS(net, &ops);

        /* two kinds of host-initiated state changes:
         *  - iff DATA transfer is active, carrier is "on"
         *  - tx queueing enabled if open *and* carrier is "on"
         */
        netif_stop_queue(net);
        netif_carrier_off(net);

        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                free_netdev(net);
        } else {
                DECLARE_MAC_BUF(tmp);

                INFO(dev, "MAC %s\n", print_mac(tmp, net->dev_addr));
                INFO(dev, "HOST MAC %s\n", print_mac(tmp, dev->host_mac));

                the_dev = dev;
        }

        return status;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
        if (!the_dev)
                return;

        unregister_netdev(the_dev->net);
        free_netdev(the_dev->net);

        /* assuming we used keventd, it must quiesce too */
        flush_scheduled_work();

        the_dev = NULL;
}


/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *      current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the net_device pointer returned using IS_ERR().  If it doesn't
 * indicate an error code (negative errno), the endpoints' ep->driver_data
 * values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
        struct eth_dev          *dev = the_dev;
        int                     result = 0;

        if (!dev)
                return ERR_PTR(-EINVAL);

        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep, link->in);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->in_ep->name, result);
                goto fail0;
        }

        link->out_ep->driver_data = dev;
        result = usb_ep_enable(link->out_ep, link->out);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->out_ep->name, result);
                goto fail1;
        }

        if (result == 0)
                result = alloc_requests(dev, link, qlen(dev->gadget));

        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
                DBG(dev, "qlen %d\n", qlen(dev->gadget));

                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;

                spin_lock(&dev->lock);
                dev->port_usb = link;
                link->ioport = dev;
                spin_unlock(&dev->lock);

                netif_carrier_on(dev->net);
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);

        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
fail1:
                (void) usb_ep_disable(link->in_ep);
        }
fail0:
        /* caller is responsible for cleanup on error */
        if (result < 0)
                return ERR_PTR(result);
        return dev->net;
}
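
/* Illustrative caller (hypothetical "geth" function state): an altsetting
 * handler would typically do
 *
 *      net = gether_connect(&geth->port);
 *      if (IS_ERR(net))
 *              return PTR_ERR(net);
 *
 * and need not enable anything itself: gether_connect() enables both bulk
 * endpoints, allocates the request pools, and raises carrier.  The matching
 * teardown path calls gether_disconnect() below.
 */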

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;

        WARN_ON(!dev);
        if (!dev)
                return;

        DBG(dev, "%s\n", __func__);

        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);

        /* disable endpoints, forcing (synchronous) completion
         * of all pending i/o.  then free the request objects
         * and forget about the endpoints.
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->tx_reqs)) {
                req = container_of(dev->tx_reqs.next,
                                struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->in_ep->driver_data = NULL;
        link->in = NULL;

        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->out_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->out_ep->driver_data = NULL;
        link->out = NULL;

        /* finish forgetting about this USB link episode */
        dev->header_len = 0;
        dev->unwrap = NULL;
        dev->wrap = NULL;

        spin_lock(&dev->lock);
        dev->port_usb = NULL;
        link->ioport = NULL;
        spin_unlock(&dev->lock);
}