Diffstat (limited to 'drivers/usb/gadget')
-rw-r--r--  drivers/usb/gadget/Kconfig           389
-rw-r--r--  drivers/usb/gadget/Makefile           30
-rw-r--r--  drivers/usb/gadget/config.c          117
-rw-r--r--  drivers/usb/gadget/dummy_hcd.c      1793
-rw-r--r--  drivers/usb/gadget/epautoconf.c      310
-rw-r--r--  drivers/usb/gadget/ether.c          2660
-rw-r--r--  drivers/usb/gadget/file_storage.c   4139
-rw-r--r--  drivers/usb/gadget/gadget_chips.h     92
-rw-r--r--  drivers/usb/gadget/goku_udc.c       1984
-rw-r--r--  drivers/usb/gadget/goku_udc.h        290
-rw-r--r--  drivers/usb/gadget/inode.c          2110
-rw-r--r--  drivers/usb/gadget/lh7a40x_udc.c    2167
-rw-r--r--  drivers/usb/gadget/lh7a40x_udc.h     261
-rw-r--r--  drivers/usb/gadget/ndis.h            217
-rw-r--r--  drivers/usb/gadget/net2280.c        2967
-rw-r--r--  drivers/usb/gadget/net2280.h         728
-rw-r--r--  drivers/usb/gadget/omap_udc.c       2872
-rw-r--r--  drivers/usb/gadget/omap_udc.h        208
-rw-r--r--  drivers/usb/gadget/pxa2xx_udc.c     2648
-rw-r--r--  drivers/usb/gadget/pxa2xx_udc.h      320
-rw-r--r--  drivers/usb/gadget/rndis.c          1428
-rw-r--r--  drivers/usb/gadget/rndis.h           348
-rw-r--r--  drivers/usb/gadget/serial.c         2436
-rw-r--r--  drivers/usb/gadget/usbstring.c       136
-rw-r--r--  drivers/usb/gadget/zero.c           1357
25 files changed, 32007 insertions, 0 deletions
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
new file mode 100644
index 000000000000..3b24f9f2c234
--- /dev/null
+++ b/drivers/usb/gadget/Kconfig
@@ -0,0 +1,389 @@
1#
2# USB Gadget support on a system involves
3# (a) a peripheral controller, and
4# (b) the gadget driver using it.
5#
6# NOTE: Gadget support ** DOES NOT ** depend on host-side CONFIG_USB !!
7#
8# - Host systems (like PCs) need CONFIG_USB (with "A" jacks).
9# - Peripherals (like PDAs) need CONFIG_USB_GADGET (with "B" jacks).
10# - Some systems have both kinds of controller.
11#
12# With help from a special transceiver and a "Mini-AB" jack, systems with
13# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
14#
15menu "USB Gadget Support"
16
17config USB_GADGET
18 tristate "Support for USB Gadgets"
19 help
20 USB is a master/slave protocol, organized with one master
21 host (such as a PC) controlling up to 127 peripheral devices.
22 The USB hardware is asymmetric, which makes it easier to set up:
23 you can't connect a "to-the-host" connector to a peripheral.
24
25 Linux can run in the host, or in the peripheral. In both cases
26 you need a low level bus controller driver, and some software
27 talking to it. Peripheral controllers are often discrete silicon,
28 or are integrated with the CPU in a microcontroller. The more
29 familiar host side controllers have names like "EHCI", "OHCI",
30 or "UHCI", and are usually integrated into southbridges on PC
31 motherboards.
32
33 Enable this configuration option if you want to run Linux inside
34 a USB peripheral device. Configure one hardware driver for your
35 peripheral/device side bus controller, and a "gadget driver" for
36 your peripheral protocol. (If you use modular gadget drivers,
37 you may configure more than one.)
38
39 If in doubt, say "N" and don't enable these drivers; most people
40 don't have this kind of hardware (except maybe inside Linux PDAs).
41
42 For more information, see <http://www.linux-usb.org/gadget> and
43 the kernel DocBook documentation for this API.
44
45config USB_GADGET_DEBUG_FILES
46 boolean "Debugging information files"
47 depends on USB_GADGET && PROC_FS
48 help
49 Some of the drivers in the "gadget" framework can expose
50 debugging information in files such as /proc/driver/udc
51 (for a peripheral controller). The information in these
52 files may help when you're troubleshooting or bringing up a
53 driver on a new board. Enable these files by choosing "Y"
54 here. If in doubt, or to conserve kernel memory, say "N".
55
56#
57# USB Peripheral Controller Support
58#
59choice
60 prompt "USB Peripheral Controller"
61 depends on USB_GADGET
62 help
63 A USB device uses a controller to talk to its host.
64 Systems should have only one such upstream link.
65 Many controller drivers are platform-specific; these
66 often need board-specific hooks.
67
68config USB_GADGET_NET2280
69 boolean "NetChip 2280"
70 depends on PCI
71 select USB_GADGET_DUALSPEED
72 help
73 NetChip 2280 is a PCI based USB peripheral controller which
74 supports both full and high speed USB 2.0 data transfers.
75
76 It has six configurable endpoints, as well as endpoint zero
77 (for control transfers) and several endpoints with dedicated
78 functions.
79
80 Say "y" to link the driver statically, or "m" to build a
81 dynamically linked module called "net2280" and force all
82 gadget drivers to also be dynamically linked.
83
84config USB_NET2280
85 tristate
86 depends on USB_GADGET_NET2280
87 default USB_GADGET
88
89config USB_GADGET_PXA2XX
90 boolean "PXA 25x or IXP 4xx"
91 depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
92 help
93 Intel's PXA 25x series XScale ARM-5TE processors include
94 an integrated full speed USB 1.1 device controller. The
95 controller in the IXP 4xx series is register-compatible.
96
97 It has fifteen fixed-function endpoints, as well as endpoint
98 zero (for control transfers).
99
100 Say "y" to link the driver statically, or "m" to build a
101 dynamically linked module called "pxa2xx_udc" and force all
102 gadget drivers to also be dynamically linked.
103
104config USB_PXA2XX
105 tristate
106 depends on USB_GADGET_PXA2XX
107 default USB_GADGET
108
109# if there's only one gadget driver, using only two bulk endpoints,
110# don't waste memory for the other endpoints
111config USB_PXA2XX_SMALL
112 depends on USB_GADGET_PXA2XX
113 bool
114 default n if USB_ETH_RNDIS
115 default y if USB_ZERO
116 default y if USB_ETH
117 default y if USB_G_SERIAL
118
119config USB_GADGET_GOKU
120 boolean "Toshiba TC86C001 'Goku-S'"
121 depends on PCI
122 help
123 The Toshiba TC86C001 is a PCI device which includes controllers
124 for full speed USB devices, IDE, I2C, SIO, plus a USB host (OHCI).
125
126 The device controller has three configurable (bulk or interrupt)
127 endpoints, plus endpoint zero (for control transfers).
128
129 Say "y" to link the driver statically, or "m" to build a
130 dynamically linked module called "goku_udc" and to force all
131 gadget drivers to also be dynamically linked.
132
133config USB_GOKU
134 tristate
135 depends on USB_GADGET_GOKU
136 default USB_GADGET
137
138
139config USB_GADGET_LH7A40X
140 boolean "LH7A40X"
141 depends on ARCH_LH7A40X
142 help
143 This driver provides a USB Device Controller driver for the LH7A40x.
144
145config USB_LH7A40X
146 tristate
147 depends on USB_GADGET_LH7A40X
148 default USB_GADGET
149
150
151config USB_GADGET_OMAP
152 boolean "OMAP USB Device Controller"
153 depends on ARCH_OMAP
154 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
155 help
156 Many Texas Instruments OMAP processors have flexible full
157 speed USB device controllers, with support for up to 30
158 endpoints (plus endpoint zero). This driver supports the
159 controller in the OMAP 1611, and should work with controllers
160 in other OMAP processors too, given minor tweaks.
161
162 Say "y" to link the driver statically, or "m" to build a
163 dynamically linked module called "omap_udc" and force all
164 gadget drivers to also be dynamically linked.
165
166config USB_OMAP
167 tristate
168 depends on USB_GADGET_OMAP
169 default USB_GADGET
170
171config USB_OTG
172 boolean "OTG Support"
173 depends on USB_GADGET_OMAP && ARCH_OMAP_OTG && USB_OHCI_HCD
174 help
175 The most notable feature of USB OTG is support for a
176 "Dual-Role" device, which can act as either a device
177 or a host. The initial role choice can be changed
178 later, when two dual-role devices talk to each other.
179
180 Select this only if your OMAP board has a Mini-AB connector.
181
182
183config USB_GADGET_DUMMY_HCD
184 boolean "Dummy HCD (DEVELOPMENT)"
185 depends on USB && EXPERIMENTAL
186 select USB_GADGET_DUALSPEED
187 help
188 This host controller driver emulates USB, looping all data transfer
189 requests back to a USB "gadget driver" in the same host. The host
190 side is the master; the gadget side is the slave. Gadget drivers
191 can be high, full, or low speed; and they have access to endpoints
192 like those from NET2280, PXA2xx, or SA1100 hardware.
193
194 This may help in some stages of creating a driver to embed in a
195 Linux device, since it lets you debug several parts of the gadget
196 driver without its hardware or drivers being involved.
197
198 Since such a gadget side driver needs to interoperate with a host
199 side Linux-USB device driver, this may help to debug both sides
200 of a USB protocol stack.
201
202 Say "y" to link the driver statically, or "m" to build a
203 dynamically linked module called "dummy_hcd" and force all
204 gadget drivers to also be dynamically linked.
205
206config USB_DUMMY_HCD
207 tristate
208 depends on USB_GADGET_DUMMY_HCD
209 default USB_GADGET
210
211# NOTE: Please keep dummy_hcd LAST so that "real hardware" appears
212# first and will be selected by default.
213
214endchoice
215
216config USB_GADGET_DUALSPEED
217 bool
218 depends on USB_GADGET
219 default n
220 help
221 Means that gadget drivers should include extra descriptors
222 and code to handle dual-speed controllers.
223
224#
225# USB Gadget Drivers
226#
227choice
228 tristate "USB Gadget Drivers"
229 depends on USB_GADGET
230 default USB_ETH
231 help
232 A Linux "Gadget Driver" talks to the USB Peripheral Controller
233 driver through the abstract "gadget" API. Some other operating
234 systems call these "client" drivers, of which "class drivers"
235 are a subset (implementing a USB device class specification).
236 A gadget driver implements one or more USB functions using
237 the peripheral hardware.
238
239 Gadget drivers are hardware-neutral, or "platform independent",
240 except that they sometimes must understand quirks or limitations
241 of the particular controllers they work with. For example, when
242 a controller doesn't support alternate configurations or provide
243 enough of the right types of endpoints, the gadget driver might
244 not be able to work with that controller, or might need to implement
245 a less common variant of a device class protocol.
246
247# this first set of drivers all depend on bulk-capable hardware.
248
249config USB_ZERO
250 tristate "Gadget Zero (DEVELOPMENT)"
251 depends on EXPERIMENTAL
252 help
253 Gadget Zero is a two-configuration device. It either sinks and
254 sources bulk data; or it loops back a configurable number of
255 transfers. It also implements control requests, for "chapter 9"
256 conformance. The driver needs only two bulk-capable endpoints, so
257 it can work on top of most device-side usb controllers. It's
258 useful for testing, and is also a working example showing how
259 USB "gadget drivers" can be written.
260
261 Make this be the first driver you try using on top of any new
262 USB peripheral controller driver. Then you can use host-side
263 test software, like the "usbtest" driver, to put your hardware
264 and its driver through a basic set of functional tests.
265
266 Gadget Zero also works with the host-side "usb-skeleton" driver,
267 and with many kinds of host-side test software. You may need
268 to tweak product and vendor IDs before host software knows about
269 this device, and arrange to select an appropriate configuration.
270
271 Say "y" to link the driver statically, or "m" to build a
272 dynamically linked module called "g_zero".
273
274config USB_ZERO_HNPTEST
275 boolean "HNP Test Device"
276 depends on USB_ZERO && USB_OTG
277 help
278 You can configure this device to enumerate using the device
279 identifiers of the USB-OTG test device. That means that when
280 this gadget connects to another OTG device, with this one using
281 the "B-Peripheral" role, that device will use HNP to let this
282 one serve as the USB host instead (in the "B-Host" role).
283
284config USB_ETH
285 tristate "Ethernet Gadget (with CDC Ethernet support)"
286 depends on NET
287 help
288 This driver implements Ethernet style communication, in either
289 of two ways:
290
291 - The "Communication Device Class" (CDC) Ethernet Control Model.
292 That protocol is often avoided with pure Ethernet adapters, in
293 favor of simpler vendor-specific hardware, but is widely
294 supported by firmware for smart network devices.
295
296 - On hardware that can't implement that protocol, a simple CDC subset
297 is used, placing fewer demands on USB.
298
299 RNDIS support is a third option, more demanding than that subset.
300
301 Within the USB device, this gadget driver exposes a network device
302 "usbX", where X depends on what other networking devices you have.
303 Treat it like a two-node Ethernet link: host, and gadget.
304
305 The Linux-USB host-side "usbnet" driver interoperates with this
306 driver, so that deep I/O queues can be supported. On 2.4 kernels,
307 use "CDCEther" instead, if you're using the CDC option. That CDC
308 mode should also interoperate with standard CDC Ethernet class
309 drivers on other host operating systems.
310
311 Say "y" to link the driver statically, or "m" to build a
312 dynamically linked module called "g_ether".
313
314config USB_ETH_RNDIS
315 bool "RNDIS support (EXPERIMENTAL)"
316 depends on USB_ETH && EXPERIMENTAL
317 default y
318 help
319 Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
320 and Microsoft provides redistributable binary RNDIS drivers for
321 older versions of Windows.
322
323 If you say "y" here, the Ethernet gadget driver will try to provide
324 a second device configuration, supporting RNDIS to talk to such
325 Microsoft USB hosts.
326
327 To make MS-Windows work with this, use Documentation/usb/linux.inf
328 as the "driver info file". For versions of MS-Windows older than
329 XP, you'll need to download drivers from Microsoft's website; a URL
330 is given in comments found in that info file.
331
332config USB_GADGETFS
333 tristate "Gadget Filesystem (EXPERIMENTAL)"
334 depends on EXPERIMENTAL
335 help
336 This driver provides a filesystem based API that lets user mode
337 programs implement a single-configuration USB device, including
338 endpoint I/O and control requests that don't relate to enumeration.
339 All endpoints, transfer speeds, and transfer types supported by
340 the hardware are available, through read() and write() calls.
341
342 Say "y" to link the driver statically, or "m" to build a
343 dynamically linked module called "gadgetfs".
344
345config USB_FILE_STORAGE
346 tristate "File-backed Storage Gadget"
347 help
348 The File-backed Storage Gadget acts as a USB Mass Storage
349 disk drive. As its storage repository it can use a regular
350 file or a block device (in much the same way as the "loop"
351 device driver), specified as a module parameter.
352
353 Say "y" to link the driver statically, or "m" to build a
354 dynamically linked module called "g_file_storage".
355
356config USB_FILE_STORAGE_TEST
357 bool "File-backed Storage Gadget testing version"
358 depends on USB_FILE_STORAGE
359 default n
360 help
361 Say "y" to generate the larger testing version of the
362 File-backed Storage Gadget, useful for probing the
363 behavior of USB Mass Storage hosts. Not needed for
364 normal operation.
365
366config USB_G_SERIAL
367 tristate "Serial Gadget (with CDC ACM support)"
368 help
369 The Serial Gadget talks to the Linux-USB generic serial driver.
370 This driver supports a CDC-ACM module option, which can be used
371 to interoperate with MS-Windows hosts or with the Linux-USB
372 "cdc-acm" driver.
373
374 Say "y" to link the driver statically, or "m" to build a
375 dynamically linked module called "g_serial".
376
377 For more information, see Documentation/usb/gadget_serial.txt
378 which includes instructions and a "driver info file" needed to
379 make MS-Windows work with this driver.
380
381
382# put drivers that need isochronous transfer support (for audio
383# or video class gadget drivers), or specific hardware, here.
384
385# - none yet
386
387endchoice
388
389endmenu
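
As a point of reference for the "gadget driver" half that the choice block above configures: a gadget driver is an ordinary kernel module built on the usb_gadget API added by this patch set. The sketch below is editorial and not part of this commit; the my_* names and "g_example" are placeholders, and it only shows the callbacks that usb_gadget_register_driver() in dummy_hcd.c insists on (bind, unbind, setup, and a known speed).

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb_ch9.h>
#include <linux/usb_gadget.h>

static int my_bind (struct usb_gadget *gadget)
{
	/* allocate device state, claim endpoints, prepare descriptors */
	return 0;
}

static void my_unbind (struct usb_gadget *gadget)
{
	/* release whatever my_bind() allocated */
}

static int my_setup (struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* handle class/vendor control requests; queue a response on ep0
	 * and return its length, or return a negative errno to stall
	 */
	return -EOPNOTSUPP;
}

static void my_disconnect (struct usb_gadget *gadget)
{
	/* the host dropped the session; quiesce non-control i/o */
}

static struct usb_gadget_driver my_driver = {
	.function	= (char *) "editorial example",
	.speed		= USB_SPEED_FULL,
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
	.disconnect	= my_disconnect,
	.driver		= {
		.name	= (char *) "g_example",
	},
};

How such a driver is registered and torn down is sketched later, after usb_gadget_unregister_driver() in dummy_hcd.c.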
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
new file mode 100644
index 000000000000..d5fd04d886e6
--- /dev/null
+++ b/drivers/usb/gadget/Makefile
@@ -0,0 +1,30 @@
1#
2# USB peripheral controller drivers
3#
4obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
5obj-$(CONFIG_USB_NET2280) += net2280.o
6obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o
7obj-$(CONFIG_USB_GOKU) += goku_udc.o
8obj-$(CONFIG_USB_OMAP) += omap_udc.o
9obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
10
11#
12# USB gadget drivers
13#
14g_zero-objs := zero.o usbstring.o config.o epautoconf.o
15g_ether-objs := ether.o usbstring.o config.o epautoconf.o
16g_serial-objs := serial.o usbstring.o config.o epautoconf.o
17gadgetfs-objs := inode.o
18g_file_storage-objs := file_storage.o usbstring.o config.o \
19 epautoconf.o
20
21ifeq ($(CONFIG_USB_ETH_RNDIS),y)
22 g_ether-objs += rndis.o
23endif
24
25obj-$(CONFIG_USB_ZERO) += g_zero.o
26obj-$(CONFIG_USB_ETH) += g_ether.o
27obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
28obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
29obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
30
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
new file mode 100644
index 000000000000..83b4866df9af
--- /dev/null
+++ b/drivers/usb/gadget/config.c
@@ -0,0 +1,117 @@
1/*
2 * usb/gadget/config.c -- simplify building config descriptors
3 *
4 * Copyright (C) 2003 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/errno.h>
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/string.h>
25#include <linux/device.h>
26
27#include <linux/usb_ch9.h>
28#include <linux/usb_gadget.h>
29
30
31/**
32 * usb_descriptor_fillbuf - fill buffer with descriptors
33 * @buf: Buffer to be filled
34 * @buflen: Size of buf
35 * @src: Array of descriptor pointers, terminated by null pointer.
36 *
37 * Copies descriptors into the buffer, returning the length or a
38 * negative error code if they can't all be copied. Useful when
39 * assembling descriptors for an associated set of interfaces used
40 * as part of configuring a composite device; or in other cases where
41 * sets of descriptors need to be marshaled.
42 */
43int
44usb_descriptor_fillbuf(void *buf, unsigned buflen,
45 const struct usb_descriptor_header **src)
46{
47 u8 *dest = buf;
48
49 if (!src)
50 return -EINVAL;
51
52 /* fill buffer from src[] until null descriptor ptr */
53 for (; 0 != *src; src++) {
54 unsigned len = (*src)->bLength;
55
56 if (len > buflen)
57 return -EINVAL;
58 memcpy(dest, *src, len);
59 buflen -= len;
60 dest += len;
61 }
62 return dest - (u8 *)buf;
63}
64
65
66/**
67 * usb_gadget_config_buf - builds a complete configuration descriptor
68 * @config: Header for the descriptor, including characteristics such
69 * as power requirements and number of interfaces.
70 * @desc: Null-terminated vector of pointers to the descriptors (interface,
71 * endpoint, etc) defining all functions in this device configuration.
72 * @buf: Buffer for the resulting configuration descriptor.
73 * @length: Length of buffer. If this is not big enough to hold the
74 * entire configuration descriptor, an error code will be returned.
75 *
76 * This copies descriptors into the response buffer, building a descriptor
77 * for that configuration. It returns the buffer length or a negative
78 * status code. The config.wTotalLength field is set to match the length
79 * of the result, but other descriptor fields (including power usage and
80 * interface count) must be set by the caller.
81 *
82 * Gadget drivers could use this when constructing a config descriptor
83 * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the
84 * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed.
85 */
86int usb_gadget_config_buf(
87 const struct usb_config_descriptor *config,
88 void *buf,
89 unsigned length,
90 const struct usb_descriptor_header **desc
91)
92{
93 struct usb_config_descriptor *cp = buf;
94 int len;
95
96 /* config descriptor first */
97 if (length < USB_DT_CONFIG_SIZE || !desc)
98 return -EINVAL;
99 *cp = *config;
100
101 /* then interface/endpoint/class/vendor/... */
102 len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
103 length - USB_DT_CONFIG_SIZE, desc);
104 if (len < 0)
105 return len;
106 len += USB_DT_CONFIG_SIZE;
107 if (len > 0xffff)
108 return -EINVAL;
109
110 /* patch up the config descriptor */
111 cp->bLength = USB_DT_CONFIG_SIZE;
112 cp->bDescriptorType = USB_DT_CONFIG;
113 cp->wTotalLength = cpu_to_le16(len);
114 cp->bmAttributes |= USB_CONFIG_ATT_ONE;
115 return len;
116}
117
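
A hedged usage sketch (editorial, not part of this commit) may help show the intended call pattern of usb_gadget_config_buf() when answering USB_REQ_GET_DESCRIPTOR for a configuration. The interface and endpoint descriptors here are made-up placeholders, left partly blank the way endpoint autoconfiguration would later fill them in.

#include <linux/usb_ch9.h>
#include <linux/usb_gadget.h>

static struct usb_interface_descriptor intf_desc = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bNumEndpoints		= 2,
	.bInterfaceClass	= USB_CLASS_VENDOR_SPEC,
};

static struct usb_endpoint_descriptor bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,	/* number assigned later */
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor bulk_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_config_descriptor config_desc = {
	.bLength		= USB_DT_CONFIG_SIZE,
	.bDescriptorType	= USB_DT_CONFIG,
	/* wTotalLength is computed by usb_gadget_config_buf() */
	.bNumInterfaces		= 1,
	.bConfigurationValue	= 1,
	.bmAttributes		= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
	.bMaxPower		= 1,		/* 2 mA units */
};

static const struct usb_descriptor_header *function_desc [] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &bulk_in_desc,
	(struct usb_descriptor_header *) &bulk_out_desc,
	NULL,		/* terminator required by usb_descriptor_fillbuf() */
};

static int fill_config (void *buf, unsigned length)
{
	int		len;

	/* copies config_desc, then the vector, and patches wTotalLength */
	len = usb_gadget_config_buf (&config_desc, buf, length,
			function_desc);
	if (len < 0)
		return len;
	/* for the other-speed variant, callers would then overwrite
	 * bDescriptorType with USB_DT_OTHER_SPEED_CONFIG
	 */
	return len;
}

Since usb_gadget_config_buf() ORs in USB_CONFIG_ATT_ONE itself, the initializer only needs whatever extra attribute bits the function wants.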
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
new file mode 100644
index 000000000000..8ef8a9cd9ac4
--- /dev/null
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -0,0 +1,1793 @@
1/*
2 * dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5 *
6 * Copyright (C) 2003 David Brownell
7 * Copyright (C) 2003-2005 Alan Stern
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24
25/*
26 * This exposes a device side "USB gadget" API, driven by requests to a
27 * Linux-USB host controller driver. USB traffic is simulated; there's
28 * no need for USB hardware. Use this with two other drivers:
29 *
30 * - Gadget driver, responding to requests (slave);
31 * - Host-side device driver, as already familiar in Linux.
32 *
33 * Having this all in one kernel can help some stages of development,
34 * bypassing some hardware (and driver) issues. UML could help too.
35 */
36
37#define DEBUG
38
39#include <linux/config.h>
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/delay.h>
43#include <linux/ioport.h>
44#include <linux/sched.h>
45#include <linux/slab.h>
46#include <linux/smp_lock.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/timer.h>
50#include <linux/list.h>
51#include <linux/interrupt.h>
52#include <linux/version.h>
53
54#include <linux/usb.h>
55#include <linux/usb_gadget.h>
56
57#include <asm/byteorder.h>
58#include <asm/io.h>
59#include <asm/irq.h>
60#include <asm/system.h>
61#include <asm/unaligned.h>
62
63
64#include "../core/hcd.h"
65
66
67#define DRIVER_DESC "USB Host+Gadget Emulator"
68#define DRIVER_VERSION "17 Dec 2004"
69
70static const char driver_name [] = "dummy_hcd";
71static const char driver_desc [] = "USB Host+Gadget Emulator";
72
73static const char gadget_name [] = "dummy_udc";
74
75MODULE_DESCRIPTION (DRIVER_DESC);
76MODULE_AUTHOR ("David Brownell");
77MODULE_LICENSE ("GPL");
78
79/*-------------------------------------------------------------------------*/
80
81/* gadget side driver data structures */
82struct dummy_ep {
83 struct list_head queue;
84 unsigned long last_io; /* jiffies timestamp */
85 struct usb_gadget *gadget;
86 const struct usb_endpoint_descriptor *desc;
87 struct usb_ep ep;
88 unsigned halted : 1;
89 unsigned already_seen : 1;
90 unsigned setup_stage : 1;
91};
92
93struct dummy_request {
94 struct list_head queue; /* ep's requests */
95 struct usb_request req;
96};
97
98static inline struct dummy_ep *usb_ep_to_dummy_ep (struct usb_ep *_ep)
99{
100 return container_of (_ep, struct dummy_ep, ep);
101}
102
103static inline struct dummy_request *usb_request_to_dummy_request
104 (struct usb_request *_req)
105{
106 return container_of (_req, struct dummy_request, req);
107}
108
109/*-------------------------------------------------------------------------*/
110
111/*
112 * Every device has ep0 for control requests, plus up to 30 more endpoints,
113 * in one of two types:
114 *
115 * - Configurable: direction (in/out), type (bulk, iso, etc), and endpoint
116 * number can be changed. Names like "ep-a" are used for this type.
117 *
118 * - Fixed Function: in other cases. some characteristics may be mutable;
119 * that'd be hardware-specific. Names like "ep12out-bulk" are used.
120 *
121 * Gadget drivers are responsible for not setting up conflicting endpoint
122 * configurations, illegal or unsupported packet lengths, and so on.
123 */
124
125static const char ep0name [] = "ep0";
126
127static const char *const ep_name [] = {
128 ep0name, /* everyone has ep0 */
129
130 /* act like a net2280: high speed, six configurable endpoints */
131 "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
132
133 /* or like pxa250: fifteen fixed function endpoints */
134 "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
135 "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
136 "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
137 "ep15in-int",
138
139 /* or like sa1100: two fixed function endpoints */
140 "ep1out-bulk", "ep2in-bulk",
141};
142#define DUMMY_ENDPOINTS (sizeof(ep_name)/sizeof(char *))
143
144#define FIFO_SIZE 64
145
146struct urbp {
147 struct urb *urb;
148 struct list_head urbp_list;
149};
150
151struct dummy {
152 spinlock_t lock;
153
154 /*
155 * SLAVE/GADGET side support
156 */
157 struct dummy_ep ep [DUMMY_ENDPOINTS];
158 int address;
159 struct usb_gadget gadget;
160 struct usb_gadget_driver *driver;
161 struct dummy_request fifo_req;
162 u8 fifo_buf [FIFO_SIZE];
163 u16 devstatus;
164
165 /*
166 * MASTER/HOST side support
167 */
168 struct timer_list timer;
169 u32 port_status;
170 unsigned resuming:1;
171 unsigned long re_timeout;
172
173 struct usb_device *udev;
174 struct list_head urbp_list;
175};
176
177static inline struct dummy *hcd_to_dummy (struct usb_hcd *hcd)
178{
179 return (struct dummy *) (hcd->hcd_priv);
180}
181
182static inline struct usb_hcd *dummy_to_hcd (struct dummy *dum)
183{
184 return container_of((void *) dum, struct usb_hcd, hcd_priv);
185}
186
187static inline struct device *dummy_dev (struct dummy *dum)
188{
189 return dummy_to_hcd(dum)->self.controller;
190}
191
192static inline struct dummy *ep_to_dummy (struct dummy_ep *ep)
193{
194 return container_of (ep->gadget, struct dummy, gadget);
195}
196
197static inline struct dummy *gadget_to_dummy (struct usb_gadget *gadget)
198{
199 return container_of (gadget, struct dummy, gadget);
200}
201
202static inline struct dummy *gadget_dev_to_dummy (struct device *dev)
203{
204 return container_of (dev, struct dummy, gadget.dev);
205}
206
207static struct dummy *the_controller;
208
209/*-------------------------------------------------------------------------*/
210
211/*
212 * This "hardware" may look a bit odd in diagnostics since it's got both
213 * host and device sides; and it binds different drivers to each side.
214 */
215static struct platform_device the_pdev;
216
217static struct device_driver dummy_driver = {
218 .name = (char *) driver_name,
219 .bus = &platform_bus_type,
220};
221
222/*-------------------------------------------------------------------------*/
223
224/* SLAVE/GADGET SIDE DRIVER
225 *
226 * This only tracks gadget state. All the work is done when the host
227 * side tries some (emulated) i/o operation. Real device controller
228 * drivers would do real i/o using dma, fifos, irqs, timers, etc.
229 */
230
231#define is_enabled(dum) \
232 (dum->port_status & USB_PORT_STAT_ENABLE)
233
234static int
235dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
236{
237 struct dummy *dum;
238 struct dummy_ep *ep;
239 unsigned max;
240 int retval;
241
242 ep = usb_ep_to_dummy_ep (_ep);
243 if (!_ep || !desc || ep->desc || _ep->name == ep0name
244 || desc->bDescriptorType != USB_DT_ENDPOINT)
245 return -EINVAL;
246 dum = ep_to_dummy (ep);
247 if (!dum->driver || !is_enabled (dum))
248 return -ESHUTDOWN;
249 max = le16_to_cpu(desc->wMaxPacketSize) & 0x3ff;
250
251 /* drivers must not request bad settings, since lower levels
252 * (hardware or its drivers) may not check. some endpoints
253 * can't do iso, many have maxpacket limitations, etc.
254 *
255 * since this "hardware" driver is here to help debugging, we
256 * have some extra sanity checks. (there could be more though,
257 * especially for "ep9out" style fixed function ones.)
258 */
259 retval = -EINVAL;
260 switch (desc->bmAttributes & 0x03) {
261 case USB_ENDPOINT_XFER_BULK:
262 if (strstr (ep->ep.name, "-iso")
263 || strstr (ep->ep.name, "-int")) {
264 goto done;
265 }
266 switch (dum->gadget.speed) {
267 case USB_SPEED_HIGH:
268 if (max == 512)
269 break;
270 /* conserve return statements */
271 default:
272 switch (max) {
273 case 8: case 16: case 32: case 64:
274 /* we'll fake any legal size */
275 break;
276 default:
277 case USB_SPEED_LOW:
278 goto done;
279 }
280 }
281 break;
282 case USB_ENDPOINT_XFER_INT:
283 if (strstr (ep->ep.name, "-iso")) /* bulk is ok */
284 goto done;
285 /* real hardware might not handle all packet sizes */
286 switch (dum->gadget.speed) {
287 case USB_SPEED_HIGH:
288 if (max <= 1024)
289 break;
290 /* save a return statement */
291 case USB_SPEED_FULL:
292 if (max <= 64)
293 break;
294 /* save a return statement */
295 default:
296 if (max <= 8)
297 break;
298 goto done;
299 }
300 break;
301 case USB_ENDPOINT_XFER_ISOC:
302 if (strstr (ep->ep.name, "-bulk")
303 || strstr (ep->ep.name, "-int"))
304 goto done;
305 /* real hardware might not handle all packet sizes */
306 switch (dum->gadget.speed) {
307 case USB_SPEED_HIGH:
308 if (max <= 1024)
309 break;
310 /* save a return statement */
311 case USB_SPEED_FULL:
312 if (max <= 1023)
313 break;
314 /* save a return statement */
315 default:
316 goto done;
317 }
318 break;
319 default:
320 /* few chips support control except on ep0 */
321 goto done;
322 }
323
324 _ep->maxpacket = max;
325 ep->desc = desc;
326
327 dev_dbg (dummy_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d\n",
328 _ep->name,
329 desc->bEndpointAddress & 0x0f,
330 (desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
331 ({ char *val;
332 switch (desc->bmAttributes & 0x03) {
333 case USB_ENDPOINT_XFER_BULK: val = "bulk"; break;
334 case USB_ENDPOINT_XFER_ISOC: val = "iso"; break;
335 case USB_ENDPOINT_XFER_INT: val = "intr"; break;
336 default: val = "ctrl"; break;
337 }; val; }),
338 max);
339
340 /* at this point real hardware should be NAKing transfers
341 * to that endpoint, until a buffer is queued to it.
342 */
343 retval = 0;
344done:
345 return retval;
346}
347
348/* called with spinlock held */
349static void nuke (struct dummy *dum, struct dummy_ep *ep)
350{
351 while (!list_empty (&ep->queue)) {
352 struct dummy_request *req;
353
354 req = list_entry (ep->queue.next, struct dummy_request, queue);
355 list_del_init (&req->queue);
356 req->req.status = -ESHUTDOWN;
357
358 spin_unlock (&dum->lock);
359 req->req.complete (&ep->ep, &req->req);
360 spin_lock (&dum->lock);
361 }
362}
363
364static int dummy_disable (struct usb_ep *_ep)
365{
366 struct dummy_ep *ep;
367 struct dummy *dum;
368 unsigned long flags;
369 int retval;
370
371 ep = usb_ep_to_dummy_ep (_ep);
372 if (!_ep || !ep->desc || _ep->name == ep0name)
373 return -EINVAL;
374 dum = ep_to_dummy (ep);
375
376 spin_lock_irqsave (&dum->lock, flags);
377 ep->desc = NULL;
378 retval = 0;
379 nuke (dum, ep);
380 spin_unlock_irqrestore (&dum->lock, flags);
381
382 dev_dbg (dummy_dev(dum), "disabled %s\n", _ep->name);
383 return retval;
384}
385
386static struct usb_request *
387dummy_alloc_request (struct usb_ep *_ep, int mem_flags)
388{
389 struct dummy_ep *ep;
390 struct dummy_request *req;
391
392 if (!_ep)
393 return NULL;
394 ep = usb_ep_to_dummy_ep (_ep);
395
396 req = kmalloc (sizeof *req, mem_flags);
397 if (!req)
398 return NULL;
399 memset (req, 0, sizeof *req);
400 INIT_LIST_HEAD (&req->queue);
401 return &req->req;
402}
403
404static void
405dummy_free_request (struct usb_ep *_ep, struct usb_request *_req)
406{
407 struct dummy_ep *ep;
408 struct dummy_request *req;
409
410 ep = usb_ep_to_dummy_ep (_ep);
411 if (!ep || !_req || (!ep->desc && _ep->name != ep0name))
412 return;
413
414 req = usb_request_to_dummy_request (_req);
415 WARN_ON (!list_empty (&req->queue));
416 kfree (req);
417}
418
419static void *
420dummy_alloc_buffer (
421 struct usb_ep *_ep,
422 unsigned bytes,
423 dma_addr_t *dma,
424 int mem_flags
425) {
426 char *retval;
427 struct dummy_ep *ep;
428 struct dummy *dum;
429
430 ep = usb_ep_to_dummy_ep (_ep);
431 dum = ep_to_dummy (ep);
432
433 if (!dum->driver)
434 return NULL;
435 retval = kmalloc (bytes, mem_flags);
436 *dma = (dma_addr_t) retval;
437 return retval;
438}
439
440static void
441dummy_free_buffer (
442 struct usb_ep *_ep,
443 void *buf,
444 dma_addr_t dma,
445 unsigned bytes
446) {
447 if (bytes)
448 kfree (buf);
449}
450
451static void
452fifo_complete (struct usb_ep *ep, struct usb_request *req)
453{
454}
455
456static int
457dummy_queue (struct usb_ep *_ep, struct usb_request *_req, int mem_flags)
458{
459 struct dummy_ep *ep;
460 struct dummy_request *req;
461 struct dummy *dum;
462 unsigned long flags;
463
464 req = usb_request_to_dummy_request (_req);
465 if (!_req || !list_empty (&req->queue) || !_req->complete)
466 return -EINVAL;
467
468 ep = usb_ep_to_dummy_ep (_ep);
469 if (!_ep || (!ep->desc && _ep->name != ep0name))
470 return -EINVAL;
471
472 dum = ep_to_dummy (ep);
473 if (!dum->driver || !is_enabled (dum))
474 return -ESHUTDOWN;
475
476#if 0
477 dev_dbg (dummy_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
478 ep, _req, _ep->name, _req->length, _req->buf);
479#endif
480
481 _req->status = -EINPROGRESS;
482 _req->actual = 0;
483 spin_lock_irqsave (&dum->lock, flags);
484
485 /* implement an emulated single-request FIFO */
486 if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
487 list_empty (&dum->fifo_req.queue) &&
488 list_empty (&ep->queue) &&
489 _req->length <= FIFO_SIZE) {
490 req = &dum->fifo_req;
491 req->req = *_req;
492 req->req.buf = dum->fifo_buf;
493 memcpy (dum->fifo_buf, _req->buf, _req->length);
494 req->req.context = dum;
495 req->req.complete = fifo_complete;
496
497 spin_unlock (&dum->lock);
498 _req->actual = _req->length;
499 _req->status = 0;
500 _req->complete (_ep, _req);
501 spin_lock (&dum->lock);
502 }
503 list_add_tail (&req->queue, &ep->queue);
504 spin_unlock_irqrestore (&dum->lock, flags);
505
506 /* real hardware would likely enable transfers here, in case
507 * it'd been left NAKing.
508 */
509 return 0;
510}
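/* Editorial note on the single-request FIFO emulation above: an IN request
 * of at most FIFO_SIZE (64) bytes queued to an otherwise idle endpoint is
 * copied into fifo_buf and completed back to the gadget driver immediately;
 * the copy held in dum->fifo_req stays on the endpoint's queue until the
 * host-side timer consumes it in transfer() below.
 */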
511
512static int dummy_dequeue (struct usb_ep *_ep, struct usb_request *_req)
513{
514 struct dummy_ep *ep;
515 struct dummy *dum;
516 int retval = -EINVAL;
517 unsigned long flags;
518 struct dummy_request *req = NULL;
519
520 if (!_ep || !_req)
521 return retval;
522 ep = usb_ep_to_dummy_ep (_ep);
523 dum = ep_to_dummy (ep);
524
525 if (!dum->driver)
526 return -ESHUTDOWN;
527
528 spin_lock_irqsave (&dum->lock, flags);
529 list_for_each_entry (req, &ep->queue, queue) {
530 if (&req->req == _req) {
531 list_del_init (&req->queue);
532 _req->status = -ECONNRESET;
533 retval = 0;
534 break;
535 }
536 }
537 spin_unlock_irqrestore (&dum->lock, flags);
538
539 if (retval == 0) {
540 dev_dbg (dummy_dev(dum),
541 "dequeued req %p from %s, len %d buf %p\n",
542 req, _ep->name, _req->length, _req->buf);
543 _req->complete (_ep, _req);
544 }
545 return retval;
546}
547
548static int
549dummy_set_halt (struct usb_ep *_ep, int value)
550{
551 struct dummy_ep *ep;
552 struct dummy *dum;
553
554 if (!_ep)
555 return -EINVAL;
556 ep = usb_ep_to_dummy_ep (_ep);
557 dum = ep_to_dummy (ep);
558 if (!dum->driver)
559 return -ESHUTDOWN;
560 if (!value)
561 ep->halted = 0;
562 else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
563 !list_empty (&ep->queue))
564 return -EAGAIN;
565 else
566 ep->halted = 1;
567 /* FIXME clear emulated data toggle too */
568 return 0;
569}
570
571static const struct usb_ep_ops dummy_ep_ops = {
572 .enable = dummy_enable,
573 .disable = dummy_disable,
574
575 .alloc_request = dummy_alloc_request,
576 .free_request = dummy_free_request,
577
578 .alloc_buffer = dummy_alloc_buffer,
579 .free_buffer = dummy_free_buffer,
580 /* map, unmap, ... eventually hook the "generic" dma calls */
581
582 .queue = dummy_queue,
583 .dequeue = dummy_dequeue,
584
585 .set_halt = dummy_set_halt,
586};
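/* Editorial sketch (not part of this file): gadget drivers never call the
 * ops table above directly; they use the usb_ep_*() wrappers that
 * <linux/usb_gadget.h> is assumed to provide, which dispatch through
 * ep->ops -- here, to the dummy_*() functions.  The endpoint, completion
 * handler and 512-byte buffer below are placeholders for illustration.
 */
static void example_complete (struct usb_ep *ep, struct usb_request *req)
{
	/* req->status is 0 on success; req->actual holds the byte count */
}

static int example_start_in (struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct usb_request	*req;
	int			status;

	status = usb_ep_enable (ep, desc);		/* --> dummy_enable() */
	if (status < 0)
		return status;

	req = usb_ep_alloc_request (ep, GFP_KERNEL);	/* --> dummy_alloc_request() */
	if (!req)
		return -ENOMEM;
	req->buf = usb_ep_alloc_buffer (ep, 512, &req->dma, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request (ep, req);
		return -ENOMEM;
	}
	req->length = 512;
	req->complete = example_complete;

	return usb_ep_queue (ep, req, GFP_KERNEL);	/* --> dummy_queue() */
}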
587
588/*-------------------------------------------------------------------------*/
589
590/* there are both host and device side versions of this call ... */
591static int dummy_g_get_frame (struct usb_gadget *_gadget)
592{
593 struct timeval tv;
594
595 do_gettimeofday (&tv);
596 return tv.tv_usec / 1000;
597}
598
599static int dummy_wakeup (struct usb_gadget *_gadget)
600{
601 struct dummy *dum;
602
603 dum = gadget_to_dummy (_gadget);
604 if ((dum->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) == 0
605 || !(dum->port_status & (1 << USB_PORT_FEAT_SUSPEND)))
606 return -EINVAL;
607
608 /* hub notices our request, issues downstream resume, etc */
609 dum->resuming = 1;
610 dum->port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
611 return 0;
612}
613
614static int dummy_set_selfpowered (struct usb_gadget *_gadget, int value)
615{
616 struct dummy *dum;
617
618 dum = gadget_to_dummy (_gadget);
619 if (value)
620 dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
621 else
622 dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
623 return 0;
624}
625
626static const struct usb_gadget_ops dummy_ops = {
627 .get_frame = dummy_g_get_frame,
628 .wakeup = dummy_wakeup,
629 .set_selfpowered = dummy_set_selfpowered,
630};
631
632/*-------------------------------------------------------------------------*/
633
634/* "function" sysfs attribute */
635static ssize_t
636show_function (struct device *dev, char *buf)
637{
638 struct dummy *dum = gadget_dev_to_dummy (dev);
639
640 if (!dum->driver || !dum->driver->function)
641 return 0;
642 return scnprintf (buf, PAGE_SIZE, "%s\n", dum->driver->function);
643}
644DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
645
646/*-------------------------------------------------------------------------*/
647
648/*
649 * Driver registration/unregistration.
650 *
651 * This is basically hardware-specific; there's usually only one real USB
652 * device (not host) controller since that's how USB devices are intended
653 * to work. So most implementations of these api calls will rely on the
654 * fact that only one driver will ever bind to the hardware. But curious
655 * hardware can be built with discrete components, so the gadget API doesn't
656 * require that assumption.
657 *
658 * For this emulator, it might be convenient to create a usb slave device
659 * for each driver that registers: just add to a big root hub.
660 */
661
662static void
663dummy_udc_release (struct device *dev)
664{
665}
666
667static void
668dummy_pdev_release (struct device *dev)
669{
670}
671
672static int
673dummy_register_udc (struct dummy *dum)
674{
675 int rc;
676
677 strcpy (dum->gadget.dev.bus_id, "udc");
678 dum->gadget.dev.parent = dummy_dev(dum);
679 dum->gadget.dev.release = dummy_udc_release;
680
681 rc = device_register (&dum->gadget.dev);
682 if (rc == 0)
683 device_create_file (&dum->gadget.dev, &dev_attr_function);
684 return rc;
685}
686
687static void
688dummy_unregister_udc (struct dummy *dum)
689{
690 device_remove_file (&dum->gadget.dev, &dev_attr_function);
691 device_unregister (&dum->gadget.dev);
692}
693
694int
695usb_gadget_register_driver (struct usb_gadget_driver *driver)
696{
697 struct dummy *dum = the_controller;
698 int retval, i;
699
700 if (!dum)
701 return -EINVAL;
702 if (dum->driver)
703 return -EBUSY;
704 if (!driver->bind || !driver->unbind || !driver->setup
705 || driver->speed == USB_SPEED_UNKNOWN)
706 return -EINVAL;
707
708 /*
709 * SLAVE side init ... the layer above hardware, which
710 * can't enumerate without help from the driver we're binding.
711 */
712 dum->gadget.name = gadget_name;
713 dum->gadget.ops = &dummy_ops;
714 dum->gadget.is_dualspeed = 1;
715
716 dum->devstatus = 0;
717 dum->resuming = 0;
718
719 INIT_LIST_HEAD (&dum->gadget.ep_list);
720 for (i = 0; i < DUMMY_ENDPOINTS; i++) {
721 struct dummy_ep *ep = &dum->ep [i];
722
723 if (!ep_name [i])
724 break;
725 ep->ep.name = ep_name [i];
726 ep->ep.ops = &dummy_ep_ops;
727 list_add_tail (&ep->ep.ep_list, &dum->gadget.ep_list);
728 ep->halted = ep->already_seen = ep->setup_stage = 0;
729 ep->ep.maxpacket = ~0;
730 ep->last_io = jiffies;
731 ep->gadget = &dum->gadget;
732 ep->desc = NULL;
733 INIT_LIST_HEAD (&ep->queue);
734 }
735
736 dum->gadget.ep0 = &dum->ep [0].ep;
737 dum->ep [0].ep.maxpacket = 64;
738 list_del_init (&dum->ep [0].ep.ep_list);
739 INIT_LIST_HEAD(&dum->fifo_req.queue);
740
741 dum->driver = driver;
742 dum->gadget.dev.driver = &driver->driver;
743 dev_dbg (dummy_dev(dum), "binding gadget driver '%s'\n",
744 driver->driver.name);
745 if ((retval = driver->bind (&dum->gadget)) != 0) {
746 dum->driver = NULL;
747 dum->gadget.dev.driver = NULL;
748 return retval;
749 }
750
751 // FIXME: Check these calls for errors and re-order
752 driver->driver.bus = dum->gadget.dev.parent->bus;
753 driver_register (&driver->driver);
754
755 device_bind_driver (&dum->gadget.dev);
756
757 /* khubd will enumerate this in a while */
758 dum->port_status |= USB_PORT_STAT_CONNECTION
759 | (1 << USB_PORT_FEAT_C_CONNECTION);
760 return 0;
761}
762EXPORT_SYMBOL (usb_gadget_register_driver);
763
764/* caller must hold lock */
765static void
766stop_activity (struct dummy *dum, struct usb_gadget_driver *driver)
767{
768 struct dummy_ep *ep;
769
770 /* prevent any more requests */
771 dum->address = 0;
772
773 /* The timer is left running so that outstanding URBs can fail */
774
775 /* nuke any pending requests first, so driver i/o is quiesced */
776 list_for_each_entry (ep, &dum->gadget.ep_list, ep.ep_list)
777 nuke (dum, ep);
778
779 /* driver now does any non-usb quiescing necessary */
780 if (driver) {
781 spin_unlock (&dum->lock);
782 driver->disconnect (&dum->gadget);
783 spin_lock (&dum->lock);
784 }
785}
786
787int
788usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
789{
790 struct dummy *dum = the_controller;
791 unsigned long flags;
792
793 if (!dum)
794 return -ENODEV;
795 if (!driver || driver != dum->driver)
796 return -EINVAL;
797
798 dev_dbg (dummy_dev(dum), "unregister gadget driver '%s'\n",
799 driver->driver.name);
800
801 spin_lock_irqsave (&dum->lock, flags);
802 stop_activity (dum, driver);
803 dum->port_status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE |
804 USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
805 dum->port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
806 spin_unlock_irqrestore (&dum->lock, flags);
807
808 driver->unbind (&dum->gadget);
809 dum->driver = NULL;
810
811 device_release_driver (&dum->gadget.dev);
812
813 driver_unregister (&driver->driver);
814
815 return 0;
816}
817EXPORT_SYMBOL (usb_gadget_unregister_driver);
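/* Editorial sketch (not part of this file): how a gadget driver module
 * would use the two entry points exported above; my_driver, my_init and
 * my_cleanup are placeholder names.
 */
static int __init my_init (void)
{
	/* binds to the one registered peripheral controller (here, dummy) */
	return usb_gadget_register_driver (&my_driver);
}
module_init (my_init);

static void __exit my_cleanup (void)
{
	/* invokes my_driver.unbind() and detaches from the controller */
	usb_gadget_unregister_driver (&my_driver);
}
module_exit (my_cleanup);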
818
819#undef is_enabled
820
821int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
822{
823 return -ENOSYS;
824}
825EXPORT_SYMBOL (net2280_set_fifo_mode);
826
827/*-------------------------------------------------------------------------*/
828
829/* MASTER/HOST SIDE DRIVER
830 *
831 * this uses the hcd framework to hook up to host side drivers.
832 * its root hub will only have one device, otherwise it acts like
833 * a normal host controller.
834 *
835 * when urbs are queued, they're just stuck on a list that we
836 * scan in a timer callback. that callback connects writes from
837 * the host with reads from the device, and so on, based on the
838 * usb 2.0 rules.
839 */
840
841static int dummy_urb_enqueue (
842 struct usb_hcd *hcd,
843 struct usb_host_endpoint *ep,
844 struct urb *urb,
845 int mem_flags
846) {
847 struct dummy *dum;
848 struct urbp *urbp;
849 unsigned long flags;
850
851 if (!urb->transfer_buffer && urb->transfer_buffer_length)
852 return -EINVAL;
853
854 urbp = kmalloc (sizeof *urbp, mem_flags);
855 if (!urbp)
856 return -ENOMEM;
857 urbp->urb = urb;
858
859 dum = hcd_to_dummy (hcd);
860 spin_lock_irqsave (&dum->lock, flags);
861
862 if (!dum->udev) {
863 dum->udev = urb->dev;
864 usb_get_dev (dum->udev);
865 } else if (unlikely (dum->udev != urb->dev))
866 dev_err (dummy_dev(dum), "usb_device address has changed!\n");
867
868 list_add_tail (&urbp->urbp_list, &dum->urbp_list);
869 urb->hcpriv = urbp;
870 if (usb_pipetype (urb->pipe) == PIPE_CONTROL)
871 urb->error_count = 1; /* mark as a new urb */
872
873 /* kick the scheduler, it'll do the rest */
874 if (!timer_pending (&dum->timer))
875 mod_timer (&dum->timer, jiffies + 1);
876
877 spin_unlock_irqrestore (&dum->lock, flags);
878 return 0;
879}
880
881static int dummy_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
882{
883 /* giveback happens automatically in timer callback */
884 return 0;
885}
886
887static void maybe_set_status (struct urb *urb, int status)
888{
889 spin_lock (&urb->lock);
890 if (urb->status == -EINPROGRESS)
891 urb->status = status;
892 spin_unlock (&urb->lock);
893}
894
895/* transfer up to a frame's worth; caller must own lock */
896static int
897transfer (struct dummy *dum, struct urb *urb, struct dummy_ep *ep, int limit)
898{
899 struct dummy_request *req;
900
901top:
902 /* if there's no request queued, the device is NAKing; return */
903 list_for_each_entry (req, &ep->queue, queue) {
904 unsigned host_len, dev_len, len;
905 int is_short, to_host;
906 int rescan = 0;
907
908 /* 1..N packets of ep->ep.maxpacket each ... the last one
909 * may be short (including zero length).
910 *
911 * writer can send a zlp explicitly (length 0) or implicitly
912 * (length mod maxpacket zero, and 'zero' flag); they always
913 * terminate reads.
914 */
915 host_len = urb->transfer_buffer_length - urb->actual_length;
916 dev_len = req->req.length - req->req.actual;
917 len = min (host_len, dev_len);
918
919 /* FIXME update emulated data toggle too */
920
921 to_host = usb_pipein (urb->pipe);
922 if (unlikely (len == 0))
923 is_short = 1;
924 else {
925 char *ubuf, *rbuf;
926
927 /* not enough bandwidth left? */
928 if (limit < ep->ep.maxpacket && limit < len)
929 break;
930 len = min (len, (unsigned) limit);
931 if (len == 0)
932 break;
933
934 /* use an extra pass for the final short packet */
935 if (len > ep->ep.maxpacket) {
936 rescan = 1;
937 len -= (len % ep->ep.maxpacket);
938 }
939 is_short = (len % ep->ep.maxpacket) != 0;
940
941 /* else transfer packet(s) */
942 ubuf = urb->transfer_buffer + urb->actual_length;
943 rbuf = req->req.buf + req->req.actual;
944 if (to_host)
945 memcpy (ubuf, rbuf, len);
946 else
947 memcpy (rbuf, ubuf, len);
948 ep->last_io = jiffies;
949
950 limit -= len;
951 urb->actual_length += len;
952 req->req.actual += len;
953 }
954
955 /* short packets terminate, maybe with overflow/underflow.
956 * it's only really an error to write too much.
957 *
958 * partially filling a buffer optionally blocks queue advances
959 * (so completion handlers can clean up the queue) but we don't
960 * need to emulate such data-in-flight. so we only show part
961 * of the URB_SHORT_NOT_OK effect: completion status.
962 */
963 if (is_short) {
964 if (host_len == dev_len) {
965 req->req.status = 0;
966 maybe_set_status (urb, 0);
967 } else if (to_host) {
968 req->req.status = 0;
969 if (dev_len > host_len)
970 maybe_set_status (urb, -EOVERFLOW);
971 else
972 maybe_set_status (urb,
973 (urb->transfer_flags
974 & URB_SHORT_NOT_OK)
975 ? -EREMOTEIO : 0);
976 } else if (!to_host) {
977 maybe_set_status (urb, 0);
978 if (host_len > dev_len)
979 req->req.status = -EOVERFLOW;
980 else
981 req->req.status = 0;
982 }
983
984 /* many requests terminate without a short packet */
985 } else {
986 if (req->req.length == req->req.actual
987 && !req->req.zero)
988 req->req.status = 0;
989 if (urb->transfer_buffer_length == urb->actual_length
990 && !(urb->transfer_flags
991 & URB_ZERO_PACKET)) {
992 maybe_set_status (urb, 0);
993 }
994 }
995
996 /* device side completion --> continuable */
997 if (req->req.status != -EINPROGRESS) {
998 list_del_init (&req->queue);
999
1000 spin_unlock (&dum->lock);
1001 req->req.complete (&ep->ep, &req->req);
1002 spin_lock (&dum->lock);
1003
1004 /* requests might have been unlinked... */
1005 rescan = 1;
1006 }
1007
1008 /* host side completion --> terminate */
1009 if (urb->status != -EINPROGRESS)
1010 break;
1011
1012 /* rescan to continue with any other queued i/o */
1013 if (rescan)
1014 goto top;
1015 }
1016 return limit;
1017}
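/* Worked example for the rules above (editorial; assume ep->ep.maxpacket
 * is 64): a 130-byte device-to-host request is delivered as 64 + 64 + 2,
 * and the trailing 2-byte short packet terminates the host's read.  A
 * 128-byte request produces no short packet of its own; per the comment
 * at the top of transfer(), the writer ends such a read either with an
 * explicit zero-length request or by setting the request's 'zero' flag.
 */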
1018
1019static int periodic_bytes (struct dummy *dum, struct dummy_ep *ep)
1020{
1021 int limit = ep->ep.maxpacket;
1022
1023 if (dum->gadget.speed == USB_SPEED_HIGH) {
1024 int tmp;
1025
1026 /* high bandwidth mode */
1027 tmp = le16_to_cpu(ep->desc->wMaxPacketSize);
1028 /* bits 12..11 of wMaxPacketSize: extra transactions per microframe */
1029 tmp = (tmp >> 11) & 0x03;
1030 tmp *= 8 /* applies to entire frame */;
1031 limit += limit * tmp;
1032 }
1033 return limit;
1034}
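/* Example of the calculation above (editorial): a high-bandwidth
 * high-speed endpoint whose descriptor carries wMaxPacketSize 0x1300
 * advertises 768-byte packets with two additional transactions per
 * microframe (bits 12..11), so tmp becomes 2 * 8 == 16 and the per-frame
 * allowance grows from 768 to 768 + 768 * 16 == 13056 bytes.
 */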
1035
1036#define is_active(dum) ((dum->port_status & \
1037 (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
1038 USB_PORT_STAT_SUSPEND)) \
1039 == (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))
1040
1041static struct dummy_ep *find_endpoint (struct dummy *dum, u8 address)
1042{
1043 int i;
1044
1045 if (!is_active (dum))
1046 return NULL;
1047 if ((address & ~USB_DIR_IN) == 0)
1048 return &dum->ep [0];
1049 for (i = 1; i < DUMMY_ENDPOINTS; i++) {
1050 struct dummy_ep *ep = &dum->ep [i];
1051
1052 if (!ep->desc)
1053 continue;
1054 if (ep->desc->bEndpointAddress == address)
1055 return ep;
1056 }
1057 return NULL;
1058}
1059
1060#undef is_active
1061
1062#define Dev_Request (USB_TYPE_STANDARD | USB_RECIP_DEVICE)
1063#define Dev_InRequest (Dev_Request | USB_DIR_IN)
1064#define Intf_Request (USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
1065#define Intf_InRequest (Intf_Request | USB_DIR_IN)
1066#define Ep_Request (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
1067#define Ep_InRequest (Ep_Request | USB_DIR_IN)
1068
1069/* drive both sides of the transfers; looks like irq handlers to
1070 * both drivers except the callbacks aren't in_irq().
1071 */
1072static void dummy_timer (unsigned long _dum)
1073{
1074 struct dummy *dum = (struct dummy *) _dum;
1075 struct urbp *urbp, *tmp;
1076 unsigned long flags;
1077 int limit, total;
1078 int i;
1079
1080 /* simplistic model for one frame's bandwidth */
1081 switch (dum->gadget.speed) {
1082 case USB_SPEED_LOW:
1083 total = 8/*bytes*/ * 12/*packets*/;
1084 break;
1085 case USB_SPEED_FULL:
1086 total = 64/*bytes*/ * 19/*packets*/;
1087 break;
1088 case USB_SPEED_HIGH:
1089 total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
1090 break;
1091 default:
1092 dev_err (dummy_dev(dum), "bogus device speed\n");
1093 return;
1094 }
1095
1096 /* FIXME if HZ != 1000 this will probably misbehave ... */
1097
1098 /* look at each urb queued by the host side driver */
1099 spin_lock_irqsave (&dum->lock, flags);
1100
1101 if (!dum->udev) {
1102 dev_err (dummy_dev(dum),
1103 "timer fired with no URBs pending?\n");
1104 spin_unlock_irqrestore (&dum->lock, flags);
1105 return;
1106 }
1107
1108 for (i = 0; i < DUMMY_ENDPOINTS; i++) {
1109 if (!ep_name [i])
1110 break;
1111 dum->ep [i].already_seen = 0;
1112 }
1113
1114restart:
1115 list_for_each_entry_safe (urbp, tmp, &dum->urbp_list, urbp_list) {
1116 struct urb *urb;
1117 struct dummy_request *req;
1118 u8 address;
1119 struct dummy_ep *ep = NULL;
1120 int type;
1121
1122 urb = urbp->urb;
1123 if (urb->status != -EINPROGRESS) {
1124 /* likely it was just unlinked */
1125 goto return_urb;
1126 }
1127 type = usb_pipetype (urb->pipe);
1128
1129 /* used up this frame's non-periodic bandwidth?
1130 * FIXME there's infinite bandwidth for control and
1131 * periodic transfers ... unrealistic.
1132 */
1133 if (total <= 0 && type == PIPE_BULK)
1134 continue;
1135
1136 /* find the gadget's ep for this request (if configured) */
1137 address = usb_pipeendpoint (urb->pipe);
1138 if (usb_pipein (urb->pipe))
1139 address |= USB_DIR_IN;
1140 ep = find_endpoint(dum, address);
1141 if (!ep) {
1142 /* set_configuration() disagreement */
1143 dev_dbg (dummy_dev(dum),
1144 "no ep configured for urb %p\n",
1145 urb);
1146 maybe_set_status (urb, -EPROTO);
1147 goto return_urb;
1148 }
1149
1150 if (ep->already_seen)
1151 continue;
1152 ep->already_seen = 1;
1153 if (ep == &dum->ep [0] && urb->error_count) {
1154 ep->setup_stage = 1; /* a new urb */
1155 urb->error_count = 0;
1156 }
1157 if (ep->halted && !ep->setup_stage) {
1158 /* NOTE: must not be iso! */
1159 dev_dbg (dummy_dev(dum), "ep %s halted, urb %p\n",
1160 ep->ep.name, urb);
1161 maybe_set_status (urb, -EPIPE);
1162 goto return_urb;
1163 }
1164 /* FIXME make sure both ends agree on maxpacket */
1165
1166 /* handle control requests */
1167 if (ep == &dum->ep [0] && ep->setup_stage) {
1168 struct usb_ctrlrequest setup;
1169 int value = 1;
1170 struct dummy_ep *ep2;
1171
1172 setup = *(struct usb_ctrlrequest*) urb->setup_packet;
1173 le16_to_cpus (&setup.wIndex);
1174 le16_to_cpus (&setup.wValue);
1175 le16_to_cpus (&setup.wLength);
1176 if (setup.wLength != urb->transfer_buffer_length) {
1177 maybe_set_status (urb, -EOVERFLOW);
1178 goto return_urb;
1179 }
1180
1181 /* paranoia, in case of stale queued data */
1182 list_for_each_entry (req, &ep->queue, queue) {
1183 list_del_init (&req->queue);
1184 req->req.status = -EOVERFLOW;
1185 dev_dbg (dummy_dev(dum), "stale req = %p\n",
1186 req);
1187
1188 spin_unlock (&dum->lock);
1189 req->req.complete (&ep->ep, &req->req);
1190 spin_lock (&dum->lock);
1191 ep->already_seen = 0;
1192 goto restart;
1193 }
1194
1195 /* gadget driver never sees set_address or operations
1196 * on standard feature flags. some hardware doesn't
1197 * even expose them.
1198 */
1199 ep->last_io = jiffies;
1200 ep->setup_stage = 0;
1201 ep->halted = 0;
1202 switch (setup.bRequest) {
1203 case USB_REQ_SET_ADDRESS:
1204 if (setup.bRequestType != Dev_Request)
1205 break;
1206 dum->address = setup.wValue;
1207 maybe_set_status (urb, 0);
1208 dev_dbg (dummy_dev(dum), "set_address = %d\n",
1209 setup.wValue);
1210 value = 0;
1211 break;
1212 case USB_REQ_SET_FEATURE:
1213 if (setup.bRequestType == Dev_Request) {
1214 value = 0;
1215 switch (setup.wValue) {
1216 case USB_DEVICE_REMOTE_WAKEUP:
1217 break;
1218 default:
1219 value = -EOPNOTSUPP;
1220 }
1221 if (value == 0) {
1222 dum->devstatus |=
1223 (1 << setup.wValue);
1224 maybe_set_status (urb, 0);
1225 }
1226
1227 } else if (setup.bRequestType == Ep_Request) {
1228 // endpoint halt
1229 ep2 = find_endpoint (dum,
1230 setup.wIndex);
1231 if (!ep2) {
1232 value = -EOPNOTSUPP;
1233 break;
1234 }
1235 ep2->halted = 1;
1236 value = 0;
1237 maybe_set_status (urb, 0);
1238 }
1239 break;
1240 case USB_REQ_CLEAR_FEATURE:
1241 if (setup.bRequestType == Dev_Request) {
1242 switch (setup.wValue) {
1243 case USB_DEVICE_REMOTE_WAKEUP:
1244 dum->devstatus &= ~(1 <<
1245 USB_DEVICE_REMOTE_WAKEUP);
1246 value = 0;
1247 maybe_set_status (urb, 0);
1248 break;
1249 default:
1250 value = -EOPNOTSUPP;
1251 break;
1252 }
1253 } else if (setup.bRequestType == Ep_Request) {
1254 // endpoint halt
1255 ep2 = find_endpoint (dum,
1256 setup.wIndex);
1257 if (!ep2) {
1258 value = -EOPNOTSUPP;
1259 break;
1260 }
1261 ep2->halted = 0;
1262 value = 0;
1263 maybe_set_status (urb, 0);
1264 }
1265 break;
1266 case USB_REQ_GET_STATUS:
1267 if (setup.bRequestType == Dev_InRequest
1268 || setup.bRequestType
1269 == Intf_InRequest
1270 || setup.bRequestType
1271 == Ep_InRequest
1272 ) {
1273 char *buf;
1274
1275 // device: remote wakeup, selfpowered
1276 // interface: nothing
1277 // endpoint: halt
1278 buf = (char *)urb->transfer_buffer;
1279 if (urb->transfer_buffer_length > 0) {
1280 if (setup.bRequestType ==
1281 Ep_InRequest) {
1282 ep2 = find_endpoint (dum, setup.wIndex);
1283 if (!ep2) {
1284 value = -EOPNOTSUPP;
1285 break;
1286 }
1287 buf [0] = ep2->halted;
1288 } else if (setup.bRequestType ==
1289 Dev_InRequest) {
1290 buf [0] = (u8)
1291 dum->devstatus;
1292 } else
1293 buf [0] = 0;
1294 }
1295 if (urb->transfer_buffer_length > 1)
1296 buf [1] = 0;
1297 urb->actual_length = min (2,
1298 urb->transfer_buffer_length);
1299 value = 0;
1300 maybe_set_status (urb, 0);
1301 }
1302 break;
1303 }
1304
1305 /* gadget driver handles all other requests. block
1306 * until setup() returns; no reentrancy issues etc.
1307 */
1308 if (value > 0) {
1309 spin_unlock (&dum->lock);
1310 value = dum->driver->setup (&dum->gadget,
1311 &setup);
1312 spin_lock (&dum->lock);
1313
1314 if (value >= 0) {
1315 /* no delays (max 64KB data stage) */
1316 limit = 64*1024;
1317 goto treat_control_like_bulk;
1318 }
1319 /* error, see below */
1320 }
1321
1322 if (value < 0) {
1323 if (value != -EOPNOTSUPP)
1324 dev_dbg (dummy_dev(dum),
1325 "setup --> %d\n",
1326 value);
1327 maybe_set_status (urb, -EPIPE);
1328 urb->actual_length = 0;
1329 }
1330
1331 goto return_urb;
1332 }
1333
1334 /* non-control requests */
1335 limit = total;
1336 switch (usb_pipetype (urb->pipe)) {
1337 case PIPE_ISOCHRONOUS:
1338 /* FIXME is it urb->interval since the last xfer?
1339 * use urb->iso_frame_desc[i].
1340 * complete whether or not ep has requests queued.
1341 * report random errors, to debug drivers.
1342 */
1343 limit = max (limit, periodic_bytes (dum, ep));
1344 maybe_set_status (urb, -ENOSYS);
1345 break;
1346
1347 case PIPE_INTERRUPT:
1348 /* FIXME is it urb->interval since the last xfer?
1349 * this almost certainly polls too fast.
1350 */
1351 limit = max (limit, periodic_bytes (dum, ep));
1352 /* FALLTHROUGH */
1353
1354 // case PIPE_BULK: case PIPE_CONTROL:
1355 default:
1356 treat_control_like_bulk:
1357 ep->last_io = jiffies;
1358 total = transfer (dum, urb, ep, limit);
1359 break;
1360 }
1361
1362 /* incomplete transfer? */
1363 if (urb->status == -EINPROGRESS)
1364 continue;
1365
1366return_urb:
1367 urb->hcpriv = NULL;
1368 list_del (&urbp->urbp_list);
1369 kfree (urbp);
1370 if (ep)
1371 ep->already_seen = ep->setup_stage = 0;
1372
1373 spin_unlock (&dum->lock);
1374 usb_hcd_giveback_urb (dummy_to_hcd(dum), urb, NULL);
1375 spin_lock (&dum->lock);
1376
1377 goto restart;
1378 }
1379
1380 /* want a 1 msec delay here */
1381 if (!list_empty (&dum->urbp_list))
1382 mod_timer (&dum->timer, jiffies + msecs_to_jiffies(1));
1383 else {
1384 usb_put_dev (dum->udev);
1385 dum->udev = NULL;
1386 }
1387
1388 spin_unlock_irqrestore (&dum->lock, flags);
1389}
1390
1391/*-------------------------------------------------------------------------*/
1392
1393#define PORT_C_MASK \
1394 ((1 << USB_PORT_FEAT_C_CONNECTION) \
1395 | (1 << USB_PORT_FEAT_C_ENABLE) \
1396 | (1 << USB_PORT_FEAT_C_SUSPEND) \
1397 | (1 << USB_PORT_FEAT_C_OVER_CURRENT) \
1398 | (1 << USB_PORT_FEAT_C_RESET))
1399
1400static int dummy_hub_status (struct usb_hcd *hcd, char *buf)
1401{
1402 struct dummy *dum;
1403 unsigned long flags;
1404 int retval;
1405
1406 dum = hcd_to_dummy (hcd);
1407
1408 spin_lock_irqsave (&dum->lock, flags);
1409 if (!(dum->port_status & PORT_C_MASK))
1410 retval = 0;
1411 else {
1412 *buf = (1 << 1);
1413 dev_dbg (dummy_dev(dum), "port status 0x%08x has changes\n",
1414 dum->port_status);
1415 retval = 1;
1416 }
1417 spin_unlock_irqrestore (&dum->lock, flags);
1418 return retval;
1419}
1420
1421static inline void
1422hub_descriptor (struct usb_hub_descriptor *desc)
1423{
1424 memset (desc, 0, sizeof *desc);
1425 desc->bDescriptorType = 0x29;
1426 desc->bDescLength = 9;
1427 desc->wHubCharacteristics = __constant_cpu_to_le16 (0x0001);
1428 desc->bNbrPorts = 1;
1429 desc->bitmap [0] = 0xff;
1430 desc->bitmap [1] = 0xff;
1431}
1432
1433static int dummy_hub_control (
1434 struct usb_hcd *hcd,
1435 u16 typeReq,
1436 u16 wValue,
1437 u16 wIndex,
1438 char *buf,
1439 u16 wLength
1440) {
1441 struct dummy *dum;
1442 int retval = 0;
1443 unsigned long flags;
1444
1445 dum = hcd_to_dummy (hcd);
1446 spin_lock_irqsave (&dum->lock, flags);
1447 switch (typeReq) {
1448 case ClearHubFeature:
1449 break;
1450 case ClearPortFeature:
1451 switch (wValue) {
1452 case USB_PORT_FEAT_SUSPEND:
1453 if (dum->port_status & (1 << USB_PORT_FEAT_SUSPEND)) {
1454 /* 20msec resume signaling */
1455 dum->resuming = 1;
1456 dum->re_timeout = jiffies +
1457 msecs_to_jiffies(20);
1458 }
1459 break;
1460 case USB_PORT_FEAT_POWER:
1461 dum->port_status = 0;
1462 dum->resuming = 0;
1463 stop_activity(dum, dum->driver);
1464 break;
1465 default:
1466 dum->port_status &= ~(1 << wValue);
1467 }
1468 break;
1469 case GetHubDescriptor:
1470 hub_descriptor ((struct usb_hub_descriptor *) buf);
1471 break;
1472 case GetHubStatus:
1473 *(u32 *) buf = __constant_cpu_to_le32 (0);
1474 break;
1475 case GetPortStatus:
1476 if (wIndex != 1)
1477 retval = -EPIPE;
1478
1479 /* whoever resets or resumes must GetPortStatus to
1480 * complete it!!
1481 */
1482 if (dum->resuming && time_after (jiffies, dum->re_timeout)) {
1483 dum->port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
1484 dum->port_status &= ~(1 << USB_PORT_FEAT_SUSPEND);
1485 dum->resuming = 0;
1486 dum->re_timeout = 0;
1487 if (dum->driver && dum->driver->resume) {
1488 spin_unlock (&dum->lock);
1489 dum->driver->resume (&dum->gadget);
1490 spin_lock (&dum->lock);
1491 }
1492 }
1493 if ((dum->port_status & (1 << USB_PORT_FEAT_RESET)) != 0
1494 && time_after (jiffies, dum->re_timeout)) {
1495 dum->port_status |= (1 << USB_PORT_FEAT_C_RESET);
1496 dum->port_status &= ~(1 << USB_PORT_FEAT_RESET);
1497 dum->re_timeout = 0;
1498 if (dum->driver) {
1499 dum->port_status |= USB_PORT_STAT_ENABLE;
1500 /* give it the best speed we agree on */
1501 dum->gadget.speed = dum->driver->speed;
1502 dum->gadget.ep0->maxpacket = 64;
1503 switch (dum->gadget.speed) {
1504 case USB_SPEED_HIGH:
1505 dum->port_status |=
1506 USB_PORT_STAT_HIGH_SPEED;
1507 break;
1508 case USB_SPEED_LOW:
1509 dum->gadget.ep0->maxpacket = 8;
1510 dum->port_status |=
1511 USB_PORT_STAT_LOW_SPEED;
1512 break;
1513 default:
1514 dum->gadget.speed = USB_SPEED_FULL;
1515 break;
1516 }
1517 }
1518 }
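			/* reply is wPortStatus (low word) then wPortChange (high word) */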
1519 ((u16 *) buf)[0] = cpu_to_le16 (dum->port_status);
1520 ((u16 *) buf)[1] = cpu_to_le16 (dum->port_status >> 16);
1521 break;
1522 case SetHubFeature:
1523 retval = -EPIPE;
1524 break;
1525 case SetPortFeature:
1526 switch (wValue) {
1527 case USB_PORT_FEAT_SUSPEND:
1528 if ((dum->port_status & (1 << USB_PORT_FEAT_SUSPEND))
1529 == 0) {
1530 dum->port_status |=
1531 (1 << USB_PORT_FEAT_SUSPEND);
1532 if (dum->driver && dum->driver->suspend) {
1533 spin_unlock (&dum->lock);
1534 dum->driver->suspend (&dum->gadget);
1535 spin_lock (&dum->lock);
1536 }
1537 }
1538 break;
1539 case USB_PORT_FEAT_RESET:
1540 /* if it's already running, disconnect first */
1541 if (dum->port_status & USB_PORT_STAT_ENABLE) {
1542 dum->port_status &= ~(USB_PORT_STAT_ENABLE
1543 | USB_PORT_STAT_LOW_SPEED
1544 | USB_PORT_STAT_HIGH_SPEED);
1545 if (dum->driver) {
1546 dev_dbg (dummy_dev(dum),
1547 "disconnect\n");
1548 stop_activity (dum, dum->driver);
1549 }
1550
1551 /* FIXME test that code path! */
1552 }
1553 /* 50msec reset signaling */
1554 dum->re_timeout = jiffies + msecs_to_jiffies(50);
1555 /* FALLTHROUGH */
1556 default:
1557 dum->port_status |= (1 << wValue);
1558 }
1559 break;
1560
1561 default:
1562 dev_dbg (dummy_dev(dum),
1563 "hub control req%04x v%04x i%04x l%d\n",
1564 typeReq, wValue, wIndex, wLength);
1565
1566 /* "protocol stall" on error */
1567 retval = -EPIPE;
1568 }
1569 spin_unlock_irqrestore (&dum->lock, flags);
1570 return retval;
1571}
1572
1573
1574/*-------------------------------------------------------------------------*/
1575
1576static inline ssize_t
1577show_urb (char *buf, size_t size, struct urb *urb)
1578{
1579 int ep = usb_pipeendpoint (urb->pipe);
1580
1581 return snprintf (buf, size,
1582 "urb/%p %s ep%d%s%s len %d/%d\n",
1583 urb,
1584 ({ char *s;
1585 switch (urb->dev->speed) {
1586 case USB_SPEED_LOW: s = "ls"; break;
1587 case USB_SPEED_FULL: s = "fs"; break;
1588 case USB_SPEED_HIGH: s = "hs"; break;
1589 default: s = "?"; break;
1590 }; s; }),
1591 ep, ep ? (usb_pipein (urb->pipe) ? "in" : "out") : "",
1592 ({ char *s; \
1593 switch (usb_pipetype (urb->pipe)) { \
1594 case PIPE_CONTROL: s = ""; break; \
1595 case PIPE_BULK: s = "-bulk"; break; \
1596 case PIPE_INTERRUPT: s = "-int"; break; \
1597 default: s = "-iso"; break; \
1598 }; s;}),
1599 urb->actual_length, urb->transfer_buffer_length);
1600}
1601
1602static ssize_t
1603show_urbs (struct device *dev, char *buf)
1604{
1605 struct usb_hcd *hcd = dev_get_drvdata (dev);
1606 struct dummy *dum = hcd_to_dummy (hcd);
1607 struct urbp *urbp;
1608 size_t size = 0;
1609 unsigned long flags;
1610
1611 spin_lock_irqsave (&dum->lock, flags);
1612 list_for_each_entry (urbp, &dum->urbp_list, urbp_list) {
1613 size_t temp;
1614
1615 temp = show_urb (buf, PAGE_SIZE - size, urbp->urb);
1616 buf += temp;
1617 size += temp;
1618 }
1619 spin_unlock_irqrestore (&dum->lock, flags);
1620
1621 return size;
1622}
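/* expose the pending urbs as a read-only sysfs attribute, one line per
 * urb in the show_urb() format above
 */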
1623static DEVICE_ATTR (urbs, S_IRUGO, show_urbs, NULL);
1624
1625static int dummy_start (struct usb_hcd *hcd)
1626{
1627 struct dummy *dum;
1628 struct usb_device *root;
1629 int retval;
1630
1631 dum = hcd_to_dummy (hcd);
1632
1633 /*
1634 * MASTER side init ... we emulate a root hub that'll only ever
1635 * talk to one device (the slave side). Also appears in sysfs,
1636 * just like more familiar pci-based HCDs.
1637 */
1638 spin_lock_init (&dum->lock);
1639 init_timer (&dum->timer);
1640 dum->timer.function = dummy_timer;
1641 dum->timer.data = (unsigned long) dum;
1642
1643 INIT_LIST_HEAD (&dum->urbp_list);
1644
1645 root = usb_alloc_dev (NULL, &hcd->self, 0);
1646 if (!root)
1647 return -ENOMEM;
1648
1649 /* root hub enters addressed state... */
1650 hcd->state = HC_STATE_RUNNING;
1651 root->speed = USB_SPEED_HIGH;
1652
1653 /* ...then configured, so khubd sees us. */
1654 if ((retval = usb_hcd_register_root_hub (root, hcd)) != 0) {
1655 goto err1;
1656 }
1657
1658 /* only show a low-power port: just 8mA */
1659 hub_set_power_budget (root, 8);
1660
1661 if ((retval = dummy_register_udc (dum)) != 0)
1662 goto err2;
1663
1664 /* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
1665 device_create_file (dummy_dev(dum), &dev_attr_urbs);
1666 return 0;
1667
1668 err2:
1669 usb_disconnect (&hcd->self.root_hub);
1670 err1:
1671 usb_put_dev (root);
1672 hcd->state = HC_STATE_QUIESCING;
1673 return retval;
1674}
1675
1676static void dummy_stop (struct usb_hcd *hcd)
1677{
1678 struct dummy *dum;
1679
1680 dum = hcd_to_dummy (hcd);
1681
1682 device_remove_file (dummy_dev(dum), &dev_attr_urbs);
1683
1684 usb_gadget_unregister_driver (dum->driver);
1685 dummy_unregister_udc (dum);
1686
1687 dev_info (dummy_dev(dum), "stopped\n");
1688}
1689
1690/*-------------------------------------------------------------------------*/
1691
1692static int dummy_h_get_frame (struct usb_hcd *hcd)
1693{
1694 return dummy_g_get_frame (NULL);
1695}
1696
1697static const struct hc_driver dummy_hcd = {
1698 .description = (char *) driver_name,
1699 .product_desc = "Dummy host controller",
1700 .hcd_priv_size = sizeof(struct dummy),
1701
1702 .flags = HCD_USB2,
1703
1704 .start = dummy_start,
1705 .stop = dummy_stop,
1706
1707 .urb_enqueue = dummy_urb_enqueue,
1708 .urb_dequeue = dummy_urb_dequeue,
1709
1710 .get_frame_number = dummy_h_get_frame,
1711
1712 .hub_status_data = dummy_hub_status,
1713 .hub_control = dummy_hub_control,
1714};
1715
1716static int dummy_probe (struct device *dev)
1717{
1718 struct usb_hcd *hcd;
1719 int retval;
1720
1721 dev_info (dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
1722
1723 hcd = usb_create_hcd (&dummy_hcd, dev, dev->bus_id);
1724 if (!hcd)
1725 return -ENOMEM;
1726 the_controller = hcd_to_dummy (hcd);
1727
1728 retval = usb_add_hcd(hcd, 0, 0);
1729 if (retval != 0) {
1730 usb_put_hcd (hcd);
1731 the_controller = NULL;
1732 }
1733 return retval;
1734}
1735
1736static void dummy_remove (struct device *dev)
1737{
1738 struct usb_hcd *hcd;
1739
1740 hcd = dev_get_drvdata (dev);
1741 usb_remove_hcd (hcd);
1742 usb_put_hcd (hcd);
1743 the_controller = NULL;
1744}
1745
1746/*-------------------------------------------------------------------------*/
1747
1748static int dummy_pdev_detect (void)
1749{
1750 int retval;
1751
1752 retval = driver_register (&dummy_driver);
1753 if (retval < 0)
1754 return retval;
1755
1756 the_pdev.name = "hc";
1757 the_pdev.dev.driver = &dummy_driver;
1758 the_pdev.dev.release = dummy_pdev_release;
1759
1760 retval = platform_device_register (&the_pdev);
1761 if (retval < 0)
1762 driver_unregister (&dummy_driver);
1763 return retval;
1764}
1765
1766static void dummy_pdev_remove (void)
1767{
1768 platform_device_unregister (&the_pdev);
1769 driver_unregister (&dummy_driver);
1770}
1771
1772/*-------------------------------------------------------------------------*/
1773
1774static int __init init (void)
1775{
1776 int retval;
1777
1778 if (usb_disabled ())
1779 return -ENODEV;
1780 if ((retval = dummy_pdev_detect ()) != 0)
1781 return retval;
1782 if ((retval = dummy_probe (&the_pdev.dev)) != 0)
1783 dummy_pdev_remove ();
1784 return retval;
1785}
1786module_init (init);
1787
1788static void __exit cleanup (void)
1789{
1790 dummy_remove (&the_pdev.dev);
1791 dummy_pdev_remove ();
1792}
1793module_exit (cleanup);
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
new file mode 100644
index 000000000000..f7c6d758e1b0
--- /dev/null
+++ b/drivers/usb/gadget/epautoconf.c
@@ -0,0 +1,310 @@
1/*
2 * epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
3 *
4 * Copyright (C) 2004 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/types.h>
25#include <linux/device.h>
26
27#include <linux/ctype.h>
28#include <linux/string.h>
29
30#include <linux/usb_ch9.h>
31#include <linux/usb_gadget.h>
32
33#include "gadget_chips.h"
34
35
36/* we must assign addresses for configurable endpoints (like net2280) */
37static __initdata unsigned epnum;
38
39// #define MANY_ENDPOINTS
40#ifdef MANY_ENDPOINTS
41/* more than 15 configurable endpoints */
42static __initdata unsigned in_epnum;
43#endif
44
45
46/*
47 * This should work with endpoints from controller drivers sharing the
48 * same endpoint naming convention. For example:
49 *
50 * - ep1, ep2, ... address is fixed, not direction or type
51 * - ep1in, ep2out, ... address and direction are fixed, not type
52 * - ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
53 * - ep1in-bulk, ep2out-iso, ... all three are fixed
54 * - ep-* ... no functionality restrictions
55 *
56 * Type suffixes are "-bulk", "-iso", or "-int". Numbers are decimal.
57 * Less common restrictions are implied by gadget_is_*().
58 *
59 * NOTE: each endpoint is unidirectional, as specified by its USB
60 * descriptor; and isn't specific to a configuration or altsetting.
61 */
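/*
 * For instance, under these rules an interrupt-IN descriptor may still
 * match an endpoint named "ep1in-bulk" (bulk-capable endpoints also handle
 * interrupt transfers, barring the PXA quirk handled below), while a name
 * like "ep2out-iso" matches only isochronous OUT descriptors.
 */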
62static int __init
63ep_matches (
64 struct usb_gadget *gadget,
65 struct usb_ep *ep,
66 struct usb_endpoint_descriptor *desc
67)
68{
69 u8 type;
70 const char *tmp;
71 u16 max;
72
73 /* endpoint already claimed? */
74 if (0 != ep->driver_data)
75 return 0;
76
77 /* only support ep0 for portable CONTROL traffic */
78 type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
79 if (USB_ENDPOINT_XFER_CONTROL == type)
80 return 0;
81
82 /* some other naming convention */
83 if ('e' != ep->name[0])
84 return 0;
85
86 /* type-restriction: "-iso", "-bulk", or "-int".
87 * direction-restriction: "in", "out".
88 */
89 if ('-' != ep->name[2]) {
90 tmp = strrchr (ep->name, '-');
91 if (tmp) {
92 switch (type) {
93 case USB_ENDPOINT_XFER_INT:
94 /* bulk endpoints handle interrupt transfers,
95 * except the toggle-quirky iso-synch kind
96 */
97 if ('s' == tmp[2]) // == "-iso"
98 return 0;
99 /* for now, avoid PXA "interrupt-in";
100 * it's documented as never using DATA1.
101 */
102 if (gadget_is_pxa (gadget)
103 && 'i' == tmp [1])
104 return 0;
105 break;
106 case USB_ENDPOINT_XFER_BULK:
107 if ('b' != tmp[1]) // != "-bulk"
108 return 0;
109 break;
110 case USB_ENDPOINT_XFER_ISOC:
111 if ('s' != tmp[2]) // != "-iso"
112 return 0;
113 }
114 } else {
115 tmp = ep->name + strlen (ep->name);
116 }
117
118 /* direction-restriction: "..in-..", "out-.." */
119 tmp--;
120 if (!isdigit (*tmp)) {
121 if (desc->bEndpointAddress & USB_DIR_IN) {
122 if ('n' != *tmp)
123 return 0;
124 } else {
125 if ('t' != *tmp)
126 return 0;
127 }
128 }
129 }
130
131 /* endpoint maxpacket size is an input parameter, except for bulk
132 * where it's an output parameter representing the full speed limit.
133 * the usb spec fixes high speed bulk maxpacket at 512 bytes.
134 */
135 max = 0x7ff & le16_to_cpup (&desc->wMaxPacketSize);
136 switch (type) {
137 case USB_ENDPOINT_XFER_INT:
138 /* INT: limit 64 bytes full speed, 1024 high speed */
139 if (!gadget->is_dualspeed && max > 64)
140 return 0;
141 /* FALLTHROUGH */
142
143 case USB_ENDPOINT_XFER_ISOC:
144 /* ISO: limit 1023 bytes full speed, 1024 high speed */
145 if (ep->maxpacket < max)
146 return 0;
147 if (!gadget->is_dualspeed && max > 1023)
148 return 0;
149
150 /* BOTH: "high bandwidth" works only at high speed */
151 if ((desc->wMaxPacketSize & __constant_cpu_to_le16(3<<11))) {
152 if (!gadget->is_dualspeed)
153 return 0;
154 /* configure your hardware with enough buffering!! */
155 }
156 break;
157 }
158
159 /* MATCH!! */
160
161 /* report address */
162 if (isdigit (ep->name [2])) {
163 u8 num = simple_strtol (&ep->name [2], NULL, 10);
164 desc->bEndpointAddress |= num;
165#ifdef MANY_ENDPOINTS
166 } else if (desc->bEndpointAddress & USB_DIR_IN) {
167 if (++in_epnum > 15)
168 return 0;
169 desc->bEndpointAddress = USB_DIR_IN | in_epnum;
170#endif
171 } else {
172 if (++epnum > 15)
173 return 0;
174 desc->bEndpointAddress |= epnum;
175 }
176
177 /* report (variable) full speed bulk maxpacket */
178 if (USB_ENDPOINT_XFER_BULK == type) {
179 int size = ep->maxpacket;
180
181 /* min() doesn't work on bitfields with gcc-3.5 */
182 if (size > 64)
183 size = 64;
184 desc->wMaxPacketSize = cpu_to_le16(size);
185 }
186 return 1;
187}
188
189static struct usb_ep * __init
190find_ep (struct usb_gadget *gadget, const char *name)
191{
192 struct usb_ep *ep;
193
194 list_for_each_entry (ep, &gadget->ep_list, ep_list) {
195 if (0 == strcmp (ep->name, name))
196 return ep;
197 }
198 return NULL;
199}
200
201/**
202 * usb_ep_autoconfig - choose an endpoint matching the descriptor
203 * @gadget: The device to which the endpoint must belong.
204 * @desc: Endpoint descriptor, with endpoint direction and transfer mode
205 * initialized. For periodic transfers, the maximum packet
206 * size must also be initialized. This is modified on success.
207 *
208 * By choosing an endpoint to use with the specified descriptor, this
209 * routine simplifies writing gadget drivers that work with multiple
210 * USB device controllers. The endpoint would be passed later to
211 * usb_ep_enable(), along with some descriptor.
212 *
213 * That second descriptor won't always be the same as the first one.
214 * For example, isochronous endpoints can be autoconfigured for high
215 * bandwidth, and then used in several lower bandwidth altsettings.
216 * Also, high and full speed descriptors will be different.
217 *
218 * Be sure to examine and test the results of autoconfiguration on your
219 * hardware. This code may not make the best choices about how to use the
220 * USB controller, and it can't know all the restrictions that may apply.
221 * Some combinations of driver and hardware won't be able to autoconfigure.
222 *
223 * On success, this returns an un-claimed usb_ep, and modifies the endpoint
224 * descriptor bEndpointAddress. For bulk endpoints, the wMaxPacketSize value
225 * is initialized as if the endpoint were used at full speed. To prevent
226 * the endpoint from being returned by a later autoconfig call, claim it
227 * by assigning ep->driver_data to some non-null value.
228 *
229 * On failure, this returns a null endpoint pointer.
230 */
231struct usb_ep * __init usb_ep_autoconfig (
232 struct usb_gadget *gadget,
233 struct usb_endpoint_descriptor *desc
234)
235{
236 struct usb_ep *ep;
237 u8 type;
238
239 type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
240
241 /* First, apply chip-specific "best usage" knowledge.
242 * This might make a good usb_gadget_ops hook ...
243 */
244 if (gadget_is_net2280 (gadget) && type == USB_ENDPOINT_XFER_INT) {
245 /* ep-e, ep-f are PIO with only 64 byte fifos */
246 ep = find_ep (gadget, "ep-e");
247 if (ep && ep_matches (gadget, ep, desc))
248 return ep;
249 ep = find_ep (gadget, "ep-f");
250 if (ep && ep_matches (gadget, ep, desc))
251 return ep;
252
253 } else if (gadget_is_goku (gadget)) {
254 if (USB_ENDPOINT_XFER_INT == type) {
255 /* single buffering is enough */
256 ep = find_ep (gadget, "ep3-bulk");
257 if (ep && ep_matches (gadget, ep, desc))
258 return ep;
259 } else if (USB_ENDPOINT_XFER_BULK == type
260 && (USB_DIR_IN & desc->bEndpointAddress)) {
261 /* DMA may be available */
262 ep = find_ep (gadget, "ep2-bulk");
263 if (ep && ep_matches (gadget, ep, desc))
264 return ep;
265 }
266
267 } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) {
268 /* single buffering is enough; maybe 8 byte fifo is too */
269 ep = find_ep (gadget, "ep3in-bulk");
270 if (ep && ep_matches (gadget, ep, desc))
271 return ep;
272
273 } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) {
274 ep = find_ep (gadget, "ep1-bulk");
275 if (ep && ep_matches (gadget, ep, desc))
276 return ep;
277 }
278
279 /* Second, look at endpoints until an unclaimed one looks usable */
280 list_for_each_entry (ep, &gadget->ep_list, ep_list) {
281 if (ep_matches (gadget, ep, desc))
282 return ep;
283 }
284
285 /* Fail */
286 return NULL;
287}
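/*
 * A minimal usage sketch (not part of this file): a gadget driver's bind()
 * might let autoconfig pick a bulk-IN endpoint roughly as below.  The
 * descriptor layout mirrors the bulk endpoint descriptors used by the
 * gadget drivers in this series; names prefixed "sketch_" are hypothetical.
 */
static struct usb_endpoint_descriptor sketch_fs_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	/* for bulk, wMaxPacketSize is filled in by usb_ep_autoconfig() */
};

static int __init sketch_bind_eps (struct usb_gadget *gadget)
{
	struct usb_ep	*in_ep;

	usb_ep_autoconfig_reset (gadget);
	in_ep = usb_ep_autoconfig (gadget, &sketch_fs_in_desc);
	if (!in_ep)
		return -ENODEV;		/* no usable endpoint on this UDC */
	in_ep->driver_data = in_ep;	/* claim it; later calls skip it */
	return 0;
}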
288
289/**
290 * usb_ep_autoconfig_reset - reset endpoint autoconfig state
291 * @gadget: device for which autoconfig state will be reset
292 *
293 * Use this for devices where one configuration may need to assign
294 * endpoint resources very differently from the next one. It clears
295 * state such as ep->driver_data and the record of assigned endpoints
296 * used by usb_ep_autoconfig().
297 */
298void __init usb_ep_autoconfig_reset (struct usb_gadget *gadget)
299{
300 struct usb_ep *ep;
301
302 list_for_each_entry (ep, &gadget->ep_list, ep_list) {
303 ep->driver_data = NULL;
304 }
305#ifdef MANY_ENDPOINTS
306 in_epnum = 0;
307#endif
308 epnum = 0;
309}
310
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
new file mode 100644
index 000000000000..cff9fb0b73cc
--- /dev/null
+++ b/drivers/usb/gadget/ether.c
@@ -0,0 +1,2660 @@
1/*
2 * ether.c -- Ethernet gadget driver, with CDC and non-CDC options
3 *
4 * Copyright (C) 2003-2005 David Brownell
5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22
23// #define DEBUG 1
24// #define VERBOSE
25
26#include <linux/config.h>
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/delay.h>
30#include <linux/ioport.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <linux/smp_lock.h>
34#include <linux/errno.h>
35#include <linux/init.h>
36#include <linux/timer.h>
37#include <linux/list.h>
38#include <linux/interrupt.h>
39#include <linux/utsname.h>
40#include <linux/device.h>
41#include <linux/moduleparam.h>
42#include <linux/ctype.h>
43
44#include <asm/byteorder.h>
45#include <asm/io.h>
46#include <asm/irq.h>
47#include <asm/system.h>
48#include <asm/uaccess.h>
49#include <asm/unaligned.h>
50
51#include <linux/usb_ch9.h>
52#include <linux/usb_cdc.h>
53#include <linux/usb_gadget.h>
54
55#include <linux/random.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/ethtool.h>
59
60#include "gadget_chips.h"
61
62/*-------------------------------------------------------------------------*/
63
64/*
65 * Ethernet gadget driver -- with CDC and non-CDC options
66 * Builds on hardware support for a full duplex link.
67 *
68 * CDC Ethernet is the standard USB solution for sending Ethernet frames
69 * using USB. Real hardware tends to use the same framing protocol but look
70 * different for control features. This driver strongly prefers to use
71 * this USB-IF standard as its open-systems interoperability solution;
72 * most host side USB stacks (except from Microsoft) support it.
73 *
74 * There's some hardware that can't talk CDC. We make that hardware
75 * implement a "minimalist" vendor-agnostic CDC core: same framing, but
76 * link-level setup only requires activating the configuration.
77 * Linux supports it, but other host operating systems may not.
78 * (This is a subset of CDC Ethernet.)
79 *
80 * A third option is also in use. Rather than CDC Ethernet, or something
81 * simpler, Microsoft pushes their own approach: RNDIS. The published
82 * RNDIS specs are ambiguous and appear to be incomplete, and are also
83 * needlessly complex.
84 */
85
86#define DRIVER_DESC "Ethernet Gadget"
87#define DRIVER_VERSION "Equinox 2004"
88
89static const char shortname [] = "ether";
90static const char driver_desc [] = DRIVER_DESC;
91
92#define RX_EXTRA 20 /* guard against rx overflows */
93
94#ifdef CONFIG_USB_ETH_RNDIS
95#include "rndis.h"
96#else
97#define rndis_init() 0
98#define rndis_exit() do{}while(0)
99#endif
100
101/* CDC and RNDIS support the same host-chosen outgoing packet filters. */
102#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
103 |USB_CDC_PACKET_TYPE_DIRECTED)
104
105
106/*-------------------------------------------------------------------------*/
107
108struct eth_dev {
109 spinlock_t lock;
110 struct usb_gadget *gadget;
111 struct usb_request *req; /* for control responses */
112 struct usb_request *stat_req; /* for cdc & rndis status */
113
114 u8 config;
115 struct usb_ep *in_ep, *out_ep, *status_ep;
116 const struct usb_endpoint_descriptor
117 *in, *out, *status;
118 struct list_head tx_reqs, rx_reqs;
119
120 struct net_device *net;
121 struct net_device_stats stats;
122 atomic_t tx_qlen;
123
124 struct work_struct work;
125 unsigned zlp:1;
126 unsigned cdc:1;
127 unsigned rndis:1;
128 unsigned suspended:1;
129 u16 cdc_filter;
130 unsigned long todo;
131#define WORK_RX_MEMORY 0
132 int rndis_config;
133 u8 host_mac [ETH_ALEN];
134};
135
136/* This version autoconfigures as much as possible at run-time.
137 *
138 * It also ASSUMES a self-powered device, without remote wakeup,
139 * although remote wakeup support would make sense.
140 */
141static const char *EP_IN_NAME;
142static const char *EP_OUT_NAME;
143static const char *EP_STATUS_NAME;
144
145/*-------------------------------------------------------------------------*/
146
147/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
148 * Instead: allocate your own, using normal USB-IF procedures.
149 */
150
151/* Thanks to NetChip Technologies for donating this product ID.
152 * It's for devices with only CDC Ethernet configurations.
153 */
154#define CDC_VENDOR_NUM 0x0525 /* NetChip */
155#define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */
156
157/* For hardware that can't talk CDC, we use the same vendor ID that
158 * ARM Linux has used for ethernet-over-usb, both with sa1100 and
159 * with pxa250. We're protocol-compatible, if the host-side drivers
160 * use the endpoint descriptors. bcdDevice (version) is nonzero, so
161 * drivers that need to hard-wire endpoint numbers have a hook.
162 *
163 * The protocol is a minimal subset of CDC Ether, which works on any bulk
164 * hardware that's not deeply broken ... even on hardware that can't talk
165 * RNDIS (like SA-1100, with no interrupt endpoint, or anything that
166 * doesn't handle control-OUT).
167 */
168#define SIMPLE_VENDOR_NUM 0x049f
169#define SIMPLE_PRODUCT_NUM 0x505a
170
171/* For hardware that can talk RNDIS and either of the above protocols,
172 * use this ID ... the windows INF files will know it. Unless it's
173 * used with CDC Ethernet, Linux 2.4 hosts will need updates to choose
174 * the non-RNDIS configuration.
175 */
176#define RNDIS_VENDOR_NUM 0x0525 /* NetChip */
177#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */
178
179
180/* Some systems will want different product identifiers published in the
181 * device descriptor, either numbers or strings or both. These string
182 * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
183 */
184
185static ushort __initdata idVendor;
186module_param(idVendor, ushort, S_IRUGO);
187MODULE_PARM_DESC(idVendor, "USB Vendor ID");
188
189static ushort __initdata idProduct;
190module_param(idProduct, ushort, S_IRUGO);
191MODULE_PARM_DESC(idProduct, "USB Product ID");
192
193static ushort __initdata bcdDevice;
194module_param(bcdDevice, ushort, S_IRUGO);
195MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
196
197static char *__initdata iManufacturer;
198module_param(iManufacturer, charp, S_IRUGO);
199MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
200
201static char *__initdata iProduct;
202module_param(iProduct, charp, S_IRUGO);
203MODULE_PARM_DESC(iProduct, "USB Product string");
204
205/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
206static char *__initdata dev_addr;
207module_param(dev_addr, charp, S_IRUGO);
208MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
209
210/* this address is invisible to ifconfig */
211static char *__initdata host_addr;
212module_param(host_addr, charp, S_IRUGO);
213MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
214
215
216/*-------------------------------------------------------------------------*/
217
218/* Include CDC support if we could run on CDC-capable hardware. */
219
220#ifdef CONFIG_USB_GADGET_NET2280
221#define DEV_CONFIG_CDC
222#endif
223
224#ifdef CONFIG_USB_GADGET_DUMMY_HCD
225#define DEV_CONFIG_CDC
226#endif
227
228#ifdef CONFIG_USB_GADGET_GOKU
229#define DEV_CONFIG_CDC
230#endif
231
232#ifdef CONFIG_USB_GADGET_LH7A40X
233#define DEV_CONFIG_CDC
234#endif
235
236#ifdef CONFIG_USB_GADGET_MQ11XX
237#define DEV_CONFIG_CDC
238#endif
239
240#ifdef CONFIG_USB_GADGET_OMAP
241#define DEV_CONFIG_CDC
242#endif
243
244#ifdef CONFIG_USB_GADGET_N9604
245#define DEV_CONFIG_CDC
246#endif
247
248#ifdef CONFIG_USB_GADGET_PXA27X
249#define DEV_CONFIG_CDC
250#endif
251
252#ifdef CONFIG_USB_GADGET_AT91
253#define DEV_CONFIG_CDC
254#endif
255
256
257/* For CDC-incapable hardware, choose the simple cdc subset.
258 * Anything that talks bulk (without notable bugs) can do this.
259 */
260#ifdef CONFIG_USB_GADGET_PXA2XX
261#define DEV_CONFIG_SUBSET
262#endif
263
264#ifdef CONFIG_USB_GADGET_SH
265#define DEV_CONFIG_SUBSET
266#endif
267
268#ifdef CONFIG_USB_GADGET_SA1100
269/* use non-CDC for backwards compatibility */
270#define DEV_CONFIG_SUBSET
271#endif
272
273#ifdef CONFIG_USB_GADGET_S3C2410
274#define DEV_CONFIG_CDC
275#endif
276
277/*-------------------------------------------------------------------------*/
278
279/* "main" config is either CDC, or its simple subset */
280static inline int is_cdc(struct eth_dev *dev)
281{
282#if !defined(DEV_CONFIG_SUBSET)
283 return 1; /* only cdc possible */
284#elif !defined (DEV_CONFIG_CDC)
285 return 0; /* only subset possible */
286#else
287 return dev->cdc; /* depends on what hardware we found */
288#endif
289}
290
291/* "secondary" RNDIS config may sometimes be activated */
292static inline int rndis_active(struct eth_dev *dev)
293{
294#ifdef CONFIG_USB_ETH_RNDIS
295 return dev->rndis;
296#else
297 return 0;
298#endif
299}
300
301#define subset_active(dev) (!is_cdc(dev) && !rndis_active(dev))
302#define cdc_active(dev) ( is_cdc(dev) && !rndis_active(dev))
303
304
305
306#define DEFAULT_QLEN 2 /* double buffering by default */
307
308/* peak bulk transfer bits-per-second */
309#define HS_BPS (13 * 512 * 8 * 1000 * 8)
310#define FS_BPS (19 * 64 * 1 * 1000 * 8)
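/* i.e. at high speed: 13 packets x 512 bytes per microframe, 8 microframes
 * per frame, 1000 frames/sec, 8 bits/byte; at full speed: 19 packets x 64
 * bytes per 1 msec frame, 1000 frames/sec, 8 bits/byte.
 */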
311
312#ifdef CONFIG_USB_GADGET_DUALSPEED
313
314static unsigned qmult = 5;
315module_param (qmult, uint, S_IRUGO|S_IWUSR);
316
317
318/* for dual-speed hardware, use deeper queues at highspeed */
319#define qlen(gadget) \
320 (DEFAULT_QLEN*((gadget->speed == USB_SPEED_HIGH) ? qmult : 1))
321
322/* also defer IRQs on highspeed TX */
323#define TX_DELAY qmult
324
325#define BITRATE(g) (((g)->speed == USB_SPEED_HIGH) ? HS_BPS : FS_BPS)
326
327#else /* full speed (low speed doesn't do bulk) */
328#define qlen(gadget) DEFAULT_QLEN
329
330#define BITRATE(g) FS_BPS
331#endif
332
333
334/*-------------------------------------------------------------------------*/
335
336#define xprintk(d,level,fmt,args...) \
337 printk(level "%s: " fmt , (d)->net->name , ## args)
338
339#ifdef DEBUG
340#undef DEBUG
341#define DEBUG(dev,fmt,args...) \
342 xprintk(dev , KERN_DEBUG , fmt , ## args)
343#else
344#define DEBUG(dev,fmt,args...) \
345 do { } while (0)
346#endif /* DEBUG */
347
348#ifdef VERBOSE
349#define VDEBUG DEBUG
350#else
351#define VDEBUG(dev,fmt,args...) \
352 do { } while (0)
353#endif /* VERBOSE */
354
355#define ERROR(dev,fmt,args...) \
356 xprintk(dev , KERN_ERR , fmt , ## args)
357#define WARN(dev,fmt,args...) \
358 xprintk(dev , KERN_WARNING , fmt , ## args)
359#define INFO(dev,fmt,args...) \
360 xprintk(dev , KERN_INFO , fmt , ## args)
361
362/*-------------------------------------------------------------------------*/
363
364/* USB DRIVER HOOKUP (to the hardware driver, below us), mostly
365 * ep0 implementation: descriptors, config management, setup().
366 * also optional class-specific notification interrupt transfer.
367 */
368
369/*
370 * DESCRIPTORS ... most are static, but strings and (full) configuration
371 * descriptors are built on demand. For now we do either full CDC, or
372 * our simple subset, with RNDIS as an optional second configuration.
373 *
374 * RNDIS includes some CDC ACM descriptors ... like CDC Ethernet. But
375 * the class descriptors match a modem (they're ignored; it's really just
376 * Ethernet functionality), they don't need the NOP altsetting, and the
377 * status transfer endpoint isn't optional.
378 */
379
380#define STRING_MANUFACTURER 1
381#define STRING_PRODUCT 2
382#define STRING_ETHADDR 3
383#define STRING_DATA 4
384#define STRING_CONTROL 5
385#define STRING_RNDIS_CONTROL 6
386#define STRING_CDC 7
387#define STRING_SUBSET 8
388#define STRING_RNDIS 9
389
390#define USB_BUFSIZ 256 /* holds our biggest descriptor */
391
392/*
393 * This device advertises one configuration, eth_config, unless RNDIS
394 * is enabled (rndis_config) on hardware supporting at least two configs.
395 *
396 * NOTE: Controllers like superh_udc should probably be able to use
397 * an RNDIS-only configuration.
398 *
399 * FIXME define some higher-powered configurations to make it easier
400 * to recharge batteries ...
401 */
402
403#define DEV_CONFIG_VALUE 1 /* cdc or subset */
404#define DEV_RNDIS_CONFIG_VALUE 2 /* rndis; optional */
405
406static struct usb_device_descriptor
407device_desc = {
408 .bLength = sizeof device_desc,
409 .bDescriptorType = USB_DT_DEVICE,
410
411 .bcdUSB = __constant_cpu_to_le16 (0x0200),
412
413 .bDeviceClass = USB_CLASS_COMM,
414 .bDeviceSubClass = 0,
415 .bDeviceProtocol = 0,
416
417 .idVendor = __constant_cpu_to_le16 (CDC_VENDOR_NUM),
418 .idProduct = __constant_cpu_to_le16 (CDC_PRODUCT_NUM),
419 .iManufacturer = STRING_MANUFACTURER,
420 .iProduct = STRING_PRODUCT,
421 .bNumConfigurations = 1,
422};
423
424static struct usb_otg_descriptor
425otg_descriptor = {
426 .bLength = sizeof otg_descriptor,
427 .bDescriptorType = USB_DT_OTG,
428
429 .bmAttributes = USB_OTG_SRP,
430};
431
432static struct usb_config_descriptor
433eth_config = {
434 .bLength = sizeof eth_config,
435 .bDescriptorType = USB_DT_CONFIG,
436
437 /* compute wTotalLength on the fly */
438 .bNumInterfaces = 2,
439 .bConfigurationValue = DEV_CONFIG_VALUE,
440 .iConfiguration = STRING_CDC,
441 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
442 .bMaxPower = 50,
443};
444
445#ifdef CONFIG_USB_ETH_RNDIS
446static struct usb_config_descriptor
447rndis_config = {
448 .bLength = sizeof rndis_config,
449 .bDescriptorType = USB_DT_CONFIG,
450
451 /* compute wTotalLength on the fly */
452 .bNumInterfaces = 2,
453 .bConfigurationValue = DEV_RNDIS_CONFIG_VALUE,
454 .iConfiguration = STRING_RNDIS,
455 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
456 .bMaxPower = 50,
457};
458#endif
459
460/*
461 * Compared to the simple CDC subset, the full CDC Ethernet model adds
462 * three class descriptors, two interface descriptors, optional status
463 * endpoint. Both have a "data" interface and two bulk endpoints.
464 * There are also differences in how control requests are handled.
465 *
466 * RNDIS shares a lot with CDC-Ethernet, since it's a variant of
467 * the CDC-ACM (modem) spec.
468 */
469
470#ifdef DEV_CONFIG_CDC
471static struct usb_interface_descriptor
472control_intf = {
473 .bLength = sizeof control_intf,
474 .bDescriptorType = USB_DT_INTERFACE,
475
476 .bInterfaceNumber = 0,
477 /* status endpoint is optional; this may be patched later */
478 .bNumEndpoints = 1,
479 .bInterfaceClass = USB_CLASS_COMM,
480 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
481 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
482 .iInterface = STRING_CONTROL,
483};
484#endif
485
486#ifdef CONFIG_USB_ETH_RNDIS
487static const struct usb_interface_descriptor
488rndis_control_intf = {
489 .bLength = sizeof rndis_control_intf,
490 .bDescriptorType = USB_DT_INTERFACE,
491
492 .bInterfaceNumber = 0,
493 .bNumEndpoints = 1,
494 .bInterfaceClass = USB_CLASS_COMM,
495 .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
496 .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR,
497 .iInterface = STRING_RNDIS_CONTROL,
498};
499#endif
500
501#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
502
503static const struct usb_cdc_header_desc header_desc = {
504 .bLength = sizeof header_desc,
505 .bDescriptorType = USB_DT_CS_INTERFACE,
506 .bDescriptorSubType = USB_CDC_HEADER_TYPE,
507
508 .bcdCDC = __constant_cpu_to_le16 (0x0110),
509};
510
511static const struct usb_cdc_union_desc union_desc = {
512 .bLength = sizeof union_desc,
513 .bDescriptorType = USB_DT_CS_INTERFACE,
514 .bDescriptorSubType = USB_CDC_UNION_TYPE,
515
516 .bMasterInterface0 = 0, /* index of control interface */
517 .bSlaveInterface0 = 1, /* index of DATA interface */
518};
519
520#endif /* CDC || RNDIS */
521
522#ifdef CONFIG_USB_ETH_RNDIS
523
524static const struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
525 .bLength = sizeof call_mgmt_descriptor,
526 .bDescriptorType = USB_DT_CS_INTERFACE,
527 .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
528
529 .bmCapabilities = 0x00,
530 .bDataInterface = 0x01,
531};
532
533static struct usb_cdc_acm_descriptor acm_descriptor = {
534 .bLength = sizeof acm_descriptor,
535 .bDescriptorType = USB_DT_CS_INTERFACE,
536 .bDescriptorSubType = USB_CDC_ACM_TYPE,
537
538 .bmCapabilities = 0x00,
539};
540
541#endif
542
543#ifdef DEV_CONFIG_CDC
544
545static const struct usb_cdc_ether_desc ether_desc = {
546 .bLength = sizeof ether_desc,
547 .bDescriptorType = USB_DT_CS_INTERFACE,
548 .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
549
550 /* this descriptor actually adds value, surprise! */
551 .iMACAddress = STRING_ETHADDR,
552 .bmEthernetStatistics = __constant_cpu_to_le32 (0), /* no statistics */
553 .wMaxSegmentSize = __constant_cpu_to_le16 (ETH_FRAME_LEN),
554 .wNumberMCFilters = __constant_cpu_to_le16 (0),
555 .bNumberPowerFilters = 0,
556};
557
558#endif
559
560#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
561
562/* include the status endpoint if we can, even where it's optional.
563 * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
564 * packet, to simplify cancelation; and a big transfer interval, to
565 * waste less bandwidth.
566 *
567 * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
568 * if they ignore the connect/disconnect notifications that real aether
569 * can provide. more advanced cdc configurations might want to support
570 * encapsulated commands (vendor-specific, using control-OUT).
571 *
572 * RNDIS requires the status endpoint, since it uses that encapsulation
573 * mechanism for its funky RPC scheme.
574 */
575
576#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
577#define STATUS_BYTECOUNT 16 /* 8 byte header + data */
578
579static struct usb_endpoint_descriptor
580fs_status_desc = {
581 .bLength = USB_DT_ENDPOINT_SIZE,
582 .bDescriptorType = USB_DT_ENDPOINT,
583
584 .bEndpointAddress = USB_DIR_IN,
585 .bmAttributes = USB_ENDPOINT_XFER_INT,
586 .wMaxPacketSize = __constant_cpu_to_le16 (STATUS_BYTECOUNT),
587 .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
588};
589#endif
590
591#ifdef DEV_CONFIG_CDC
592
593/* the default data interface has no endpoints ... */
594
595static const struct usb_interface_descriptor
596data_nop_intf = {
597 .bLength = sizeof data_nop_intf,
598 .bDescriptorType = USB_DT_INTERFACE,
599
600 .bInterfaceNumber = 1,
601 .bAlternateSetting = 0,
602 .bNumEndpoints = 0,
603 .bInterfaceClass = USB_CLASS_CDC_DATA,
604 .bInterfaceSubClass = 0,
605 .bInterfaceProtocol = 0,
606};
607
608/* ... but the "real" data interface has two bulk endpoints */
609
610static const struct usb_interface_descriptor
611data_intf = {
612 .bLength = sizeof data_intf,
613 .bDescriptorType = USB_DT_INTERFACE,
614
615 .bInterfaceNumber = 1,
616 .bAlternateSetting = 1,
617 .bNumEndpoints = 2,
618 .bInterfaceClass = USB_CLASS_CDC_DATA,
619 .bInterfaceSubClass = 0,
620 .bInterfaceProtocol = 0,
621 .iInterface = STRING_DATA,
622};
623
624#endif
625
626#ifdef CONFIG_USB_ETH_RNDIS
627
628/* RNDIS doesn't activate by changing to the "real" altsetting */
629
630static const struct usb_interface_descriptor
631rndis_data_intf = {
632 .bLength = sizeof rndis_data_intf,
633 .bDescriptorType = USB_DT_INTERFACE,
634
635 .bInterfaceNumber = 1,
636 .bAlternateSetting = 0,
637 .bNumEndpoints = 2,
638 .bInterfaceClass = USB_CLASS_CDC_DATA,
639 .bInterfaceSubClass = 0,
640 .bInterfaceProtocol = 0,
641 .iInterface = STRING_DATA,
642};
643
644#endif
645
646#ifdef DEV_CONFIG_SUBSET
647
648/*
649 * "Simple" CDC-subset option is a simple vendor-neutral model that most
650 * full speed controllers can handle: one interface, two bulk endpoints.
651 */
652
653static const struct usb_interface_descriptor
654subset_data_intf = {
655 .bLength = sizeof subset_data_intf,
656 .bDescriptorType = USB_DT_INTERFACE,
657
658 .bInterfaceNumber = 0,
659 .bAlternateSetting = 0,
660 .bNumEndpoints = 2,
661 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
662 .bInterfaceSubClass = 0,
663 .bInterfaceProtocol = 0,
664 .iInterface = STRING_DATA,
665};
666
667#endif /* SUBSET */
668
669
670static struct usb_endpoint_descriptor
671fs_source_desc = {
672 .bLength = USB_DT_ENDPOINT_SIZE,
673 .bDescriptorType = USB_DT_ENDPOINT,
674
675 .bEndpointAddress = USB_DIR_IN,
676 .bmAttributes = USB_ENDPOINT_XFER_BULK,
677};
678
679static struct usb_endpoint_descriptor
680fs_sink_desc = {
681 .bLength = USB_DT_ENDPOINT_SIZE,
682 .bDescriptorType = USB_DT_ENDPOINT,
683
684 .bEndpointAddress = USB_DIR_OUT,
685 .bmAttributes = USB_ENDPOINT_XFER_BULK,
686};
687
688static const struct usb_descriptor_header *fs_eth_function [11] = {
689 (struct usb_descriptor_header *) &otg_descriptor,
690#ifdef DEV_CONFIG_CDC
691 /* "cdc" mode descriptors */
692 (struct usb_descriptor_header *) &control_intf,
693 (struct usb_descriptor_header *) &header_desc,
694 (struct usb_descriptor_header *) &union_desc,
695 (struct usb_descriptor_header *) &ether_desc,
696 /* NOTE: status endpoint may need to be removed */
697 (struct usb_descriptor_header *) &fs_status_desc,
698 /* data interface, with altsetting */
699 (struct usb_descriptor_header *) &data_nop_intf,
700 (struct usb_descriptor_header *) &data_intf,
701 (struct usb_descriptor_header *) &fs_source_desc,
702 (struct usb_descriptor_header *) &fs_sink_desc,
703 NULL,
704#endif /* DEV_CONFIG_CDC */
705};
706
707static inline void __init fs_subset_descriptors(void)
708{
709#ifdef DEV_CONFIG_SUBSET
710 fs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
711 fs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc;
712 fs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc;
713 fs_eth_function[4] = NULL;
714#else
715 fs_eth_function[1] = NULL;
716#endif
717}
718
719#ifdef CONFIG_USB_ETH_RNDIS
720static const struct usb_descriptor_header *fs_rndis_function [] = {
721 (struct usb_descriptor_header *) &otg_descriptor,
722 /* control interface matches ACM, not Ethernet */
723 (struct usb_descriptor_header *) &rndis_control_intf,
724 (struct usb_descriptor_header *) &header_desc,
725 (struct usb_descriptor_header *) &call_mgmt_descriptor,
726 (struct usb_descriptor_header *) &acm_descriptor,
727 (struct usb_descriptor_header *) &union_desc,
728 (struct usb_descriptor_header *) &fs_status_desc,
729 /* data interface has no altsetting */
730 (struct usb_descriptor_header *) &rndis_data_intf,
731 (struct usb_descriptor_header *) &fs_source_desc,
732 (struct usb_descriptor_header *) &fs_sink_desc,
733 NULL,
734};
735#endif
736
737#ifdef CONFIG_USB_GADGET_DUALSPEED
738
739/*
740 * usb 2.0 devices need to expose both high speed and full speed
741 * descriptors, unless they only run at full speed.
742 */
743
744#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
745static struct usb_endpoint_descriptor
746hs_status_desc = {
747 .bLength = USB_DT_ENDPOINT_SIZE,
748 .bDescriptorType = USB_DT_ENDPOINT,
749
750 .bmAttributes = USB_ENDPOINT_XFER_INT,
751 .wMaxPacketSize = __constant_cpu_to_le16 (STATUS_BYTECOUNT),
752 .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
753};
754#endif /* DEV_CONFIG_CDC || CONFIG_USB_ETH_RNDIS */
755
756static struct usb_endpoint_descriptor
757hs_source_desc = {
758 .bLength = USB_DT_ENDPOINT_SIZE,
759 .bDescriptorType = USB_DT_ENDPOINT,
760
761 .bmAttributes = USB_ENDPOINT_XFER_BULK,
762 .wMaxPacketSize = __constant_cpu_to_le16 (512),
763};
764
765static struct usb_endpoint_descriptor
766hs_sink_desc = {
767 .bLength = USB_DT_ENDPOINT_SIZE,
768 .bDescriptorType = USB_DT_ENDPOINT,
769
770 .bmAttributes = USB_ENDPOINT_XFER_BULK,
771 .wMaxPacketSize = __constant_cpu_to_le16 (512),
772};
773
774static struct usb_qualifier_descriptor
775dev_qualifier = {
776 .bLength = sizeof dev_qualifier,
777 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
778
779 .bcdUSB = __constant_cpu_to_le16 (0x0200),
780 .bDeviceClass = USB_CLASS_COMM,
781
782 .bNumConfigurations = 1,
783};
784
785static const struct usb_descriptor_header *hs_eth_function [11] = {
786 (struct usb_descriptor_header *) &otg_descriptor,
787#ifdef DEV_CONFIG_CDC
788 /* "cdc" mode descriptors */
789 (struct usb_descriptor_header *) &control_intf,
790 (struct usb_descriptor_header *) &header_desc,
791 (struct usb_descriptor_header *) &union_desc,
792 (struct usb_descriptor_header *) &ether_desc,
793 /* NOTE: status endpoint may need to be removed */
794 (struct usb_descriptor_header *) &hs_status_desc,
795 /* data interface, with altsetting */
796 (struct usb_descriptor_header *) &data_nop_intf,
797 (struct usb_descriptor_header *) &data_intf,
798 (struct usb_descriptor_header *) &hs_source_desc,
799 (struct usb_descriptor_header *) &hs_sink_desc,
800 NULL,
801#endif /* DEV_CONFIG_CDC */
802};
803
804static inline void __init hs_subset_descriptors(void)
805{
806#ifdef DEV_CONFIG_SUBSET
807 hs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
808 hs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc;
809 hs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc;
810 hs_eth_function[4] = NULL;
811#else
812 hs_eth_function[1] = NULL;
813#endif
814}
815
816#ifdef CONFIG_USB_ETH_RNDIS
817static const struct usb_descriptor_header *hs_rndis_function [] = {
818 (struct usb_descriptor_header *) &otg_descriptor,
819 /* control interface matches ACM, not Ethernet */
820 (struct usb_descriptor_header *) &rndis_control_intf,
821 (struct usb_descriptor_header *) &header_desc,
822 (struct usb_descriptor_header *) &call_mgmt_descriptor,
823 (struct usb_descriptor_header *) &acm_descriptor,
824 (struct usb_descriptor_header *) &union_desc,
825 (struct usb_descriptor_header *) &hs_status_desc,
826 /* data interface has no altsetting */
827 (struct usb_descriptor_header *) &rndis_data_intf,
828 (struct usb_descriptor_header *) &hs_source_desc,
829 (struct usb_descriptor_header *) &hs_sink_desc,
830 NULL,
831};
832#endif
833
834
835/* maxpacket and other transfer characteristics vary by speed. */
836#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs))
837
838#else
839
840/* if there's no high speed support, maxpacket doesn't change. */
841#define ep_desc(g,hs,fs) fs
842
843static inline void __init hs_subset_descriptors(void)
844{
845}
846
847#endif /* !CONFIG_USB_GADGET_DUALSPEED */
848
849/*-------------------------------------------------------------------------*/
850
851/* descriptors that are built on-demand */
852
853static char manufacturer [50];
854static char product_desc [40] = DRIVER_DESC;
855
856#ifdef DEV_CONFIG_CDC
857/* address that the host will use ... usually assigned at random */
858static char ethaddr [2 * ETH_ALEN + 1];
859#endif
860
861/* static strings, in UTF-8 */
862static struct usb_string strings [] = {
863 { STRING_MANUFACTURER, manufacturer, },
864 { STRING_PRODUCT, product_desc, },
865 { STRING_DATA, "Ethernet Data", },
866#ifdef DEV_CONFIG_CDC
867 { STRING_CDC, "CDC Ethernet", },
868 { STRING_ETHADDR, ethaddr, },
869 { STRING_CONTROL, "CDC Communications Control", },
870#endif
871#ifdef DEV_CONFIG_SUBSET
872 { STRING_SUBSET, "CDC Ethernet Subset", },
873#endif
874#ifdef CONFIG_USB_ETH_RNDIS
875 { STRING_RNDIS, "RNDIS", },
876 { STRING_RNDIS_CONTROL, "RNDIS Communications Control", },
877#endif
878 { } /* end of list */
879};
880
881static struct usb_gadget_strings stringtab = {
882 .language = 0x0409, /* en-us */
883 .strings = strings,
884};
885
886/*
887 * one config, two interfaces: control, data.
888 * complications: class descriptors, and an altsetting.
889 */
890static int
891config_buf (enum usb_device_speed speed,
892 u8 *buf, u8 type,
893 unsigned index, int is_otg)
894{
895 int len;
896 const struct usb_config_descriptor *config;
897 const struct usb_descriptor_header **function;
898#ifdef CONFIG_USB_GADGET_DUALSPEED
899 int hs = (speed == USB_SPEED_HIGH);
900
901 if (type == USB_DT_OTHER_SPEED_CONFIG)
902 hs = !hs;
903#define which_fn(t) (hs ? hs_ ## t ## _function : fs_ ## t ## _function)
904#else
905#define which_fn(t) (fs_ ## t ## _function)
906#endif
907
908 if (index >= device_desc.bNumConfigurations)
909 return -EINVAL;
910
911#ifdef CONFIG_USB_ETH_RNDIS
912 /* list the RNDIS config first, to make Microsoft's drivers
913 * happy. DOCSIS 1.0 needs this too.
914 */
915 if (device_desc.bNumConfigurations == 2 && index == 0) {
916 config = &rndis_config;
917 function = which_fn (rndis);
918 } else
919#endif
920 {
921 config = &eth_config;
922 function = which_fn (eth);
923 }
924
925 /* for now, don't advertise srp-only devices */
926 if (!is_otg)
927 function++;
928
929 len = usb_gadget_config_buf (config, buf, USB_BUFSIZ, function);
930 if (len < 0)
931 return len;
932 ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
933 return len;
934}
935
936/*-------------------------------------------------------------------------*/
937
938static void eth_start (struct eth_dev *dev, int gfp_flags);
939static int alloc_requests (struct eth_dev *dev, unsigned n, int gfp_flags);
940
941#ifdef DEV_CONFIG_CDC
942static inline int ether_alt_ep_setup (struct eth_dev *dev, struct usb_ep *ep)
943{
944 const struct usb_endpoint_descriptor *d;
945
946 /* With CDC, the host isn't allowed to use these two data
947 * endpoints in the default altsetting for the interface.
948 * so we don't activate them yet. Reset from SET_INTERFACE.
949 *
950 * Strictly speaking RNDIS should work the same: activation is
951 * a side effect of setting a packet filter. Deactivation is
952 * from REMOTE_NDIS_HALT_MSG, reset from REMOTE_NDIS_RESET_MSG.
953 */
954
955 /* one endpoint writes data back IN to the host */
956 if (strcmp (ep->name, EP_IN_NAME) == 0) {
957 d = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc);
958 ep->driver_data = dev;
959 dev->in = d;
960
961 /* one endpoint just reads OUT packets */
962 } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
963 d = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc);
964 ep->driver_data = dev;
965 dev->out = d;
966
967 /* optional status/notification endpoint */
968 } else if (EP_STATUS_NAME &&
969 strcmp (ep->name, EP_STATUS_NAME) == 0) {
970 int result;
971
972 d = ep_desc (dev->gadget, &hs_status_desc, &fs_status_desc);
973 result = usb_ep_enable (ep, d);
974 if (result < 0)
975 return result;
976
977 ep->driver_data = dev;
978 dev->status = d;
979 }
980 return 0;
981}
982#endif
983
984#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
985static inline int ether_ep_setup (struct eth_dev *dev, struct usb_ep *ep)
986{
987 int result;
988 const struct usb_endpoint_descriptor *d;
989
990 /* CDC subset is simpler: if the device is there,
991 * it's live with rx and tx endpoints.
992 *
993 * Do this as a shortcut for RNDIS too.
994 */
995
996 /* one endpoint writes data back IN to the host */
997 if (strcmp (ep->name, EP_IN_NAME) == 0) {
998 d = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc);
999 result = usb_ep_enable (ep, d);
1000 if (result < 0)
1001 return result;
1002
1003 ep->driver_data = dev;
1004 dev->in = d;
1005
1006 /* one endpoint just reads OUT packets */
1007 } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
1008 d = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc);
1009 result = usb_ep_enable (ep, d);
1010 if (result < 0)
1011 return result;
1012
1013 ep->driver_data = dev;
1014 dev->out = d;
1015 }
1016
1017 return 0;
1018}
1019#endif
1020
1021static int
1022set_ether_config (struct eth_dev *dev, int gfp_flags)
1023{
1024 int result = 0;
1025 struct usb_ep *ep;
1026 struct usb_gadget *gadget = dev->gadget;
1027
1028 gadget_for_each_ep (ep, gadget) {
1029#ifdef DEV_CONFIG_CDC
1030 if (!dev->rndis && dev->cdc) {
1031 result = ether_alt_ep_setup (dev, ep);
1032 if (result == 0)
1033 continue;
1034 }
1035#endif
1036
1037#ifdef CONFIG_USB_ETH_RNDIS
1038 if (dev->rndis && strcmp (ep->name, EP_STATUS_NAME) == 0) {
1039 const struct usb_endpoint_descriptor *d;
1040 d = ep_desc (gadget, &hs_status_desc, &fs_status_desc);
1041 result = usb_ep_enable (ep, d);
1042 if (result == 0) {
1043 ep->driver_data = dev;
1044 dev->status = d;
1045 continue;
1046 }
1047 } else
1048#endif
1049
1050 {
1051#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
1052 result = ether_ep_setup (dev, ep);
1053 if (result == 0)
1054 continue;
1055#endif
1056 }
1057
1058 /* stop on error */
1059 ERROR (dev, "can't enable %s, result %d\n", ep->name, result);
1060 break;
1061 }
1062 if (!result && (!dev->in_ep || !dev->out_ep))
1063 result = -ENODEV;
1064
1065 if (result == 0)
1066 result = alloc_requests (dev, qlen (gadget), gfp_flags);
1067
1068 /* on error, disable any endpoints */
1069 if (result < 0) {
1070#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
1071 if (dev->status)
1072 (void) usb_ep_disable (dev->status_ep);
1073#endif
1074 dev->status = NULL;
1075#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
1076 if (dev->rndis || !dev->cdc) {
1077 if (dev->in)
1078 (void) usb_ep_disable (dev->in_ep);
1079 if (dev->out)
1080 (void) usb_ep_disable (dev->out_ep);
1081 }
1082#endif
1083 dev->in = NULL;
1084 dev->out = NULL;
1085 } else
1086
1087	/* activate non-CDC configs right away;
1088 * this isn't strictly according to the RNDIS spec
1089 */
1090#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
1091 if (dev->rndis || !dev->cdc) {
1092 netif_carrier_on (dev->net);
1093 if (netif_running (dev->net)) {
1094 spin_unlock (&dev->lock);
1095 eth_start (dev, GFP_ATOMIC);
1096 spin_lock (&dev->lock);
1097 }
1098 }
1099#endif
1100
1101 if (result == 0)
1102 DEBUG (dev, "qlen %d\n", qlen (gadget));
1103
1104 /* caller is responsible for cleanup on error */
1105 return result;
1106}
1107
1108static void eth_reset_config (struct eth_dev *dev)
1109{
1110 struct usb_request *req;
1111
1112 if (dev->config == 0)
1113 return;
1114
1115 DEBUG (dev, "%s\n", __FUNCTION__);
1116
1117 netif_stop_queue (dev->net);
1118 netif_carrier_off (dev->net);
1119
1120 /* disable endpoints, forcing (synchronous) completion of
1121	 * pending i/o, then free the requests.
1122 */
1123 if (dev->in) {
1124 usb_ep_disable (dev->in_ep);
1125 while (likely (!list_empty (&dev->tx_reqs))) {
1126 req = container_of (dev->tx_reqs.next,
1127 struct usb_request, list);
1128 list_del (&req->list);
1129 usb_ep_free_request (dev->in_ep, req);
1130 }
1131 }
1132 if (dev->out) {
1133 usb_ep_disable (dev->out_ep);
1134 while (likely (!list_empty (&dev->rx_reqs))) {
1135 req = container_of (dev->rx_reqs.next,
1136 struct usb_request, list);
1137 list_del (&req->list);
1138 usb_ep_free_request (dev->out_ep, req);
1139 }
1140 }
1141
1142 if (dev->status) {
1143 usb_ep_disable (dev->status_ep);
1144 }
1145 dev->config = 0;
1146}
1147
1148/* change our operational config.  this must agree with the code
1149 * that returns config descriptors, and with the altsetting code.
1150 */
1151static int
1152eth_set_config (struct eth_dev *dev, unsigned number, int gfp_flags)
1153{
1154 int result = 0;
1155 struct usb_gadget *gadget = dev->gadget;
1156
1157 if (number == dev->config)
1158 return 0;
1159
1160 if (gadget_is_sa1100 (gadget)
1161 && dev->config
1162 && atomic_read (&dev->tx_qlen) != 0) {
1163 /* tx fifo is full, but we can't clear it...*/
1164 INFO (dev, "can't change configurations\n");
1165 return -ESPIPE;
1166 }
1167 eth_reset_config (dev);
1168
1169 /* default: pass all packets, no multicast filtering */
1170 dev->cdc_filter = 0x000f;
1171
1172 switch (number) {
1173 case DEV_CONFIG_VALUE:
1174 dev->rndis = 0;
1175 result = set_ether_config (dev, gfp_flags);
1176 break;
1177#ifdef CONFIG_USB_ETH_RNDIS
1178 case DEV_RNDIS_CONFIG_VALUE:
1179 dev->rndis = 1;
1180 result = set_ether_config (dev, gfp_flags);
1181 break;
1182#endif
1183 default:
1184 result = -EINVAL;
1185 /* FALL THROUGH */
1186 case 0:
1187 break;
1188 }
1189
1190 if (result) {
1191 if (number)
1192 eth_reset_config (dev);
1193 usb_gadget_vbus_draw(dev->gadget,
1194 dev->gadget->is_otg ? 8 : 100);
1195 } else {
1196 char *speed;
1197 unsigned power;
1198
1199 power = 2 * eth_config.bMaxPower;
1200 usb_gadget_vbus_draw(dev->gadget, power);
1201
1202 switch (gadget->speed) {
1203 case USB_SPEED_FULL: speed = "full"; break;
1204#ifdef CONFIG_USB_GADGET_DUALSPEED
1205 case USB_SPEED_HIGH: speed = "high"; break;
1206#endif
1207 default: speed = "?"; break;
1208 }
1209
1210 dev->config = number;
1211 INFO (dev, "%s speed config #%d: %d mA, %s, using %s\n",
1212 speed, number, power, driver_desc,
1213 dev->rndis
1214 ? "RNDIS"
1215 : (dev->cdc
1216 ? "CDC Ethernet"
1217 : "CDC Ethernet Subset"));
1218 }
1219 return result;
1220}
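
A quick check on the vbus_draw arithmetic above: bMaxPower in a USB configuration descriptor is expressed in 2 mA units, so power = 2 * bMaxPower yields milliamps. The OTG branch of eth_bind() later in this file sets eth_config.bMaxPower = 4, which works out to 2 * 4 = 8 mA and matches the 8 mA drawn in the error path while an OTG link is unconfigured; the non-OTG fallback of 100 mA is simply the usual budget for an unconfigured device.
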
1221
1222/*-------------------------------------------------------------------------*/
1223
1224#ifdef DEV_CONFIG_CDC
1225
1226static void eth_status_complete (struct usb_ep *ep, struct usb_request *req)
1227{
1228 struct usb_cdc_notification *event = req->buf;
1229 int value = req->status;
1230 struct eth_dev *dev = ep->driver_data;
1231
1232 /* issue the second notification if host reads the first */
1233 if (event->bNotificationType == USB_CDC_NOTIFY_NETWORK_CONNECTION
1234 && value == 0) {
1235 __le32 *data = req->buf + sizeof *event;
1236
1237 event->bmRequestType = 0xA1;
1238 event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
1239 event->wValue = __constant_cpu_to_le16 (0);
1240 event->wIndex = __constant_cpu_to_le16 (1);
1241 event->wLength = __constant_cpu_to_le16 (8);
1242
1243 /* SPEED_CHANGE data is up/down speeds in bits/sec */
1244 data [0] = data [1] = cpu_to_le32 (BITRATE (dev->gadget));
1245
1246 req->length = STATUS_BYTECOUNT;
1247 value = usb_ep_queue (ep, req, GFP_ATOMIC);
1248 DEBUG (dev, "send SPEED_CHANGE --> %d\n", value);
1249 if (value == 0)
1250 return;
1251 } else if (value != -ECONNRESET)
1252 DEBUG (dev, "event %02x --> %d\n",
1253 event->bNotificationType, value);
1254 event->bmRequestType = 0xff;
1255}
1256
1257static void issue_start_status (struct eth_dev *dev)
1258{
1259 struct usb_request *req = dev->stat_req;
1260 struct usb_cdc_notification *event;
1261 int value;
1262
1263 DEBUG (dev, "%s, flush old status first\n", __FUNCTION__);
1264
1265 /* flush old status
1266 *
1267 * FIXME ugly idiom, maybe we'd be better with just
1268 * a "cancel the whole queue" primitive since any
1269 * unlink-one primitive has way too many error modes.
1270 * here, we "know" toggle is already clear...
1271 */
1272 usb_ep_disable (dev->status_ep);
1273 usb_ep_enable (dev->status_ep, dev->status);
1274
1275 /* 3.8.1 says to issue first NETWORK_CONNECTION, then
1276 * a SPEED_CHANGE. could be useful in some configs.
1277 */
1278 event = req->buf;
1279 event->bmRequestType = 0xA1;
1280 event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
1281 event->wValue = __constant_cpu_to_le16 (1); /* connected */
1282 event->wIndex = __constant_cpu_to_le16 (1);
1283 event->wLength = 0;
1284
1285 req->length = sizeof *event;
1286 req->complete = eth_status_complete;
1287 value = usb_ep_queue (dev->status_ep, req, GFP_ATOMIC);
1288 if (value < 0)
1289 DEBUG (dev, "status buf queue --> %d\n", value);
1290}
1291
1292#endif
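
For reference, the two notifications built by issue_start_status() and eth_status_complete() share the 8-byte CDC notification header and differ only in type and payload; the summary below restates what the code above queues on the status endpoint (a reading aid, not a quote from the CDC spec):

	/*
	 * NETWORK_CONNECTION (queued first): header only
	 *   bmRequestType 0xA1, bNotificationType USB_CDC_NOTIFY_NETWORK_CONNECTION,
	 *   wValue 1 ("connected"), wIndex 1, wLength 0
	 *
	 * SPEED_CHANGE (queued from the completion handler): header + 8 data bytes
	 *   bmRequestType 0xA1, bNotificationType USB_CDC_NOTIFY_SPEED_CHANGE,
	 *   wValue 0, wIndex 1, wLength 8
	 *   data: two __le32 bit rates (up/down), both set to BITRATE(dev->gadget)
	 */
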
1293
1294/*-------------------------------------------------------------------------*/
1295
1296static void eth_setup_complete (struct usb_ep *ep, struct usb_request *req)
1297{
1298 if (req->status || req->actual != req->length)
1299 DEBUG ((struct eth_dev *) ep->driver_data,
1300 "setup complete --> %d, %d/%d\n",
1301 req->status, req->actual, req->length);
1302}
1303
1304#ifdef CONFIG_USB_ETH_RNDIS
1305
1306static void rndis_response_complete (struct usb_ep *ep, struct usb_request *req)
1307{
1308 if (req->status || req->actual != req->length)
1309 DEBUG ((struct eth_dev *) ep->driver_data,
1310 "rndis response complete --> %d, %d/%d\n",
1311 req->status, req->actual, req->length);
1312
1313 /* done sending after USB_CDC_GET_ENCAPSULATED_RESPONSE */
1314}
1315
1316static void rndis_command_complete (struct usb_ep *ep, struct usb_request *req)
1317{
1318 struct eth_dev *dev = ep->driver_data;
1319 int status;
1320
1321 /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
1322 spin_lock(&dev->lock);
1323 status = rndis_msg_parser (dev->rndis_config, (u8 *) req->buf);
1324 if (status < 0)
1325 ERROR(dev, "%s: rndis parse error %d\n", __FUNCTION__, status);
1326 spin_unlock(&dev->lock);
1327}
1328
1329#endif /* RNDIS */
1330
1331/*
1332 * The setup() callback implements all the ep0 functionality that's not
1333 * handled lower down. CDC has a number of less-common features:
1334 *
1335 * - two interfaces: control, and ethernet data
1336 * - Ethernet data interface has two altsettings: default, and active
1337 * - class-specific descriptors for the control interface
1338 * - class-specific control requests
1339 */
1340static int
1341eth_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1342{
1343 struct eth_dev *dev = get_gadget_data (gadget);
1344 struct usb_request *req = dev->req;
1345 int value = -EOPNOTSUPP;
1346 u16 wIndex = ctrl->wIndex;
1347 u16 wValue = ctrl->wValue;
1348 u16 wLength = ctrl->wLength;
1349
1350 /* descriptors just go into the pre-allocated ep0 buffer,
1351 * while config change events may enable network traffic.
1352 */
1353 req->complete = eth_setup_complete;
1354 switch (ctrl->bRequest) {
1355
1356 case USB_REQ_GET_DESCRIPTOR:
1357 if (ctrl->bRequestType != USB_DIR_IN)
1358 break;
1359 switch (wValue >> 8) {
1360
1361 case USB_DT_DEVICE:
1362 value = min (wLength, (u16) sizeof device_desc);
1363 memcpy (req->buf, &device_desc, value);
1364 break;
1365#ifdef CONFIG_USB_GADGET_DUALSPEED
1366 case USB_DT_DEVICE_QUALIFIER:
1367 if (!gadget->is_dualspeed)
1368 break;
1369 value = min (wLength, (u16) sizeof dev_qualifier);
1370 memcpy (req->buf, &dev_qualifier, value);
1371 break;
1372
1373 case USB_DT_OTHER_SPEED_CONFIG:
1374 if (!gadget->is_dualspeed)
1375 break;
1376 // FALLTHROUGH
1377#endif /* CONFIG_USB_GADGET_DUALSPEED */
1378 case USB_DT_CONFIG:
1379 value = config_buf (gadget->speed, req->buf,
1380 wValue >> 8,
1381 wValue & 0xff,
1382 gadget->is_otg);
1383 if (value >= 0)
1384 value = min (wLength, (u16) value);
1385 break;
1386
1387 case USB_DT_STRING:
1388 value = usb_gadget_get_string (&stringtab,
1389 wValue & 0xff, req->buf);
1390 if (value >= 0)
1391 value = min (wLength, (u16) value);
1392 break;
1393 }
1394 break;
1395
1396 case USB_REQ_SET_CONFIGURATION:
1397 if (ctrl->bRequestType != 0)
1398 break;
1399 if (gadget->a_hnp_support)
1400 DEBUG (dev, "HNP available\n");
1401 else if (gadget->a_alt_hnp_support)
1402 DEBUG (dev, "HNP needs a different root port\n");
1403 spin_lock (&dev->lock);
1404 value = eth_set_config (dev, wValue, GFP_ATOMIC);
1405 spin_unlock (&dev->lock);
1406 break;
1407 case USB_REQ_GET_CONFIGURATION:
1408 if (ctrl->bRequestType != USB_DIR_IN)
1409 break;
1410 *(u8 *)req->buf = dev->config;
1411 value = min (wLength, (u16) 1);
1412 break;
1413
1414 case USB_REQ_SET_INTERFACE:
1415 if (ctrl->bRequestType != USB_RECIP_INTERFACE
1416 || !dev->config
1417 || wIndex > 1)
1418 break;
1419 if (!dev->cdc && wIndex != 0)
1420 break;
1421 spin_lock (&dev->lock);
1422
1423 /* PXA hardware partially handles SET_INTERFACE;
1424 * we need to kluge around that interference.
1425 */
1426 if (gadget_is_pxa (gadget)) {
1427 value = eth_set_config (dev, DEV_CONFIG_VALUE,
1428 GFP_ATOMIC);
1429 goto done_set_intf;
1430 }
1431
1432#ifdef DEV_CONFIG_CDC
1433 switch (wIndex) {
1434 case 0: /* control/master intf */
1435 if (wValue != 0)
1436 break;
1437 if (dev->status) {
1438 usb_ep_disable (dev->status_ep);
1439 usb_ep_enable (dev->status_ep, dev->status);
1440 }
1441 value = 0;
1442 break;
1443 case 1: /* data intf */
1444 if (wValue > 1)
1445 break;
1446 usb_ep_disable (dev->in_ep);
1447 usb_ep_disable (dev->out_ep);
1448
1449 /* CDC requires the data transfers not be done from
1450 * the default interface setting ... also, setting
1451 * the non-default interface clears filters etc.
1452 */
1453 if (wValue == 1) {
1454 usb_ep_enable (dev->in_ep, dev->in);
1455 usb_ep_enable (dev->out_ep, dev->out);
1456 dev->cdc_filter = DEFAULT_FILTER;
1457 netif_carrier_on (dev->net);
1458 if (dev->status)
1459 issue_start_status (dev);
1460 if (netif_running (dev->net)) {
1461 spin_unlock (&dev->lock);
1462 eth_start (dev, GFP_ATOMIC);
1463 spin_lock (&dev->lock);
1464 }
1465 } else {
1466 netif_stop_queue (dev->net);
1467 netif_carrier_off (dev->net);
1468 }
1469 value = 0;
1470 break;
1471 }
1472#else
1473 /* FIXME this is wrong, as is the assumption that
1474 * all non-PXA hardware talks real CDC ...
1475 */
1476 dev_warn (&gadget->dev, "set_interface ignored!\n");
1477#endif /* DEV_CONFIG_CDC */
1478
1479done_set_intf:
1480 spin_unlock (&dev->lock);
1481 break;
1482 case USB_REQ_GET_INTERFACE:
1483 if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)
1484 || !dev->config
1485 || wIndex > 1)
1486 break;
1487 if (!(dev->cdc || dev->rndis) && wIndex != 0)
1488 break;
1489
1490 /* for CDC, iff carrier is on, data interface is active. */
1491 if (dev->rndis || wIndex != 1)
1492 *(u8 *)req->buf = 0;
1493 else
1494 *(u8 *)req->buf = netif_carrier_ok (dev->net) ? 1 : 0;
1495 value = min (wLength, (u16) 1);
1496 break;
1497
1498#ifdef DEV_CONFIG_CDC
1499 case USB_CDC_SET_ETHERNET_PACKET_FILTER:
1500 /* see 6.2.30: no data, wIndex = interface,
1501 * wValue = packet filter bitmap
1502 */
1503 if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
1504 || !dev->cdc
1505 || dev->rndis
1506 || wLength != 0
1507 || wIndex > 1)
1508 break;
1509 DEBUG (dev, "packet filter %02x\n", wValue);
1510 dev->cdc_filter = wValue;
1511 value = 0;
1512 break;
1513
1514 /* and potentially:
1515 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
1516 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
1517 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
1518 * case USB_CDC_GET_ETHERNET_STATISTIC:
1519 */
1520
1521#endif /* DEV_CONFIG_CDC */
1522
1523#ifdef CONFIG_USB_ETH_RNDIS
1524 /* RNDIS uses the CDC command encapsulation mechanism to implement
1525 * an RPC scheme, with much getting/setting of attributes by OID.
1526 */
1527 case USB_CDC_SEND_ENCAPSULATED_COMMAND:
1528 if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
1529 || !dev->rndis
1530 || wLength > USB_BUFSIZ
1531 || wValue
1532 || rndis_control_intf.bInterfaceNumber
1533 != wIndex)
1534 break;
1535 /* read the request, then process it */
1536 value = wLength;
1537 req->complete = rndis_command_complete;
1538 /* later, rndis_control_ack () sends a notification */
1539 break;
1540
1541 case USB_CDC_GET_ENCAPSULATED_RESPONSE:
1542 if ((USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE)
1543 == ctrl->bRequestType
1544 && dev->rndis
1545 // && wLength >= 0x0400
1546 && !wValue
1547 && rndis_control_intf.bInterfaceNumber
1548 == wIndex) {
1549 u8 *buf;
1550
1551 /* return the result */
1552 buf = rndis_get_next_response (dev->rndis_config,
1553 &value);
1554 if (buf) {
1555 memcpy (req->buf, buf, value);
1556 req->complete = rndis_response_complete;
1557 rndis_free_response(dev->rndis_config, buf);
1558 }
1559 /* else stalls ... spec says to avoid that */
1560 }
1561 break;
1562#endif /* RNDIS */
1563
1564 default:
1565 VDEBUG (dev,
1566 "unknown control req%02x.%02x v%04x i%04x l%d\n",
1567 ctrl->bRequestType, ctrl->bRequest,
1568 wValue, wIndex, wLength);
1569 }
1570
1571 /* respond with data transfer before status phase? */
1572 if (value >= 0) {
1573 req->length = value;
1574 req->zero = value < wLength
1575 && (value % gadget->ep0->maxpacket) == 0;
1576 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
1577 if (value < 0) {
1578 DEBUG (dev, "ep_queue --> %d\n", value);
1579 req->status = 0;
1580 eth_setup_complete (gadget->ep0, req);
1581 }
1582 }
1583
1584 /* host either stalls (value < 0) or reports success */
1585 return value;
1586}
1587
1588static void
1589eth_disconnect (struct usb_gadget *gadget)
1590{
1591 struct eth_dev *dev = get_gadget_data (gadget);
1592 unsigned long flags;
1593
1594 spin_lock_irqsave (&dev->lock, flags);
1595 netif_stop_queue (dev->net);
1596 netif_carrier_off (dev->net);
1597 eth_reset_config (dev);
1598 spin_unlock_irqrestore (&dev->lock, flags);
1599
1600 /* FIXME RNDIS should enter RNDIS_UNINITIALIZED */
1601
1602 /* next we may get setup() calls to enumerate new connections;
1603 * or an unbind() during shutdown (including removing module).
1604 */
1605}
1606
1607/*-------------------------------------------------------------------------*/
1608
1609/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
1610
1611static int eth_change_mtu (struct net_device *net, int new_mtu)
1612{
1613 struct eth_dev *dev = netdev_priv(net);
1614
1615 // FIXME if rndis, don't change while link's live
1616
1617 if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
1618 return -ERANGE;
1619 /* no zero-length packet read wanted after mtu-sized packets */
1620 if (((new_mtu + sizeof (struct ethhdr)) % dev->in_ep->maxpacket) == 0)
1621 return -EDOM;
1622 net->mtu = new_mtu;
1623 return 0;
1624}
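
A worked example of the -EDOM check above: high-speed bulk endpoints use 512-byte packets, so asking for new_mtu = 1010 gives 1010 + 14 (sizeof(struct ethhdr)) = 1024, an exact multiple of 512, and is refused, while the usual 1500 gives 1514, not a multiple of 512, and is accepted. Refusing the exact-multiple case means a maximum-length frame sent to the host always ends in a short packet, so it never needs a zero-length packet to terminate the transfer.
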
1625
1626static struct net_device_stats *eth_get_stats (struct net_device *net)
1627{
1628 return &((struct eth_dev *)netdev_priv(net))->stats;
1629}
1630
1631static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
1632{
1633 struct eth_dev *dev = netdev_priv(net);
1634 strlcpy(p->driver, shortname, sizeof p->driver);
1635 strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
1636 strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
1637 strlcpy (p->bus_info, dev->gadget->dev.bus_id, sizeof p->bus_info);
1638}
1639
1640static u32 eth_get_link(struct net_device *net)
1641{
1642 struct eth_dev *dev = netdev_priv(net);
1643 return dev->gadget->speed != USB_SPEED_UNKNOWN;
1644}
1645
1646static struct ethtool_ops ops = {
1647 .get_drvinfo = eth_get_drvinfo,
1648 .get_link = eth_get_link
1649};
1650
1651static void defer_kevent (struct eth_dev *dev, int flag)
1652{
1653 if (test_and_set_bit (flag, &dev->todo))
1654 return;
1655 if (!schedule_work (&dev->work))
1656 ERROR (dev, "kevent %d may have been dropped\n", flag);
1657 else
1658 DEBUG (dev, "kevent %d scheduled\n", flag);
1659}
1660
1661static void rx_complete (struct usb_ep *ep, struct usb_request *req);
1662
1663static int
1664rx_submit (struct eth_dev *dev, struct usb_request *req, int gfp_flags)
1665{
1666 struct sk_buff *skb;
1667 int retval = -ENOMEM;
1668 size_t size;
1669
1670 /* Padding up to RX_EXTRA handles minor disagreements with host.
1671	 * Normally we use the USB "terminate on short read" convention,
1672 * so allow up to (N*maxpacket), since that memory is normally
1673 * already allocated. Some hardware doesn't deal well with short
1674 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
1675 * byte off the end (to force hardware errors on overflow).
1676 *
1677 * RNDIS uses internal framing, and explicitly allows senders to
1678 * pad to end-of-packet. That's potentially nice for speed,
1679 * but means receivers can't recover synch on their own.
1680 */
1681 size = (sizeof (struct ethhdr) + dev->net->mtu + RX_EXTRA);
1682 size += dev->out_ep->maxpacket - 1;
1683#ifdef CONFIG_USB_ETH_RNDIS
1684 if (dev->rndis)
1685 size += sizeof (struct rndis_packet_msg_type);
1686#endif
1687 size -= size % dev->out_ep->maxpacket;
1688
1689 if ((skb = alloc_skb (size + NET_IP_ALIGN, gfp_flags)) == 0) {
1690 DEBUG (dev, "no rx skb\n");
1691 goto enomem;
1692 }
1693
1694 /* Some platforms perform better when IP packets are aligned,
1695 * but on at least one, checksumming fails otherwise. Note:
1696 * this doesn't account for variable-sized RNDIS headers.
1697 */
1698 skb_reserve(skb, NET_IP_ALIGN);
1699
1700 req->buf = skb->data;
1701 req->length = size;
1702 req->complete = rx_complete;
1703 req->context = skb;
1704
1705 retval = usb_ep_queue (dev->out_ep, req, gfp_flags);
1706 if (retval == -ENOMEM)
1707enomem:
1708 defer_kevent (dev, WORK_RX_MEMORY);
1709 if (retval) {
1710 DEBUG (dev, "rx submit --> %d\n", retval);
1711 dev_kfree_skb_any (skb);
1712 spin_lock (&dev->lock);
1713 list_add (&req->list, &dev->rx_reqs);
1714 spin_unlock (&dev->lock);
1715 }
1716 return retval;
1717}
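
To make the size arithmetic above concrete: at high speed (512-byte bulk maxpacket) with the default 1500-byte MTU, and ignoring RX_EXTRA for the moment, the estimate is 14 + 1500 = 1514 bytes; adding maxpacket - 1 = 511 and then trimming the remainder rounds that up to the next packet boundary, 1536 = 3 * 512. The request is therefore a whole number of packets, which suits controllers whose DMA wants N*maxpacket and leaves room for a host that pads out the final packet. RX_EXTRA and, for RNDIS, the per-packet header are simply added into the same sum before the rounding.
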
1718
1719static void rx_complete (struct usb_ep *ep, struct usb_request *req)
1720{
1721 struct sk_buff *skb = req->context;
1722 struct eth_dev *dev = ep->driver_data;
1723 int status = req->status;
1724
1725 switch (status) {
1726
1727 /* normal completion */
1728 case 0:
1729 skb_put (skb, req->actual);
1730#ifdef CONFIG_USB_ETH_RNDIS
1731 /* we know MaxPacketsPerTransfer == 1 here */
1732 if (dev->rndis)
1733 rndis_rm_hdr (req->buf, &(skb->len));
1734#endif
1735 if (ETH_HLEN > skb->len || skb->len > ETH_FRAME_LEN) {
1736 dev->stats.rx_errors++;
1737 dev->stats.rx_length_errors++;
1738 DEBUG (dev, "rx length %d\n", skb->len);
1739 break;
1740 }
1741
1742 skb->dev = dev->net;
1743 skb->protocol = eth_type_trans (skb, dev->net);
1744 dev->stats.rx_packets++;
1745 dev->stats.rx_bytes += skb->len;
1746
1747 /* no buffer copies needed, unless hardware can't
1748 * use skb buffers.
1749 */
1750 status = netif_rx (skb);
1751 skb = NULL;
1752 break;
1753
1754 /* software-driven interface shutdown */
1755 case -ECONNRESET: // unlink
1756 case -ESHUTDOWN: // disconnect etc
1757 VDEBUG (dev, "rx shutdown, code %d\n", status);
1758 goto quiesce;
1759
1760 /* for hardware automagic (such as pxa) */
1761 case -ECONNABORTED: // endpoint reset
1762 DEBUG (dev, "rx %s reset\n", ep->name);
1763 defer_kevent (dev, WORK_RX_MEMORY);
1764quiesce:
1765 dev_kfree_skb_any (skb);
1766 goto clean;
1767
1768 /* data overrun */
1769 case -EOVERFLOW:
1770 dev->stats.rx_over_errors++;
1771 // FALLTHROUGH
1772
1773 default:
1774 dev->stats.rx_errors++;
1775 DEBUG (dev, "rx status %d\n", status);
1776 break;
1777 }
1778
1779 if (skb)
1780 dev_kfree_skb_any (skb);
1781 if (!netif_running (dev->net)) {
1782clean:
1783 /* nobody reading rx_reqs, so no dev->lock */
1784 list_add (&req->list, &dev->rx_reqs);
1785 req = NULL;
1786 }
1787 if (req)
1788 rx_submit (dev, req, GFP_ATOMIC);
1789}
1790
1791static int prealloc (struct list_head *list, struct usb_ep *ep,
1792 unsigned n, int gfp_flags)
1793{
1794 unsigned i;
1795 struct usb_request *req;
1796
1797 if (!n)
1798 return -ENOMEM;
1799
1800 /* queue/recycle up to N requests */
1801 i = n;
1802 list_for_each_entry (req, list, list) {
1803 if (i-- == 0)
1804 goto extra;
1805 }
1806 while (i--) {
1807 req = usb_ep_alloc_request (ep, gfp_flags);
1808 if (!req)
1809 return list_empty (list) ? -ENOMEM : 0;
1810 list_add (&req->list, list);
1811 }
1812 return 0;
1813
1814extra:
1815 /* free extras */
1816 for (;;) {
1817 struct list_head *next;
1818
1819 next = req->list.next;
1820 list_del (&req->list);
1821 usb_ep_free_request (ep, req);
1822
1823 if (next == list)
1824 break;
1825
1826 req = container_of (next, struct usb_request, list);
1827 }
1828 return 0;
1829}
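
In short, prealloc() converges the list to exactly n requests: with more than n already queued, the "extra" branch frees the surplus; with fewer, only the shortfall is allocated, and a partial allocation failure still returns success as long as the list is not empty (the queue just runs shorter). For example, shrinking from 8 to 4 entries frees four requests, and growing from 4 back to 8 allocates only the missing four.
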
1830
1831static int alloc_requests (struct eth_dev *dev, unsigned n, int gfp_flags)
1832{
1833 int status;
1834
1835 status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags);
1836 if (status < 0)
1837 goto fail;
1838 status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags);
1839 if (status < 0)
1840 goto fail;
1841 return 0;
1842fail:
1843 DEBUG (dev, "can't alloc requests\n");
1844 return status;
1845}
1846
1847static void rx_fill (struct eth_dev *dev, int gfp_flags)
1848{
1849 struct usb_request *req;
1850 unsigned long flags;
1851
1852 clear_bit (WORK_RX_MEMORY, &dev->todo);
1853
1854 /* fill unused rxq slots with some skb */
1855 spin_lock_irqsave (&dev->lock, flags);
1856 while (!list_empty (&dev->rx_reqs)) {
1857 req = container_of (dev->rx_reqs.next,
1858 struct usb_request, list);
1859 list_del_init (&req->list);
1860 spin_unlock_irqrestore (&dev->lock, flags);
1861
1862 if (rx_submit (dev, req, gfp_flags) < 0) {
1863 defer_kevent (dev, WORK_RX_MEMORY);
1864 return;
1865 }
1866
1867 spin_lock_irqsave (&dev->lock, flags);
1868 }
1869 spin_unlock_irqrestore (&dev->lock, flags);
1870}
1871
1872static void eth_work (void *_dev)
1873{
1874 struct eth_dev *dev = _dev;
1875
1876 if (test_bit (WORK_RX_MEMORY, &dev->todo)) {
1877 if (netif_running (dev->net))
1878 rx_fill (dev, GFP_KERNEL);
1879 else
1880 clear_bit (WORK_RX_MEMORY, &dev->todo);
1881 }
1882
1883 if (dev->todo)
1884 DEBUG (dev, "work done, flags = 0x%lx\n", dev->todo);
1885}
1886
1887static void tx_complete (struct usb_ep *ep, struct usb_request *req)
1888{
1889 struct sk_buff *skb = req->context;
1890 struct eth_dev *dev = ep->driver_data;
1891
1892 switch (req->status) {
1893 default:
1894 dev->stats.tx_errors++;
1895 VDEBUG (dev, "tx err %d\n", req->status);
1896 /* FALLTHROUGH */
1897 case -ECONNRESET: // unlink
1898 case -ESHUTDOWN: // disconnect etc
1899 break;
1900 case 0:
1901 dev->stats.tx_bytes += skb->len;
1902 }
1903 dev->stats.tx_packets++;
1904
1905 spin_lock (&dev->lock);
1906 list_add (&req->list, &dev->tx_reqs);
1907 spin_unlock (&dev->lock);
1908 dev_kfree_skb_any (skb);
1909
1910 atomic_dec (&dev->tx_qlen);
1911 if (netif_carrier_ok (dev->net))
1912 netif_wake_queue (dev->net);
1913}
1914
1915static inline int eth_is_promisc (struct eth_dev *dev)
1916{
1917 /* no filters for the CDC subset; always promisc */
1918 if (subset_active (dev))
1919 return 1;
1920 return dev->cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
1921}
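
For reference when reading this test and the transmit-side checks in eth_start_xmit() below: dev->cdc_filter holds the bitmap set by USB_CDC_SET_ETHERNET_PACKET_FILTER (handled in eth_setup() above), and the 0x000f default installed by eth_set_config() decodes as follows, assuming the usual USB_CDC_PACKET_TYPE_* bit assignments from the kernel's CDC header:

	/* assumed flag values:
	 *   (1 << 0) PROMISCUOUS     (1 << 1) ALL_MULTICAST
	 *   (1 << 2) DIRECTED        (1 << 3) BROADCAST
	 *   (1 << 4) MULTICAST (per-address list)
	 *
	 * 0x000f = PROMISCUOUS | ALL_MULTICAST | DIRECTED | BROADCAST,
	 * i.e. "pass all packets, no multicast filtering", as the comment
	 * in eth_set_config() puts it.
	 */
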
1922
1923static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
1924{
1925 struct eth_dev *dev = netdev_priv(net);
1926 int length = skb->len;
1927 int retval;
1928 struct usb_request *req = NULL;
1929 unsigned long flags;
1930
1931 /* apply outgoing CDC or RNDIS filters */
1932 if (!eth_is_promisc (dev)) {
1933 u8 *dest = skb->data;
1934
1935 if (dest [0] & 0x01) {
1936 u16 type;
1937
1938 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
1939 * SET_ETHERNET_MULTICAST_FILTERS requests
1940 */
1941 if (memcmp (dest, net->broadcast, ETH_ALEN) == 0)
1942 type = USB_CDC_PACKET_TYPE_BROADCAST;
1943 else
1944 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
1945 if (!(dev->cdc_filter & type)) {
1946 dev_kfree_skb_any (skb);
1947 return 0;
1948 }
1949 }
1950 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
1951 }
1952
1953 spin_lock_irqsave (&dev->lock, flags);
1954 req = container_of (dev->tx_reqs.next, struct usb_request, list);
1955 list_del (&req->list);
1956 if (list_empty (&dev->tx_reqs))
1957 netif_stop_queue (net);
1958 spin_unlock_irqrestore (&dev->lock, flags);
1959
1960	/* no buffer copies needed, unless the network stack did it,
1961	 * or the hardware can't use skb buffers,
1962	 * or there's not enough space for any RNDIS headers we need.
1963 */
1964#ifdef CONFIG_USB_ETH_RNDIS
1965 if (dev->rndis) {
1966 struct sk_buff *skb_rndis;
1967
1968 skb_rndis = skb_realloc_headroom (skb,
1969 sizeof (struct rndis_packet_msg_type));
1970 if (!skb_rndis)
1971 goto drop;
1972
1973 dev_kfree_skb_any (skb);
1974 skb = skb_rndis;
1975 rndis_add_hdr (skb);
1976 length = skb->len;
1977 }
1978#endif
1979 req->buf = skb->data;
1980 req->context = skb;
1981 req->complete = tx_complete;
1982
1983 /* use zlp framing on tx for strict CDC-Ether conformance,
1984	 * though any robust network rx path ignores extra padding;
1985 * and some hardware doesn't like to write zlps.
1986 */
1987 req->zero = 1;
1988 if (!dev->zlp && (length % dev->in_ep->maxpacket) == 0)
1989 length++;
1990
1991 req->length = length;
1992
1993#ifdef CONFIG_USB_GADGET_DUALSPEED
1994 /* throttle highspeed IRQ rate back slightly */
1995 req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
1996 ? ((atomic_read (&dev->tx_qlen) % TX_DELAY) != 0)
1997 : 0;
1998#endif
1999
2000 retval = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
2001 switch (retval) {
2002 default:
2003 DEBUG (dev, "tx queue err %d\n", retval);
2004 break;
2005 case 0:
2006 net->trans_start = jiffies;
2007 atomic_inc (&dev->tx_qlen);
2008 }
2009
2010 if (retval) {
2011#ifdef CONFIG_USB_ETH_RNDIS
2012drop:
2013#endif
2014 dev->stats.tx_dropped++;
2015 dev_kfree_skb_any (skb);
2016 spin_lock_irqsave (&dev->lock, flags);
2017 if (list_empty (&dev->tx_reqs))
2018 netif_start_queue (net);
2019 list_add (&req->list, &dev->tx_reqs);
2020 spin_unlock_irqrestore (&dev->lock, flags);
2021 }
2022 return 0;
2023}
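
A short illustration of the framing logic above: with req->zero set and a 1024-byte frame on a 512-byte bulk endpoint, a conforming UDC sends two full packets plus a zero-length packet so the host's short-read termination still fires; on hardware flagged with dev->zlp == 0 (sa1100, per eth_bind() below) the driver instead bumps the length to 1025, making the final packet one byte long, and that single byte of padding is what a robust receive path is expected to ignore.
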
2024
2025/*-------------------------------------------------------------------------*/
2026
2027#ifdef CONFIG_USB_ETH_RNDIS
2028
2029static void rndis_send_media_state (struct eth_dev *dev, int connect)
2030{
2031 if (!dev)
2032 return;
2033
2034 if (connect) {
2035 if (rndis_signal_connect (dev->rndis_config))
2036 return;
2037 } else {
2038 if (rndis_signal_disconnect (dev->rndis_config))
2039 return;
2040 }
2041}
2042
2043static void
2044rndis_control_ack_complete (struct usb_ep *ep, struct usb_request *req)
2045{
2046 if (req->status || req->actual != req->length)
2047 DEBUG ((struct eth_dev *) ep->driver_data,
2048 "rndis control ack complete --> %d, %d/%d\n",
2049 req->status, req->actual, req->length);
2050
2051 usb_ep_free_buffer(ep, req->buf, req->dma, 8);
2052 usb_ep_free_request(ep, req);
2053}
2054
2055static int rndis_control_ack (struct net_device *net)
2056{
2057 struct eth_dev *dev = netdev_priv(net);
2058	int			length;
2059 struct usb_request *resp;
2060
2061 /* in case RNDIS calls this after disconnect */
2062 if (!dev->status_ep) {
2063 DEBUG (dev, "status ENODEV\n");
2064 return -ENODEV;
2065 }
2066
2067	/* Allocate memory for the notification, i.e. the ACK */
2068 resp = usb_ep_alloc_request (dev->status_ep, GFP_ATOMIC);
2069 if (!resp) {
2070 DEBUG (dev, "status ENOMEM\n");
2071 return -ENOMEM;
2072 }
2073
2074 resp->buf = usb_ep_alloc_buffer (dev->status_ep, 8,
2075 &resp->dma, GFP_ATOMIC);
2076 if (!resp->buf) {
2077 DEBUG (dev, "status buf ENOMEM\n");
2078 usb_ep_free_request (dev->status_ep, resp);
2079 return -ENOMEM;
2080 }
2081
2082 /* Send RNDIS RESPONSE_AVAILABLE notification;
2083 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE should work too
2084 */
2085 resp->length = 8;
2086 resp->complete = rndis_control_ack_complete;
2087
2088 *((__le32 *) resp->buf) = __constant_cpu_to_le32 (1);
2089 *((__le32 *) resp->buf + 1) = __constant_cpu_to_le32 (0);
2090
2091 length = usb_ep_queue (dev->status_ep, resp, GFP_ATOMIC);
2092 if (length < 0) {
2093 resp->status = 0;
2094 rndis_control_ack_complete (dev->status_ep, resp);
2095 }
2096
2097 return 0;
2098}
2099
2100#endif /* RNDIS */
2101
2102static void eth_start (struct eth_dev *dev, int gfp_flags)
2103{
2104 DEBUG (dev, "%s\n", __FUNCTION__);
2105
2106 /* fill the rx queue */
2107 rx_fill (dev, gfp_flags);
2108
2109 /* and open the tx floodgates */
2110 atomic_set (&dev->tx_qlen, 0);
2111 netif_wake_queue (dev->net);
2112#ifdef CONFIG_USB_ETH_RNDIS
2113 if (dev->rndis) {
2114 rndis_set_param_medium (dev->rndis_config,
2115 NDIS_MEDIUM_802_3,
2116 BITRATE(dev->gadget));
2117 rndis_send_media_state (dev, 1);
2118 }
2119#endif
2120}
2121
2122static int eth_open (struct net_device *net)
2123{
2124 struct eth_dev *dev = netdev_priv(net);
2125
2126 DEBUG (dev, "%s\n", __FUNCTION__);
2127 if (netif_carrier_ok (dev->net))
2128 eth_start (dev, GFP_KERNEL);
2129 return 0;
2130}
2131
2132static int eth_stop (struct net_device *net)
2133{
2134 struct eth_dev *dev = netdev_priv(net);
2135
2136 VDEBUG (dev, "%s\n", __FUNCTION__);
2137 netif_stop_queue (net);
2138
2139 DEBUG (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
2140 dev->stats.rx_packets, dev->stats.tx_packets,
2141 dev->stats.rx_errors, dev->stats.tx_errors
2142 );
2143
2144 /* ensure there are no more active requests */
2145 if (dev->config) {
2146 usb_ep_disable (dev->in_ep);
2147 usb_ep_disable (dev->out_ep);
2148 if (netif_carrier_ok (dev->net)) {
2149 DEBUG (dev, "host still using in/out endpoints\n");
2150 // FIXME idiom may leave toggle wrong here
2151 usb_ep_enable (dev->in_ep, dev->in);
2152 usb_ep_enable (dev->out_ep, dev->out);
2153 }
2154 if (dev->status_ep) {
2155 usb_ep_disable (dev->status_ep);
2156 usb_ep_enable (dev->status_ep, dev->status);
2157 }
2158 }
2159
2160#ifdef CONFIG_USB_ETH_RNDIS
2161 if (dev->rndis) {
2162 rndis_set_param_medium (dev->rndis_config,
2163 NDIS_MEDIUM_802_3, 0);
2164 rndis_send_media_state (dev, 0);
2165 }
2166#endif
2167
2168 return 0;
2169}
2170
2171/*-------------------------------------------------------------------------*/
2172
2173static struct usb_request *eth_req_alloc (struct usb_ep *ep, unsigned size)
2174{
2175 struct usb_request *req;
2176
2177 req = usb_ep_alloc_request (ep, GFP_KERNEL);
2178 if (!req)
2179 return NULL;
2180
2181 req->buf = kmalloc (size, GFP_KERNEL);
2182 if (!req->buf) {
2183 usb_ep_free_request (ep, req);
2184 req = NULL;
2185 }
2186 return req;
2187}
2188
2189static void
2190eth_req_free (struct usb_ep *ep, struct usb_request *req)
2191{
2192 kfree (req->buf);
2193 usb_ep_free_request (ep, req);
2194}
2195
2196
2197static void
2198eth_unbind (struct usb_gadget *gadget)
2199{
2200 struct eth_dev *dev = get_gadget_data (gadget);
2201
2202 DEBUG (dev, "unbind\n");
2203#ifdef CONFIG_USB_ETH_RNDIS
2204 rndis_deregister (dev->rndis_config);
2205 rndis_exit ();
2206#endif
2207
2208 /* we've already been disconnected ... no i/o is active */
2209 if (dev->req) {
2210 eth_req_free (gadget->ep0, dev->req);
2211 dev->req = NULL;
2212 }
2213 if (dev->stat_req) {
2214 eth_req_free (dev->status_ep, dev->stat_req);
2215 dev->stat_req = NULL;
2216 }
2217
2218 unregister_netdev (dev->net);
2219 free_netdev(dev->net);
2220
2221 /* assuming we used keventd, it must quiesce too */
2222 flush_scheduled_work ();
2223 set_gadget_data (gadget, NULL);
2224}
2225
2226static u8 __init nibble (unsigned char c)
2227{
2228 if (likely (isdigit (c)))
2229 return c - '0';
2230 c = toupper (c);
2231 if (likely (isxdigit (c)))
2232 return 10 + c - 'A';
2233 return 0;
2234}
2235
2236static void __init get_ether_addr (const char *str, u8 *dev_addr)
2237{
2238 if (str) {
2239 unsigned i;
2240
2241 for (i = 0; i < 6; i++) {
2242 unsigned char num;
2243
2244	if ((*str == '.') || (*str == ':'))
2245 str++;
2246 num = nibble(*str++) << 4;
2247 num |= (nibble(*str++));
2248 dev_addr [i] = num;
2249 }
2250 if (is_valid_ether_addr (dev_addr))
2251 return;
2252 }
2253 random_ether_addr(dev_addr);
2254}
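
The parser above is deliberately forgiving: it expects twelve hex digits with optional '.' or ':' separators, so (purely illustrative values) "00:0a:0b:0c:0d:0e" and "000a0b0c0d0e" name the same address when passed through the dev_addr or host_addr module parameters used in eth_bind() below, and anything that fails to parse into a valid unicast address falls back to random_ether_addr(), i.e. a randomly generated, locally administered MAC.
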
2255
2256static int __init
2257eth_bind (struct usb_gadget *gadget)
2258{
2259 struct eth_dev *dev;
2260 struct net_device *net;
2261 u8 cdc = 1, zlp = 1, rndis = 1;
2262 struct usb_ep *in_ep, *out_ep, *status_ep = NULL;
2263 int status = -ENOMEM;
2264
2265 /* these flags are only ever cleared; compiler take note */
2266#ifndef DEV_CONFIG_CDC
2267 cdc = 0;
2268#endif
2269#ifndef CONFIG_USB_ETH_RNDIS
2270 rndis = 0;
2271#endif
2272
2273 /* Because most host side USB stacks handle CDC Ethernet, that
2274 * standard protocol is _strongly_ preferred for interop purposes.
2275 * (By everyone except Microsoft.)
2276 */
2277 if (gadget_is_net2280 (gadget)) {
2278 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201);
2279 } else if (gadget_is_dummy (gadget)) {
2280 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0202);
2281 } else if (gadget_is_pxa (gadget)) {
2282 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0203);
2283 /* pxa doesn't support altsettings */
2284 cdc = 0;
2285 } else if (gadget_is_sh(gadget)) {
2286 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0204);
2287 /* sh doesn't support multiple interfaces or configs */
2288 cdc = 0;
2289 rndis = 0;
2290 } else if (gadget_is_sa1100 (gadget)) {
2291 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0205);
2292 /* hardware can't write zlps */
2293 zlp = 0;
2294 /* sa1100 CAN do CDC, without status endpoint ... we use
2295 * non-CDC to be compatible with ARM Linux-2.4 "usb-eth".
2296 */
2297 cdc = 0;
2298 } else if (gadget_is_goku (gadget)) {
2299 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0206);
2300 } else if (gadget_is_mq11xx (gadget)) {
2301 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0207);
2302 } else if (gadget_is_omap (gadget)) {
2303 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
2304 } else if (gadget_is_lh7a40x(gadget)) {
2305 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
2306 } else if (gadget_is_n9604(gadget)) {
2307 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0210);
2308 } else if (gadget_is_pxa27x(gadget)) {
2309 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0211);
2310 } else if (gadget_is_s3c2410(gadget)) {
2311 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0212);
2312 } else if (gadget_is_at91(gadget)) {
2313 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0213);
2314 } else {
2315 /* can't assume CDC works. don't want to default to
2316 * anything less functional on CDC-capable hardware,
2317 * so we fail in this case.
2318 */
2319 dev_err (&gadget->dev,
2320 "controller '%s' not recognized\n",
2321 gadget->name);
2322 return -ENODEV;
2323 }
2324 snprintf (manufacturer, sizeof manufacturer, "%s %s/%s",
2325 system_utsname.sysname, system_utsname.release,
2326 gadget->name);
2327
2328 /* If there's an RNDIS configuration, that's what Windows wants to
2329 * be using ... so use these product IDs here and in the "linux.inf"
2330 * needed to install MSFT drivers. Current Linux kernels will use
2331 * the second configuration if it's CDC Ethernet, and need some help
2332 * to choose the right configuration otherwise.
2333 */
2334 if (rndis) {
2335 device_desc.idVendor =
2336 __constant_cpu_to_le16(RNDIS_VENDOR_NUM);
2337 device_desc.idProduct =
2338 __constant_cpu_to_le16(RNDIS_PRODUCT_NUM);
2339 snprintf (product_desc, sizeof product_desc,
2340 "RNDIS/%s", driver_desc);
2341
2342 /* CDC subset ... recognized by Linux since 2.4.10, but Windows
2343 * drivers aren't widely available.
2344 */
2345 } else if (!cdc) {
2346 device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
2347 device_desc.idVendor =
2348 __constant_cpu_to_le16(SIMPLE_VENDOR_NUM);
2349 device_desc.idProduct =
2350 __constant_cpu_to_le16(SIMPLE_PRODUCT_NUM);
2351 }
2352
2353 /* support optional vendor/distro customization */
2354 if (idVendor) {
2355 if (!idProduct) {
2356 dev_err (&gadget->dev, "idVendor needs idProduct!\n");
2357 return -ENODEV;
2358 }
2359 device_desc.idVendor = cpu_to_le16(idVendor);
2360 device_desc.idProduct = cpu_to_le16(idProduct);
2361 if (bcdDevice)
2362 device_desc.bcdDevice = cpu_to_le16(bcdDevice);
2363 }
2364 if (iManufacturer)
2365 strlcpy (manufacturer, iManufacturer, sizeof manufacturer);
2366 if (iProduct)
2367 strlcpy (product_desc, iProduct, sizeof product_desc);
2368
2369 /* all we really need is bulk IN/OUT */
2370 usb_ep_autoconfig_reset (gadget);
2371 in_ep = usb_ep_autoconfig (gadget, &fs_source_desc);
2372 if (!in_ep) {
2373autoconf_fail:
2374 dev_err (&gadget->dev,
2375 "can't autoconfigure on %s\n",
2376 gadget->name);
2377 return -ENODEV;
2378 }
2379 EP_IN_NAME = in_ep->name;
2380 in_ep->driver_data = in_ep; /* claim */
2381
2382 out_ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
2383 if (!out_ep)
2384 goto autoconf_fail;
2385 EP_OUT_NAME = out_ep->name;
2386 out_ep->driver_data = out_ep; /* claim */
2387
2388#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
2389 /* CDC Ethernet control interface doesn't require a status endpoint.
2390 * Since some hosts expect one, try to allocate one anyway.
2391 */
2392 if (cdc || rndis) {
2393 status_ep = usb_ep_autoconfig (gadget, &fs_status_desc);
2394 if (status_ep) {
2395 EP_STATUS_NAME = status_ep->name;
2396 status_ep->driver_data = status_ep; /* claim */
2397 } else if (rndis) {
2398 dev_err (&gadget->dev,
2399 "can't run RNDIS on %s\n",
2400 gadget->name);
2401 return -ENODEV;
2402#ifdef DEV_CONFIG_CDC
2403 /* pxa25x only does CDC subset; often used with RNDIS */
2404 } else if (cdc) {
2405 control_intf.bNumEndpoints = 0;
2406 /* FIXME remove endpoint from descriptor list */
2407#endif
2408 }
2409 }
2410#endif
2411
2412 /* one config: cdc, else minimal subset */
2413 if (!cdc) {
2414 eth_config.bNumInterfaces = 1;
2415 eth_config.iConfiguration = STRING_SUBSET;
2416 fs_subset_descriptors();
2417 hs_subset_descriptors();
2418 }
2419
2420 /* For now RNDIS is always a second config */
2421 if (rndis)
2422 device_desc.bNumConfigurations = 2;
2423
2424#ifdef CONFIG_USB_GADGET_DUALSPEED
2425 if (rndis)
2426 dev_qualifier.bNumConfigurations = 2;
2427 else if (!cdc)
2428 dev_qualifier.bDeviceClass = USB_CLASS_VENDOR_SPEC;
2429
2430 /* assumes ep0 uses the same value for both speeds ... */
2431 dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
2432
2433 /* and that all endpoints are dual-speed */
2434 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
2435 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
2436#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
2437 if (EP_STATUS_NAME)
2438 hs_status_desc.bEndpointAddress =
2439 fs_status_desc.bEndpointAddress;
2440#endif
2441#endif /* DUALSPEED */
2442
2443 device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
2444 usb_gadget_set_selfpowered (gadget);
2445
2446 if (gadget->is_otg) {
2447	otg_descriptor.bmAttributes |= USB_OTG_HNP;
2448 eth_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
2449 eth_config.bMaxPower = 4;
2450#ifdef CONFIG_USB_ETH_RNDIS
2451 rndis_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
2452 rndis_config.bMaxPower = 4;
2453#endif
2454 }
2455
2456 net = alloc_etherdev (sizeof *dev);
2457 if (!net)
2458 return status;
2459 dev = netdev_priv(net);
2460 spin_lock_init (&dev->lock);
2461 INIT_WORK (&dev->work, eth_work, dev);
2462 INIT_LIST_HEAD (&dev->tx_reqs);
2463 INIT_LIST_HEAD (&dev->rx_reqs);
2464
2465 /* network device setup */
2466 dev->net = net;
2467 SET_MODULE_OWNER (net);
2468 strcpy (net->name, "usb%d");
2469 dev->cdc = cdc;
2470 dev->zlp = zlp;
2471
2472 dev->in_ep = in_ep;
2473 dev->out_ep = out_ep;
2474 dev->status_ep = status_ep;
2475
2476 /* Module params for these addresses should come from ID proms.
2477 * The host side address is used with CDC and RNDIS, and commonly
2478 * ends up in a persistent config database.
2479 */
2480 get_ether_addr(dev_addr, net->dev_addr);
2481 if (cdc || rndis) {
2482 get_ether_addr(host_addr, dev->host_mac);
2483#ifdef DEV_CONFIG_CDC
2484 snprintf (ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
2485 dev->host_mac [0], dev->host_mac [1],
2486 dev->host_mac [2], dev->host_mac [3],
2487 dev->host_mac [4], dev->host_mac [5]);
2488#endif
2489 }
2490
2491 if (rndis) {
2492 status = rndis_init();
2493 if (status < 0) {
2494 dev_err (&gadget->dev, "can't init RNDIS, %d\n",
2495 status);
2496 goto fail;
2497 }
2498 }
2499
2500 net->change_mtu = eth_change_mtu;
2501 net->get_stats = eth_get_stats;
2502 net->hard_start_xmit = eth_start_xmit;
2503 net->open = eth_open;
2504 net->stop = eth_stop;
2505 // watchdog_timeo, tx_timeout ...
2506 // set_multicast_list
2507 SET_ETHTOOL_OPS(net, &ops);
2508
2509 /* preallocate control message data and buffer */
2510 dev->req = eth_req_alloc (gadget->ep0, USB_BUFSIZ);
2511 if (!dev->req)
2512 goto fail;
2513 dev->req->complete = eth_setup_complete;
2514
2515 /* ... and maybe likewise for status transfer */
2516 if (dev->status_ep) {
2517 dev->stat_req = eth_req_alloc (dev->status_ep,
2518 STATUS_BYTECOUNT);
2519 if (!dev->stat_req) {
2520 eth_req_free (gadget->ep0, dev->req);
2521 goto fail;
2522 }
2523 }
2524
2525 /* finish hookup to lower layer ... */
2526 dev->gadget = gadget;
2527 set_gadget_data (gadget, dev);
2528 gadget->ep0->driver_data = dev;
2529
2530 /* two kinds of host-initiated state changes:
2531 * - iff DATA transfer is active, carrier is "on"
2532 * - tx queueing enabled if open *and* carrier is "on"
2533 */
2534 netif_stop_queue (dev->net);
2535 netif_carrier_off (dev->net);
2536
2537 // SET_NETDEV_DEV (dev->net, &gadget->dev);
2538 status = register_netdev (dev->net);
2539 if (status < 0)
2540 goto fail1;
2541
2542 INFO (dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
2543 INFO (dev, "using %s, OUT %s IN %s%s%s\n", gadget->name,
2544 EP_OUT_NAME, EP_IN_NAME,
2545 EP_STATUS_NAME ? " STATUS " : "",
2546 EP_STATUS_NAME ? EP_STATUS_NAME : ""
2547 );
2548 INFO (dev, "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2549 net->dev_addr [0], net->dev_addr [1],
2550 net->dev_addr [2], net->dev_addr [3],
2551 net->dev_addr [4], net->dev_addr [5]);
2552
2553 if (cdc || rndis)
2554 INFO (dev, "HOST MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2555 dev->host_mac [0], dev->host_mac [1],
2556 dev->host_mac [2], dev->host_mac [3],
2557 dev->host_mac [4], dev->host_mac [5]);
2558
2559#ifdef CONFIG_USB_ETH_RNDIS
2560 if (rndis) {
2561 u32 vendorID = 0;
2562
2563 /* FIXME RNDIS vendor id == "vendor NIC code" == ? */
2564
2565 dev->rndis_config = rndis_register (rndis_control_ack);
2566 if (dev->rndis_config < 0) {
2567fail0:
2568 unregister_netdev (dev->net);
2569 status = -ENODEV;
2570 goto fail;
2571 }
2572
2573 /* these set up a lot of the OIDs that RNDIS needs */
2574 rndis_set_host_mac (dev->rndis_config, dev->host_mac);
2575 if (rndis_set_param_dev (dev->rndis_config, dev->net,
2576 &dev->stats))
2577 goto fail0;
2578 if (rndis_set_param_vendor (dev->rndis_config, vendorID,
2579 manufacturer))
2580 goto fail0;
2581 if (rndis_set_param_medium (dev->rndis_config,
2582 NDIS_MEDIUM_802_3,
2583 0))
2584 goto fail0;
2585 INFO (dev, "RNDIS ready\n");
2586 }
2587#endif
2588
2589 return status;
2590
2591fail1:
2592 dev_dbg(&gadget->dev, "register_netdev failed, %d\n", status);
2593fail:
2594 eth_unbind (gadget);
2595 return status;
2596}
2597
2598/*-------------------------------------------------------------------------*/
2599
2600static void
2601eth_suspend (struct usb_gadget *gadget)
2602{
2603 struct eth_dev *dev = get_gadget_data (gadget);
2604
2605 DEBUG (dev, "suspend\n");
2606 dev->suspended = 1;
2607}
2608
2609static void
2610eth_resume (struct usb_gadget *gadget)
2611{
2612 struct eth_dev *dev = get_gadget_data (gadget);
2613
2614 DEBUG (dev, "resume\n");
2615 dev->suspended = 0;
2616}
2617
2618/*-------------------------------------------------------------------------*/
2619
2620static struct usb_gadget_driver eth_driver = {
2621#ifdef CONFIG_USB_GADGET_DUALSPEED
2622 .speed = USB_SPEED_HIGH,
2623#else
2624 .speed = USB_SPEED_FULL,
2625#endif
2626 .function = (char *) driver_desc,
2627 .bind = eth_bind,
2628 .unbind = eth_unbind,
2629
2630 .setup = eth_setup,
2631 .disconnect = eth_disconnect,
2632
2633 .suspend = eth_suspend,
2634 .resume = eth_resume,
2635
2636 .driver = {
2637 .name = (char *) shortname,
2638 // .shutdown = ...
2639 // .suspend = ...
2640 // .resume = ...
2641 },
2642};
2643
2644MODULE_DESCRIPTION (DRIVER_DESC);
2645MODULE_AUTHOR ("David Brownell, Benedikt Spanger");
2646MODULE_LICENSE ("GPL");
2647
2648
2649static int __init init (void)
2650{
2651 return usb_gadget_register_driver (&eth_driver);
2652}
2653module_init (init);
2654
2655static void __exit cleanup (void)
2656{
2657 usb_gadget_unregister_driver (&eth_driver);
2658}
2659module_exit (cleanup);
2660
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
new file mode 100644
index 000000000000..4857f0e4ef44
--- /dev/null
+++ b/drivers/usb/gadget/file_storage.c
@@ -0,0 +1,4139 @@
1/*
2 * file_storage.c -- File-backed USB Storage Gadget, for USB development
3 *
4 * Copyright (C) 2003-2005 Alan Stern
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39/*
40 * The File-backed Storage Gadget acts as a USB Mass Storage device,
41 * appearing to the host as a disk drive. In addition to providing an
42 * example of a genuinely useful gadget driver for a USB device, it also
43 * illustrates a technique of double-buffering for increased throughput.
44 * Last but not least, it gives an easy way to probe the behavior of the
45 * Mass Storage drivers in a USB host.
46 *
47 * Backing storage is provided by a regular file or a block device, specified
48 * by the "file" module parameter. Access can be limited to read-only by
49 * setting the optional "ro" module parameter. The gadget will indicate that
50 * it has removable media if the optional "removable" module parameter is set.
51 *
52 * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
53 * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
54 * by the optional "transport" module parameter. It also supports the
55 * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
56 * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
57 * the optional "protocol" module parameter. In addition, the default
58 * Vendor ID, Product ID, and release number can be overridden.
59 *
60 * There is support for multiple logical units (LUNs), each of which has
61 * its own backing file. The number of LUNs can be set using the optional
62 * "luns" module parameter (anywhere from 1 to 8), and the corresponding
63 * files are specified using comma-separated lists for "file" and "ro".
64 * The default number of LUNs is taken from the number of "file" elements;
65 * it is 1 if "file" is not given. If "removable" is not set then a backing
66 * file must be specified for each LUN. If it is set, then an unspecified
67 * or empty backing filename means the LUN's medium is not loaded.
68 *
69 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
70 * needed (an interrupt-out endpoint is also needed for CBI). The memory
71 * requirement amounts to two 16K buffers, size configurable by a parameter.
72 * Support is included for both full-speed and high-speed operation.
73 *
74 * Module options:
75 *
76 * file=filename[,filename...]
77 * Required if "removable" is not set, names of
78 * the files or block devices used for
79 * backing storage
80 * ro=b[,b...] Default false, booleans for read-only access
81 * removable Default false, boolean for removable media
82 * luns=N Default N = number of filenames, number of
83 * LUNs to support
84 * transport=XXX Default BBB, transport name (CB, CBI, or BBB)
85 * protocol=YYY Default SCSI, protocol name (RBC, 8020 or
86 * ATAPI, QIC, UFI, 8070, or SCSI;
87 * also 1 - 6)
88 * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID
89 * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID
90 * release=0xRRRR Override the USB release number (bcdDevice)
91 * buflen=N Default N=16384, buffer size used (will be
92 * rounded down to a multiple of
93 * PAGE_CACHE_SIZE)
94 * stall Default determined according to the type of
95 * USB device controller (usually true),
96 * boolean to permit the driver to halt
97 * bulk endpoints
98 *
99 * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro",
100 * "removable", and "luns" options are available; default values are used
101 * for everything else.
102 *
103 * The pathnames of the backing files and the ro settings are available in
104 * the attribute files "file" and "ro" in the lun<n> subdirectory of the
105 * gadget's sysfs directory. If the "removable" option is set, writing to
106 * these files will simulate ejecting/loading the medium (writing an empty
107 * line means eject) and adjusting a write-enable tab. Changes to the ro
108 * setting are not allowed when the medium is loaded.
109 *
110 * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
111 */
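
As a concrete illustration of the options listed above (paths and values here are made up; only the module name g_file_storage comes from DRIVER_NAME below): a two-LUN read-only gadget backed by ordinary files could be loaded with "modprobe g_file_storage file=/tmp/lun0.img,/tmp/lun1.img ro=1,1 luns=2", while "modprobe g_file_storage removable" brings up a single removable LUN whose medium stays unloaded until a filename is written to its sysfs "file" attribute.
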
112
113
114/*
115 * Driver Design
116 *
117 * The FSG driver is fairly straightforward. There is a main kernel
118 * thread that handles most of the work. Interrupt routines field
119 * callbacks from the controller driver: bulk- and interrupt-request
120 * completion notifications, endpoint-0 events, and disconnect events.
121 * Completion events are passed to the main thread by wakeup calls. Many
122 * ep0 requests are handled at interrupt time, but SetInterface,
123 * SetConfiguration, and device reset requests are forwarded to the
124 * thread in the form of "exceptions" using SIGUSR1 signals (since they
125 * should interrupt any ongoing file I/O operations).
126 *
127 * The thread's main routine implements the standard command/data/status
128 * parts of a SCSI interaction. It and its subroutines are full of tests
129 * for pending signals/exceptions -- all this polling is necessary since
130 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
131 * indication that the driver really wants to be running in userspace.)
132 * An important point is that so long as the thread is alive it keeps an
133 * open reference to the backing file. This will prevent unmounting
134 * the backing file's underlying filesystem and could cause problems
135 * during system shutdown, for example. To prevent such problems, the
136 * thread catches INT, TERM, and KILL signals and converts them into
137 * an EXIT exception.
138 *
139 * In normal operation the main thread is started during the gadget's
140 * fsg_bind() callback and stopped during fsg_unbind(). But it can also
141 * exit when it receives a signal, and there's no point leaving the
142 * gadget running when the thread is dead. So just before the thread
143 * exits, it deregisters the gadget driver. This makes things a little
144 * tricky: The driver is deregistered at two places, and the exiting
145 * thread can indirectly call fsg_unbind() which in turn can tell the
146 * thread to exit. The first problem is resolved through the use of the
147 * REGISTERED atomic bitflag; the driver will only be deregistered once.
148 * The second problem is resolved by having fsg_unbind() check
149 * fsg->state; it won't try to stop the thread if the state is already
150 * FSG_STATE_TERMINATED.
151 *
152 * To provide maximum throughput, the driver uses a circular pipeline of
153 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
154 * arbitrarily long; in practice the benefits don't justify having more
155 * than 2 stages (i.e., double buffering). But it helps to think of the
156 * pipeline as being a long one. Each buffer head contains a bulk-in and
157 * a bulk-out request pointer (since the buffer can be used for both
158 * output and input -- directions always are given from the host's
159 * point of view) as well as a pointer to the buffer and various state
160 * variables.
161 *
162 * Use of the pipeline follows a simple protocol. There is a variable
163 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
164 * At any time that buffer head may still be in use from an earlier
165 * request, so each buffer head has a state variable indicating whether
166 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
167 * buffer head to be EMPTY, filling the buffer either by file I/O or by
168 * USB I/O (during which the buffer head is BUSY), and marking the buffer
169 * head FULL when the I/O is complete. Then the buffer will be emptied
170 * (again possibly by USB I/O, during which it is marked BUSY) and
171 * finally marked EMPTY again (possibly by a completion routine).
172 *
173 * A module parameter tells the driver to avoid stalling the bulk
174 * endpoints wherever the transport specification allows. This is
175 * necessary for some UDCs like the SuperH, which cannot reliably clear a
176 * halt on a bulk endpoint. However, under certain circumstances the
177 * Bulk-only specification requires a stall. In such cases the driver
178 * will halt the endpoint and set a flag indicating that it should clear
179 * the halt in software during the next device reset. Hopefully this
180 * will permit everything to work correctly. Furthermore, although the
181 * specification allows the bulk-out endpoint to halt when the host sends
182 * too much data, implementing this would cause an unavoidable race.
183 * The driver will always use the "no-stall" approach for OUT transfers.
184 *
185 * One subtle point concerns sending status-stage responses for ep0
186 * requests. Some of these requests, such as device reset, can involve
187 * interrupting an ongoing file I/O operation, which might take an
188 * arbitrarily long time. During that delay the host might give up on
189 * the original ep0 request and issue a new one. When that happens the
190 * driver should not notify the host about completion of the original
191 * request, as the host will no longer be waiting for it. So the driver
192 * assigns to each ep0 request a unique tag, and it keeps track of the
193 * tag value of the request associated with a long-running exception
194 * (device-reset, interface-change, or configuration-change). When the
195 * exception handler is finished, the status-stage response is submitted
196 * only if the current ep0 request tag is equal to the exception request
197 * tag. Thus only the most recently received ep0 request will get a
198 * status-stage response.
199 *
200 * Warning: This driver source file is too long. It ought to be split up
201 * into a header file plus about 3 separate .c files, to handle the details
202 * of the Gadget, USB Mass Storage, and SCSI protocols.
203 */
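/*
 * Illustrative sketch (not part of the driver): the buffer-head protocol
 * described above, reduced to its essentials.  The names match the
 * structures and helpers defined later in this file.
 *
 *	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
 *
 *	while (bh->state != BUF_STATE_EMPTY)	// wait until it is free
 *		sleep_thread(fsg);
 *	// ... fill bh->buf by file I/O, or queue bh->outreq for USB I/O;
 *	// a buffer head is BUSY while a USB transfer is in flight ...
 *	bh->state = BUF_STATE_FULL;		// ready to be drained
 *	fsg->next_buffhd_to_fill = bh->next;	// advance around the ring
 */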
204
205
206#undef DEBUG
207#undef VERBOSE
208#undef DUMP_MSGS
209
210#include <linux/config.h>
211
212#include <asm/system.h>
213#include <asm/uaccess.h>
214
215#include <linux/bitops.h>
216#include <linux/blkdev.h>
217#include <linux/compiler.h>
218#include <linux/completion.h>
219#include <linux/dcache.h>
220#include <linux/delay.h>
221#include <linux/device.h>
222#include <linux/fcntl.h>
223#include <linux/file.h>
224#include <linux/fs.h>
225#include <linux/init.h>
226#include <linux/kernel.h>
227#include <linux/limits.h>
228#include <linux/list.h>
229#include <linux/module.h>
230#include <linux/moduleparam.h>
231#include <linux/pagemap.h>
232#include <linux/rwsem.h>
233#include <linux/sched.h>
234#include <linux/signal.h>
235#include <linux/slab.h>
236#include <linux/spinlock.h>
237#include <linux/string.h>
238#include <linux/suspend.h>
239#include <linux/utsname.h>
240#include <linux/wait.h>
241
242#include <linux/usb_ch9.h>
243#include <linux/usb_gadget.h>
244
245#include "gadget_chips.h"
246
247
248/*-------------------------------------------------------------------------*/
249
250#define DRIVER_DESC "File-backed Storage Gadget"
251#define DRIVER_NAME "g_file_storage"
252#define DRIVER_VERSION "20 October 2004"
253
254static const char longname[] = DRIVER_DESC;
255static const char shortname[] = DRIVER_NAME;
256
257MODULE_DESCRIPTION(DRIVER_DESC);
258MODULE_AUTHOR("Alan Stern");
259MODULE_LICENSE("Dual BSD/GPL");
260
261/* Thanks to NetChip Technologies for donating this product ID.
262 *
263 * DO NOT REUSE THESE IDs with any other driver!! Ever!!
264 * Instead: allocate your own, using normal USB-IF procedures. */
265#define DRIVER_VENDOR_ID 0x0525 // NetChip
266#define DRIVER_PRODUCT_ID 0xa4a5 // Linux-USB File-backed Storage Gadget
267
268
269/*
270 * This driver assumes self-powered hardware and has no way for users to
271 * trigger remote wakeup. It uses autoconfiguration to select endpoints
272 * and endpoint addresses.
273 */
274
275
276/*-------------------------------------------------------------------------*/
277
278#define xprintk(f,level,fmt,args...) \
279 dev_printk(level , &(f)->gadget->dev , fmt , ## args)
280#define yprintk(l,level,fmt,args...) \
281 dev_printk(level , &(l)->dev , fmt , ## args)
282
283#ifdef DEBUG
284#define DBG(fsg,fmt,args...) \
285 xprintk(fsg , KERN_DEBUG , fmt , ## args)
286#define LDBG(lun,fmt,args...) \
287 yprintk(lun , KERN_DEBUG , fmt , ## args)
288#define MDBG(fmt,args...) \
289 printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
290#else
291#define DBG(fsg,fmt,args...) \
292 do { } while (0)
293#define LDBG(lun,fmt,args...) \
294 do { } while (0)
295#define MDBG(fmt,args...) \
296 do { } while (0)
297#undef VERBOSE
298#undef DUMP_MSGS
299#endif /* DEBUG */
300
301#ifdef VERBOSE
302#define VDBG DBG
303#define VLDBG LDBG
304#else
305#define VDBG(fsg,fmt,args...) \
306 do { } while (0)
307#define VLDBG(lun,fmt,args...) \
308 do { } while (0)
309#endif /* VERBOSE */
310
311#define ERROR(fsg,fmt,args...) \
312 xprintk(fsg , KERN_ERR , fmt , ## args)
313#define LERROR(lun,fmt,args...) \
314 yprintk(lun , KERN_ERR , fmt , ## args)
315
316#define WARN(fsg,fmt,args...) \
317 xprintk(fsg , KERN_WARNING , fmt , ## args)
318#define LWARN(lun,fmt,args...) \
319 yprintk(lun , KERN_WARNING , fmt , ## args)
320
321#define INFO(fsg,fmt,args...) \
322 xprintk(fsg , KERN_INFO , fmt , ## args)
323#define LINFO(lun,fmt,args...) \
324 yprintk(lun , KERN_INFO , fmt , ## args)
325
326#define MINFO(fmt,args...) \
327 printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
328
329
330/*-------------------------------------------------------------------------*/
331
332/* Encapsulate the module parameter settings */
333
334#define MAX_LUNS 8
335
336 /* Arggh! There should be a module_param_array_named macro! */
337static char *file[MAX_LUNS] = {NULL, };
338static int ro[MAX_LUNS] = {0, };
339
340static struct {
341 int num_filenames;
342 int num_ros;
343 unsigned int nluns;
344
345 char *transport_parm;
346 char *protocol_parm;
347 int removable;
348 unsigned short vendor;
349 unsigned short product;
350 unsigned short release;
351 unsigned int buflen;
352 int can_stall;
353
354 int transport_type;
355 char *transport_name;
356 int protocol_type;
357 char *protocol_name;
358
359} mod_data = { // Default values
360 .transport_parm = "BBB",
361 .protocol_parm = "SCSI",
362 .removable = 0,
363 .vendor = DRIVER_VENDOR_ID,
364 .product = DRIVER_PRODUCT_ID,
365 .release = 0xffff, // Use controller chip type
366 .buflen = 16384,
367 .can_stall = 1,
368 };
369
370
371module_param_array(file, charp, &mod_data.num_filenames, S_IRUGO);
372MODULE_PARM_DESC(file, "names of backing files or devices");
373
374module_param_array(ro, bool, &mod_data.num_ros, S_IRUGO);
375MODULE_PARM_DESC(ro, "true to force read-only");
376
377module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
378MODULE_PARM_DESC(luns, "number of LUNs");
379
380module_param_named(removable, mod_data.removable, bool, S_IRUGO);
381MODULE_PARM_DESC(removable, "true to simulate removable media");
382
383
384/* In the non-TEST version, only the module parameters listed above
385 * are available. */
386#ifdef CONFIG_USB_FILE_STORAGE_TEST
387
388module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
389MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");
390
391module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
392MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
393 "8070, or SCSI)");
394
395module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
396MODULE_PARM_DESC(vendor, "USB Vendor ID");
397
398module_param_named(product, mod_data.product, ushort, S_IRUGO);
399MODULE_PARM_DESC(product, "USB Product ID");
400
401module_param_named(release, mod_data.release, ushort, S_IRUGO);
402MODULE_PARM_DESC(release, "USB release number");
403
404module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
405MODULE_PARM_DESC(buflen, "I/O buffer size");
406
407module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
408MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
409
410#endif /* CONFIG_USB_FILE_STORAGE_TEST */
411
412
413/*-------------------------------------------------------------------------*/
414
415/* USB protocol value = the transport method */
416#define USB_PR_CBI 0x00 // Control/Bulk/Interrupt
417#define USB_PR_CB 0x01 // Control/Bulk w/o interrupt
418#define USB_PR_BULK 0x50 // Bulk-only
419
420/* USB subclass value = the protocol encapsulation */
421#define USB_SC_RBC 0x01 // Reduced Block Commands (flash)
422#define USB_SC_8020 0x02 // SFF-8020i, MMC-2, ATAPI (CD-ROM)
423#define USB_SC_QIC 0x03 // QIC-157 (tape)
424#define USB_SC_UFI 0x04 // UFI (floppy)
425#define USB_SC_8070 0x05 // SFF-8070i (removable)
426#define USB_SC_SCSI 0x06 // Transparent SCSI
427
428/* Bulk-only data structures */
429
430/* Command Block Wrapper */
431struct bulk_cb_wrap {
432 __le32 Signature; // Contains 'USBC'
433 u32 Tag; // Unique per command id
434 __le32 DataTransferLength; // Size of the data
435 u8 Flags; // Direction in bit 7
436 u8 Lun; // LUN (normally 0)
437 u8 Length; // Of the CDB, <= MAX_COMMAND_SIZE
438 u8 CDB[16]; // Command Data Block
439};
440
441#define USB_BULK_CB_WRAP_LEN 31
442#define USB_BULK_CB_SIG 0x43425355 // Spells out USBC
443#define USB_BULK_IN_FLAG 0x80
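/* For illustration only: a host reading 8 512-byte sectors at LBA 0 with
 * READ(10) sends a 31-byte CBW along these lines:
 *	Signature          = 0x43425355 ("USBC")
 *	Tag                = chosen by the host, echoed back in the CSW
 *	DataTransferLength = 4096
 *	Flags              = 0x80 (bit 7 set: device-to-host)
 *	Lun                = 0
 *	Length             = 10
 *	CDB[0..9]          = 28 00 00 00 00 00 00 00 08 00
 */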
444
445/* Command Status Wrapper */
446struct bulk_cs_wrap {
447 __le32 Signature; // Should = 'USBS'
448 u32 Tag; // Same as original command
449 __le32 Residue; // Amount not transferred
450 u8 Status; // See below
451};
452
453#define USB_BULK_CS_WRAP_LEN 13
454#define USB_BULK_CS_SIG 0x53425355 // Spells out 'USBS'
455#define USB_STATUS_PASS 0
456#define USB_STATUS_FAIL 1
457#define USB_STATUS_PHASE_ERROR 2
458
459/* Bulk-only class specific requests */
460#define USB_BULK_RESET_REQUEST 0xff
461#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
462
463
464/* CBI Interrupt data structure */
465struct interrupt_data {
466 u8 bType;
467 u8 bValue;
468};
469
470#define CBI_INTERRUPT_DATA_LEN 2
471
472/* CBI Accept Device-Specific Command request */
473#define USB_CBI_ADSC_REQUEST 0x00
474
475
476#define MAX_COMMAND_SIZE 16 // Length of a SCSI Command Data Block
477
478/* SCSI commands that we recognize */
479#define SC_FORMAT_UNIT 0x04
480#define SC_INQUIRY 0x12
481#define SC_MODE_SELECT_6 0x15
482#define SC_MODE_SELECT_10 0x55
483#define SC_MODE_SENSE_6 0x1a
484#define SC_MODE_SENSE_10 0x5a
485#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
486#define SC_READ_6 0x08
487#define SC_READ_10 0x28
488#define SC_READ_12 0xa8
489#define SC_READ_CAPACITY 0x25
490#define SC_READ_FORMAT_CAPACITIES 0x23
491#define SC_RELEASE 0x17
492#define SC_REQUEST_SENSE 0x03
493#define SC_RESERVE 0x16
494#define SC_SEND_DIAGNOSTIC 0x1d
495#define SC_START_STOP_UNIT 0x1b
496#define SC_SYNCHRONIZE_CACHE 0x35
497#define SC_TEST_UNIT_READY 0x00
498#define SC_VERIFY 0x2f
499#define SC_WRITE_6 0x0a
500#define SC_WRITE_10 0x2a
501#define SC_WRITE_12 0xaa
502
503/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
504#define SS_NO_SENSE 0
505#define SS_COMMUNICATION_FAILURE 0x040800
506#define SS_INVALID_COMMAND 0x052000
507#define SS_INVALID_FIELD_IN_CDB 0x052400
508#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
509#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
510#define SS_MEDIUM_NOT_PRESENT 0x023a00
511#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
512#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
513#define SS_RESET_OCCURRED 0x062900
514#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
515#define SS_UNRECOVERED_READ_ERROR 0x031100
516#define SS_WRITE_ERROR 0x030c02
517#define SS_WRITE_PROTECTED 0x072700
518
519#define SK(x) ((u8) ((x) >> 16)) // Sense Key byte, etc.
520#define ASC(x) ((u8) ((x) >> 8))
521#define ASCQ(x) ((u8) (x))
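/* A quick worked example of what these macros extract: for
 * SS_MEDIUM_NOT_PRESENT (0x023a00), SK() yields 0x02 (NOT READY),
 * ASC() yields 0x3a, and ASCQ() yields 0x00. */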
522
523
524/*-------------------------------------------------------------------------*/
525
526/*
527 * These definitions will permit the compiler to avoid generating code for
528 * parts of the driver that aren't used in the non-TEST version. Even gcc
529 * can recognize when a test of a constant expression yields a dead code
530 * path.
531 */
532
533#ifdef CONFIG_USB_FILE_STORAGE_TEST
534
535#define transport_is_bbb() (mod_data.transport_type == USB_PR_BULK)
536#define transport_is_cbi() (mod_data.transport_type == USB_PR_CBI)
537#define protocol_is_scsi() (mod_data.protocol_type == USB_SC_SCSI)
538
539#else
540
541#define transport_is_bbb() 1
542#define transport_is_cbi() 0
543#define protocol_is_scsi() 1
544
545#endif /* CONFIG_USB_FILE_STORAGE_TEST */
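/* For example, in a non-TEST build a construct like
 *
 *	if (transport_is_cbi())
 *		send_status_interrupt(fsg);	// hypothetical helper
 *
 * reduces to "if (0) ...", so the compiler drops the CBI-only path. */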
546
547
548struct lun {
549 struct file *filp;
550 loff_t file_length;
551 loff_t num_sectors;
552
553 unsigned int ro : 1;
554 unsigned int prevent_medium_removal : 1;
555 unsigned int registered : 1;
556
557 u32 sense_data;
558 u32 sense_data_info;
559 u32 unit_attention_data;
560
561 struct device dev;
562};
563
564#define backing_file_is_open(curlun) ((curlun)->filp != NULL)
565
566static inline struct lun *dev_to_lun(struct device *dev)
567{
568 return container_of(dev, struct lun, dev);
569}
570
571
572/* Big enough to hold our biggest descriptor */
573#define EP0_BUFSIZE 256
574#define DELAYED_STATUS (EP0_BUFSIZE + 999) // An impossibly large value
575
576/* Number of buffers we will use. 2 is enough for double-buffering */
577#define NUM_BUFFERS 2
578
579enum fsg_buffer_state {
580 BUF_STATE_EMPTY = 0,
581 BUF_STATE_FULL,
582 BUF_STATE_BUSY
583};
584
585struct fsg_buffhd {
586 void *buf;
587 dma_addr_t dma;
588 volatile enum fsg_buffer_state state;
589 struct fsg_buffhd *next;
590
591 /* The NetChip 2280 is faster, and handles some protocol faults
592 * better, if we don't submit any short bulk-out read requests.
593 * So we will record the intended request length here. */
594 unsigned int bulk_out_intended_length;
595
596 struct usb_request *inreq;
597 volatile int inreq_busy;
598 struct usb_request *outreq;
599 volatile int outreq_busy;
600};
601
602enum fsg_state {
603 FSG_STATE_COMMAND_PHASE = -10, // This one isn't used anywhere
604 FSG_STATE_DATA_PHASE,
605 FSG_STATE_STATUS_PHASE,
606
607 FSG_STATE_IDLE = 0,
608 FSG_STATE_ABORT_BULK_OUT,
609 FSG_STATE_RESET,
610 FSG_STATE_INTERFACE_CHANGE,
611 FSG_STATE_CONFIG_CHANGE,
612 FSG_STATE_DISCONNECT,
613 FSG_STATE_EXIT,
614 FSG_STATE_TERMINATED
615};
616
617enum data_direction {
618 DATA_DIR_UNKNOWN = 0,
619 DATA_DIR_FROM_HOST,
620 DATA_DIR_TO_HOST,
621 DATA_DIR_NONE
622};
623
624struct fsg_dev {
625 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
626 spinlock_t lock;
627 struct usb_gadget *gadget;
628
629 /* filesem protects: backing files in use */
630 struct rw_semaphore filesem;
631
632 struct usb_ep *ep0; // Handy copy of gadget->ep0
633 struct usb_request *ep0req; // For control responses
634 volatile unsigned int ep0_req_tag;
635 const char *ep0req_name;
636
637 struct usb_request *intreq; // For interrupt responses
638 volatile int intreq_busy;
639 struct fsg_buffhd *intr_buffhd;
640
641 unsigned int bulk_out_maxpacket;
642 enum fsg_state state; // For exception handling
643 unsigned int exception_req_tag;
644
645 u8 config, new_config;
646
647 unsigned int running : 1;
648 unsigned int bulk_in_enabled : 1;
649 unsigned int bulk_out_enabled : 1;
650 unsigned int intr_in_enabled : 1;
651 unsigned int phase_error : 1;
652 unsigned int short_packet_received : 1;
653 unsigned int bad_lun_okay : 1;
654
655 unsigned long atomic_bitflags;
656#define REGISTERED 0
657#define CLEAR_BULK_HALTS 1
658#define SUSPENDED 2
659
660 struct usb_ep *bulk_in;
661 struct usb_ep *bulk_out;
662 struct usb_ep *intr_in;
663
664 struct fsg_buffhd *next_buffhd_to_fill;
665 struct fsg_buffhd *next_buffhd_to_drain;
666 struct fsg_buffhd buffhds[NUM_BUFFERS];
667
668 wait_queue_head_t thread_wqh;
669 int thread_wakeup_needed;
670 struct completion thread_notifier;
671 int thread_pid;
672 struct task_struct *thread_task;
673 sigset_t thread_signal_mask;
674
675 int cmnd_size;
676 u8 cmnd[MAX_COMMAND_SIZE];
677 enum data_direction data_dir;
678 u32 data_size;
679 u32 data_size_from_cmnd;
680 u32 tag;
681 unsigned int lun;
682 u32 residue;
683 u32 usb_amount_left;
684
685 /* The CB protocol offers no way for a host to know when a command
686 * has completed. As a result the next command may arrive early,
687 * and we will still have to handle it. For that reason we need
688 * a buffer to store new commands when using CB (or CBI, which
689 * does not oblige a host to wait for command completion either). */
690 int cbbuf_cmnd_size;
691 u8 cbbuf_cmnd[MAX_COMMAND_SIZE];
692
693 unsigned int nluns;
694 struct lun *luns;
695 struct lun *curlun;
696 struct completion lun_released;
697};
698
699typedef void (*fsg_routine_t)(struct fsg_dev *);
700
701static int inline exception_in_progress(struct fsg_dev *fsg)
702{
703 return (fsg->state > FSG_STATE_IDLE);
704}
705
706/* Round bulk-out request lengths up to a multiple of the maxpacket size */
707static void inline set_bulk_out_req_length(struct fsg_dev *fsg,
708 struct fsg_buffhd *bh, unsigned int length)
709{
710 unsigned int rem;
711
712 bh->bulk_out_intended_length = length;
713 rem = length % fsg->bulk_out_maxpacket;
714 if (rem > 0)
715 length += fsg->bulk_out_maxpacket - rem;
716 bh->outreq->length = length;
717}
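/* For instance, with a 512-byte bulk-out maxpacket a 1000-byte request is
 * padded to 1024 bytes, while bulk_out_intended_length stays 1000 so the
 * completion handler can tell how much data was actually wanted. */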
718
719static struct fsg_dev *the_fsg;
720static struct usb_gadget_driver fsg_driver;
721
722static void close_backing_file(struct lun *curlun);
723static void close_all_backing_files(struct fsg_dev *fsg);
724
725
726/*-------------------------------------------------------------------------*/
727
728#ifdef DUMP_MSGS
729
730static void dump_msg(struct fsg_dev *fsg, const char *label,
731 const u8 *buf, unsigned int length)
732{
733 unsigned int start, num, i;
734 char line[52], *p;
735
736 if (length >= 512)
737 return;
738 DBG(fsg, "%s, length %u:\n", label, length);
739
740 start = 0;
741 while (length > 0) {
742 num = min(length, 16u);
743 p = line;
744 for (i = 0; i < num; ++i) {
745 if (i == 8)
746 *p++ = ' ';
747 sprintf(p, " %02x", buf[i]);
748 p += 3;
749 }
750 *p = 0;
751 printk(KERN_DEBUG "%6x: %s\n", start, line);
752 buf += num;
753 start += num;
754 length -= num;
755 }
756}
757
758static void inline dump_cdb(struct fsg_dev *fsg)
759{}
760
761#else
762
763static void inline dump_msg(struct fsg_dev *fsg, const char *label,
764 const u8 *buf, unsigned int length)
765{}
766
767static void inline dump_cdb(struct fsg_dev *fsg)
768{
769 int i;
770 char cmdbuf[3*MAX_COMMAND_SIZE + 1];
771
772 for (i = 0; i < fsg->cmnd_size; ++i)
773 sprintf(cmdbuf + i*3, " %02x", fsg->cmnd[i]);
774 VDBG(fsg, "SCSI CDB: %s\n", cmdbuf);
775}
776
777#endif /* DUMP_MSGS */
778
779
780static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
781{
782 const char *name;
783
784 if (ep == fsg->bulk_in)
785 name = "bulk-in";
786 else if (ep == fsg->bulk_out)
787 name = "bulk-out";
788 else
789 name = ep->name;
790 DBG(fsg, "%s set halt\n", name);
791 return usb_ep_set_halt(ep);
792}
793
794
795/*-------------------------------------------------------------------------*/
796
797/* Routines for unaligned data access */
798
799static u16 inline get_be16(u8 *buf)
800{
801 return ((u16) buf[0] << 8) | ((u16) buf[1]);
802}
803
804static u32 inline get_be32(u8 *buf)
805{
806 return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
807 ((u32) buf[2] << 8) | ((u32) buf[3]);
808}
809
810static void inline put_be16(u8 *buf, u16 val)
811{
812 buf[0] = val >> 8;
813 buf[1] = val;
814}
815
816static void inline put_be32(u8 *buf, u32 val)
817{
818 buf[0] = val >> 24;
819 buf[1] = val >> 16;
820 buf[2] = val >> 8;
821 buf[3] = val;
822}
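/* Example: put_be32(buf, 0x12345678) stores 0x12 0x34 0x56 0x78 in
 * buf[0..3], and get_be32() on those bytes returns 0x12345678. */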
823
824
825/*-------------------------------------------------------------------------*/
826
827/*
828 * DESCRIPTORS ... most are static, but strings and (full) configuration
829 * descriptors are built on demand. Also the (static) config and interface
830 * descriptors are adjusted during fsg_bind().
831 */
832#define STRING_MANUFACTURER 1
833#define STRING_PRODUCT 2
834#define STRING_SERIAL 3
835#define STRING_CONFIG 4
836#define STRING_INTERFACE 5
837
838/* There is only one configuration. */
839#define CONFIG_VALUE 1
840
841static struct usb_device_descriptor
842device_desc = {
843 .bLength = sizeof device_desc,
844 .bDescriptorType = USB_DT_DEVICE,
845
846 .bcdUSB = __constant_cpu_to_le16(0x0200),
847 .bDeviceClass = USB_CLASS_PER_INTERFACE,
848
849 /* The next three values can be overridden by module parameters */
850 .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID),
851 .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID),
852 .bcdDevice = __constant_cpu_to_le16(0xffff),
853
854 .iManufacturer = STRING_MANUFACTURER,
855 .iProduct = STRING_PRODUCT,
856 .iSerialNumber = STRING_SERIAL,
857 .bNumConfigurations = 1,
858};
859
860static struct usb_config_descriptor
861config_desc = {
862 .bLength = sizeof config_desc,
863 .bDescriptorType = USB_DT_CONFIG,
864
865 /* wTotalLength computed by usb_gadget_config_buf() */
866 .bNumInterfaces = 1,
867 .bConfigurationValue = CONFIG_VALUE,
868 .iConfiguration = STRING_CONFIG,
869 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
870 .bMaxPower = 1, // self-powered
871};
872
873static struct usb_otg_descriptor
874otg_desc = {
875 .bLength = sizeof(otg_desc),
876 .bDescriptorType = USB_DT_OTG,
877
878 .bmAttributes = USB_OTG_SRP,
879};
880
881/* There is only one interface. */
882
883static struct usb_interface_descriptor
884intf_desc = {
885 .bLength = sizeof intf_desc,
886 .bDescriptorType = USB_DT_INTERFACE,
887
888 .bNumEndpoints = 2, // Adjusted during fsg_bind()
889 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
890 .bInterfaceSubClass = USB_SC_SCSI, // Adjusted during fsg_bind()
891 .bInterfaceProtocol = USB_PR_BULK, // Adjusted during fsg_bind()
892 .iInterface = STRING_INTERFACE,
893};
894
895/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
896 * and interrupt-in. */
897
898static struct usb_endpoint_descriptor
899fs_bulk_in_desc = {
900 .bLength = USB_DT_ENDPOINT_SIZE,
901 .bDescriptorType = USB_DT_ENDPOINT,
902
903 .bEndpointAddress = USB_DIR_IN,
904 .bmAttributes = USB_ENDPOINT_XFER_BULK,
905 /* wMaxPacketSize set by autoconfiguration */
906};
907
908static struct usb_endpoint_descriptor
909fs_bulk_out_desc = {
910 .bLength = USB_DT_ENDPOINT_SIZE,
911 .bDescriptorType = USB_DT_ENDPOINT,
912
913 .bEndpointAddress = USB_DIR_OUT,
914 .bmAttributes = USB_ENDPOINT_XFER_BULK,
915 /* wMaxPacketSize set by autoconfiguration */
916};
917
918static struct usb_endpoint_descriptor
919fs_intr_in_desc = {
920 .bLength = USB_DT_ENDPOINT_SIZE,
921 .bDescriptorType = USB_DT_ENDPOINT,
922
923 .bEndpointAddress = USB_DIR_IN,
924 .bmAttributes = USB_ENDPOINT_XFER_INT,
925 .wMaxPacketSize = __constant_cpu_to_le16(2),
926 .bInterval = 32, // frames -> 32 ms
927};
928
929static const struct usb_descriptor_header *fs_function[] = {
930 (struct usb_descriptor_header *) &otg_desc,
931 (struct usb_descriptor_header *) &intf_desc,
932 (struct usb_descriptor_header *) &fs_bulk_in_desc,
933 (struct usb_descriptor_header *) &fs_bulk_out_desc,
934 (struct usb_descriptor_header *) &fs_intr_in_desc,
935 NULL,
936};
937#define FS_FUNCTION_PRE_EP_ENTRIES 2
938
939
940#ifdef CONFIG_USB_GADGET_DUALSPEED
941
942/*
943 * USB 2.0 devices need to expose both high speed and full speed
944 * descriptors, unless they only run at full speed.
945 *
946 * That means alternate endpoint descriptors (bigger packets)
947 * and a "device qualifier" ... plus more construction options
948 * for the config descriptor.
949 */
950static struct usb_qualifier_descriptor
951dev_qualifier = {
952 .bLength = sizeof dev_qualifier,
953 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
954
955 .bcdUSB = __constant_cpu_to_le16(0x0200),
956 .bDeviceClass = USB_CLASS_PER_INTERFACE,
957
958 .bNumConfigurations = 1,
959};
960
961static struct usb_endpoint_descriptor
962hs_bulk_in_desc = {
963 .bLength = USB_DT_ENDPOINT_SIZE,
964 .bDescriptorType = USB_DT_ENDPOINT,
965
966 /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
967 .bmAttributes = USB_ENDPOINT_XFER_BULK,
968 .wMaxPacketSize = __constant_cpu_to_le16(512),
969};
970
971static struct usb_endpoint_descriptor
972hs_bulk_out_desc = {
973 .bLength = USB_DT_ENDPOINT_SIZE,
974 .bDescriptorType = USB_DT_ENDPOINT,
975
976 /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
977 .bmAttributes = USB_ENDPOINT_XFER_BULK,
978 .wMaxPacketSize = __constant_cpu_to_le16(512),
979 .bInterval = 1, // NAK every 1 uframe
980};
981
982static struct usb_endpoint_descriptor
983hs_intr_in_desc = {
984 .bLength = USB_DT_ENDPOINT_SIZE,
985 .bDescriptorType = USB_DT_ENDPOINT,
986
987 /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
988 .bmAttributes = USB_ENDPOINT_XFER_INT,
989 .wMaxPacketSize = __constant_cpu_to_le16(2),
990 .bInterval = 9, // 2**(9-1) = 256 uframes -> 32 ms
991};
992
993static const struct usb_descriptor_header *hs_function[] = {
994 (struct usb_descriptor_header *) &otg_desc,
995 (struct usb_descriptor_header *) &intf_desc,
996 (struct usb_descriptor_header *) &hs_bulk_in_desc,
997 (struct usb_descriptor_header *) &hs_bulk_out_desc,
998 (struct usb_descriptor_header *) &hs_intr_in_desc,
999 NULL,
1000};
1001#define HS_FUNCTION_PRE_EP_ENTRIES 2
1002
1003/* Maxpacket and other transfer characteristics vary by speed. */
1004#define ep_desc(g,fs,hs) (((g)->speed==USB_SPEED_HIGH) ? (hs) : (fs))
1005
1006#else
1007
1008/* If there's no high speed support, always use the full-speed descriptor. */
1009#define ep_desc(g,fs,hs) fs
1010
1011#endif /* !CONFIG_USB_GADGET_DUALSPEED */
1012
1013
1014/* The CBI specification limits the serial string to 12 uppercase hexadecimal
1015 * characters. */
1016static char manufacturer[64];
1017static char serial[13];
1018
1019/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
1020static struct usb_string strings[] = {
1021 {STRING_MANUFACTURER, manufacturer},
1022 {STRING_PRODUCT, longname},
1023 {STRING_SERIAL, serial},
1024 {STRING_CONFIG, "Self-powered"},
1025 {STRING_INTERFACE, "Mass Storage"},
1026 {}
1027};
1028
1029static struct usb_gadget_strings stringtab = {
1030 .language = 0x0409, // en-us
1031 .strings = strings,
1032};
1033
1034
1035/*
1036 * Config descriptors must agree with the code that sets configurations
1037 * and with code managing interfaces and their altsettings. They must
1038 * also handle different speeds and other-speed requests.
1039 */
1040static int populate_config_buf(struct usb_gadget *gadget,
1041 u8 *buf, u8 type, unsigned index)
1042{
1043#ifdef CONFIG_USB_GADGET_DUALSPEED
1044 enum usb_device_speed speed = gadget->speed;
1045#endif
1046 int len;
1047 const struct usb_descriptor_header **function;
1048
1049 if (index > 0)
1050 return -EINVAL;
1051
1052#ifdef CONFIG_USB_GADGET_DUALSPEED
1053 if (type == USB_DT_OTHER_SPEED_CONFIG)
1054 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
1055 if (speed == USB_SPEED_HIGH)
1056 function = hs_function;
1057 else
1058#endif
1059 function = fs_function;
1060
1061 /* for now, don't advertise srp-only devices */
1062 if (!gadget->is_otg)
1063 function++;
1064
1065 len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
1066 ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
1067 return len;
1068}
1069
1070
1071/*-------------------------------------------------------------------------*/
1072
1073/* These routines may be called in process context or in_irq */
1074
1075static void wakeup_thread(struct fsg_dev *fsg)
1076{
1077 /* Tell the main thread that something has happened */
1078 fsg->thread_wakeup_needed = 1;
1079 wake_up_all(&fsg->thread_wqh);
1080}
1081
1082
1083static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
1084{
1085 unsigned long flags;
1086 struct task_struct *thread_task;
1087
1088 /* Do nothing if a higher-priority exception is already in progress.
1089 * If a lower-or-equal priority exception is in progress, preempt it
1090 * and notify the main thread by sending it a signal. */
1091 spin_lock_irqsave(&fsg->lock, flags);
1092 if (fsg->state <= new_state) {
1093 fsg->exception_req_tag = fsg->ep0_req_tag;
1094 fsg->state = new_state;
1095 thread_task = fsg->thread_task;
1096 if (thread_task)
1097 send_sig_info(SIGUSR1, SEND_SIG_FORCED, thread_task);
1098 }
1099 spin_unlock_irqrestore(&fsg->lock, flags);
1100}
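/* To illustrate the priority rule: if fsg->state is already
 * FSG_STATE_DISCONNECT, a later raise_exception(fsg, FSG_STATE_RESET)
 * does nothing, because FSG_STATE_RESET has the lower value in
 * enum fsg_state and hence the lower priority. */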
1101
1102
1103/*-------------------------------------------------------------------------*/
1104
1105/* The disconnect callback and ep0 routines. These always run in_irq,
1106 * except that ep0_queue() is called in the main thread to acknowledge
1107 * completion of various requests: set config, set interface, and
1108 * Bulk-only device reset. */
1109
1110static void fsg_disconnect(struct usb_gadget *gadget)
1111{
1112 struct fsg_dev *fsg = get_gadget_data(gadget);
1113
1114 DBG(fsg, "disconnect or port reset\n");
1115 raise_exception(fsg, FSG_STATE_DISCONNECT);
1116}
1117
1118
1119static int ep0_queue(struct fsg_dev *fsg)
1120{
1121 int rc;
1122
1123 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
1124 if (rc != 0 && rc != -ESHUTDOWN) {
1125
1126 /* We can't do much more than wait for a reset */
1127 WARN(fsg, "error in submission: %s --> %d\n",
1128 fsg->ep0->name, rc);
1129 }
1130 return rc;
1131}
1132
1133static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
1134{
1135 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
1136
1137 if (req->actual > 0)
1138 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
1139 if (req->status || req->actual != req->length)
1140 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1141 req->status, req->actual, req->length);
1142 if (req->status == -ECONNRESET) // Request was cancelled
1143 usb_ep_fifo_flush(ep);
1144
1145 if (req->status == 0 && req->context)
1146 ((fsg_routine_t) (req->context))(fsg);
1147}
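/* Note that req->context doubles as a deferred-work hook: for example,
 * class_setup_req() stores received_cbi_adsc there, so that handler runs
 * here only after the control-OUT data stage completes successfully. */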
1148
1149
1150/*-------------------------------------------------------------------------*/
1151
1152/* Bulk and interrupt endpoint completion handlers.
1153 * These always run in_irq. */
1154
1155static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
1156{
1157 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
1158 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context;
1159
1160 if (req->status || req->actual != req->length)
1161 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1162 req->status, req->actual, req->length);
1163 if (req->status == -ECONNRESET) // Request was cancelled
1164 usb_ep_fifo_flush(ep);
1165
1166 /* Hold the lock while we update the request and buffer states */
1167 spin_lock(&fsg->lock);
1168 bh->inreq_busy = 0;
1169 bh->state = BUF_STATE_EMPTY;
1170 spin_unlock(&fsg->lock);
1171 wakeup_thread(fsg);
1172}
1173
1174static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
1175{
1176 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
1177 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context;
1178
1179 dump_msg(fsg, "bulk-out", req->buf, req->actual);
1180 if (req->status || req->actual != bh->bulk_out_intended_length)
1181 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1182 req->status, req->actual,
1183 bh->bulk_out_intended_length);
1184 if (req->status == -ECONNRESET) // Request was cancelled
1185 usb_ep_fifo_flush(ep);
1186
1187 /* Hold the lock while we update the request and buffer states */
1188 spin_lock(&fsg->lock);
1189 bh->outreq_busy = 0;
1190 bh->state = BUF_STATE_FULL;
1191 spin_unlock(&fsg->lock);
1192 wakeup_thread(fsg);
1193}
1194
1195
1196#ifdef CONFIG_USB_FILE_STORAGE_TEST
1197static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
1198{
1199 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
1200 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context;
1201
1202 if (req->status || req->actual != req->length)
1203 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1204 req->status, req->actual, req->length);
1205 if (req->status == -ECONNRESET) // Request was cancelled
1206 usb_ep_fifo_flush(ep);
1207
1208 /* Hold the lock while we update the request and buffer states */
1209 spin_lock(&fsg->lock);
1210 fsg->intreq_busy = 0;
1211 bh->state = BUF_STATE_EMPTY;
1212 spin_unlock(&fsg->lock);
1213 wakeup_thread(fsg);
1214}
1215
1216#else
1217static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
1218{}
1219#endif /* CONFIG_USB_FILE_STORAGE_TEST */
1220
1221
1222/*-------------------------------------------------------------------------*/
1223
1224/* Ep0 class-specific handlers. These always run in_irq. */
1225
1226#ifdef CONFIG_USB_FILE_STORAGE_TEST
1227static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1228{
1229 struct usb_request *req = fsg->ep0req;
1230 static u8 cbi_reset_cmnd[6] = {
1231 SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
1232
1233 /* Error in command transfer? */
1234 if (req->status || req->length != req->actual ||
1235 req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {
1236
1237 /* Not all controllers allow a protocol stall after
1238 * receiving control-out data, but we'll try anyway. */
1239 fsg_set_halt(fsg, fsg->ep0);
1240 return; // Wait for reset
1241 }
1242
1243 /* Is it the special reset command? */
1244 if (req->actual >= sizeof cbi_reset_cmnd &&
1245 memcmp(req->buf, cbi_reset_cmnd,
1246 sizeof cbi_reset_cmnd) == 0) {
1247
1248 /* Raise an exception to stop the current operation
1249 * and reinitialize our state. */
1250 DBG(fsg, "cbi reset request\n");
1251 raise_exception(fsg, FSG_STATE_RESET);
1252 return;
1253 }
1254
1255 VDBG(fsg, "CB[I] accept device-specific command\n");
1256 spin_lock(&fsg->lock);
1257
1258 /* Save the command for later */
1259 if (fsg->cbbuf_cmnd_size)
1260 WARN(fsg, "CB[I] overwriting previous command\n");
1261 fsg->cbbuf_cmnd_size = req->actual;
1262 memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
1263
1264 spin_unlock(&fsg->lock);
1265 wakeup_thread(fsg);
1266}
1267
1268#else
1269static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1270{}
1271#endif /* CONFIG_USB_FILE_STORAGE_TEST */
1272
1273
1274static int class_setup_req(struct fsg_dev *fsg,
1275 const struct usb_ctrlrequest *ctrl)
1276{
1277 struct usb_request *req = fsg->ep0req;
1278 int value = -EOPNOTSUPP;
1279 u16 w_index = ctrl->wIndex;
1280 u16 w_length = ctrl->wLength;
1281
1282 if (!fsg->config)
1283 return value;
1284
1285 /* Handle Bulk-only class-specific requests */
1286 if (transport_is_bbb()) {
1287 switch (ctrl->bRequest) {
1288
1289 case USB_BULK_RESET_REQUEST:
1290 if (ctrl->bRequestType != (USB_DIR_OUT |
1291 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
1292 break;
1293 if (w_index != 0) {
1294 value = -EDOM;
1295 break;
1296 }
1297
1298 /* Raise an exception to stop the current operation
1299 * and reinitialize our state. */
1300 DBG(fsg, "bulk reset request\n");
1301 raise_exception(fsg, FSG_STATE_RESET);
1302 value = DELAYED_STATUS;
1303 break;
1304
1305 case USB_BULK_GET_MAX_LUN_REQUEST:
1306 if (ctrl->bRequestType != (USB_DIR_IN |
1307 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
1308 break;
1309 if (w_index != 0) {
1310 value = -EDOM;
1311 break;
1312 }
1313 VDBG(fsg, "get max LUN\n");
1314 *(u8 *) req->buf = fsg->nluns - 1;
1315 value = min(w_length, (u16) 1);
1316 break;
1317 }
1318 }
1319
1320 /* Handle CBI class-specific requests */
1321 else {
1322 switch (ctrl->bRequest) {
1323
1324 case USB_CBI_ADSC_REQUEST:
1325 if (ctrl->bRequestType != (USB_DIR_OUT |
1326 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
1327 break;
1328 if (w_index != 0) {
1329 value = -EDOM;
1330 break;
1331 }
1332 if (w_length > MAX_COMMAND_SIZE) {
1333 value = -EOVERFLOW;
1334 break;
1335 }
1336 value = w_length;
1337 fsg->ep0req->context = received_cbi_adsc;
1338 break;
1339 }
1340 }
1341
1342 if (value == -EOPNOTSUPP)
1343 VDBG(fsg,
1344 "unknown class-specific control req "
1345 "%02x.%02x v%04x i%04x l%u\n",
1346 ctrl->bRequestType, ctrl->bRequest,
1347 ctrl->wValue, w_index, w_length);
1348 return value;
1349}
1350
1351
1352/*-------------------------------------------------------------------------*/
1353
1354/* Ep0 standard request handlers. These always run in_irq. */
1355
1356static int standard_setup_req(struct fsg_dev *fsg,
1357 const struct usb_ctrlrequest *ctrl)
1358{
1359 struct usb_request *req = fsg->ep0req;
1360 int value = -EOPNOTSUPP;
1361 u16 w_index = ctrl->wIndex;
1362 u16 w_value = ctrl->wValue;
1363 u16 w_length = ctrl->wLength;
1364
1365 /* Usually this just stores reply data in the pre-allocated ep0 buffer,
1366 * but config change events will also reconfigure hardware. */
1367 switch (ctrl->bRequest) {
1368
1369 case USB_REQ_GET_DESCRIPTOR:
1370 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
1371 USB_RECIP_DEVICE))
1372 break;
1373 switch (w_value >> 8) {
1374
1375 case USB_DT_DEVICE:
1376 VDBG(fsg, "get device descriptor\n");
1377 value = min(w_length, (u16) sizeof device_desc);
1378 memcpy(req->buf, &device_desc, value);
1379 break;
1380#ifdef CONFIG_USB_GADGET_DUALSPEED
1381 case USB_DT_DEVICE_QUALIFIER:
1382 VDBG(fsg, "get device qualifier\n");
1383 if (!fsg->gadget->is_dualspeed)
1384 break;
1385 value = min(w_length, (u16) sizeof dev_qualifier);
1386 memcpy(req->buf, &dev_qualifier, value);
1387 break;
1388
1389 case USB_DT_OTHER_SPEED_CONFIG:
1390 VDBG(fsg, "get other-speed config descriptor\n");
1391 if (!fsg->gadget->is_dualspeed)
1392 break;
1393 goto get_config;
1394#endif
1395 case USB_DT_CONFIG:
1396 VDBG(fsg, "get configuration descriptor\n");
1397#ifdef CONFIG_USB_GADGET_DUALSPEED
1398 get_config:
1399#endif
1400 value = populate_config_buf(fsg->gadget,
1401 req->buf,
1402 w_value >> 8,
1403 w_value & 0xff);
1404 if (value >= 0)
1405 value = min(w_length, (u16) value);
1406 break;
1407
1408 case USB_DT_STRING:
1409 VDBG(fsg, "get string descriptor\n");
1410
1411 /* wIndex == language code */
1412 value = usb_gadget_get_string(&stringtab,
1413 w_value & 0xff, req->buf);
1414 if (value >= 0)
1415 value = min(w_length, (u16) value);
1416 break;
1417 }
1418 break;
1419
1420 /* One config, two speeds */
1421 case USB_REQ_SET_CONFIGURATION:
1422 if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
1423 USB_RECIP_DEVICE))
1424 break;
1425 VDBG(fsg, "set configuration\n");
1426 if (w_value == CONFIG_VALUE || w_value == 0) {
1427 fsg->new_config = w_value;
1428
1429 /* Raise an exception to wipe out previous transaction
1430 * state (queued bufs, etc) and set the new config. */
1431 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
1432 value = DELAYED_STATUS;
1433 }
1434 break;
1435 case USB_REQ_GET_CONFIGURATION:
1436 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
1437 USB_RECIP_DEVICE))
1438 break;
1439 VDBG(fsg, "get configuration\n");
1440 *(u8 *) req->buf = fsg->config;
1441 value = min(w_length, (u16) 1);
1442 break;
1443
1444 case USB_REQ_SET_INTERFACE:
1445		if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
1446 USB_RECIP_INTERFACE))
1447 break;
1448 if (fsg->config && w_index == 0) {
1449
1450 /* Raise an exception to wipe out previous transaction
1451 * state (queued bufs, etc) and install the new
1452 * interface altsetting. */
1453 raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
1454 value = DELAYED_STATUS;
1455 }
1456 break;
1457 case USB_REQ_GET_INTERFACE:
1458 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
1459 USB_RECIP_INTERFACE))
1460 break;
1461 if (!fsg->config)
1462 break;
1463 if (w_index != 0) {
1464 value = -EDOM;
1465 break;
1466 }
1467 VDBG(fsg, "get interface\n");
1468 *(u8 *) req->buf = 0;
1469 value = min(w_length, (u16) 1);
1470 break;
1471
1472 default:
1473 VDBG(fsg,
1474 "unknown control req %02x.%02x v%04x i%04x l%u\n",
1475 ctrl->bRequestType, ctrl->bRequest,
1476 w_value, w_index, w_length);
1477 }
1478
1479 return value;
1480}
1481
1482
1483static int fsg_setup(struct usb_gadget *gadget,
1484 const struct usb_ctrlrequest *ctrl)
1485{
1486 struct fsg_dev *fsg = get_gadget_data(gadget);
1487 int rc;
1488
1489 ++fsg->ep0_req_tag; // Record arrival of a new request
1490 fsg->ep0req->context = NULL;
1491 fsg->ep0req->length = 0;
1492 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
1493
1494 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
1495 rc = class_setup_req(fsg, ctrl);
1496 else
1497 rc = standard_setup_req(fsg, ctrl);
1498
1499 /* Respond with data/status or defer until later? */
1500 if (rc >= 0 && rc != DELAYED_STATUS) {
1501 fsg->ep0req->length = rc;
1502 fsg->ep0req->zero = (rc < ctrl->wLength &&
1503 (rc % gadget->ep0->maxpacket) == 0);
1504 fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
1505 "ep0-in" : "ep0-out");
1506 rc = ep0_queue(fsg);
1507 }
1508
1509 /* Device either stalls (rc < 0) or reports success */
1510 return rc;
1511}
1512
1513
1514/*-------------------------------------------------------------------------*/
1515
1516/* All the following routines run in process context */
1517
1518
1519/* Use this for bulk or interrupt transfers, not ep0 */
1520static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
1521 struct usb_request *req, volatile int *pbusy,
1522 volatile enum fsg_buffer_state *state)
1523{
1524 int rc;
1525
1526 if (ep == fsg->bulk_in)
1527 dump_msg(fsg, "bulk-in", req->buf, req->length);
1528 else if (ep == fsg->intr_in)
1529 dump_msg(fsg, "intr-in", req->buf, req->length);
1530 *pbusy = 1;
1531 *state = BUF_STATE_BUSY;
1532 rc = usb_ep_queue(ep, req, GFP_KERNEL);
1533 if (rc != 0) {
1534 *pbusy = 0;
1535 *state = BUF_STATE_EMPTY;
1536
1537 /* We can't do much more than wait for a reset */
1538
1539 /* Note: currently the net2280 driver fails zero-length
1540 * submissions if DMA is enabled. */
1541 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
1542 req->length == 0))
1543 WARN(fsg, "error in submission: %s --> %d\n",
1544 ep->name, rc);
1545 }
1546}
1547
1548
1549static int sleep_thread(struct fsg_dev *fsg)
1550{
1551 int rc;
1552
1553 /* Wait until a signal arrives or we are woken up */
1554 rc = wait_event_interruptible(fsg->thread_wqh,
1555 fsg->thread_wakeup_needed);
1556 fsg->thread_wakeup_needed = 0;
1557 if (current->flags & PF_FREEZE)
1558 refrigerator(PF_FREEZE);
1559 return (rc ? -EINTR : 0);
1560}
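/* Typical usage, as in do_read() below: the thread loops on a condition,
 *
 *	while (bh->state != BUF_STATE_EMPTY)
 *		if ((rc = sleep_thread(fsg)) != 0)
 *			return rc;
 *
 * and a completion handler calls wakeup_thread() when the state changes. */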
1561
1562
1563/*-------------------------------------------------------------------------*/
1564
1565static int do_read(struct fsg_dev *fsg)
1566{
1567 struct lun *curlun = fsg->curlun;
1568 u32 lba;
1569 struct fsg_buffhd *bh;
1570 int rc;
1571 u32 amount_left;
1572 loff_t file_offset, file_offset_tmp;
1573 unsigned int amount;
1574 unsigned int partial_page;
1575 ssize_t nread;
1576
1577 /* Get the starting Logical Block Address and check that it's
1578 * not too big */
1579 if (fsg->cmnd[0] == SC_READ_6)
1580 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
1581 else {
1582 lba = get_be32(&fsg->cmnd[2]);
1583
1584 /* We allow DPO (Disable Page Out = don't save data in the
1585 * cache) and FUA (Force Unit Access = don't read from the
1586 * cache), but we don't implement them. */
1587 if ((fsg->cmnd[1] & ~0x18) != 0) {
1588 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1589 return -EINVAL;
1590 }
1591 }
1592 if (lba >= curlun->num_sectors) {
1593 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1594 return -EINVAL;
1595 }
1596 file_offset = ((loff_t) lba) << 9;
1597
1598 /* Carry out the file reads */
1599 amount_left = fsg->data_size_from_cmnd;
1600 if (unlikely(amount_left == 0))
1601 return -EIO; // No default reply
1602
1603 for (;;) {
1604
1605 /* Figure out how much we need to read:
1606 * Try to read the remaining amount.
1607 * But don't read more than the buffer size.
1608 * And don't try to read past the end of the file.
1609 * Finally, if we're not at a page boundary, don't read past
1610 * the next page.
1611 * If this means reading 0 then we were asked to read past
1612 * the end of file. */
1613 amount = min((unsigned int) amount_left, mod_data.buflen);
1614 amount = min((loff_t) amount,
1615 curlun->file_length - file_offset);
1616 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
1617 if (partial_page > 0)
1618 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
1619 partial_page);
1620
1621 /* Wait for the next buffer to become available */
1622 bh = fsg->next_buffhd_to_fill;
1623 while (bh->state != BUF_STATE_EMPTY) {
1624 if ((rc = sleep_thread(fsg)) != 0)
1625 return rc;
1626 }
1627
1628 /* If we were asked to read past the end of file,
1629 * end with an empty buffer. */
1630 if (amount == 0) {
1631 curlun->sense_data =
1632 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1633 curlun->sense_data_info = file_offset >> 9;
1634 bh->inreq->length = 0;
1635 bh->state = BUF_STATE_FULL;
1636 break;
1637 }
1638
1639 /* Perform the read */
1640 file_offset_tmp = file_offset;
1641 nread = vfs_read(curlun->filp,
1642 (char __user *) bh->buf,
1643 amount, &file_offset_tmp);
1644 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1645 (unsigned long long) file_offset,
1646 (int) nread);
1647 if (signal_pending(current))
1648 return -EINTR;
1649
1650 if (nread < 0) {
1651 LDBG(curlun, "error in file read: %d\n",
1652 (int) nread);
1653 nread = 0;
1654 } else if (nread < amount) {
1655 LDBG(curlun, "partial file read: %d/%u\n",
1656 (int) nread, amount);
1657 nread -= (nread & 511); // Round down to a block
1658 }
1659 file_offset += nread;
1660 amount_left -= nread;
1661 fsg->residue -= nread;
1662 bh->inreq->length = nread;
1663 bh->state = BUF_STATE_FULL;
1664
1665 /* If an error occurred, report it and its position */
1666 if (nread < amount) {
1667 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1668 curlun->sense_data_info = file_offset >> 9;
1669 break;
1670 }
1671
1672 if (amount_left == 0)
1673 break; // No more left to read
1674
1675 /* Send this buffer and go read some more */
1676 bh->inreq->zero = 0;
1677 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1678 &bh->inreq_busy, &bh->state);
1679 fsg->next_buffhd_to_fill = bh->next;
1680 }
1681
1682 return -EIO; // No default reply
1683}
1684
1685
1686/*-------------------------------------------------------------------------*/
1687
1688static int do_write(struct fsg_dev *fsg)
1689{
1690 struct lun *curlun = fsg->curlun;
1691 u32 lba;
1692 struct fsg_buffhd *bh;
1693 int get_some_more;
1694 u32 amount_left_to_req, amount_left_to_write;
1695 loff_t usb_offset, file_offset, file_offset_tmp;
1696 unsigned int amount;
1697 unsigned int partial_page;
1698 ssize_t nwritten;
1699 int rc;
1700
1701 if (curlun->ro) {
1702 curlun->sense_data = SS_WRITE_PROTECTED;
1703 return -EINVAL;
1704 }
1705 curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
1706
1707 /* Get the starting Logical Block Address and check that it's
1708 * not too big */
1709 if (fsg->cmnd[0] == SC_WRITE_6)
1710 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
1711 else {
1712 lba = get_be32(&fsg->cmnd[2]);
1713
1714 /* We allow DPO (Disable Page Out = don't save data in the
1715 * cache) and FUA (Force Unit Access = write directly to the
1716 * medium). We don't implement DPO; we implement FUA by
1717 * performing synchronous output. */
1718 if ((fsg->cmnd[1] & ~0x18) != 0) {
1719 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1720 return -EINVAL;
1721 }
1722 if (fsg->cmnd[1] & 0x08) // FUA
1723 curlun->filp->f_flags |= O_SYNC;
1724 }
1725 if (lba >= curlun->num_sectors) {
1726 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1727 return -EINVAL;
1728 }
1729
1730 /* Carry out the file writes */
1731 get_some_more = 1;
1732 file_offset = usb_offset = ((loff_t) lba) << 9;
1733 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
1734
1735 while (amount_left_to_write > 0) {
1736
1737 /* Queue a request for more data from the host */
1738 bh = fsg->next_buffhd_to_fill;
1739 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
1740
1741 /* Figure out how much we want to get:
1742 * Try to get the remaining amount.
1743 * But don't get more than the buffer size.
1744 * And don't try to go past the end of the file.
1745 * If we're not at a page boundary,
1746 * don't go past the next page.
1747 * If this means getting 0, then we were asked
1748 * to write past the end of file.
1749 * Finally, round down to a block boundary. */
1750 amount = min(amount_left_to_req, mod_data.buflen);
1751 amount = min((loff_t) amount, curlun->file_length -
1752 usb_offset);
1753 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
1754 if (partial_page > 0)
1755 amount = min(amount,
1756 (unsigned int) PAGE_CACHE_SIZE - partial_page);
1757
1758 if (amount == 0) {
1759 get_some_more = 0;
1760 curlun->sense_data =
1761 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1762 curlun->sense_data_info = usb_offset >> 9;
1763 continue;
1764 }
1765 amount -= (amount & 511);
1766 if (amount == 0) {
1767
1768				/* Why were we asked to transfer a
1769 * partial block? */
1770 get_some_more = 0;
1771 continue;
1772 }
1773
1774 /* Get the next buffer */
1775 usb_offset += amount;
1776 fsg->usb_amount_left -= amount;
1777 amount_left_to_req -= amount;
1778 if (amount_left_to_req == 0)
1779 get_some_more = 0;
1780
1781 /* amount is always divisible by 512, hence by
1782 * the bulk-out maxpacket size */
1783 bh->outreq->length = bh->bulk_out_intended_length =
1784 amount;
1785 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1786 &bh->outreq_busy, &bh->state);
1787 fsg->next_buffhd_to_fill = bh->next;
1788 continue;
1789 }
1790
1791 /* Write the received data to the backing file */
1792 bh = fsg->next_buffhd_to_drain;
1793 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
1794 break; // We stopped early
1795 if (bh->state == BUF_STATE_FULL) {
1796 fsg->next_buffhd_to_drain = bh->next;
1797 bh->state = BUF_STATE_EMPTY;
1798
1799 /* Did something go wrong with the transfer? */
1800 if (bh->outreq->status != 0) {
1801 curlun->sense_data = SS_COMMUNICATION_FAILURE;
1802 curlun->sense_data_info = file_offset >> 9;
1803 break;
1804 }
1805
1806 amount = bh->outreq->actual;
1807 if (curlun->file_length - file_offset < amount) {
1808 LERROR(curlun,
1809 "write %u @ %llu beyond end %llu\n",
1810 amount, (unsigned long long) file_offset,
1811 (unsigned long long) curlun->file_length);
1812 amount = curlun->file_length - file_offset;
1813 }
1814
1815 /* Perform the write */
1816 file_offset_tmp = file_offset;
1817 nwritten = vfs_write(curlun->filp,
1818 (char __user *) bh->buf,
1819 amount, &file_offset_tmp);
1820 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1821 (unsigned long long) file_offset,
1822 (int) nwritten);
1823 if (signal_pending(current))
1824 return -EINTR; // Interrupted!
1825
1826 if (nwritten < 0) {
1827 LDBG(curlun, "error in file write: %d\n",
1828 (int) nwritten);
1829 nwritten = 0;
1830 } else if (nwritten < amount) {
1831 LDBG(curlun, "partial file write: %d/%u\n",
1832 (int) nwritten, amount);
1833 nwritten -= (nwritten & 511);
1834 // Round down to a block
1835 }
1836 file_offset += nwritten;
1837 amount_left_to_write -= nwritten;
1838 fsg->residue -= nwritten;
1839
1840 /* If an error occurred, report it and its position */
1841 if (nwritten < amount) {
1842 curlun->sense_data = SS_WRITE_ERROR;
1843 curlun->sense_data_info = file_offset >> 9;
1844 break;
1845 }
1846
1847 /* Did the host decide to stop early? */
1848 if (bh->outreq->actual != bh->outreq->length) {
1849 fsg->short_packet_received = 1;
1850 break;
1851 }
1852 continue;
1853 }
1854
1855 /* Wait for something to happen */
1856 if ((rc = sleep_thread(fsg)) != 0)
1857 return rc;
1858 }
1859
1860 return -EIO; // No default reply
1861}
1862
1863
1864/*-------------------------------------------------------------------------*/
1865
1866/* Sync the file data, don't bother with the metadata.
1867 * This code was copied from fs/buffer.c:sys_fdatasync(). */
1868static int fsync_sub(struct lun *curlun)
1869{
1870 struct file *filp = curlun->filp;
1871 struct inode *inode;
1872 int rc, err;
1873
1874 if (curlun->ro || !filp)
1875 return 0;
1876 if (!filp->f_op->fsync)
1877 return -EINVAL;
1878
1879 inode = filp->f_dentry->d_inode;
1880 down(&inode->i_sem);
1881 current->flags |= PF_SYNCWRITE;
1882 rc = filemap_fdatawrite(inode->i_mapping);
1883 err = filp->f_op->fsync(filp, filp->f_dentry, 1);
1884 if (!rc)
1885 rc = err;
1886 err = filemap_fdatawait(inode->i_mapping);
1887 if (!rc)
1888 rc = err;
1889 current->flags &= ~PF_SYNCWRITE;
1890 up(&inode->i_sem);
1891 VLDBG(curlun, "fdatasync -> %d\n", rc);
1892 return rc;
1893}
1894
1895static void fsync_all(struct fsg_dev *fsg)
1896{
1897 int i;
1898
1899 for (i = 0; i < fsg->nluns; ++i)
1900 fsync_sub(&fsg->luns[i]);
1901}
1902
1903static int do_synchronize_cache(struct fsg_dev *fsg)
1904{
1905 struct lun *curlun = fsg->curlun;
1906 int rc;
1907
1908	/* We ignore the requested LBA and write out all the file's
1909 * dirty data buffers. */
1910 rc = fsync_sub(curlun);
1911 if (rc)
1912 curlun->sense_data = SS_WRITE_ERROR;
1913 return 0;
1914}
1915
1916
1917/*-------------------------------------------------------------------------*/
1918
1919static void invalidate_sub(struct lun *curlun)
1920{
1921 struct file *filp = curlun->filp;
1922 struct inode *inode = filp->f_dentry->d_inode;
1923 unsigned long rc;
1924
1925 rc = invalidate_inode_pages(inode->i_mapping);
1926 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
1927}
1928
1929static int do_verify(struct fsg_dev *fsg)
1930{
1931 struct lun *curlun = fsg->curlun;
1932 u32 lba;
1933 u32 verification_length;
1934 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1935 loff_t file_offset, file_offset_tmp;
1936 u32 amount_left;
1937 unsigned int amount;
1938 ssize_t nread;
1939
1940 /* Get the starting Logical Block Address and check that it's
1941 * not too big */
1942 lba = get_be32(&fsg->cmnd[2]);
1943 if (lba >= curlun->num_sectors) {
1944 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1945 return -EINVAL;
1946 }
1947
1948 /* We allow DPO (Disable Page Out = don't save data in the
1949 * cache) but we don't implement it. */
1950 if ((fsg->cmnd[1] & ~0x10) != 0) {
1951 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1952 return -EINVAL;
1953 }
1954
1955 verification_length = get_be16(&fsg->cmnd[7]);
1956 if (unlikely(verification_length == 0))
1957 return -EIO; // No default reply
1958
1959 /* Prepare to carry out the file verify */
1960 amount_left = verification_length << 9;
1961 file_offset = ((loff_t) lba) << 9;
1962
1963 /* Write out all the dirty buffers before invalidating them */
1964 fsync_sub(curlun);
1965 if (signal_pending(current))
1966 return -EINTR;
1967
1968 invalidate_sub(curlun);
1969 if (signal_pending(current))
1970 return -EINTR;
1971
1972 /* Just try to read the requested blocks */
1973 while (amount_left > 0) {
1974
1975 /* Figure out how much we need to read:
1976 * Try to read the remaining amount, but not more than
1977 * the buffer size.
1978 * And don't try to read past the end of the file.
1979 * If this means reading 0 then we were asked to read
1980 * past the end of file. */
1981 amount = min((unsigned int) amount_left, mod_data.buflen);
1982 amount = min((loff_t) amount,
1983 curlun->file_length - file_offset);
1984 if (amount == 0) {
1985 curlun->sense_data =
1986 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1987 curlun->sense_data_info = file_offset >> 9;
1988 break;
1989 }
1990
1991 /* Perform the read */
1992 file_offset_tmp = file_offset;
1993 nread = vfs_read(curlun->filp,
1994 (char __user *) bh->buf,
1995 amount, &file_offset_tmp);
1996 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1997 (unsigned long long) file_offset,
1998 (int) nread);
1999 if (signal_pending(current))
2000 return -EINTR;
2001
2002 if (nread < 0) {
2003 LDBG(curlun, "error in file verify: %d\n",
2004 (int) nread);
2005 nread = 0;
2006 } else if (nread < amount) {
2007 LDBG(curlun, "partial file verify: %d/%u\n",
2008 (int) nread, amount);
2009 nread -= (nread & 511); // Round down to a sector
2010 }
2011 if (nread == 0) {
2012 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
2013 curlun->sense_data_info = file_offset >> 9;
2014 break;
2015 }
2016 file_offset += nread;
2017 amount_left -= nread;
2018 }
2019 return 0;
2020}
2021
2022
2023/*-------------------------------------------------------------------------*/
2024
2025static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2026{
2027 u8 *buf = (u8 *) bh->buf;
2028
2029 static char vendor_id[] = "Linux ";
2030 static char product_id[] = "File-Stor Gadget";
2031
2032 if (!fsg->curlun) { // Unsupported LUNs are okay
2033 fsg->bad_lun_okay = 1;
2034 memset(buf, 0, 36);
2035 buf[0] = 0x7f; // Unsupported, no device-type
2036 return 36;
2037 }
2038
2039 memset(buf, 0, 8); // Non-removable, direct-access device
2040 if (mod_data.removable)
2041 buf[1] = 0x80;
2042 buf[2] = 2; // ANSI SCSI level 2
2043 buf[3] = 2; // SCSI-2 INQUIRY data format
2044 buf[4] = 31; // Additional length
2045 // No special options
2046 sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, product_id,
2047 mod_data.release);
2048 return 36;
2049}
2050
2051
2052static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2053{
2054 struct lun *curlun = fsg->curlun;
2055 u8 *buf = (u8 *) bh->buf;
2056 u32 sd, sdinfo;
2057
2058 /*
2059 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
2060 *
2061 * If a REQUEST SENSE command is received from an initiator
2062 * with a pending unit attention condition (before the target
2063 * generates the contingent allegiance condition), then the
2064 * target shall either:
2065 * a) report any pending sense data and preserve the unit
2066 * attention condition on the logical unit, or,
2067 * b) report the unit attention condition, may discard any
2068 * pending sense data, and clear the unit attention
2069 * condition on the logical unit for that initiator.
2070 *
2071 * FSG normally uses option a); enable this code to use option b).
2072 */
2073#if 0
2074 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
2075 curlun->sense_data = curlun->unit_attention_data;
2076 curlun->unit_attention_data = SS_NO_SENSE;
2077 }
2078#endif
2079
2080 if (!curlun) { // Unsupported LUNs are okay
2081 fsg->bad_lun_okay = 1;
2082 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
2083 sdinfo = 0;
2084 } else {
2085 sd = curlun->sense_data;
2086 sdinfo = curlun->sense_data_info;
2087 curlun->sense_data = SS_NO_SENSE;
2088 curlun->sense_data_info = 0;
2089 }
2090
2091 memset(buf, 0, 18);
2092 buf[0] = 0x80 | 0x70; // Valid, current error
2093 buf[2] = SK(sd);
2094 put_be32(&buf[3], sdinfo); // Sense information
2095 buf[7] = 18 - 8; // Additional sense length
2096 buf[12] = ASC(sd);
2097 buf[13] = ASCQ(sd);
2098 return 18;
2099}
2100
2101
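/* READ CAPACITY returns 8 bytes: the address of the last logical block
 * (hence num_sectors - 1) followed by the block length in bytes, which
 * is hard-wired to 512 throughout this driver. */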
2102static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2103{
2104 struct lun *curlun = fsg->curlun;
2105 u32 lba = get_be32(&fsg->cmnd[2]);
2106 int pmi = fsg->cmnd[8];
2107 u8 *buf = (u8 *) bh->buf;
2108
2109 /* Check the PMI and LBA fields */
2110 if (pmi > 1 || (pmi == 0 && lba != 0)) {
2111 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2112 return -EINVAL;
2113 }
2114
2115 put_be32(&buf[0], curlun->num_sectors - 1); // Max logical block
2116 put_be32(&buf[4], 512); // Block length
2117 return 8;
2118}
2119
2120
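/* Handle MODE SENSE(6) and MODE SENSE(10). The mode parameter header is
 * 4 bytes for the 6-byte command and 8 bytes for the 10-byte one; no
 * block descriptors follow, and the only mode page reported is the
 * Caching page (0x08). The mode data length stored at the end excludes
 * the length field itself, hence the "len - 1" and "len - 2" below. */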
2121static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2122{
2123 struct lun *curlun = fsg->curlun;
2124 int mscmnd = fsg->cmnd[0];
2125 u8 *buf = (u8 *) bh->buf;
2126 u8 *buf0 = buf;
2127 int pc, page_code;
2128 int changeable_values, all_pages;
2129 int valid_page = 0;
2130 int len, limit;
2131
2132 if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD
2133 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2134 return -EINVAL;
2135 }
2136 pc = fsg->cmnd[2] >> 6;
2137 page_code = fsg->cmnd[2] & 0x3f;
2138 if (pc == 3) {
2139 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
2140 return -EINVAL;
2141 }
2142 changeable_values = (pc == 1);
2143 all_pages = (page_code == 0x3f);
2144
2145 /* Write the mode parameter header. Fixed values are: default
2146 * medium type, no cache control (DPOFUA), and no block descriptors.
2147 * The only variable value is the WriteProtect bit. We will fill in
2148 * the mode data length later. */
2149 memset(buf, 0, 8);
2150 if (mscmnd == SC_MODE_SENSE_6) {
2151 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
2152 buf += 4;
2153 limit = 255;
2154 } else { // SC_MODE_SENSE_10
2155 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
2156 buf += 8;
2157 limit = 65535; // Should really be mod_data.buflen
2158 }
2159
2160 /* No block descriptors */
2161
2162 /* The mode pages, in numerical order. The only page we support
2163 * is the Caching page. */
2164 if (page_code == 0x08 || all_pages) {
2165 valid_page = 1;
2166 buf[0] = 0x08; // Page code
2167 buf[1] = 10; // Page length
2168 memset(buf+2, 0, 10); // None of the fields are changeable
2169
2170 if (!changeable_values) {
2171 buf[2] = 0x04; // Write cache enable,
2172 // Read cache not disabled
2173 // No cache retention priorities
2174 put_be16(&buf[4], 0xffff); // Don't disable prefetch
2175 // Minimum prefetch = 0
2176 put_be16(&buf[8], 0xffff); // Maximum prefetch
2177 put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling
2178 }
2179 buf += 12;
2180 }
2181
2182 /* Check that a valid page was requested and the mode data length
2183 * isn't too long. */
2184 len = buf - buf0;
2185 if (!valid_page || len > limit) {
2186 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2187 return -EINVAL;
2188 }
2189
2190 /* Store the mode data length */
2191 if (mscmnd == SC_MODE_SENSE_6)
2192 buf0[0] = len - 1;
2193 else
2194 put_be16(buf0, len - 2);
2195 return len;
2196}
2197
2198
2199static int do_start_stop(struct fsg_dev *fsg)
2200{
2201 struct lun *curlun = fsg->curlun;
2202 int loej, start;
2203
2204 if (!mod_data.removable) {
2205 curlun->sense_data = SS_INVALID_COMMAND;
2206 return -EINVAL;
2207 }
2208
2209 // int immed = fsg->cmnd[1] & 0x01;
2210 loej = fsg->cmnd[4] & 0x02;
2211 start = fsg->cmnd[4] & 0x01;
2212
2213#ifdef CONFIG_USB_FILE_STORAGE_TEST
2214 if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed
2215 (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start
2216 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2217 return -EINVAL;
2218 }
2219
2220 if (!start) {
2221
2222 /* Are we allowed to unload the media? */
2223 if (curlun->prevent_medium_removal) {
2224 LDBG(curlun, "unload attempt prevented\n");
2225 curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
2226 return -EINVAL;
2227 }
2228 if (loej) { // Simulate an unload/eject
2229 up_read(&fsg->filesem);
2230 down_write(&fsg->filesem);
2231 close_backing_file(curlun);
2232 up_write(&fsg->filesem);
2233 down_read(&fsg->filesem);
2234 }
2235 } else {
2236
2237 /* Our emulation doesn't support mounting; the medium is
2238 * available for use as soon as it is loaded. */
2239 if (!backing_file_is_open(curlun)) {
2240 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2241 return -EINVAL;
2242 }
2243 }
2244#endif
2245 return 0;
2246}
2247
2248
2249static int do_prevent_allow(struct fsg_dev *fsg)
2250{
2251 struct lun *curlun = fsg->curlun;
2252 int prevent;
2253
2254 if (!mod_data.removable) {
2255 curlun->sense_data = SS_INVALID_COMMAND;
2256 return -EINVAL;
2257 }
2258
2259 prevent = fsg->cmnd[4] & 0x01;
2260 if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
2261 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2262 return -EINVAL;
2263 }
2264
2265 if (curlun->prevent_medium_removal && !prevent)
2266 fsync_sub(curlun);
2267 curlun->prevent_medium_removal = prevent;
2268 return 0;
2269}
2270
2271
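/* READ FORMAT CAPACITIES: return a 4-byte Capacity List Header followed
 * by a single 8-byte Current/Maximum Capacity descriptor holding the
 * number of blocks and the 512-byte block length. The descriptor-type
 * byte written over buf[4] (0x02, "formatted media") overlays the high
 * byte of the block-length field; the length itself still occupies the
 * low three bytes. */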
2272static int do_read_format_capacities(struct fsg_dev *fsg,
2273 struct fsg_buffhd *bh)
2274{
2275 struct lun *curlun = fsg->curlun;
2276 u8 *buf = (u8 *) bh->buf;
2277
2278 buf[0] = buf[1] = buf[2] = 0;
2279 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
2280 buf += 4;
2281
2282 put_be32(&buf[0], curlun->num_sectors); // Number of blocks
2283 put_be32(&buf[4], 512); // Block length
2284 buf[4] = 0x02; // Current capacity
2285 return 12;
2286}
2287
2288
2289static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2290{
2291 struct lun *curlun = fsg->curlun;
2292
2293 /* We don't support MODE SELECT */
2294 curlun->sense_data = SS_INVALID_COMMAND;
2295 return -EINVAL;
2296}
2297
2298
2299/*-------------------------------------------------------------------------*/
2300
2301static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
2302{
2303 int rc;
2304
2305 rc = fsg_set_halt(fsg, fsg->bulk_in);
2306 if (rc == -EAGAIN)
2307 VDBG(fsg, "delayed bulk-in endpoint halt\n");
2308 while (rc != 0) {
2309 if (rc != -EAGAIN) {
2310 WARN(fsg, "usb_ep_set_halt -> %d\n", rc);
2311 rc = 0;
2312 break;
2313 }
2314
2315 /* Wait for a short time and then try again */
2316 if (msleep_interruptible(100) != 0)
2317 return -EINTR;
2318 rc = usb_ep_set_halt(fsg->bulk_in);
2319 }
2320 return rc;
2321}
2322
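/* When we aren't allowed to stall, a short host-bound transfer is padded
 * out to the full requested length with zero bytes, so the host receives
 * exactly the amount of data it asked for. */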
2323static int pad_with_zeros(struct fsg_dev *fsg)
2324{
2325 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
2326 u32 nkeep = bh->inreq->length;
2327 u32 nsend;
2328 int rc;
2329
2330 bh->state = BUF_STATE_EMPTY; // For the first iteration
2331 fsg->usb_amount_left = nkeep + fsg->residue;
2332 while (fsg->usb_amount_left > 0) {
2333
2334 /* Wait for the next buffer to be free */
2335 while (bh->state != BUF_STATE_EMPTY) {
2336 if ((rc = sleep_thread(fsg)) != 0)
2337 return rc;
2338 }
2339
2340 nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen);
2341 memset(bh->buf + nkeep, 0, nsend - nkeep);
2342 bh->inreq->length = nsend;
2343 bh->inreq->zero = 0;
2344 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2345 &bh->inreq_busy, &bh->state);
2346 bh = fsg->next_buffhd_to_fill = bh->next;
2347 fsg->usb_amount_left -= nsend;
2348 nkeep = 0;
2349 }
2350 return 0;
2351}
2352
2353static int throw_away_data(struct fsg_dev *fsg)
2354{
2355 struct fsg_buffhd *bh;
2356 u32 amount;
2357 int rc;
2358
2359 while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
2360 fsg->usb_amount_left > 0) {
2361
2362 /* Throw away the data in a filled buffer */
2363 if (bh->state == BUF_STATE_FULL) {
2364 bh->state = BUF_STATE_EMPTY;
2365 fsg->next_buffhd_to_drain = bh->next;
2366
2367 /* A short packet or an error ends everything */
2368 if (bh->outreq->actual != bh->outreq->length ||
2369 bh->outreq->status != 0) {
2370 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2371 return -EINTR;
2372 }
2373 continue;
2374 }
2375
2376 /* Try to submit another request if we need one */
2377 bh = fsg->next_buffhd_to_fill;
2378 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
2379 amount = min(fsg->usb_amount_left,
2380 (u32) mod_data.buflen);
2381
2382 /* amount is always divisible by 512, hence by
2383 * the bulk-out maxpacket size */
2384 bh->outreq->length = bh->bulk_out_intended_length =
2385 amount;
2386 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2387 &bh->outreq_busy, &bh->state);
2388 fsg->next_buffhd_to_fill = bh->next;
2389 fsg->usb_amount_left -= amount;
2390 continue;
2391 }
2392
2393 /* Otherwise wait for something to happen */
2394 if ((rc = sleep_thread(fsg)) != 0)
2395 return rc;
2396 }
2397 return 0;
2398}
2399
2400
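/* Wind up the data phase. Depending on the direction and the residue,
 * this either sends the final buffer, halts the bulk-in endpoint, pads
 * the remaining host-bound data with zeros, or throws away excess data
 * sent by the host. */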
2401static int finish_reply(struct fsg_dev *fsg)
2402{
2403 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
2404 int rc = 0;
2405
2406 switch (fsg->data_dir) {
2407 case DATA_DIR_NONE:
2408 break; // Nothing to send
2409
2410 /* If we don't know whether the host wants to read or write,
2411 * this must be CB or CBI with an unknown command. We mustn't
2412 * try to send or receive any data. So stall both bulk pipes
2413 * if we can and wait for a reset. */
2414 case DATA_DIR_UNKNOWN:
2415 if (mod_data.can_stall) {
2416 fsg_set_halt(fsg, fsg->bulk_out);
2417 rc = halt_bulk_in_endpoint(fsg);
2418 }
2419 break;
2420
2421 /* All but the last buffer of data must have already been sent */
2422 case DATA_DIR_TO_HOST:
2423 if (fsg->data_size == 0)
2424 ; // Nothing to send
2425
2426 /* If there's no residue, simply send the last buffer */
2427 else if (fsg->residue == 0) {
2428 bh->inreq->zero = 0;
2429 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2430 &bh->inreq_busy, &bh->state);
2431 fsg->next_buffhd_to_fill = bh->next;
2432 }
2433
2434 /* There is a residue. For CB and CBI, simply mark the end
2435 * of the data with a short packet. However, if we are
2436 * allowed to stall, there was no data at all (residue ==
2437 * data_size), and the command failed (invalid LUN or
2438 * sense data is set), then halt the bulk-in endpoint
2439 * instead. */
2440 else if (!transport_is_bbb()) {
2441 if (mod_data.can_stall &&
2442 fsg->residue == fsg->data_size &&
2443 (!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
2444 bh->state = BUF_STATE_EMPTY;
2445 rc = halt_bulk_in_endpoint(fsg);
2446 } else {
2447 bh->inreq->zero = 1;
2448 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2449 &bh->inreq_busy, &bh->state);
2450 fsg->next_buffhd_to_fill = bh->next;
2451 }
2452 }
2453
2454 /* For Bulk-only, if we're allowed to stall then send the
2455 * short packet and halt the bulk-in endpoint. If we can't
2456 * stall, pad out the remaining data with 0's. */
2457 else {
2458 if (mod_data.can_stall) {
2459 bh->inreq->zero = 1;
2460 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2461 &bh->inreq_busy, &bh->state);
2462 fsg->next_buffhd_to_fill = bh->next;
2463 rc = halt_bulk_in_endpoint(fsg);
2464 } else
2465 rc = pad_with_zeros(fsg);
2466 }
2467 break;
2468
2469 /* We have processed all we want from the data the host has sent.
2470 * There may still be outstanding bulk-out requests. */
2471 case DATA_DIR_FROM_HOST:
2472 if (fsg->residue == 0)
2473 ; // Nothing to receive
2474
2475 /* Did the host stop sending unexpectedly early? */
2476 else if (fsg->short_packet_received) {
2477 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2478 rc = -EINTR;
2479 }
2480
2481 /* We haven't processed all the incoming data. Even though
2482 * we may be allowed to stall, doing so would cause a race.
2483 * The controller may already have ACK'ed all the remaining
2484 * bulk-out packets, in which case the host wouldn't see a
2485 * STALL. Not realizing the endpoint was halted, it wouldn't
2486 * clear the halt -- leading to problems later on. */
2487#if 0
2488 else if (mod_data.can_stall) {
2489 fsg_set_halt(fsg, fsg->bulk_out);
2490 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2491 rc = -EINTR;
2492 }
2493#endif
2494
2495 /* We can't stall. Read in the excess data and throw it
2496 * all away. */
2497 else
2498 rc = throw_away_data(fsg);
2499 break;
2500 }
2501 return rc;
2502}
2503
2504
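/* Report the command status in the transport-specific way: Bulk-only
 * sends a 13-byte Command Status Wrapper on the bulk-in pipe, Control-Bulk
 * has no status phase at all, and CBI sends two bytes of interrupt data
 * (ASC/ASCQ for UFI, a type byte and the status value for everything
 * else). */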
2505static int send_status(struct fsg_dev *fsg)
2506{
2507 struct lun *curlun = fsg->curlun;
2508 struct fsg_buffhd *bh;
2509 int rc;
2510 u8 status = USB_STATUS_PASS;
2511 u32 sd, sdinfo = 0;
2512
2513 /* Wait for the next buffer to become available */
2514 bh = fsg->next_buffhd_to_fill;
2515 while (bh->state != BUF_STATE_EMPTY) {
2516 if ((rc = sleep_thread(fsg)) != 0)
2517 return rc;
2518 }
2519
2520 if (curlun) {
2521 sd = curlun->sense_data;
2522 sdinfo = curlun->sense_data_info;
2523 } else if (fsg->bad_lun_okay)
2524 sd = SS_NO_SENSE;
2525 else
2526 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
2527
2528 if (fsg->phase_error) {
2529 DBG(fsg, "sending phase-error status\n");
2530 status = USB_STATUS_PHASE_ERROR;
2531 sd = SS_INVALID_COMMAND;
2532 } else if (sd != SS_NO_SENSE) {
2533 DBG(fsg, "sending command-failure status\n");
2534 status = USB_STATUS_FAIL;
2535 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
2536 " info x%x\n",
2537 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
2538 }
2539
2540 if (transport_is_bbb()) {
2541 struct bulk_cs_wrap *csw = (struct bulk_cs_wrap *) bh->buf;
2542
2543 /* Store and send the Bulk-only CSW */
2544 csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
2545 csw->Tag = fsg->tag;
2546 csw->Residue = cpu_to_le32(fsg->residue);
2547 csw->Status = status;
2548
2549 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
2550 bh->inreq->zero = 0;
2551 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2552 &bh->inreq_busy, &bh->state);
2553
2554 } else if (mod_data.transport_type == USB_PR_CB) {
2555
2556 /* Control-Bulk transport has no status phase! */
2557 return 0;
2558
2559 } else { // USB_PR_CBI
2560 struct interrupt_data *buf = (struct interrupt_data *)
2561 bh->buf;
2562
2563 /* Store and send the Interrupt data. UFI sends the ASC
2564 * and ASCQ bytes. Everything else sends a Type (which
2565 * is always 0) and the status Value. */
2566 if (mod_data.protocol_type == USB_SC_UFI) {
2567 buf->bType = ASC(sd);
2568 buf->bValue = ASCQ(sd);
2569 } else {
2570 buf->bType = 0;
2571 buf->bValue = status;
2572 }
2573 fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
2574
2575 fsg->intr_buffhd = bh; // Point to the right buffhd
2576 fsg->intreq->buf = bh->inreq->buf;
2577 fsg->intreq->dma = bh->inreq->dma;
2578 fsg->intreq->context = bh;
2579 start_transfer(fsg, fsg->intr_in, fsg->intreq,
2580 &fsg->intreq_busy, &bh->state);
2581 }
2582
2583 fsg->next_buffhd_to_fill = bh->next;
2584 return 0;
2585}
2586
2587
2588/*-------------------------------------------------------------------------*/
2589
2590/* Check whether the command is properly formed and whether its data size
2591 * and direction agree with the values we already have. */
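/* The cmnd_size argument is the expected CDB length, "mask" has a bit set
 * for every CDB byte (other than byte 0) that is allowed to be non-zero,
 * and needs_medium says whether a backing file must be open for the
 * command to proceed. */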
2592static int check_command(struct fsg_dev *fsg, int cmnd_size,
2593 enum data_direction data_dir, unsigned int mask,
2594 int needs_medium, const char *name)
2595{
2596 int i;
2597 int lun = fsg->cmnd[1] >> 5;
2598 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
2599 char hdlen[20];
2600 struct lun *curlun;
2601
2602 /* Adjust the expected cmnd_size for protocol encapsulation padding.
2603 * Transparent SCSI doesn't pad. */
2604 if (protocol_is_scsi())
2605 ;
2606
2607 /* There's some disagreement as to whether RBC pads commands or not.
2608 * We'll play it safe and accept either form. */
2609 else if (mod_data.protocol_type == USB_SC_RBC) {
2610 if (fsg->cmnd_size == 12)
2611 cmnd_size = 12;
2612
2613 /* All the other protocols pad to 12 bytes */
2614 } else
2615 cmnd_size = 12;
2616
2617 hdlen[0] = 0;
2618 if (fsg->data_dir != DATA_DIR_UNKNOWN)
2619 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
2620 fsg->data_size);
2621 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
2622 name, cmnd_size, dirletter[(int) data_dir],
2623 fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
2624
2625 /* We can't reply at all until we know the correct data direction
2626 * and size. */
2627 if (fsg->data_size_from_cmnd == 0)
2628 data_dir = DATA_DIR_NONE;
2629 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
2630 fsg->data_dir = data_dir;
2631 fsg->data_size = fsg->data_size_from_cmnd;
2632
2633 } else { // Bulk-only
2634 if (fsg->data_size < fsg->data_size_from_cmnd) {
2635
2636 /* Host data size < Device data size is a phase error.
2637 * Carry out the command, but only transfer as much
2638 * as we are allowed. */
2639 fsg->data_size_from_cmnd = fsg->data_size;
2640 fsg->phase_error = 1;
2641 }
2642 }
2643 fsg->residue = fsg->usb_amount_left = fsg->data_size;
2644
2645 /* A conflict between the data directions is a phase error */
2646 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
2647 fsg->phase_error = 1;
2648 return -EINVAL;
2649 }
2650
2651 /* Verify the length of the command itself */
2652 if (cmnd_size != fsg->cmnd_size) {
2653
2654 /* Special case workaround: MS-Windows issues REQUEST SENSE
2655 * with cbw->Length == 12 (it should be 6). */
2656 if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
2657 cmnd_size = fsg->cmnd_size;
2658 else {
2659 fsg->phase_error = 1;
2660 return -EINVAL;
2661 }
2662 }
2663
2664 /* Check that the LUN values are consistent */
2665 if (transport_is_bbb()) {
2666 if (fsg->lun != lun)
2667 DBG(fsg, "using LUN %d from CBW, "
2668 "not LUN %d from CDB\n",
2669 fsg->lun, lun);
2670 } else
2671 fsg->lun = lun; // Use LUN from the command
2672
2673 /* Check the LUN */
2674 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
2675 fsg->curlun = curlun = &fsg->luns[fsg->lun];
2676 if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
2677 curlun->sense_data = SS_NO_SENSE;
2678 curlun->sense_data_info = 0;
2679 }
2680 } else {
2681 fsg->curlun = curlun = NULL;
2682 fsg->bad_lun_okay = 0;
2683
2684 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2685 * to use unsupported LUNs; all others may not. */
2686 if (fsg->cmnd[0] != SC_INQUIRY &&
2687 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2688 DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2689 return -EINVAL;
2690 }
2691 }
2692
2693 /* If a unit attention condition exists, only INQUIRY and
2694 * REQUEST SENSE commands are allowed; anything else must fail. */
2695 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
2696 fsg->cmnd[0] != SC_INQUIRY &&
2697 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2698 curlun->sense_data = curlun->unit_attention_data;
2699 curlun->unit_attention_data = SS_NO_SENSE;
2700 return -EINVAL;
2701 }
2702
2703 /* Check that only command bytes listed in the mask are non-zero */
2704 fsg->cmnd[1] &= 0x1f; // Mask away the LUN
2705 for (i = 1; i < cmnd_size; ++i) {
2706 if (fsg->cmnd[i] && !(mask & (1 << i))) {
2707 if (curlun)
2708 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2709 return -EINVAL;
2710 }
2711 }
2712
2713 /* If the medium isn't mounted and the command needs to access
2714 * it, return an error. */
2715 if (curlun && !backing_file_is_open(curlun) && needs_medium) {
2716 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2717 return -EINVAL;
2718 }
2719
2720 return 0;
2721}
2722
2723
2724static int do_scsi_command(struct fsg_dev *fsg)
2725{
2726 struct fsg_buffhd *bh;
2727 int rc;
2728 int reply = -EINVAL;
2729 int i;
2730 static char unknown[16];
2731
2732 dump_cdb(fsg);
2733
2734 /* Wait for the next buffer to become available for data or status */
2735 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2736 while (bh->state != BUF_STATE_EMPTY) {
2737 if ((rc = sleep_thread(fsg)) != 0)
2738 return rc;
2739 }
2740 fsg->phase_error = 0;
2741 fsg->short_packet_received = 0;
2742
2743 down_read(&fsg->filesem); // We're using the backing file
2744 switch (fsg->cmnd[0]) {
2745
2746 case SC_INQUIRY:
2747 fsg->data_size_from_cmnd = fsg->cmnd[4];
2748 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2749 (1<<4), 0,
2750 "INQUIRY")) == 0)
2751 reply = do_inquiry(fsg, bh);
2752 break;
2753
2754 case SC_MODE_SELECT_6:
2755 fsg->data_size_from_cmnd = fsg->cmnd[4];
2756 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2757 (1<<1) | (1<<4), 0,
2758 "MODE SELECT(6)")) == 0)
2759 reply = do_mode_select(fsg, bh);
2760 break;
2761
2762 case SC_MODE_SELECT_10:
2763 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2764 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2765 (1<<1) | (3<<7), 0,
2766 "MODE SELECT(10)")) == 0)
2767 reply = do_mode_select(fsg, bh);
2768 break;
2769
2770 case SC_MODE_SENSE_6:
2771 fsg->data_size_from_cmnd = fsg->cmnd[4];
2772 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2773 (1<<1) | (1<<2) | (1<<4), 0,
2774 "MODE SENSE(6)")) == 0)
2775 reply = do_mode_sense(fsg, bh);
2776 break;
2777
2778 case SC_MODE_SENSE_10:
2779 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2780 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2781 (1<<1) | (1<<2) | (3<<7), 0,
2782 "MODE SENSE(10)")) == 0)
2783 reply = do_mode_sense(fsg, bh);
2784 break;
2785
2786 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
2787 fsg->data_size_from_cmnd = 0;
2788 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2789 (1<<4), 0,
2790 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2791 reply = do_prevent_allow(fsg);
2792 break;
2793
2794 case SC_READ_6:
2795 i = fsg->cmnd[4];
2796 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2797 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2798 (7<<1) | (1<<4), 1,
2799 "READ(6)")) == 0)
2800 reply = do_read(fsg);
2801 break;
2802
2803 case SC_READ_10:
2804 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
2805 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2806 (1<<1) | (0xf<<2) | (3<<7), 1,
2807 "READ(10)")) == 0)
2808 reply = do_read(fsg);
2809 break;
2810
2811 case SC_READ_12:
2812 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
2813 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
2814 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2815 "READ(12)")) == 0)
2816 reply = do_read(fsg);
2817 break;
2818
2819 case SC_READ_CAPACITY:
2820 fsg->data_size_from_cmnd = 8;
2821 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2822 (0xf<<2) | (1<<8), 1,
2823 "READ CAPACITY")) == 0)
2824 reply = do_read_capacity(fsg, bh);
2825 break;
2826
2827 case SC_READ_FORMAT_CAPACITIES:
2828 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2829 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2830 (3<<7), 1,
2831 "READ FORMAT CAPACITIES")) == 0)
2832 reply = do_read_format_capacities(fsg, bh);
2833 break;
2834
2835 case SC_REQUEST_SENSE:
2836 fsg->data_size_from_cmnd = fsg->cmnd[4];
2837 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2838 (1<<4), 0,
2839 "REQUEST SENSE")) == 0)
2840 reply = do_request_sense(fsg, bh);
2841 break;
2842
2843 case SC_START_STOP_UNIT:
2844 fsg->data_size_from_cmnd = 0;
2845 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2846 (1<<1) | (1<<4), 0,
2847 "START-STOP UNIT")) == 0)
2848 reply = do_start_stop(fsg);
2849 break;
2850
2851 case SC_SYNCHRONIZE_CACHE:
2852 fsg->data_size_from_cmnd = 0;
2853 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2854 (0xf<<2) | (3<<7), 1,
2855 "SYNCHRONIZE CACHE")) == 0)
2856 reply = do_synchronize_cache(fsg);
2857 break;
2858
2859 case SC_TEST_UNIT_READY:
2860 fsg->data_size_from_cmnd = 0;
2861 reply = check_command(fsg, 6, DATA_DIR_NONE,
2862 0, 1,
2863 "TEST UNIT READY");
2864 break;
2865
2866 /* Although optional, this command is used by MS-Windows. We
2867 * support a minimal version: BytChk must be 0. */
2868 case SC_VERIFY:
2869 fsg->data_size_from_cmnd = 0;
2870 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2871 (1<<1) | (0xf<<2) | (3<<7), 1,
2872 "VERIFY")) == 0)
2873 reply = do_verify(fsg);
2874 break;
2875
2876 case SC_WRITE_6:
2877 i = fsg->cmnd[4];
2878 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2879 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2880 (7<<1) | (1<<4), 1,
2881 "WRITE(6)")) == 0)
2882 reply = do_write(fsg);
2883 break;
2884
2885 case SC_WRITE_10:
2886 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
2887 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2888 (1<<1) | (0xf<<2) | (3<<7), 1,
2889 "WRITE(10)")) == 0)
2890 reply = do_write(fsg);
2891 break;
2892
2893 case SC_WRITE_12:
2894 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
2895 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
2896 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2897 "WRITE(12)")) == 0)
2898 reply = do_write(fsg);
2899 break;
2900
2901 /* Some mandatory commands that we recognize but don't implement.
2902 * They don't mean much in this setting. It's left as an exercise
2903 * for anyone interested to implement RESERVE and RELEASE in terms
2904 * of POSIX locks. */
2905 case SC_FORMAT_UNIT:
2906 case SC_RELEASE:
2907 case SC_RESERVE:
2908 case SC_SEND_DIAGNOSTIC:
2909 // Fall through
2910
2911 default:
2912 fsg->data_size_from_cmnd = 0;
2913 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2914 if ((reply = check_command(fsg, fsg->cmnd_size,
2915 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
2916 fsg->curlun->sense_data = SS_INVALID_COMMAND;
2917 reply = -EINVAL;
2918 }
2919 break;
2920 }
2921 up_read(&fsg->filesem);
2922
2923 if (reply == -EINTR || signal_pending(current))
2924 return -EINTR;
2925
2926 /* Set up the single reply buffer for finish_reply() */
2927 if (reply == -EINVAL)
2928 reply = 0; // Error reply length
2929 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2930 reply = min((u32) reply, fsg->data_size_from_cmnd);
2931 bh->inreq->length = reply;
2932 bh->state = BUF_STATE_FULL;
2933 fsg->residue -= reply;
2934 } // Otherwise it's already set
2935
2936 return 0;
2937}
2938
2939
2940/*-------------------------------------------------------------------------*/
2941
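/* Parse a Bulk-only Command Block Wrapper. A valid CBW is exactly 31
 * bytes and starts with the "USBC" signature; the top bit of the Flags
 * field selects the data direction (set = device-to-host), Lun names the
 * target LUN, and Length gives the size of the CDB that follows (this
 * driver insists on 6 to MAX_COMMAND_SIZE bytes). */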
2942static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2943{
2944 struct usb_request *req = bh->outreq;
2945 struct bulk_cb_wrap *cbw = (struct bulk_cb_wrap *) req->buf;
2946
2947 /* Was this a real packet? */
2948 if (req->status)
2949 return -EINVAL;
2950
2951 /* Is the CBW valid? */
2952 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2953 cbw->Signature != __constant_cpu_to_le32(
2954 USB_BULK_CB_SIG)) {
2955 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2956 req->actual,
2957 le32_to_cpu(cbw->Signature));
2958
2959 /* The Bulk-only spec says we MUST stall the bulk pipes!
2960 * If we want to avoid stalls, set a flag so that we will
2961 * clear the endpoint halts at the next reset. */
2962 if (!mod_data.can_stall)
2963 set_bit(CLEAR_BULK_HALTS, &fsg->atomic_bitflags);
2964 fsg_set_halt(fsg, fsg->bulk_out);
2965 halt_bulk_in_endpoint(fsg);
2966 return -EINVAL;
2967 }
2968
2969 /* Is the CBW meaningful? */
2970 if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2971 cbw->Length < 6 || cbw->Length > MAX_COMMAND_SIZE) {
2972 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2973 "cmdlen %u\n",
2974 cbw->Lun, cbw->Flags, cbw->Length);
2975
2976 /* We can do anything we want here, so let's stall the
2977 * bulk pipes if we are allowed to. */
2978 if (mod_data.can_stall) {
2979 fsg_set_halt(fsg, fsg->bulk_out);
2980 halt_bulk_in_endpoint(fsg);
2981 }
2982 return -EINVAL;
2983 }
2984
2985 /* Save the command for later */
2986 fsg->cmnd_size = cbw->Length;
2987 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2988 if (cbw->Flags & USB_BULK_IN_FLAG)
2989 fsg->data_dir = DATA_DIR_TO_HOST;
2990 else
2991 fsg->data_dir = DATA_DIR_FROM_HOST;
2992 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2993 if (fsg->data_size == 0)
2994 fsg->data_dir = DATA_DIR_NONE;
2995 fsg->lun = cbw->Lun;
2996 fsg->tag = cbw->Tag;
2997 return 0;
2998}
2999
3000
3001static int get_next_command(struct fsg_dev *fsg)
3002{
3003 struct fsg_buffhd *bh;
3004 int rc = 0;
3005
3006 if (transport_is_bbb()) {
3007
3008 /* Wait for the next buffer to become available */
3009 bh = fsg->next_buffhd_to_fill;
3010 while (bh->state != BUF_STATE_EMPTY) {
3011 if ((rc = sleep_thread(fsg)) != 0)
3012 return rc;
3013 }
3014
3015 /* Queue a request to read a Bulk-only CBW */
3016 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
3017 start_transfer(fsg, fsg->bulk_out, bh->outreq,
3018 &bh->outreq_busy, &bh->state);
3019
3020 /* We will drain the buffer in software, which means we
3021 * can reuse it for the next filling. No need to advance
3022 * next_buffhd_to_fill. */
3023
3024 /* Wait for the CBW to arrive */
3025 while (bh->state != BUF_STATE_FULL) {
3026 if ((rc = sleep_thread(fsg)) != 0)
3027 return rc;
3028 }
3029 rc = received_cbw(fsg, bh);
3030 bh->state = BUF_STATE_EMPTY;
3031
3032 } else { // USB_PR_CB or USB_PR_CBI
3033
3034 /* Wait for the next command to arrive */
3035 while (fsg->cbbuf_cmnd_size == 0) {
3036 if ((rc = sleep_thread(fsg)) != 0)
3037 return rc;
3038 }
3039
3040 /* Is the previous status interrupt request still busy?
3041 * The host is allowed to skip reading the status,
3042 * so we must cancel it. */
3043 if (fsg->intreq_busy)
3044 usb_ep_dequeue(fsg->intr_in, fsg->intreq);
3045
3046 /* Copy the command and mark the buffer empty */
3047 fsg->data_dir = DATA_DIR_UNKNOWN;
3048 spin_lock_irq(&fsg->lock);
3049 fsg->cmnd_size = fsg->cbbuf_cmnd_size;
3050 memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
3051 fsg->cbbuf_cmnd_size = 0;
3052 spin_unlock_irq(&fsg->lock);
3053 }
3054 return rc;
3055}
3056
3057
3058/*-------------------------------------------------------------------------*/
3059
3060static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
3061 const struct usb_endpoint_descriptor *d)
3062{
3063 int rc;
3064
3065 ep->driver_data = fsg;
3066 rc = usb_ep_enable(ep, d);
3067 if (rc)
3068 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
3069 return rc;
3070}
3071
3072static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
3073 struct usb_request **preq)
3074{
3075 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
3076 if (*preq)
3077 return 0;
3078 ERROR(fsg, "can't allocate request for %s\n", ep->name);
3079 return -ENOMEM;
3080}
3081
3082/*
3083 * Reset interface setting and re-init endpoint state (toggle etc).
3084 * Call with altsetting < 0 to disable the interface. The only other
3085 * available altsetting is 0, which enables the interface.
3086 */
3087static int do_set_interface(struct fsg_dev *fsg, int altsetting)
3088{
3089 int rc = 0;
3090 int i;
3091 const struct usb_endpoint_descriptor *d;
3092
3093 if (fsg->running)
3094 DBG(fsg, "reset interface\n");
3095
3096reset:
3097 /* Deallocate the requests */
3098 for (i = 0; i < NUM_BUFFERS; ++i) {
3099 struct fsg_buffhd *bh = &fsg->buffhds[i];
3100
3101 if (bh->inreq) {
3102 usb_ep_free_request(fsg->bulk_in, bh->inreq);
3103 bh->inreq = NULL;
3104 }
3105 if (bh->outreq) {
3106 usb_ep_free_request(fsg->bulk_out, bh->outreq);
3107 bh->outreq = NULL;
3108 }
3109 }
3110 if (fsg->intreq) {
3111 usb_ep_free_request(fsg->intr_in, fsg->intreq);
3112 fsg->intreq = NULL;
3113 }
3114
3115 /* Disable the endpoints */
3116 if (fsg->bulk_in_enabled) {
3117 usb_ep_disable(fsg->bulk_in);
3118 fsg->bulk_in_enabled = 0;
3119 }
3120 if (fsg->bulk_out_enabled) {
3121 usb_ep_disable(fsg->bulk_out);
3122 fsg->bulk_out_enabled = 0;
3123 }
3124 if (fsg->intr_in_enabled) {
3125 usb_ep_disable(fsg->intr_in);
3126 fsg->intr_in_enabled = 0;
3127 }
3128
3129 fsg->running = 0;
3130 if (altsetting < 0 || rc != 0)
3131 return rc;
3132
3133 DBG(fsg, "set interface %d\n", altsetting);
3134
3135 /* Enable the endpoints */
3136 d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
3137 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
3138 goto reset;
3139 fsg->bulk_in_enabled = 1;
3140
3141 d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
3142 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
3143 goto reset;
3144 fsg->bulk_out_enabled = 1;
3145 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
3146
3147 if (transport_is_cbi()) {
3148 d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc);
3149 if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
3150 goto reset;
3151 fsg->intr_in_enabled = 1;
3152 }
3153
3154 /* Allocate the requests */
3155 for (i = 0; i < NUM_BUFFERS; ++i) {
3156 struct fsg_buffhd *bh = &fsg->buffhds[i];
3157
3158 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
3159 goto reset;
3160 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
3161 goto reset;
3162 bh->inreq->buf = bh->outreq->buf = bh->buf;
3163 bh->inreq->dma = bh->outreq->dma = bh->dma;
3164 bh->inreq->context = bh->outreq->context = bh;
3165 bh->inreq->complete = bulk_in_complete;
3166 bh->outreq->complete = bulk_out_complete;
3167 }
3168 if (transport_is_cbi()) {
3169 if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
3170 goto reset;
3171 fsg->intreq->complete = intr_in_complete;
3172 }
3173
3174 fsg->running = 1;
3175 for (i = 0; i < fsg->nluns; ++i)
3176 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3177 return rc;
3178}
3179
3180
3181/*
3182 * Change our operational configuration. This code must agree with the code
3183 * that returns config descriptors, and with interface altsetting code.
3184 *
3185 * It's also responsible for power management interactions. Some
3186 * configurations might not work with our current power sources.
3187 * For now we just assume the gadget is always self-powered.
3188 */
3189static int do_set_config(struct fsg_dev *fsg, u8 new_config)
3190{
3191 int rc = 0;
3192
3193 /* Disable the single interface */
3194 if (fsg->config != 0) {
3195 DBG(fsg, "reset config\n");
3196 fsg->config = 0;
3197 rc = do_set_interface(fsg, -1);
3198 }
3199
3200 /* Enable the interface */
3201 if (new_config != 0) {
3202 fsg->config = new_config;
3203 if ((rc = do_set_interface(fsg, 0)) != 0)
3204 fsg->config = 0; // Reset on errors
3205 else {
3206 char *speed;
3207
3208 switch (fsg->gadget->speed) {
3209 case USB_SPEED_LOW: speed = "low"; break;
3210 case USB_SPEED_FULL: speed = "full"; break;
3211 case USB_SPEED_HIGH: speed = "high"; break;
3212 default: speed = "?"; break;
3213 }
3214 INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
3215 }
3216 }
3217 return rc;
3218}
3219
3220
3221/*-------------------------------------------------------------------------*/
3222
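/* Deal with an exceptional event: drain any pending signals, cancel all
 * outstanding transfers and wait for them to finish, flush the endpoint
 * FIFOs, reset the buffer and SCSI state under the lock, and finally
 * carry out whatever action the old state calls for (send status, set
 * the interface or configuration, or shut the thread down). */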
3223static void handle_exception(struct fsg_dev *fsg)
3224{
3225 siginfo_t info;
3226 int sig;
3227 int i;
3228 int num_active;
3229 struct fsg_buffhd *bh;
3230 enum fsg_state old_state;
3231 u8 new_config;
3232 struct lun *curlun;
3233 unsigned int exception_req_tag;
3234 int rc;
3235
3236 /* Clear the existing signals. Anything but SIGUSR1 is converted
3237 * into a high-priority EXIT exception. */
3238 for (;;) {
3239 sig = dequeue_signal_lock(current, &fsg->thread_signal_mask,
3240 &info);
3241 if (!sig)
3242 break;
3243 if (sig != SIGUSR1) {
3244 if (fsg->state < FSG_STATE_EXIT)
3245 DBG(fsg, "Main thread exiting on signal\n");
3246 raise_exception(fsg, FSG_STATE_EXIT);
3247 }
3248 }
3249
3250 /* Cancel all the pending transfers */
3251 if (fsg->intreq_busy)
3252 usb_ep_dequeue(fsg->intr_in, fsg->intreq);
3253 for (i = 0; i < NUM_BUFFERS; ++i) {
3254 bh = &fsg->buffhds[i];
3255 if (bh->inreq_busy)
3256 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
3257 if (bh->outreq_busy)
3258 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
3259 }
3260
3261 /* Wait until everything is idle */
3262 for (;;) {
3263 num_active = fsg->intreq_busy;
3264 for (i = 0; i < NUM_BUFFERS; ++i) {
3265 bh = &fsg->buffhds[i];
3266 num_active += bh->inreq_busy + bh->outreq_busy;
3267 }
3268 if (num_active == 0)
3269 break;
3270 if (sleep_thread(fsg))
3271 return;
3272 }
3273
3274 /* Clear out the controller's fifos */
3275 if (fsg->bulk_in_enabled)
3276 usb_ep_fifo_flush(fsg->bulk_in);
3277 if (fsg->bulk_out_enabled)
3278 usb_ep_fifo_flush(fsg->bulk_out);
3279 if (fsg->intr_in_enabled)
3280 usb_ep_fifo_flush(fsg->intr_in);
3281
3282 /* Reset the I/O buffer states and pointers, the SCSI
3283 * state, and the exception. Then invoke the handler. */
3284 spin_lock_irq(&fsg->lock);
3285
3286 for (i = 0; i < NUM_BUFFERS; ++i) {
3287 bh = &fsg->buffhds[i];
3288 bh->state = BUF_STATE_EMPTY;
3289 }
3290 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
3291 &fsg->buffhds[0];
3292
3293 exception_req_tag = fsg->exception_req_tag;
3294 new_config = fsg->new_config;
3295 old_state = fsg->state;
3296
3297 if (old_state == FSG_STATE_ABORT_BULK_OUT)
3298 fsg->state = FSG_STATE_STATUS_PHASE;
3299 else {
3300 for (i = 0; i < fsg->nluns; ++i) {
3301 curlun = &fsg->luns[i];
3302 curlun->prevent_medium_removal = 0;
3303 curlun->sense_data = curlun->unit_attention_data =
3304 SS_NO_SENSE;
3305 curlun->sense_data_info = 0;
3306 }
3307 fsg->state = FSG_STATE_IDLE;
3308 }
3309 spin_unlock_irq(&fsg->lock);
3310
3311 /* Carry out any extra actions required for the exception */
3312 switch (old_state) {
3313 default:
3314 break;
3315
3316 case FSG_STATE_ABORT_BULK_OUT:
3317 send_status(fsg);
3318 spin_lock_irq(&fsg->lock);
3319 if (fsg->state == FSG_STATE_STATUS_PHASE)
3320 fsg->state = FSG_STATE_IDLE;
3321 spin_unlock_irq(&fsg->lock);
3322 break;
3323
3324 case FSG_STATE_RESET:
3325 /* In case we were forced against our will to halt a
3326 * bulk endpoint, clear the halt now. (The SuperH UDC
3327 * requires this.) */
3328 if (test_and_clear_bit(CLEAR_BULK_HALTS,
3329 &fsg->atomic_bitflags)) {
3330 usb_ep_clear_halt(fsg->bulk_in);
3331 usb_ep_clear_halt(fsg->bulk_out);
3332 }
3333
3334 if (transport_is_bbb()) {
3335 if (fsg->ep0_req_tag == exception_req_tag)
3336 ep0_queue(fsg); // Complete the status stage
3337
3338 } else if (transport_is_cbi())
3339 send_status(fsg); // Status by interrupt pipe
3340
3341 /* Technically this should go here, but it would only be
3342 * a waste of time. Ditto for the INTERFACE_CHANGE and
3343 * CONFIG_CHANGE cases. */
3344 // for (i = 0; i < fsg->nluns; ++i)
3345 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3346 break;
3347
3348 case FSG_STATE_INTERFACE_CHANGE:
3349 rc = do_set_interface(fsg, 0);
3350 if (fsg->ep0_req_tag != exception_req_tag)
3351 break;
3352 if (rc != 0) // STALL on errors
3353 fsg_set_halt(fsg, fsg->ep0);
3354 else // Complete the status stage
3355 ep0_queue(fsg);
3356 break;
3357
3358 case FSG_STATE_CONFIG_CHANGE:
3359 rc = do_set_config(fsg, new_config);
3360 if (fsg->ep0_req_tag != exception_req_tag)
3361 break;
3362 if (rc != 0) // STALL on errors
3363 fsg_set_halt(fsg, fsg->ep0);
3364 else // Complete the status stage
3365 ep0_queue(fsg);
3366 break;
3367
3368 case FSG_STATE_DISCONNECT:
3369 fsync_all(fsg);
3370 do_set_config(fsg, 0); // Unconfigured state
3371 break;
3372
3373 case FSG_STATE_EXIT:
3374 case FSG_STATE_TERMINATED:
3375 do_set_config(fsg, 0); // Free resources
3376 spin_lock_irq(&fsg->lock);
3377 fsg->state = FSG_STATE_TERMINATED; // Stop the thread
3378 spin_unlock_irq(&fsg->lock);
3379 break;
3380 }
3381}
3382
3383
3384/*-------------------------------------------------------------------------*/
3385
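/* The kernel thread that does all the real work: it loops fetching a
 * command, running the data phase, and sending status, and drops into
 * handle_exception() whenever a signal or a reset/configuration event
 * interrupts that sequence. State transitions are made under fsg->lock
 * so they can't race with an incoming exception. */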
3386static int fsg_main_thread(void *fsg_)
3387{
3388 struct fsg_dev *fsg = (struct fsg_dev *) fsg_;
3389
3390 fsg->thread_task = current;
3391
3392 /* Release all our userspace resources */
3393 daemonize("file-storage-gadget");
3394
3395 /* Allow the thread to be killed by a signal, but set the signal mask
3396 * to block everything but INT, TERM, KILL, and USR1. */
3397 siginitsetinv(&fsg->thread_signal_mask, sigmask(SIGINT) |
3398 sigmask(SIGTERM) | sigmask(SIGKILL) |
3399 sigmask(SIGUSR1));
3400 sigprocmask(SIG_SETMASK, &fsg->thread_signal_mask, NULL);
3401
3402 /* Arrange for userspace references to be interpreted as kernel
3403 * pointers. That way we can pass a kernel pointer to a routine
3404 * that expects a __user pointer and it will work okay. */
3405 set_fs(get_ds());
3406
3407 /* Wait for the gadget registration to finish up */
3408 wait_for_completion(&fsg->thread_notifier);
3409
3410 /* The main loop */
3411 while (fsg->state != FSG_STATE_TERMINATED) {
3412 if (exception_in_progress(fsg) || signal_pending(current)) {
3413 handle_exception(fsg);
3414 continue;
3415 }
3416
3417 if (!fsg->running) {
3418 sleep_thread(fsg);
3419 continue;
3420 }
3421
3422 if (get_next_command(fsg))
3423 continue;
3424
3425 spin_lock_irq(&fsg->lock);
3426 if (!exception_in_progress(fsg))
3427 fsg->state = FSG_STATE_DATA_PHASE;
3428 spin_unlock_irq(&fsg->lock);
3429
3430 if (do_scsi_command(fsg) || finish_reply(fsg))
3431 continue;
3432
3433 spin_lock_irq(&fsg->lock);
3434 if (!exception_in_progress(fsg))
3435 fsg->state = FSG_STATE_STATUS_PHASE;
3436 spin_unlock_irq(&fsg->lock);
3437
3438 if (send_status(fsg))
3439 continue;
3440
3441 spin_lock_irq(&fsg->lock);
3442 if (!exception_in_progress(fsg))
3443 fsg->state = FSG_STATE_IDLE;
3444 spin_unlock_irq(&fsg->lock);
3445 }
3446
3447 fsg->thread_task = NULL;
3448 flush_signals(current);
3449
3450 /* In case we are exiting because of a signal, unregister the
3451 * gadget driver and close the backing file. */
3452 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) {
3453 usb_gadget_unregister_driver(&fsg_driver);
3454 close_all_backing_files(fsg);
3455 }
3456
3457 /* Let the unbind and cleanup routines know the thread has exited */
3458 complete_and_exit(&fsg->thread_notifier, 0);
3459}
3460
3461
3462/*-------------------------------------------------------------------------*/
3463
3464/* If the next two routines are called while the gadget is registered,
3465 * the caller must own fsg->filesem for writing. */
3466
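/* Open the backing storage, falling back to read-only access whenever
 * read/write access isn't possible. The file (or block device) is
 * treated as an array of 512-byte sectors, so anything smaller than one
 * sector is rejected with -ETOOSMALL. */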
3467static int open_backing_file(struct lun *curlun, const char *filename)
3468{
3469 int ro;
3470 struct file *filp = NULL;
3471 int rc = -EINVAL;
3472 struct inode *inode = NULL;
3473 loff_t size;
3474 loff_t num_sectors;
3475
3476 /* R/W if we can, R/O if we must */
3477 ro = curlun->ro;
3478 if (!ro) {
3479 filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
3480 if (-EROFS == PTR_ERR(filp))
3481 ro = 1;
3482 }
3483 if (ro)
3484 filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
3485 if (IS_ERR(filp)) {
3486 LINFO(curlun, "unable to open backing file: %s\n", filename);
3487 return PTR_ERR(filp);
3488 }
3489
3490 if (!(filp->f_mode & FMODE_WRITE))
3491 ro = 1;
3492
3493 if (filp->f_dentry)
3494 inode = filp->f_dentry->d_inode;
3495 if (inode && S_ISBLK(inode->i_mode)) {
3496 if (bdev_read_only(inode->i_bdev))
3497 ro = 1;
3498 } else if (!inode || !S_ISREG(inode->i_mode)) {
3499 LINFO(curlun, "invalid file type: %s\n", filename);
3500 goto out;
3501 }
3502
3503 /* If we can't read the file, it's no good.
3504 * If we can't write the file, use it read-only. */
3505 if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
3506 LINFO(curlun, "file not readable: %s\n", filename);
3507 goto out;
3508 }
3509 if (!(filp->f_op->write || filp->f_op->aio_write))
3510 ro = 1;
3511
3512 size = i_size_read(inode->i_mapping->host);
3513 if (size < 0) {
3514 LINFO(curlun, "unable to find file size: %s\n", filename);
3515 rc = (int) size;
3516 goto out;
3517 }
3518 num_sectors = size >> 9; // File size in 512-byte sectors
3519 if (num_sectors == 0) {
3520 LINFO(curlun, "file too small: %s\n", filename);
3521 rc = -ETOOSMALL;
3522 goto out;
3523 }
3524
3525 get_file(filp);
3526 curlun->ro = ro;
3527 curlun->filp = filp;
3528 curlun->file_length = size;
3529 curlun->num_sectors = num_sectors;
3530 LDBG(curlun, "open backing file: %s\n", filename);
3531 rc = 0;
3532
3533out:
3534 filp_close(filp, current->files);
3535 return rc;
3536}
3537
3538
3539static void close_backing_file(struct lun *curlun)
3540{
3541 if (curlun->filp) {
3542 LDBG(curlun, "close backing file\n");
3543 fput(curlun->filp);
3544 curlun->filp = NULL;
3545 }
3546}
3547
3548static void close_all_backing_files(struct fsg_dev *fsg)
3549{
3550 int i;
3551
3552 for (i = 0; i < fsg->nluns; ++i)
3553 close_backing_file(&fsg->luns[i]);
3554}
3555
3556
3557static ssize_t show_ro(struct device *dev, char *buf)
3558{
3559 struct lun *curlun = dev_to_lun(dev);
3560
3561 return sprintf(buf, "%d\n", curlun->ro);
3562}
3563
3564static ssize_t show_file(struct device *dev, char *buf)
3565{
3566 struct lun *curlun = dev_to_lun(dev);
3567 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev);
3568 char *p;
3569 ssize_t rc;
3570
3571 down_read(&fsg->filesem);
3572 if (backing_file_is_open(curlun)) { // Get the complete pathname
3573 p = d_path(curlun->filp->f_dentry, curlun->filp->f_vfsmnt,
3574 buf, PAGE_SIZE - 1);
3575 if (IS_ERR(p))
3576 rc = PTR_ERR(p);
3577 else {
3578 rc = strlen(p);
3579 memmove(buf, p, rc);
3580 buf[rc] = '\n'; // Add a newline
3581 buf[++rc] = 0;
3582 }
3583 } else { // No file, return 0 bytes
3584 *buf = 0;
3585 rc = 0;
3586 }
3587 up_read(&fsg->filesem);
3588 return rc;
3589}
3590
3591
3592static ssize_t store_ro(struct device *dev, const char *buf, size_t count)
3593{
3594 ssize_t rc = count;
3595 struct lun *curlun = dev_to_lun(dev);
3596 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev);
3597 int i;
3598
3599 if (sscanf(buf, "%d", &i) != 1)
3600 return -EINVAL;
3601
3602 /* Allow the write-enable status to change only while the backing file
3603 * is closed. */
3604 down_read(&fsg->filesem);
3605 if (backing_file_is_open(curlun)) {
3606 LDBG(curlun, "read-only status change prevented\n");
3607 rc = -EBUSY;
3608 } else {
3609 curlun->ro = !!i;
3610 LDBG(curlun, "read-only status set to %d\n", curlun->ro);
3611 }
3612 up_read(&fsg->filesem);
3613 return rc;
3614}
3615
3616static ssize_t store_file(struct device *dev, const char *buf, size_t count)
3617{
3618 struct lun *curlun = dev_to_lun(dev);
3619 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev);
3620 int rc = 0;
3621
3622 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
3623 LDBG(curlun, "eject attempt prevented\n");
3624 return -EBUSY; // "Door is locked"
3625 }
3626
3627 /* Remove a trailing newline */
3628 if (count > 0 && buf[count-1] == '\n')
3629 ((char *) buf)[count-1] = 0; // Ugh!
3630
3631 /* Eject current medium */
3632 down_write(&fsg->filesem);
3633 if (backing_file_is_open(curlun)) {
3634 close_backing_file(curlun);
3635 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
3636 }
3637
3638 /* Load new medium */
3639 if (count > 0 && buf[0]) {
3640 rc = open_backing_file(curlun, buf);
3641 if (rc == 0)
3642 curlun->unit_attention_data =
3643 SS_NOT_READY_TO_READY_TRANSITION;
3644 }
3645 up_write(&fsg->filesem);
3646 return (rc < 0 ? rc : count);
3647}
3648
3649
3650/* The write permissions and store_xxx pointers are set in fsg_bind() */
3651static DEVICE_ATTR(ro, 0444, show_ro, NULL);
3652static DEVICE_ATTR(file, 0444, show_file, NULL);
3653
3654
3655/*-------------------------------------------------------------------------*/
3656
3657static void lun_release(struct device *dev)
3658{
3659 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev);
3660
3661 complete(&fsg->lun_released);
3662}
3663
3664static void fsg_unbind(struct usb_gadget *gadget)
3665{
3666 struct fsg_dev *fsg = get_gadget_data(gadget);
3667 int i;
3668 struct lun *curlun;
3669 struct usb_request *req = fsg->ep0req;
3670
3671 DBG(fsg, "unbind\n");
3672 clear_bit(REGISTERED, &fsg->atomic_bitflags);
3673
3674 /* Unregister the sysfs attribute files and the LUNs */
3675 init_completion(&fsg->lun_released);
3676 for (i = 0; i < fsg->nluns; ++i) {
3677 curlun = &fsg->luns[i];
3678 if (curlun->registered) {
3679 device_remove_file(&curlun->dev, &dev_attr_ro);
3680 device_remove_file(&curlun->dev, &dev_attr_file);
3681 device_unregister(&curlun->dev);
3682 wait_for_completion(&fsg->lun_released);
3683 curlun->registered = 0;
3684 }
3685 }
3686
3687 /* If the thread isn't already dead, tell it to exit now */
3688 if (fsg->state != FSG_STATE_TERMINATED) {
3689 raise_exception(fsg, FSG_STATE_EXIT);
3690 wait_for_completion(&fsg->thread_notifier);
3691
3692 /* The cleanup routine waits for this completion also */
3693 complete(&fsg->thread_notifier);
3694 }
3695
3696 /* Free the data buffers */
3697 for (i = 0; i < NUM_BUFFERS; ++i) {
3698 struct fsg_buffhd *bh = &fsg->buffhds[i];
3699
3700 if (bh->buf)
3701 usb_ep_free_buffer(fsg->bulk_in, bh->buf, bh->dma,
3702 mod_data.buflen);
3703 }
3704
3705 /* Free the request and buffer for endpoint 0 */
3706 if (req) {
3707 if (req->buf)
3708 usb_ep_free_buffer(fsg->ep0, req->buf,
3709 req->dma, EP0_BUFSIZE);
3710 usb_ep_free_request(fsg->ep0, req);
3711 }
3712
3713 set_gadget_data(gadget, NULL);
3714}
3715
3716
3717static int __init check_parameters(struct fsg_dev *fsg)
3718{
3719 int prot;
3720
3721 /* Store the default values */
3722 mod_data.transport_type = USB_PR_BULK;
3723 mod_data.transport_name = "Bulk-only";
3724 mod_data.protocol_type = USB_SC_SCSI;
3725 mod_data.protocol_name = "Transparent SCSI";
3726
3727 if (gadget_is_sh(fsg->gadget))
3728 mod_data.can_stall = 0;
3729
3730 if (mod_data.release == 0xffff) { // Parameter wasn't set
3731 if (gadget_is_net2280(fsg->gadget))
3732 mod_data.release = 0x0301;
3733 else if (gadget_is_dummy(fsg->gadget))
3734 mod_data.release = 0x0302;
3735 else if (gadget_is_pxa(fsg->gadget))
3736 mod_data.release = 0x0303;
3737 else if (gadget_is_sh(fsg->gadget))
3738 mod_data.release = 0x0304;
3739
3740 /* The sa1100 controller is not supported */
3741
3742 else if (gadget_is_goku(fsg->gadget))
3743 mod_data.release = 0x0306;
3744 else if (gadget_is_mq11xx(fsg->gadget))
3745 mod_data.release = 0x0307;
3746 else if (gadget_is_omap(fsg->gadget))
3747 mod_data.release = 0x0308;
3748 else if (gadget_is_lh7a40x(fsg->gadget))
3749 mod_data.release = 0x0309;
3750 else if (gadget_is_n9604(fsg->gadget))
3751 mod_data.release = 0x0310;
3752 else if (gadget_is_pxa27x(fsg->gadget))
3753 mod_data.release = 0x0311;
3754 else if (gadget_is_s3c2410(fsg->gadget))
3755 mod_data.release = 0x0312;
3756 else if (gadget_is_at91(fsg->gadget))
3757 mod_data.release = 0x0313;
3758 else {
3759 WARN(fsg, "controller '%s' not recognized\n",
3760 fsg->gadget->name);
3761 mod_data.release = 0x0399;
3762 }
3763 }
3764
3765 prot = simple_strtol(mod_data.protocol_parm, NULL, 0);
3766
3767#ifdef CONFIG_USB_FILE_STORAGE_TEST
3768 if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
3769 ; // Use default setting
3770 } else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
3771 mod_data.transport_type = USB_PR_CB;
3772 mod_data.transport_name = "Control-Bulk";
3773 } else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
3774 mod_data.transport_type = USB_PR_CBI;
3775 mod_data.transport_name = "Control-Bulk-Interrupt";
3776 } else {
3777 ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
3778 return -EINVAL;
3779 }
3780
3781 if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
3782 prot == USB_SC_SCSI) {
3783 ; // Use default setting
3784 } else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
3785 prot == USB_SC_RBC) {
3786 mod_data.protocol_type = USB_SC_RBC;
3787 mod_data.protocol_name = "RBC";
3788 } else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
3789 strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
3790 prot == USB_SC_8020) {
3791 mod_data.protocol_type = USB_SC_8020;
3792 mod_data.protocol_name = "8020i (ATAPI)";
3793 } else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
3794 prot == USB_SC_QIC) {
3795 mod_data.protocol_type = USB_SC_QIC;
3796 mod_data.protocol_name = "QIC-157";
3797 } else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
3798 prot == USB_SC_UFI) {
3799 mod_data.protocol_type = USB_SC_UFI;
3800 mod_data.protocol_name = "UFI";
3801 } else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
3802 prot == USB_SC_8070) {
3803 mod_data.protocol_type = USB_SC_8070;
3804 mod_data.protocol_name = "8070i";
3805 } else {
3806 ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
3807 return -EINVAL;
3808 }
3809
3810 mod_data.buflen &= PAGE_CACHE_MASK;
3811 if (mod_data.buflen <= 0) {
3812 ERROR(fsg, "invalid buflen\n");
3813 return -ETOOSMALL;
3814 }
3815#endif /* CONFIG_USB_FILE_STORAGE_TEST */
3816
3817 return 0;
3818}
3819
3820
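/* Bind to the controller driver: validate the module parameters, create
 * and register the LUNs, open their backing files, grab the bulk (and,
 * for CBI, interrupt) endpoints, patch the descriptors to match, allocate
 * the ep0 request and the data buffers, and finally start the worker
 * thread. Any failure unwinds through fsg_unbind(). */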
3821static int __init fsg_bind(struct usb_gadget *gadget)
3822{
3823 struct fsg_dev *fsg = the_fsg;
3824 int rc;
3825 int i;
3826 struct lun *curlun;
3827 struct usb_ep *ep;
3828 struct usb_request *req;
3829 char *pathbuf, *p;
3830
3831 fsg->gadget = gadget;
3832 set_gadget_data(gadget, fsg);
3833 fsg->ep0 = gadget->ep0;
3834 fsg->ep0->driver_data = fsg;
3835
3836 if ((rc = check_parameters(fsg)) != 0)
3837 goto out;
3838
3839 if (mod_data.removable) { // Enable the store_xxx attributes
3840 dev_attr_ro.attr.mode = dev_attr_file.attr.mode = 0644;
3841 dev_attr_ro.store = store_ro;
3842 dev_attr_file.store = store_file;
3843 }
3844
3845 /* Find out how many LUNs there should be */
3846 i = mod_data.nluns;
3847 if (i == 0)
3848 i = max(mod_data.num_filenames, 1);
3849 if (i > MAX_LUNS) {
3850 ERROR(fsg, "invalid number of LUNs: %d\n", i);
3851 rc = -EINVAL;
3852 goto out;
3853 }
3854
3855 /* Create the LUNs, open their backing files, and register the
3856 * LUN devices in sysfs. */
3857 fsg->luns = kmalloc(i * sizeof(struct lun), GFP_KERNEL);
3858 if (!fsg->luns) {
3859 rc = -ENOMEM;
3860 goto out;
3861 }
3862 memset(fsg->luns, 0, i * sizeof(struct lun));
3863 fsg->nluns = i;
3864
3865 for (i = 0; i < fsg->nluns; ++i) {
3866 curlun = &fsg->luns[i];
3867 curlun->ro = ro[i];
3868 curlun->dev.parent = &gadget->dev;
3869 curlun->dev.driver = &fsg_driver.driver;
3870 dev_set_drvdata(&curlun->dev, fsg);
3871 snprintf(curlun->dev.bus_id, BUS_ID_SIZE,
3872 "%s-lun%d", gadget->dev.bus_id, i);
3873
3874 if ((rc = device_register(&curlun->dev)) != 0)
3875 INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
3876 else {
3877 curlun->registered = 1;
3878 curlun->dev.release = lun_release;
3879 device_create_file(&curlun->dev, &dev_attr_ro);
3880 device_create_file(&curlun->dev, &dev_attr_file);
3881 }
3882
3883 if (file[i] && *file[i]) {
3884 if ((rc = open_backing_file(curlun, file[i])) != 0)
3885 goto out;
3886 } else if (!mod_data.removable) {
3887 ERROR(fsg, "no file given for LUN%d\n", i);
3888 rc = -EINVAL;
3889 goto out;
3890 }
3891 }
3892
3893 /* Find all the endpoints we will use */
3894 usb_ep_autoconfig_reset(gadget);
3895 ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
3896 if (!ep)
3897 goto autoconf_fail;
3898 ep->driver_data = fsg; // claim the endpoint
3899 fsg->bulk_in = ep;
3900
3901 ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc);
3902 if (!ep)
3903 goto autoconf_fail;
3904 ep->driver_data = fsg; // claim the endpoint
3905 fsg->bulk_out = ep;
3906
3907 if (transport_is_cbi()) {
3908 ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc);
3909 if (!ep)
3910 goto autoconf_fail;
3911 ep->driver_data = fsg; // claim the endpoint
3912 fsg->intr_in = ep;
3913 }
3914
3915 /* Fix up the descriptors */
3916 device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
3917 device_desc.idVendor = cpu_to_le16(mod_data.vendor);
3918 device_desc.idProduct = cpu_to_le16(mod_data.product);
3919 device_desc.bcdDevice = cpu_to_le16(mod_data.release);
3920
3921 i = (transport_is_cbi() ? 3 : 2); // Number of endpoints
3922 intf_desc.bNumEndpoints = i;
3923 intf_desc.bInterfaceSubClass = mod_data.protocol_type;
3924 intf_desc.bInterfaceProtocol = mod_data.transport_type;
3925 fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3926
3927#ifdef CONFIG_USB_GADGET_DUALSPEED
3928 hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3929
3930 /* Assume ep0 uses the same maxpacket value for both speeds */
3931 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
3932
3933 /* Assume that all endpoint addresses are the same for both speeds */
3934 hs_bulk_in_desc.bEndpointAddress = fs_bulk_in_desc.bEndpointAddress;
3935 hs_bulk_out_desc.bEndpointAddress = fs_bulk_out_desc.bEndpointAddress;
3936 hs_intr_in_desc.bEndpointAddress = fs_intr_in_desc.bEndpointAddress;
3937#endif
3938
3939 if (gadget->is_otg) {
3940 otg_desc.bmAttributes |= USB_OTG_HNP;
3941 config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
3942 }
3943
3944 rc = -ENOMEM;
3945
3946 /* Allocate the request and buffer for endpoint 0 */
3947 fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
3948 if (!req)
3949 goto out;
3950 req->buf = usb_ep_alloc_buffer(fsg->ep0, EP0_BUFSIZE,
3951 &req->dma, GFP_KERNEL);
3952 if (!req->buf)
3953 goto out;
3954 req->complete = ep0_complete;
3955
3956 /* Allocate the data buffers */
3957 for (i = 0; i < NUM_BUFFERS; ++i) {
3958 struct fsg_buffhd *bh = &fsg->buffhds[i];
3959
3960 bh->buf = usb_ep_alloc_buffer(fsg->bulk_in, mod_data.buflen,
3961 &bh->dma, GFP_KERNEL);
3962 if (!bh->buf)
3963 goto out;
3964 bh->next = bh + 1;
3965 }
3966 fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
3967
3968 /* This should reflect the actual gadget power source */
3969 usb_gadget_set_selfpowered(gadget);
3970
3971 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
3972 system_utsname.sysname, system_utsname.release,
3973 gadget->name);
3974
3975 /* On a real device, serial[] would be loaded from permanent
3976 * storage. We just encode it from the driver version string. */
3977 for (i = 0; i < sizeof(serial) - 2; i += 2) {
3978 unsigned char c = DRIVER_VERSION[i / 2];
3979
3980 if (!c)
3981 break;
3982 sprintf(&serial[i], "%02X", c);
3983 }
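For illustration, a worked example of the encoding above (the version string here is hypothetical): if DRIVER_VERSION were "1.0", the loop would leave serial[] holding "312E30" -- '1' = 0x31, '.' = 0x2E, '0' = 0x30 -- two uppercase hex digits per character, NUL-terminated by the final sprintf().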
3984
3985 if ((rc = kernel_thread(fsg_main_thread, fsg, (CLONE_VM | CLONE_FS |
3986 CLONE_FILES))) < 0)
3987 goto out;
3988 fsg->thread_pid = rc;
3989
3990 INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
3991 INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
3992
3993 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
3994 for (i = 0; i < fsg->nluns; ++i) {
3995 curlun = &fsg->luns[i];
3996 if (backing_file_is_open(curlun)) {
3997 p = NULL;
3998 if (pathbuf) {
3999 p = d_path(curlun->filp->f_dentry,
4000 curlun->filp->f_vfsmnt,
4001 pathbuf, PATH_MAX);
4002 if (IS_ERR(p))
4003 p = NULL;
4004 }
4005 LINFO(curlun, "ro=%d, file: %s\n",
4006 curlun->ro, (p ? p : "(error)"));
4007 }
4008 }
4009 kfree(pathbuf);
4010
4011 DBG(fsg, "transport=%s (x%02x)\n",
4012 mod_data.transport_name, mod_data.transport_type);
4013 DBG(fsg, "protocol=%s (x%02x)\n",
4014 mod_data.protocol_name, mod_data.protocol_type);
4015 DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
4016 mod_data.vendor, mod_data.product, mod_data.release);
4017 DBG(fsg, "removable=%d, stall=%d, buflen=%u\n",
4018 mod_data.removable, mod_data.can_stall,
4019 mod_data.buflen);
4020 DBG(fsg, "I/O thread pid: %d\n", fsg->thread_pid);
4021 return 0;
4022
4023autoconf_fail:
4024 ERROR(fsg, "unable to autoconfigure all endpoints\n");
4025 rc = -ENOTSUPP;
4026
4027out:
4028 fsg->state = FSG_STATE_TERMINATED; // The thread is dead
4029 fsg_unbind(gadget);
4030 close_all_backing_files(fsg);
4031 return rc;
4032}
4033
4034
4035/*-------------------------------------------------------------------------*/
4036
4037static void fsg_suspend(struct usb_gadget *gadget)
4038{
4039 struct fsg_dev *fsg = get_gadget_data(gadget);
4040
4041 DBG(fsg, "suspend\n");
4042 set_bit(SUSPENDED, &fsg->atomic_bitflags);
4043}
4044
4045static void fsg_resume(struct usb_gadget *gadget)
4046{
4047 struct fsg_dev *fsg = get_gadget_data(gadget);
4048
4049 DBG(fsg, "resume\n");
4050 clear_bit(SUSPENDED, &fsg->atomic_bitflags);
4051}
4052
4053
4054/*-------------------------------------------------------------------------*/
4055
4056static struct usb_gadget_driver fsg_driver = {
4057#ifdef CONFIG_USB_GADGET_DUALSPEED
4058 .speed = USB_SPEED_HIGH,
4059#else
4060 .speed = USB_SPEED_FULL,
4061#endif
4062 .function = (char *) longname,
4063 .bind = fsg_bind,
4064 .unbind = fsg_unbind,
4065 .disconnect = fsg_disconnect,
4066 .setup = fsg_setup,
4067 .suspend = fsg_suspend,
4068 .resume = fsg_resume,
4069
4070 .driver = {
4071 .name = (char *) shortname,
4072 // .release = ...
4073 // .suspend = ...
4074 // .resume = ...
4075 },
4076};
4077
4078
4079static int __init fsg_alloc(void)
4080{
4081 struct fsg_dev *fsg;
4082
4083 fsg = kmalloc(sizeof *fsg, GFP_KERNEL);
4084 if (!fsg)
4085 return -ENOMEM;
4086 memset(fsg, 0, sizeof *fsg);
4087 spin_lock_init(&fsg->lock);
4088 init_rwsem(&fsg->filesem);
4089 init_waitqueue_head(&fsg->thread_wqh);
4090 init_completion(&fsg->thread_notifier);
4091
4092 the_fsg = fsg;
4093 return 0;
4094}
4095
4096
4097static void fsg_free(struct fsg_dev *fsg)
4098{
4099 kfree(fsg->luns);
4100 kfree(fsg);
4101}
4102
4103
4104static int __init fsg_init(void)
4105{
4106 int rc;
4107 struct fsg_dev *fsg;
4108
4109 if ((rc = fsg_alloc()) != 0)
4110 return rc;
4111 fsg = the_fsg;
4112 if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0) {
4113 fsg_free(fsg);
4114 return rc;
4115 }
4116 set_bit(REGISTERED, &fsg->atomic_bitflags);
4117
4118 /* Tell the thread to start working */
4119 complete(&fsg->thread_notifier);
4120 return 0;
4121}
4122module_init(fsg_init);
4123
4124
4125static void __exit fsg_cleanup(void)
4126{
4127 struct fsg_dev *fsg = the_fsg;
4128
4129 /* Unregister the driver iff the thread hasn't already done so */
4130 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
4131 usb_gadget_unregister_driver(&fsg_driver);
4132
4133 /* Wait for the thread to finish up */
4134 wait_for_completion(&fsg->thread_notifier);
4135
4136 close_all_backing_files(fsg);
4137 fsg_free(fsg);
4138}
4139module_exit(fsg_cleanup);
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
new file mode 100644
index 000000000000..ea2eb52c766d
--- /dev/null
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -0,0 +1,92 @@
1/*
2 * USB device controllers have lots of quirks. Use these macros in
3 * gadget drivers or other code that needs to deal with them, and which
4 * autoconfigures instead of using early binding to the hardware.
5 *
6 * This could eventually work like the ARM mach_is_*() stuff, driven by
7 * some config file that gets updated as new hardware is supported.
8 *
9 * NOTE: some of these controller drivers may not be available yet.
10 */
11#ifdef CONFIG_USB_GADGET_NET2280
12#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
13#else
14#define gadget_is_net2280(g) 0
15#endif
16
17#ifdef CONFIG_USB_GADGET_DUMMY_HCD
18#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name)
19#else
20#define gadget_is_dummy(g) 0
21#endif
22
23#ifdef CONFIG_USB_GADGET_PXA2XX
24#define gadget_is_pxa(g) !strcmp("pxa2xx_udc", (g)->name)
25#else
26#define gadget_is_pxa(g) 0
27#endif
28
29#ifdef CONFIG_USB_GADGET_GOKU
30#define gadget_is_goku(g) !strcmp("goku_udc", (g)->name)
31#else
32#define gadget_is_goku(g) 0
33#endif
34
35#ifdef CONFIG_USB_GADGET_SUPERH
36#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
37#else
38#define gadget_is_sh(g) 0
39#endif
40
41#ifdef CONFIG_USB_GADGET_SA1100
42#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
43#else
44#define gadget_is_sa1100(g) 0
45#endif
46
47#ifdef CONFIG_USB_GADGET_LH7A40X
48#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
49#else
50#define gadget_is_lh7a40x(g) 0
51#endif
52
53#ifdef CONFIG_USB_GADGET_MQ11XX
54#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
55#else
56#define gadget_is_mq11xx(g) 0
57#endif
58
59#ifdef CONFIG_USB_GADGET_OMAP
60#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
61#else
62#define gadget_is_omap(g) 0
63#endif
64
65#ifdef CONFIG_USB_GADGET_N9604
66#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
67#else
68#define gadget_is_n9604(g) 0
69#endif
70
71#ifdef CONFIG_USB_GADGET_PXA27X
72#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
73#else
74#define gadget_is_pxa27x(g) 0
75#endif
76
77#ifdef CONFIG_USB_GADGET_S3C2410
78#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
79#else
80#define gadget_is_s3c2410(g) 0
81#endif
82
83#ifdef CONFIG_USB_GADGET_AT91
84#define gadget_is_at91(g) !strcmp("at91_udc", (g)->name)
85#else
86#define gadget_is_at91(g) 0
87#endif
88
89// CONFIG_USB_GADGET_SX2
90// CONFIG_USB_GADGET_AU1X00
91// ...
92
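For illustration, a minimal sketch of how a gadget driver might consume these helpers at bind() time; the function name and the bcdDevice values below are placeholders, not anything defined by this header:

/* sketch only: pick a distinct (placeholder) bcdDevice per controller */
static void sketch_set_bcd_device(struct usb_gadget *gadget,
		struct usb_device_descriptor *desc)
{
	if (gadget_is_net2280(gadget))
		desc->bcdDevice = __constant_cpu_to_le16(0x0201);
	else if (gadget_is_dummy(gadget))
		desc->bcdDevice = __constant_cpu_to_le16(0x0202);
	else if (gadget_is_pxa(gadget))
		desc->bcdDevice = __constant_cpu_to_le16(0x0203);
	else
		/* unrecognized controller: keep the driver's default */
		pr_debug("using default bcdDevice for %s\n", gadget->name);
}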
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
new file mode 100644
index 000000000000..005db7cca292
--- /dev/null
+++ b/drivers/usb/gadget/goku_udc.c
@@ -0,0 +1,1984 @@
1/*
2 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
3 *
4 * Copyright (C) 2000-2002 Lineo
5 * by Stuart Lynne, Tom Rushworth, and Bruce Balden
6 * Copyright (C) 2002 Toshiba Corporation
7 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14/*
15 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
16 *
17 * - Endpoint numbering is fixed: ep{1,2,3}-bulk
18 * - Gadget drivers can choose ep maxpacket (8/16/32/64)
19 * - Gadget drivers can choose direction (IN, OUT)
20 * - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
21 */
22
23#undef DEBUG
24// #define VERBOSE /* extra debug messages (success too) */
25// #define USB_TRACE /* packet-level success messages */
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/delay.h>
32#include <linux/ioport.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/smp_lock.h>
36#include <linux/errno.h>
37#include <linux/init.h>
38#include <linux/timer.h>
39#include <linux/list.h>
40#include <linux/interrupt.h>
41#include <linux/proc_fs.h>
42#include <linux/device.h>
43#include <linux/usb_ch9.h>
44#include <linux/usb_gadget.h>
45
46#include <asm/byteorder.h>
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/system.h>
50#include <asm/unaligned.h>
51
52
53#include "goku_udc.h"
54
55#define DRIVER_DESC "TC86C001 USB Device Controller"
56#define DRIVER_VERSION "30-Oct 2003"
57
58#define DMA_ADDR_INVALID (~(dma_addr_t)0)
59
60static const char driver_name [] = "goku_udc";
61static const char driver_desc [] = DRIVER_DESC;
62
63MODULE_AUTHOR("source@mvista.com");
64MODULE_DESCRIPTION(DRIVER_DESC);
65MODULE_LICENSE("GPL");
66
67
68/*
69 * IN dma behaves ok under testing, though the IN-dma abort paths don't
70 * seem to behave quite as expected. Used by default.
71 *
72 * OUT dma documents design problems handling the common "short packet"
73 * transfer termination policy; it couldn't be enabled by default, even
74 * if the OUT-dma abort problems had a resolution.
75 */
76static unsigned use_dma = 1;
77
78#if 0
79//#include <linux/moduleparam.h>
80/* "modprobe goku_udc use_dma=1" etc
81 * 0 to disable dma
82 * 1 to use IN dma only (normal operation)
83 * 2 to use IN and OUT dma
84 */
85module_param(use_dma, uint, S_IRUGO);
86#endif
87
88/*-------------------------------------------------------------------------*/
89
90static void nuke(struct goku_ep *, int status);
91
92static inline void
93command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
94{
95 writel(COMMAND_EP(epnum) | command, &regs->Command);
96 udelay(300);
97}
98
99static int
100goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
101{
102 struct goku_udc *dev;
103 struct goku_ep *ep;
104 u32 mode;
105 u16 max;
106 unsigned long flags;
107
108 ep = container_of(_ep, struct goku_ep, ep);
109 if (!_ep || !desc || ep->desc
110 || desc->bDescriptorType != USB_DT_ENDPOINT)
111 return -EINVAL;
112 dev = ep->dev;
113 if (ep == &dev->ep[0])
114 return -EINVAL;
115 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
116 return -ESHUTDOWN;
117 if (ep->num != (desc->bEndpointAddress & 0x0f))
118 return -EINVAL;
119
120 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
121 case USB_ENDPOINT_XFER_BULK:
122 case USB_ENDPOINT_XFER_INT:
123 break;
124 default:
125 return -EINVAL;
126 }
127
128 if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
129 != EPxSTATUS_EP_INVALID)
130 return -EBUSY;
131
132 /* enabling the no-toggle interrupt mode would need an api hook */
133 mode = 0;
134 max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
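	/* the fall-through below is deliberate: maxpacket 64/32/16/8 maps to
	 * a size code of 3/2/1/0, which "mode <<= 3" then shifts into place
	 * before the transfer-type and direction bits are OR'd in.
	 */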
135 switch (max) {
136 case 64: mode++;
137 case 32: mode++;
138 case 16: mode++;
139 case 8: mode <<= 3;
140 break;
141 default:
142 return -EINVAL;
143 }
144 mode |= 2 << 1; /* bulk, or intr-with-toggle */
145
146 /* ep1/ep2 dma direction is chosen early; it works in the other
147 * direction, with pio. be cautious with out-dma.
148 */
149 ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
150 if (ep->is_in) {
151 mode |= 1;
152 ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
153 } else {
154 ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
155 if (ep->dma)
156 DBG(dev, "%s out-dma hides short packets\n",
157 ep->ep.name);
158 }
159
160 spin_lock_irqsave(&ep->dev->lock, flags);
161
162 /* ep1 and ep2 can do double buffering and/or dma */
163 if (ep->num < 3) {
164 struct goku_udc_regs __iomem *regs = ep->dev->regs;
165 u32 tmp;
166
167 /* double buffer except (for now) with pio in */
168 tmp = ((ep->dma || !ep->is_in)
169 ? 0x10 /* double buffered */
170 : 0x11 /* single buffer */
171 ) << ep->num;
172 tmp |= readl(&regs->EPxSingle);
173 writel(tmp, &regs->EPxSingle);
174
175 tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
176 tmp |= readl(&regs->EPxBCS);
177 writel(tmp, &regs->EPxBCS);
178 }
179 writel(mode, ep->reg_mode);
180 command(ep->dev->regs, COMMAND_RESET, ep->num);
181 ep->ep.maxpacket = max;
182 ep->stopped = 0;
183 ep->desc = desc;
184 spin_unlock_irqrestore(&ep->dev->lock, flags);
185
186 DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
187 ep->is_in ? "IN" : "OUT",
188 ep->dma ? "dma" : "pio",
189 max);
190
191 return 0;
192}
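For illustration, a descriptor that satisfies the checks above: bulk type, a maxpacket of 8/16/32/64, and an endpoint number matching the controller's fixed ep1/ep2/ep3 numbering. This is only a sketch; the choice of ep2 and of 64 bytes is arbitrary:

static struct usb_endpoint_descriptor sketch_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN | 2,	/* must equal ep->num */
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(64),
};

static int sketch_enable_bulk_in(struct usb_ep *ep)
{
	/* ep here would be "ep2-bulk", taken from gadget->ep_list */
	return usb_ep_enable(ep, &sketch_bulk_in_desc);
}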
193
194static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
195{
196 struct goku_udc *dev = ep->dev;
197
198 if (regs) {
199 command(regs, COMMAND_INVALID, ep->num);
200 if (ep->num) {
201 if (ep->num == UDC_MSTWR_ENDPOINT)
202 dev->int_enable &= ~(INT_MSTWREND
203 |INT_MSTWRTMOUT);
204 else if (ep->num == UDC_MSTRD_ENDPOINT)
205 dev->int_enable &= ~INT_MSTRDEND;
206 dev->int_enable &= ~INT_EPxDATASET (ep->num);
207 } else
208 dev->int_enable &= ~INT_EP0;
209 writel(dev->int_enable, &regs->int_enable);
210 readl(&regs->int_enable);
211 if (ep->num < 3) {
212 struct goku_udc_regs __iomem *r = ep->dev->regs;
213 u32 tmp;
214
215 tmp = readl(&r->EPxSingle);
216 tmp &= ~(0x11 << ep->num);
217 writel(tmp, &r->EPxSingle);
218
219 tmp = readl(&r->EPxBCS);
220 tmp &= ~(0x11 << ep->num);
221 writel(tmp, &r->EPxBCS);
222 }
223 /* reset dma in case we're still using it */
224 if (ep->dma) {
225 u32 master;
226
227 master = readl(&regs->dma_master) & MST_RW_BITS;
228 if (ep->num == UDC_MSTWR_ENDPOINT) {
229 master &= ~MST_W_BITS;
230 master |= MST_WR_RESET;
231 } else {
232 master &= ~MST_R_BITS;
233 master |= MST_RD_RESET;
234 }
235 writel(master, &regs->dma_master);
236 }
237 }
238
239 ep->ep.maxpacket = MAX_FIFO_SIZE;
240 ep->desc = NULL;
241 ep->stopped = 1;
242 ep->irqs = 0;
243 ep->dma = 0;
244}
245
246static int goku_ep_disable(struct usb_ep *_ep)
247{
248 struct goku_ep *ep;
249 struct goku_udc *dev;
250 unsigned long flags;
251
252 ep = container_of(_ep, struct goku_ep, ep);
253 if (!_ep || !ep->desc)
254 return -ENODEV;
255 dev = ep->dev;
256 if (dev->ep0state == EP0_SUSPEND)
257 return -EBUSY;
258
259 VDBG(dev, "disable %s\n", _ep->name);
260
261 spin_lock_irqsave(&dev->lock, flags);
262 nuke(ep, -ESHUTDOWN);
263 ep_reset(dev->regs, ep);
264 spin_unlock_irqrestore(&dev->lock, flags);
265
266 return 0;
267}
268
269/*-------------------------------------------------------------------------*/
270
271static struct usb_request *
272goku_alloc_request(struct usb_ep *_ep, int gfp_flags)
273{
274 struct goku_request *req;
275
276 if (!_ep)
277 return NULL;
278 req = kmalloc(sizeof *req, gfp_flags);
279 if (!req)
280 return NULL;
281
282 memset(req, 0, sizeof *req);
283 req->req.dma = DMA_ADDR_INVALID;
284 INIT_LIST_HEAD(&req->queue);
285 return &req->req;
286}
287
288static void
289goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
290{
291 struct goku_request *req;
292
293 if (!_ep || !_req)
294 return;
295
296 req = container_of(_req, struct goku_request, req);
297 WARN_ON(!list_empty(&req->queue));
298 kfree(req);
299}
300
301/*-------------------------------------------------------------------------*/
302
303#undef USE_KMALLOC
304
305/* many common platforms have dma-coherent caches, which means that it's
306 * safe to use kmalloc() memory for all i/o buffers without using any
307 * cache flushing calls. (unless you're trying to share cache lines
308 * between dma and non-dma activities, which is a slow idea in any case.)
309 *
310 * other platforms need more care, with 2.6 having a moderately general
311 * solution except for the common "buffer is smaller than a page" case.
312 */
313#if defined(CONFIG_X86)
314#define USE_KMALLOC
315
316#elif defined(CONFIG_MIPS) && !defined(CONFIG_NONCOHERENT_IO)
317#define USE_KMALLOC
318
319#elif defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
320#define USE_KMALLOC
321
322#endif
323
324/* allocating buffers this way eliminates dma mapping overhead, which
325 * on some platforms will mean eliminating a per-io buffer copy. with
326 * some kinds of system caches, further tweaks may still be needed.
327 */
328static void *
329goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
330 dma_addr_t *dma, int gfp_flags)
331{
332 void *retval;
333 struct goku_ep *ep;
334
335 ep = container_of(_ep, struct goku_ep, ep);
336 if (!_ep)
337 return NULL;
338 *dma = DMA_ADDR_INVALID;
339
340#if defined(USE_KMALLOC)
341 retval = kmalloc(bytes, gfp_flags);
342 if (retval)
343 *dma = virt_to_phys(retval);
344#else
345 if (ep->dma) {
346 /* the main problem with this call is that it wastes memory
347 * on typical 1/N page allocations: it allocates 1-N pages.
348 */
349#warning Using dma_alloc_coherent even with buffers smaller than a page.
350 retval = dma_alloc_coherent(&ep->dev->pdev->dev,
351 bytes, dma, gfp_flags);
352 } else
353 retval = kmalloc(bytes, gfp_flags);
354#endif
355 return retval;
356}
357
358static void
359goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
360{
361 /* free memory into the right allocator */
362#ifndef USE_KMALLOC
363 if (dma != DMA_ADDR_INVALID) {
364 struct goku_ep *ep;
365
366 ep = container_of(_ep, struct goku_ep, ep);
367 if (!_ep)
368 return;
369 dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
370 } else
371#endif
372 kfree (buf);
373}
374
375/*-------------------------------------------------------------------------*/
376
377static void
378done(struct goku_ep *ep, struct goku_request *req, int status)
379{
380 struct goku_udc *dev;
381 unsigned stopped = ep->stopped;
382
383 list_del_init(&req->queue);
384
385 if (likely(req->req.status == -EINPROGRESS))
386 req->req.status = status;
387 else
388 status = req->req.status;
389
390 dev = ep->dev;
391 if (req->mapped) {
392 pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
393 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
394 req->req.dma = DMA_ADDR_INVALID;
395 req->mapped = 0;
396 }
397
398#ifndef USB_TRACE
399 if (status && status != -ESHUTDOWN)
400#endif
401 VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
402 ep->ep.name, &req->req, status,
403 req->req.actual, req->req.length);
404
405 /* don't modify queue heads during completion callback */
406 ep->stopped = 1;
407 spin_unlock(&dev->lock);
408 req->req.complete(&ep->ep, &req->req);
409 spin_lock(&dev->lock);
410 ep->stopped = stopped;
411}
412
413/*-------------------------------------------------------------------------*/
414
415static inline int
416write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
417{
418 unsigned length, count;
419
420 length = min(req->req.length - req->req.actual, max);
421 req->req.actual += length;
422
423 count = length;
424 while (likely(count--))
425 writel(*buf++, fifo);
426 return length;
427}
428
429// return: 0 = still running, 1 = completed, negative = errno
430static int write_fifo(struct goku_ep *ep, struct goku_request *req)
431{
432 struct goku_udc *dev = ep->dev;
433 u32 tmp;
434 u8 *buf;
435 unsigned count;
436 int is_last;
437
438 tmp = readl(&dev->regs->DataSet);
439 buf = req->req.buf + req->req.actual;
440 prefetch(buf);
441
442 dev = ep->dev;
443 if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
444 return -EL2HLT;
445
446 /* NOTE: just single-buffered PIO-IN for now. */
447 if (unlikely((tmp & DATASET_A(ep->num)) != 0))
448 return 0;
449
450 /* clear our "packet available" irq */
451 if (ep->num != 0)
452 writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
453
454 count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
455
456 /* last packet often short (sometimes a zlp, especially on ep0) */
457 if (unlikely(count != ep->ep.maxpacket)) {
458 writel(~(1<<ep->num), &dev->regs->EOP);
459 if (ep->num == 0) {
460 dev->ep[0].stopped = 1;
461 dev->ep0state = EP0_STATUS;
462 }
463 is_last = 1;
464 } else {
465 if (likely(req->req.length != req->req.actual)
466 || req->req.zero)
467 is_last = 0;
468 else
469 is_last = 1;
470 }
471#if 0 /* printk seemed to trash is_last...*/
472//#ifdef USB_TRACE
473 VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
474 ep->ep.name, count, is_last ? "/last" : "",
475 req->req.length - req->req.actual, req);
476#endif
477
478 /* requests complete when all IN data is in the FIFO,
479 * or sometimes later, if a zlp was needed.
480 */
481 if (is_last) {
482 done(ep, req, 0);
483 return 1;
484 }
485
486 return 0;
487}
488
489static int read_fifo(struct goku_ep *ep, struct goku_request *req)
490{
491 struct goku_udc_regs __iomem *regs;
492 u32 size, set;
493 u8 *buf;
494 unsigned bufferspace, is_short, dbuff;
495
496 regs = ep->dev->regs;
497top:
498 buf = req->req.buf + req->req.actual;
499 prefetchw(buf);
500
501 if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
502 return -EL2HLT;
503
504 dbuff = (ep->num == 1 || ep->num == 2);
505 do {
506 /* ack dataset irq matching the status we'll handle */
507 if (ep->num != 0)
508 writel(~INT_EPxDATASET(ep->num), &regs->int_status);
509
510 set = readl(&regs->DataSet) & DATASET_AB(ep->num);
511 size = readl(&regs->EPxSizeLA[ep->num]);
512 bufferspace = req->req.length - req->req.actual;
513
514 /* usually do nothing without an OUT packet */
515 if (likely(ep->num != 0 || bufferspace != 0)) {
516 if (unlikely(set == 0))
517 break;
518 /* use ep1/ep2 double-buffering for OUT */
519 if (!(size & PACKET_ACTIVE))
520 size = readl(&regs->EPxSizeLB[ep->num]);
521 if (!(size & PACKET_ACTIVE)) // "can't happen"
522 break;
523 size &= DATASIZE; /* EPxSizeH == 0 */
524
525 /* ep0out no-out-data case for set_config, etc */
526 } else
527 size = 0;
528
529 /* read all bytes from this packet */
530 req->req.actual += size;
531 is_short = (size < ep->ep.maxpacket);
532#ifdef USB_TRACE
533 VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
534 ep->ep.name, size, is_short ? "/S" : "",
535 req, req->req.actual, req->req.length);
536#endif
537 while (likely(size-- != 0)) {
538 u8 byte = (u8) readl(ep->reg_fifo);
539
540 if (unlikely(bufferspace == 0)) {
541 /* this happens when the driver's buffer
542 * is smaller than what the host sent.
543 * discard the extra data in this packet.
544 */
545 if (req->req.status != -EOVERFLOW)
546 DBG(ep->dev, "%s overflow %u\n",
547 ep->ep.name, size);
548 req->req.status = -EOVERFLOW;
549 } else {
550 *buf++ = byte;
551 bufferspace--;
552 }
553 }
554
555 /* completion */
556 if (unlikely(is_short || req->req.actual == req->req.length)) {
557 if (unlikely(ep->num == 0)) {
558 /* non-control endpoints now usable? */
559 if (ep->dev->req_config)
560 writel(ep->dev->configured
561 ? USBSTATE_CONFIGURED
562 : 0,
563 &regs->UsbState);
564 /* ep0out status stage */
565 writel(~(1<<0), &regs->EOP);
566 ep->stopped = 1;
567 ep->dev->ep0state = EP0_STATUS;
568 }
569 done(ep, req, 0);
570
571 /* empty the second buffer asap */
572 if (dbuff && !list_empty(&ep->queue)) {
573 req = list_entry(ep->queue.next,
574 struct goku_request, queue);
575 goto top;
576 }
577 return 1;
578 }
579 } while (dbuff);
580 return 0;
581}
582
583static inline void
584pio_irq_enable(struct goku_udc *dev,
585 struct goku_udc_regs __iomem *regs, int epnum)
586{
587 dev->int_enable |= INT_EPxDATASET (epnum);
588 writel(dev->int_enable, &regs->int_enable);
589 /* write may still be posted */
590}
591
592static inline void
593pio_irq_disable(struct goku_udc *dev,
594 struct goku_udc_regs __iomem *regs, int epnum)
595{
596 dev->int_enable &= ~INT_EPxDATASET (epnum);
597 writel(dev->int_enable, &regs->int_enable);
598 /* write may still be posted */
599}
600
601static inline void
602pio_advance(struct goku_ep *ep)
603{
604 struct goku_request *req;
605
606 if (unlikely(list_empty (&ep->queue)))
607 return;
608 req = list_entry(ep->queue.next, struct goku_request, queue);
609 (ep->is_in ? write_fifo : read_fifo)(ep, req);
610}
611
612
613/*-------------------------------------------------------------------------*/
614
615// return: 0 = q running, 1 = q stopped, negative = errno
616static int start_dma(struct goku_ep *ep, struct goku_request *req)
617{
618 struct goku_udc_regs __iomem *regs = ep->dev->regs;
619 u32 master;
620 u32 start = req->req.dma;
621 u32 end = start + req->req.length - 1;
622
623 master = readl(&regs->dma_master) & MST_RW_BITS;
624
625 /* re-init the bits affecting IN dma; careful with zlps */
626 if (likely(ep->is_in)) {
627 if (unlikely(master & MST_RD_ENA)) {
628 DBG (ep->dev, "start, IN active dma %03x!!\n",
629 master);
630// return -EL2HLT;
631 }
632 writel(end, &regs->in_dma_end);
633 writel(start, &regs->in_dma_start);
634
635 master &= ~MST_R_BITS;
636 if (unlikely(req->req.length == 0))
637 master |= MST_RD_ENA | MST_RD_EOPB;
638 else if ((req->req.length % ep->ep.maxpacket) != 0
639 || req->req.zero)
640 master |= MST_RD_ENA | MST_EOPB_ENA;
641 else
642 master |= MST_RD_ENA | MST_EOPB_DIS;
643
644 ep->dev->int_enable |= INT_MSTRDEND;
645
646 /* Goku DMA-OUT merges short packets, which plays poorly with
647 * protocols where short packets mark the transfer boundaries.
648 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
649 * ending transfers after 3 SOFs; we don't turn it on.
650 */
651 } else {
652 if (unlikely(master & MST_WR_ENA)) {
653 DBG (ep->dev, "start, OUT active dma %03x!!\n",
654 master);
655// return -EL2HLT;
656 }
657 writel(end, &regs->out_dma_end);
658 writel(start, &regs->out_dma_start);
659
660 master &= ~MST_W_BITS;
661 master |= MST_WR_ENA | MST_TIMEOUT_DIS;
662
663 ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
664 }
665
666 writel(master, &regs->dma_master);
667 writel(ep->dev->int_enable, &regs->int_enable);
668 return 0;
669}
670
671static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
672{
673 struct goku_request *req;
674 struct goku_udc_regs __iomem *regs = ep->dev->regs;
675 u32 master;
676
677 master = readl(&regs->dma_master);
678
679 if (unlikely(list_empty(&ep->queue))) {
680stop:
681 if (ep->is_in)
682 dev->int_enable &= ~INT_MSTRDEND;
683 else
684 dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
685 writel(dev->int_enable, &regs->int_enable);
686 return;
687 }
688 req = list_entry(ep->queue.next, struct goku_request, queue);
689
690 /* normal hw dma completion (not abort) */
691 if (likely(ep->is_in)) {
692 if (unlikely(master & MST_RD_ENA))
693 return;
694 req->req.actual = readl(&regs->in_dma_current);
695 } else {
696 if (unlikely(master & MST_WR_ENA))
697 return;
698
699 /* hardware merges short packets, and also hides packet
700 * overruns. a partial packet MAY be in the fifo here.
701 */
702 req->req.actual = readl(&regs->out_dma_current);
703 }
704 req->req.actual -= req->req.dma;
705 req->req.actual++;
706
707#ifdef USB_TRACE
708 VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
709 ep->ep.name, ep->is_in ? "IN" : "OUT",
710 req->req.actual, req->req.length, req);
711#endif
712 done(ep, req, 0);
713 if (list_empty(&ep->queue))
714 goto stop;
715 req = list_entry(ep->queue.next, struct goku_request, queue);
716 (void) start_dma(ep, req);
717}
718
719static void abort_dma(struct goku_ep *ep, int status)
720{
721 struct goku_udc_regs __iomem *regs = ep->dev->regs;
722 struct goku_request *req;
723 u32 curr, master;
724
725 /* NAK future host requests, hoping the implicit delay lets the
726 * dma engine finish reading (or writing) its latest packet and
727 * empty the dma buffer (up to 16 bytes).
728 *
729 * This avoids needing to clean up a partial packet in the fifo;
730 * we can't do that for IN without side effects to HALT and TOGGLE.
731 */
732 command(regs, COMMAND_FIFO_DISABLE, ep->num);
733 req = list_entry(ep->queue.next, struct goku_request, queue);
734 master = readl(&regs->dma_master) & MST_RW_BITS;
735
736 /* FIXME using these resets isn't usably documented. this may
737 * not work unless it's followed by disabling the endpoint.
738 *
739 * FIXME the OUT reset path doesn't even behave consistently.
740 */
741 if (ep->is_in) {
742 if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
743 goto finished;
744 curr = readl(&regs->in_dma_current);
745
746 writel(curr, &regs->in_dma_end);
747 writel(curr, &regs->in_dma_start);
748
749 master &= ~MST_R_BITS;
750 master |= MST_RD_RESET;
751 writel(master, &regs->dma_master);
752
753 if (readl(&regs->dma_master) & MST_RD_ENA)
754 DBG(ep->dev, "IN dma active after reset!\n");
755
756 } else {
757 if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
758 goto finished;
759 curr = readl(&regs->out_dma_current);
760
761 writel(curr, &regs->out_dma_end);
762 writel(curr, &regs->out_dma_start);
763
764 master &= ~MST_W_BITS;
765 master |= MST_WR_RESET;
766 writel(master, &regs->dma_master);
767
768 if (readl(&regs->dma_master) & MST_WR_ENA)
769 DBG(ep->dev, "OUT dma active after reset!\n");
770 }
771 req->req.actual = (curr - req->req.dma) + 1;
772 req->req.status = status;
773
774 VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
775 ep->is_in ? "IN" : "OUT",
776 req->req.actual, req->req.length);
777
778 command(regs, COMMAND_FIFO_ENABLE, ep->num);
779
780 return;
781
782finished:
783 /* dma already completed; no abort needed */
784 command(regs, COMMAND_FIFO_ENABLE, ep->num);
785 req->req.actual = req->req.length;
786 req->req.status = 0;
787}
788
789/*-------------------------------------------------------------------------*/
790
791static int
792goku_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
793{
794 struct goku_request *req;
795 struct goku_ep *ep;
796 struct goku_udc *dev;
797 unsigned long flags;
798 int status;
799
800 /* always require a cpu-view buffer so pio works */
801 req = container_of(_req, struct goku_request, req);
802 if (unlikely(!_req || !_req->complete
803 || !_req->buf || !list_empty(&req->queue)))
804 return -EINVAL;
805 ep = container_of(_ep, struct goku_ep, ep);
806 if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
807 return -EINVAL;
808 dev = ep->dev;
809 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
810 return -ESHUTDOWN;
811
812 /* can't touch registers when suspended */
813 if (dev->ep0state == EP0_SUSPEND)
814 return -EBUSY;
815
816 /* set up dma mapping in case the caller didn't */
817 if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
818 _req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
819 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
820 req->mapped = 1;
821 }
822
823#ifdef USB_TRACE
824 VDBG(dev, "%s queue req %p, len %u buf %p\n",
825 _ep->name, _req, _req->length, _req->buf);
826#endif
827
828 spin_lock_irqsave(&dev->lock, flags);
829
830 _req->status = -EINPROGRESS;
831 _req->actual = 0;
832
833 /* for ep0 IN without premature status, zlp is required and
834 * writing EOP starts the status stage (OUT).
835 */
836 if (unlikely(ep->num == 0 && ep->is_in))
837 _req->zero = 1;
838
839 /* kickstart this i/o queue? */
840 status = 0;
841 if (list_empty(&ep->queue) && likely(!ep->stopped)) {
842 /* dma: done after dma completion IRQ (or error)
843 * pio: done after last fifo operation
844 */
845 if (ep->dma)
846 status = start_dma(ep, req);
847 else
848 status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
849
850 if (unlikely(status != 0)) {
851 if (status > 0)
852 status = 0;
853 req = NULL;
854 }
855
856 } /* else pio or dma irq handler advances the queue. */
857
858 if (likely(req != 0))
859 list_add_tail(&req->queue, &ep->queue);
860
861 if (likely(!list_empty(&ep->queue))
862 && likely(ep->num != 0)
863 && !ep->dma
864 && !(dev->int_enable & INT_EPxDATASET (ep->num)))
865 pio_irq_enable(dev, dev->regs, ep->num);
866
867 spin_unlock_irqrestore(&dev->lock, flags);
868
869 /* pci writes may still be posted */
870 return status;
871}
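For illustration, the usual calling sequence into the queue path above, from a gadget driver's point of view; the sketch_* names are placeholders. Here the buffer comes from usb_ep_alloc_buffer(), so req->dma is already set; a driver that supplies its own kmalloc()ed buffer would instead leave req->dma as DMA_ADDR_INVALID and let goku_queue() do the pci_map_single():

static void sketch_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* runs with the device lock dropped; req->status/actual are valid */
}

static int sketch_submit(struct usb_ep *ep, unsigned len)
{
	struct usb_request	*req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->buf = usb_ep_alloc_buffer(ep, len, &req->dma, GFP_ATOMIC);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return -ENOMEM;
	}
	req->length = len;
	req->complete = sketch_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}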
872
873/* dequeue ALL requests */
874static void nuke(struct goku_ep *ep, int status)
875{
876 struct goku_request *req;
877
878 ep->stopped = 1;
879 if (list_empty(&ep->queue))
880 return;
881 if (ep->dma)
882 abort_dma(ep, status);
883 while (!list_empty(&ep->queue)) {
884 req = list_entry(ep->queue.next, struct goku_request, queue);
885 done(ep, req, status);
886 }
887}
888
889/* dequeue JUST ONE request */
890static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
891{
892 struct goku_request *req;
893 struct goku_ep *ep;
894 struct goku_udc *dev;
895 unsigned long flags;
896
897 ep = container_of(_ep, struct goku_ep, ep);
898 if (!_ep || !_req || (!ep->desc && ep->num != 0))
899 return -EINVAL;
900 dev = ep->dev;
901 if (!dev->driver)
902 return -ESHUTDOWN;
903
904 /* we can't touch (dma) registers when suspended */
905 if (dev->ep0state == EP0_SUSPEND)
906 return -EBUSY;
907
908 VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
909 ep->is_in ? "IN" : "OUT",
910 ep->dma ? "dma" : "pio",
911 _req);
912
913 spin_lock_irqsave(&dev->lock, flags);
914
915 /* make sure it's actually queued on this endpoint */
916 list_for_each_entry (req, &ep->queue, queue) {
917 if (&req->req == _req)
918 break;
919 }
920 if (&req->req != _req) {
921 spin_unlock_irqrestore (&dev->lock, flags);
922 return -EINVAL;
923 }
924
925 if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
926 abort_dma(ep, -ECONNRESET);
927 done(ep, req, -ECONNRESET);
928 dma_advance(dev, ep);
929 } else if (!list_empty(&req->queue))
930 done(ep, req, -ECONNRESET);
931 else
932 req = NULL;
933 spin_unlock_irqrestore(&dev->lock, flags);
934
935 return req ? 0 : -EOPNOTSUPP;
936}
937
938/*-------------------------------------------------------------------------*/
939
940static void goku_clear_halt(struct goku_ep *ep)
941{
942 // assert (ep->num !=0)
943 VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
944 command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
945 command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
946 if (ep->stopped) {
947 ep->stopped = 0;
948 if (ep->dma) {
949 struct goku_request *req;
950
951 if (list_empty(&ep->queue))
952 return;
953 req = list_entry(ep->queue.next, struct goku_request,
954 queue);
955 (void) start_dma(ep, req);
956 } else
957 pio_advance(ep);
958 }
959}
960
961static int goku_set_halt(struct usb_ep *_ep, int value)
962{
963 struct goku_ep *ep;
964 unsigned long flags;
965 int retval = 0;
966
967 if (!_ep)
968 return -ENODEV;
969 ep = container_of (_ep, struct goku_ep, ep);
970
971 if (ep->num == 0) {
972 if (value) {
973 ep->dev->ep0state = EP0_STALL;
974 ep->dev->ep[0].stopped = 1;
975 } else
976 return -EINVAL;
977
978 /* don't change EPxSTATUS_EP_INVALID to READY */
979 } else if (!ep->desc) {
980 DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
981 return -EINVAL;
982 }
983
984 spin_lock_irqsave(&ep->dev->lock, flags);
985 if (!list_empty(&ep->queue))
986 retval = -EAGAIN;
987 else if (ep->is_in && value
988 /* data in (either) packet buffer? */
989 && (readl(&ep->dev->regs->DataSet)
990 & DATASET_AB(ep->num)))
991 retval = -EAGAIN;
992 else if (!value)
993 goku_clear_halt(ep);
994 else {
995 ep->stopped = 1;
996 VDBG(ep->dev, "%s set halt\n", ep->ep.name);
997 command(ep->dev->regs, COMMAND_STALL, ep->num);
998 readl(ep->reg_status);
999 }
1000 spin_unlock_irqrestore(&ep->dev->lock, flags);
1001 return retval;
1002}
1003
1004static int goku_fifo_status(struct usb_ep *_ep)
1005{
1006 struct goku_ep *ep;
1007 struct goku_udc_regs __iomem *regs;
1008 u32 size;
1009
1010 if (!_ep)
1011 return -ENODEV;
1012 ep = container_of(_ep, struct goku_ep, ep);
1013
1014 /* size is only reported sanely for OUT */
1015 if (ep->is_in)
1016 return -EOPNOTSUPP;
1017
1018 /* ignores 16-byte dma buffer; SizeH == 0 */
1019 regs = ep->dev->regs;
1020 size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
1021 size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
1022 VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
1023 return size;
1024}
1025
1026static void goku_fifo_flush(struct usb_ep *_ep)
1027{
1028 struct goku_ep *ep;
1029 struct goku_udc_regs __iomem *regs;
1030 u32 size;
1031
1032 if (!_ep)
1033 return;
1034 ep = container_of(_ep, struct goku_ep, ep);
1035 VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);
1036
1037 /* don't change EPxSTATUS_EP_INVALID to READY */
1038 if (!ep->desc && ep->num != 0) {
1039 DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
1040 return;
1041 }
1042
1043 regs = ep->dev->regs;
1044 size = readl(&regs->EPxSizeLA[ep->num]);
1045 size &= DATASIZE;
1046
1047 /* Non-desirable behavior: FIFO_CLEAR also clears the
1048 * endpoint halt feature. For OUT, we _could_ just read
1049 * the bytes out (PIO, if !ep->dma); for in, no choice.
1050 */
1051 if (size)
1052 command(regs, COMMAND_FIFO_CLEAR, ep->num);
1053}
1054
1055static struct usb_ep_ops goku_ep_ops = {
1056 .enable = goku_ep_enable,
1057 .disable = goku_ep_disable,
1058
1059 .alloc_request = goku_alloc_request,
1060 .free_request = goku_free_request,
1061
1062 .alloc_buffer = goku_alloc_buffer,
1063 .free_buffer = goku_free_buffer,
1064
1065 .queue = goku_queue,
1066 .dequeue = goku_dequeue,
1067
1068 .set_halt = goku_set_halt,
1069 .fifo_status = goku_fifo_status,
1070 .fifo_flush = goku_fifo_flush,
1071};
1072
1073/*-------------------------------------------------------------------------*/
1074
1075static int goku_get_frame(struct usb_gadget *_gadget)
1076{
1077 return -EOPNOTSUPP;
1078}
1079
1080static const struct usb_gadget_ops goku_ops = {
1081 .get_frame = goku_get_frame,
1082 // no remote wakeup
1083 // not selfpowered
1084};
1085
1086/*-------------------------------------------------------------------------*/
1087
1088static inline char *dmastr(void)
1089{
1090 if (use_dma == 0)
1091 return "(dma disabled)";
1092 else if (use_dma == 2)
1093 return "(dma IN and OUT)";
1094 else
1095 return "(dma IN)";
1096}
1097
1098#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1099
1100static const char proc_node_name [] = "driver/udc";
1101
1102#define FOURBITS "%s%s%s%s"
1103#define EIGHTBITS FOURBITS FOURBITS
1104
1105static void
1106dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
1107{
1108 int t;
1109
1110 /* int_status is the same format ... */
1111 t = scnprintf(*next, *size,
1112 "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
1113 label, mask,
1114 (mask & INT_PWRDETECT) ? " power" : "",
1115 (mask & INT_SYSERROR) ? " sys" : "",
1116 (mask & INT_MSTRDEND) ? " in-dma" : "",
1117 (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
1118
1119 (mask & INT_MSTWREND) ? " out-dma" : "",
1120 (mask & INT_MSTWRSET) ? " wrset" : "",
1121 (mask & INT_ERR) ? " err" : "",
1122 (mask & INT_SOF) ? " sof" : "",
1123
1124 (mask & INT_EP3NAK) ? " ep3nak" : "",
1125 (mask & INT_EP2NAK) ? " ep2nak" : "",
1126 (mask & INT_EP1NAK) ? " ep1nak" : "",
1127 (mask & INT_EP3DATASET) ? " ep3" : "",
1128
1129 (mask & INT_EP2DATASET) ? " ep2" : "",
1130 (mask & INT_EP1DATASET) ? " ep1" : "",
1131 (mask & INT_STATUSNAK) ? " ep0snak" : "",
1132 (mask & INT_STATUS) ? " ep0status" : "",
1133
1134 (mask & INT_SETUP) ? " setup" : "",
1135 (mask & INT_ENDPOINT0) ? " ep0" : "",
1136 (mask & INT_USBRESET) ? " reset" : "",
1137 (mask & INT_SUSPEND) ? " suspend" : "");
1138 *size -= t;
1139 *next += t;
1140}
1141
1142
1143static int
1144udc_proc_read(char *buffer, char **start, off_t off, int count,
1145 int *eof, void *_dev)
1146{
1147 char *buf = buffer;
1148 struct goku_udc *dev = _dev;
1149 struct goku_udc_regs __iomem *regs = dev->regs;
1150 char *next = buf;
1151 unsigned size = count;
1152 unsigned long flags;
1153 int i, t, is_usb_connected;
1154 u32 tmp;
1155
1156 if (off != 0)
1157 return 0;
1158
1159 local_irq_save(flags);
1160
1161 /* basic device status */
1162 tmp = readl(&regs->power_detect);
1163 is_usb_connected = tmp & PW_DETECT;
1164 t = scnprintf(next, size,
1165 "%s - %s\n"
1166 "%s version: %s %s\n"
1167 "Gadget driver: %s\n"
1168 "Host %s, %s\n"
1169 "\n",
1170 pci_name(dev->pdev), driver_desc,
1171 driver_name, DRIVER_VERSION, dmastr(),
1172 dev->driver ? dev->driver->driver.name : "(none)",
1173 is_usb_connected
1174 ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
1175 : "disconnected",
1176 ({char *tmp;
1177 switch(dev->ep0state){
1178 case EP0_DISCONNECT: tmp = "ep0_disconnect"; break;
1179 case EP0_IDLE: tmp = "ep0_idle"; break;
1180 case EP0_IN: tmp = "ep0_in"; break;
1181 case EP0_OUT: tmp = "ep0_out"; break;
1182 case EP0_STATUS: tmp = "ep0_status"; break;
1183 case EP0_STALL: tmp = "ep0_stall"; break;
1184 case EP0_SUSPEND: tmp = "ep0_suspend"; break;
1185 default: tmp = "ep0_?"; break;
1186 } tmp; })
1187 );
1188 size -= t;
1189 next += t;
1190
1191 dump_intmask("int_status", readl(&regs->int_status), &next, &size);
1192 dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);
1193
1194 if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
1195 goto done;
1196
1197 /* registers for (active) device and ep0 */
1198 t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
1199 "single.bcs %02x.%02x state %x addr %u\n",
1200 dev->irqs, readl(&regs->DataSet),
1201 readl(&regs->EPxSingle), readl(&regs->EPxBCS),
1202 readl(&regs->UsbState),
1203 readl(&regs->address));
1204 size -= t;
1205 next += t;
1206
1207 tmp = readl(&regs->dma_master);
1208 t = scnprintf(next, size,
1209 "dma %03X =" EIGHTBITS "%s %s\n", tmp,
1210 (tmp & MST_EOPB_DIS) ? " eopb-" : "",
1211 (tmp & MST_EOPB_ENA) ? " eopb+" : "",
1212 (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
1213 (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
1214
1215 (tmp & MST_RD_EOPB) ? " eopb" : "",
1216 (tmp & MST_RD_RESET) ? " in_reset" : "",
1217 (tmp & MST_WR_RESET) ? " out_reset" : "",
1218 (tmp & MST_RD_ENA) ? " IN" : "",
1219
1220 (tmp & MST_WR_ENA) ? " OUT" : "",
1221 (tmp & MST_CONNECTION)
1222 ? "ep1in/ep2out"
1223 : "ep1out/ep2in");
1224 size -= t;
1225 next += t;
1226
1227 /* dump endpoint queues */
1228 for (i = 0; i < 4; i++) {
1229 struct goku_ep *ep = &dev->ep [i];
1230 struct goku_request *req;
1231 int t;
1232
1233 if (i && !ep->desc)
1234 continue;
1235
1236 tmp = readl(ep->reg_status);
1237 t = scnprintf(next, size,
1238 "%s %s max %u %s, irqs %lu, "
1239 "status %02x (%s) " FOURBITS "\n",
1240 ep->ep.name,
1241 ep->is_in ? "in" : "out",
1242 ep->ep.maxpacket,
1243 ep->dma ? "dma" : "pio",
1244 ep->irqs,
1245 tmp, ({ char *s;
1246 switch (tmp & EPxSTATUS_EP_MASK) {
1247 case EPxSTATUS_EP_READY:
1248 s = "ready"; break;
1249 case EPxSTATUS_EP_DATAIN:
1250 s = "packet"; break;
1251 case EPxSTATUS_EP_FULL:
1252 s = "full"; break;
1253 case EPxSTATUS_EP_TX_ERR: // host will retry
1254 s = "tx_err"; break;
1255 case EPxSTATUS_EP_RX_ERR:
1256 s = "rx_err"; break;
1257 case EPxSTATUS_EP_BUSY: /* ep0 only */
1258 s = "busy"; break;
1259 case EPxSTATUS_EP_STALL:
1260 s = "stall"; break;
1261 case EPxSTATUS_EP_INVALID: // these "can't happen"
1262 s = "invalid"; break;
1263 default:
1264 s = "?"; break;
1265 }; s; }),
1266 (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
1267 (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
1268 (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
1269 (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
1270 );
1271 if (t <= 0 || t > size)
1272 goto done;
1273 size -= t;
1274 next += t;
1275
1276 if (list_empty(&ep->queue)) {
1277 t = scnprintf(next, size, "\t(nothing queued)\n");
1278 if (t <= 0 || t > size)
1279 goto done;
1280 size -= t;
1281 next += t;
1282 continue;
1283 }
1284 list_for_each_entry(req, &ep->queue, queue) {
1285 if (ep->dma && req->queue.prev == &ep->queue) {
1286 if (i == UDC_MSTRD_ENDPOINT)
1287 tmp = readl(&regs->in_dma_current);
1288 else
1289 tmp = readl(&regs->out_dma_current);
1290 tmp -= req->req.dma;
1291 tmp++;
1292 } else
1293 tmp = req->req.actual;
1294
1295 t = scnprintf(next, size,
1296 "\treq %p len %u/%u buf %p\n",
1297 &req->req, tmp, req->req.length,
1298 req->req.buf);
1299 if (t <= 0 || t > size)
1300 goto done;
1301 size -= t;
1302 next += t;
1303 }
1304 }
1305
1306done:
1307 local_irq_restore(flags);
1308 *eof = 1;
1309 return count - size;
1310}
1311
1312#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1313
1314/*-------------------------------------------------------------------------*/
1315
1316static void udc_reinit (struct goku_udc *dev)
1317{
1318 static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
1319
1320 unsigned i;
1321
1322 INIT_LIST_HEAD (&dev->gadget.ep_list);
1323 dev->gadget.ep0 = &dev->ep [0].ep;
1324 dev->gadget.speed = USB_SPEED_UNKNOWN;
1325 dev->ep0state = EP0_DISCONNECT;
1326 dev->irqs = 0;
1327
1328 for (i = 0; i < 4; i++) {
1329 struct goku_ep *ep = &dev->ep[i];
1330
1331 ep->num = i;
1332 ep->ep.name = names[i];
1333 ep->reg_fifo = &dev->regs->ep_fifo [i];
1334 ep->reg_status = &dev->regs->ep_status [i];
1335 ep->reg_mode = &dev->regs->ep_mode[i];
1336
1337 ep->ep.ops = &goku_ep_ops;
1338 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1339 ep->dev = dev;
1340 INIT_LIST_HEAD (&ep->queue);
1341
1342 ep_reset(NULL, ep);
1343 }
1344
1345 dev->ep[0].reg_mode = NULL;
1346 dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
1347 list_del_init (&dev->ep[0].ep.ep_list);
1348}
1349
1350static void udc_reset(struct goku_udc *dev)
1351{
1352 struct goku_udc_regs __iomem *regs = dev->regs;
1353
1354 writel(0, &regs->power_detect);
1355 writel(0, &regs->int_enable);
1356 readl(&regs->int_enable);
1357 dev->int_enable = 0;
1358
1359 /* deassert reset, leave USB D+ at hi-Z (no pullup)
1360 * don't let INT_PWRDETECT sequence begin
1361 */
1362 udelay(250);
1363 writel(PW_RESETB, &regs->power_detect);
1364 readl(&regs->int_enable);
1365}
1366
1367static void ep0_start(struct goku_udc *dev)
1368{
1369 struct goku_udc_regs __iomem *regs = dev->regs;
1370 unsigned i;
1371
1372 VDBG(dev, "%s\n", __FUNCTION__);
1373
1374 udc_reset(dev);
1375 udc_reinit (dev);
1376 //writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
1377
1378 /* hw handles set_address, set_feature, get_status; maybe more */
1379 writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
1380 | G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
1381 | G_REQMODE_GET_DESC
1382 | G_REQMODE_CLEAR_FEAT
1383 , &regs->reqmode);
1384
1385 for (i = 0; i < 4; i++)
1386 dev->ep[i].irqs = 0;
1387
1388 /* can't modify descriptors after writing UsbReady */
1389 for (i = 0; i < DESC_LEN; i++)
1390 writel(0, &regs->descriptors[i]);
1391 writel(0, &regs->UsbReady);
1392
1393 /* expect ep0 requests when the host drops reset */
1394 writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
1395 dev->int_enable = INT_DEVWIDE | INT_EP0;
1396 writel(dev->int_enable, &dev->regs->int_enable);
1397 readl(&regs->int_enable);
1398 dev->gadget.speed = USB_SPEED_FULL;
1399 dev->ep0state = EP0_IDLE;
1400}
1401
1402static void udc_enable(struct goku_udc *dev)
1403{
1404 /* start enumeration now, or after power detect irq */
1405 if (readl(&dev->regs->power_detect) & PW_DETECT)
1406 ep0_start(dev);
1407 else {
1408 DBG(dev, "%s\n", __FUNCTION__);
1409 dev->int_enable = INT_PWRDETECT;
1410 writel(dev->int_enable, &dev->regs->int_enable);
1411 }
1412}
1413
1414/*-------------------------------------------------------------------------*/
1415
1416/* keeping it simple:
1417 * - one bus driver, initted first;
1418 * - one function driver, initted second
1419 */
1420
1421static struct goku_udc *the_controller;
1422
1423/* when a driver is successfully registered, it will receive
1424 * control requests including set_configuration(), which enables
1425 * non-control requests. then usb traffic follows until a
1426 * disconnect is reported. then a host may connect again, or
1427 * the driver might get unbound.
1428 */
1429int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1430{
1431 struct goku_udc *dev = the_controller;
1432 int retval;
1433
1434 if (!driver
1435 || driver->speed != USB_SPEED_FULL
1436 || !driver->bind
1437 || !driver->unbind
1438 || !driver->disconnect
1439 || !driver->setup)
1440 return -EINVAL;
1441 if (!dev)
1442 return -ENODEV;
1443 if (dev->driver)
1444 return -EBUSY;
1445
1446 /* hook up the driver */
1447 driver->driver.bus = NULL;
1448 dev->driver = driver;
1449 dev->gadget.dev.driver = &driver->driver;
1450 retval = driver->bind(&dev->gadget);
1451 if (retval) {
1452 DBG(dev, "bind to driver %s --> error %d\n",
1453 driver->driver.name, retval);
1454 dev->driver = NULL;
1455 dev->gadget.dev.driver = NULL;
1456 return retval;
1457 }
1458
1459 /* then enable host detection and ep0; and we're ready
1460 * for set_configuration as well as eventual disconnect.
1461 */
1462 udc_enable(dev);
1463
1464 DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
1465 return 0;
1466}
1467EXPORT_SYMBOL(usb_gadget_register_driver);
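For illustration, the minimal shape of a function driver that passes the checks above: full speed, with bind, unbind, disconnect and setup all supplied. Everything named sketch_* is a placeholder:

static int sketch_bind(struct usb_gadget *gadget)
{
	/* claim endpoints, set up descriptors, allocate the ep0 request */
	return 0;
}

static void sketch_unbind(struct usb_gadget *gadget) {}
static void sketch_disconnect(struct usb_gadget *gadget) {}

static int sketch_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* anything the hardware didn't handle and we don't: protocol stall */
	return -EOPNOTSUPP;
}

static struct usb_gadget_driver sketch_gadget_driver = {
	.speed		= USB_SPEED_FULL,
	.function	= (char *) "sketch",
	.bind		= sketch_bind,
	.unbind		= sketch_unbind,
	.disconnect	= sketch_disconnect,
	.setup		= sketch_setup,
	.driver		= {
		.name	= (char *) "sketch",
	},
};

/* module init would then call usb_gadget_register_driver(&sketch_gadget_driver) */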
1468
1469static void
1470stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
1471{
1472 unsigned i;
1473
1474 DBG (dev, "%s\n", __FUNCTION__);
1475
1476 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1477 driver = NULL;
1478
1479 /* disconnect gadget driver after quiescing hw and the driver */
1480 udc_reset (dev);
1481 for (i = 0; i < 4; i++)
1482 nuke(&dev->ep [i], -ESHUTDOWN);
1483 if (driver) {
1484 spin_unlock(&dev->lock);
1485 driver->disconnect(&dev->gadget);
1486 spin_lock(&dev->lock);
1487 }
1488
1489 if (dev->driver)
1490 udc_enable(dev);
1491}
1492
1493int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1494{
1495 struct goku_udc *dev = the_controller;
1496 unsigned long flags;
1497
1498 if (!dev)
1499 return -ENODEV;
1500 if (!driver || driver != dev->driver)
1501 return -EINVAL;
1502
1503 spin_lock_irqsave(&dev->lock, flags);
1504 dev->driver = NULL;
1505 stop_activity(dev, driver);
1506 spin_unlock_irqrestore(&dev->lock, flags);
1507
1508 driver->unbind(&dev->gadget);
1509
1510 DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
1511 return 0;
1512}
1513EXPORT_SYMBOL(usb_gadget_unregister_driver);
1514
1515
1516/*-------------------------------------------------------------------------*/
1517
1518static void ep0_setup(struct goku_udc *dev)
1519{
1520 struct goku_udc_regs __iomem *regs = dev->regs;
1521 struct usb_ctrlrequest ctrl;
1522 int tmp;
1523
1524 /* read SETUP packet and enter DATA stage */
1525 ctrl.bRequestType = readl(&regs->bRequestType);
1526 ctrl.bRequest = readl(&regs->bRequest);
1527 ctrl.wValue = (readl(&regs->wValueH) << 8) | readl(&regs->wValueL);
1528 ctrl.wIndex = (readl(&regs->wIndexH) << 8) | readl(&regs->wIndexL);
1529 ctrl.wLength = (readl(&regs->wLengthH) << 8) | readl(&regs->wLengthL);
1530 writel(0, &regs->SetupRecv);
1531
1532 nuke(&dev->ep[0], 0);
1533 dev->ep[0].stopped = 0;
1534 if (likely(ctrl.bRequestType & USB_DIR_IN)) {
1535 dev->ep[0].is_in = 1;
1536 dev->ep0state = EP0_IN;
1537 /* detect early status stages */
1538 writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
1539 } else {
1540 dev->ep[0].is_in = 0;
1541 dev->ep0state = EP0_OUT;
1542
1543 /* NOTE: CLEAR_FEATURE is done in software so that we can
1544 * synchronize transfer restarts after bulk IN stalls. data
1545 * won't even enter the fifo until the halt is cleared.
1546 */
1547 switch (ctrl.bRequest) {
1548 case USB_REQ_CLEAR_FEATURE:
1549 switch (ctrl.bRequestType) {
1550 case USB_RECIP_ENDPOINT:
1551 tmp = ctrl.wIndex & 0x0f;
1552 /* active endpoint */
1553 if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
1554 goto stall;
1555 if (ctrl.wIndex & USB_DIR_IN) {
1556 if (!dev->ep[tmp].is_in)
1557 goto stall;
1558 } else {
1559 if (dev->ep[tmp].is_in)
1560 goto stall;
1561 }
1562 if (ctrl.wValue != USB_ENDPOINT_HALT)
1563 goto stall;
1564 if (tmp)
1565 goku_clear_halt(&dev->ep[tmp]);
1566succeed:
1567 /* start ep0out status stage */
1568 writel(~(1<<0), &regs->EOP);
1569 dev->ep[0].stopped = 1;
1570 dev->ep0state = EP0_STATUS;
1571 return;
1572 case USB_RECIP_DEVICE:
1573 /* device remote wakeup: always clear */
1574 if (ctrl.wValue != 1)
1575 goto stall;
1576 VDBG(dev, "clear dev remote wakeup\n");
1577 goto succeed;
1578 case USB_RECIP_INTERFACE:
1579 goto stall;
1580 default: /* pass to gadget driver */
1581 break;
1582 }
1583 break;
1584 default:
1585 break;
1586 }
1587 }
1588
1589#ifdef USB_TRACE
1590 VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1591 ctrl.bRequestType, ctrl.bRequest,
1592 ctrl.wValue, ctrl.wIndex, ctrl.wLength);
1593#endif
1594
1595 /* hw wants to know when we're configured (or not) */
1596 dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
1597 && ctrl.bRequestType == USB_RECIP_DEVICE);
1598 if (unlikely(dev->req_config))
1599 dev->configured = (ctrl.wValue != 0);
1600
1601 /* delegate everything to the gadget driver.
1602 * it may respond after this irq handler returns.
1603 */
1604 spin_unlock (&dev->lock);
1605 tmp = dev->driver->setup(&dev->gadget, &ctrl);
1606 spin_lock (&dev->lock);
1607 if (unlikely(tmp < 0)) {
1608stall:
1609#ifdef USB_TRACE
1610 VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
1611 ctrl.bRequestType, ctrl.bRequest, tmp);
1612#endif
1613 command(regs, COMMAND_STALL, 0);
1614 dev->ep[0].stopped = 1;
1615 dev->ep0state = EP0_STALL;
1616 }
1617
1618 /* expect at least one data or status stage irq */
1619}
1620
1621#define ACK(irqbit) { \
1622 stat &= ~irqbit; \
1623 writel(~irqbit, &regs->int_status); \
1624 handled = 1; \
1625 }
1626
1627static irqreturn_t goku_irq(int irq, void *_dev, struct pt_regs *r)
1628{
1629 struct goku_udc *dev = _dev;
1630 struct goku_udc_regs __iomem *regs = dev->regs;
1631 struct goku_ep *ep;
1632 u32 stat, handled = 0;
1633 unsigned i, rescans = 5;
1634
1635 spin_lock(&dev->lock);
1636
1637rescan:
1638 stat = readl(&regs->int_status) & dev->int_enable;
1639 if (!stat)
1640 goto done;
1641 dev->irqs++;
1642
1643 /* device-wide irqs */
1644 if (unlikely(stat & INT_DEVWIDE)) {
1645 if (stat & INT_SYSERROR) {
1646 ERROR(dev, "system error\n");
1647 stop_activity(dev, dev->driver);
1648 stat = 0;
1649 handled = 1;
1650 // FIXME have a neater way to prevent re-enumeration
1651 dev->driver = NULL;
1652 goto done;
1653 }
1654 if (stat & INT_PWRDETECT) {
1655 writel(~stat, &regs->int_status);
1656 if (readl(&dev->regs->power_detect) & PW_DETECT) {
1657 VDBG(dev, "connect\n");
1658 ep0_start(dev);
1659 } else {
1660 DBG(dev, "disconnect\n");
1661 if (dev->gadget.speed == USB_SPEED_FULL)
1662 stop_activity(dev, dev->driver);
1663 dev->ep0state = EP0_DISCONNECT;
1664 dev->int_enable = INT_DEVWIDE;
1665 writel(dev->int_enable, &dev->regs->int_enable);
1666 }
1667 stat = 0;
1668 handled = 1;
1669 goto done;
1670 }
1671 if (stat & INT_SUSPEND) {
1672 ACK(INT_SUSPEND);
1673 if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
1674 switch (dev->ep0state) {
1675 case EP0_DISCONNECT:
1676 case EP0_SUSPEND:
1677 goto pm_next;
1678 default:
1679 break;
1680 }
1681 DBG(dev, "USB suspend\n");
1682 dev->ep0state = EP0_SUSPEND;
1683 if (dev->gadget.speed != USB_SPEED_UNKNOWN
1684 && dev->driver
1685 && dev->driver->suspend) {
1686 spin_unlock(&dev->lock);
1687 dev->driver->suspend(&dev->gadget);
1688 spin_lock(&dev->lock);
1689 }
1690 } else {
1691 if (dev->ep0state != EP0_SUSPEND) {
1692 DBG(dev, "bogus USB resume %d\n",
1693 dev->ep0state);
1694 goto pm_next;
1695 }
1696 DBG(dev, "USB resume\n");
1697 dev->ep0state = EP0_IDLE;
1698 if (dev->gadget.speed != USB_SPEED_UNKNOWN
1699 && dev->driver
1700 && dev->driver->resume) {
1701 spin_unlock(&dev->lock);
1702 dev->driver->resume(&dev->gadget);
1703 spin_lock(&dev->lock);
1704 }
1705 }
1706 }
1707pm_next:
1708 if (stat & INT_USBRESET) { /* hub reset done */
1709 ACK(INT_USBRESET);
1710 INFO(dev, "USB reset done, gadget %s\n",
1711 dev->driver->driver.name);
1712 }
1713 // and INT_ERR on some endpoint's crc/bitstuff/... problem
1714 }
1715
1716 /* progress ep0 setup, data, or status stages.
1717 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
1718 */
1719 if (stat & INT_SETUP) {
1720 ACK(INT_SETUP);
1721 dev->ep[0].irqs++;
1722 ep0_setup(dev);
1723 }
1724 if (stat & INT_STATUSNAK) {
1725 ACK(INT_STATUSNAK|INT_ENDPOINT0);
1726 if (dev->ep0state == EP0_IN) {
1727 ep = &dev->ep[0];
1728 ep->irqs++;
1729 nuke(ep, 0);
1730 writel(~(1<<0), &regs->EOP);
1731 dev->ep0state = EP0_STATUS;
1732 }
1733 }
1734 if (stat & INT_ENDPOINT0) {
1735 ACK(INT_ENDPOINT0);
1736 ep = &dev->ep[0];
1737 ep->irqs++;
1738 pio_advance(ep);
1739 }
1740
1741 /* dma completion */
1742 if (stat & INT_MSTRDEND) { /* IN */
1743 ACK(INT_MSTRDEND);
1744 ep = &dev->ep[UDC_MSTRD_ENDPOINT];
1745 ep->irqs++;
1746 dma_advance(dev, ep);
1747 }
1748 if (stat & INT_MSTWREND) { /* OUT */
1749 ACK(INT_MSTWREND);
1750 ep = &dev->ep[UDC_MSTWR_ENDPOINT];
1751 ep->irqs++;
1752 dma_advance(dev, ep);
1753 }
1754 if (stat & INT_MSTWRTMOUT) { /* OUT */
1755 ACK(INT_MSTWRTMOUT);
1756 ep = &dev->ep[UDC_MSTWR_ENDPOINT];
1757 ep->irqs++;
1758 ERROR(dev, "%s write timeout ?\n", ep->ep.name);
1759 // reset dma? then dma_advance()
1760 }
1761
1762 /* pio */
1763 for (i = 1; i < 4; i++) {
1764 u32 tmp = INT_EPxDATASET(i);
1765
1766 if (!(stat & tmp))
1767 continue;
1768 ep = &dev->ep[i];
1769 pio_advance(ep);
1770 if (list_empty (&ep->queue))
1771 pio_irq_disable(dev, regs, i);
1772 stat &= ~tmp;
1773 handled = 1;
1774 ep->irqs++;
1775 }
1776
1777 if (rescans--)
1778 goto rescan;
1779
1780done:
1781 (void)readl(&regs->int_enable);
1782 spin_unlock(&dev->lock);
1783 if (stat)
1784 DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
1785 readl(&regs->int_status), dev->int_enable);
1786 return IRQ_RETVAL(handled);
1787}
1788
1789#undef ACK
1790
1791/*-------------------------------------------------------------------------*/
1792
1793static void gadget_release(struct device *_dev)
1794{
1795 struct goku_udc *dev = dev_get_drvdata(_dev);
1796
1797 kfree(dev);
1798}
1799
1800/* tear down the binding between this driver and the pci device */
1801
1802static void goku_remove(struct pci_dev *pdev)
1803{
1804 struct goku_udc *dev = pci_get_drvdata(pdev);
1805
1806 DBG(dev, "%s\n", __FUNCTION__);
1807 /* start with the driver above us */
1808 if (dev->driver) {
1809 /* should have been done already by driver model core */
1810 WARN(dev, "pci remove, driver '%s' is still registered\n",
1811 dev->driver->driver.name);
1812 usb_gadget_unregister_driver(dev->driver);
1813 }
1814
1815#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1816 remove_proc_entry(proc_node_name, NULL);
1817#endif
1818 if (dev->regs)
1819 udc_reset(dev);
1820 if (dev->got_irq)
1821 free_irq(pdev->irq, dev);
1822 if (dev->regs)
1823 iounmap(dev->regs);
1824 if (dev->got_region)
1825 release_mem_region(pci_resource_start (pdev, 0),
1826 pci_resource_len (pdev, 0));
1827 if (dev->enabled)
1828 pci_disable_device(pdev);
1829 device_unregister(&dev->gadget.dev);
1830
1831 pci_set_drvdata(pdev, NULL);
1832 dev->regs = NULL;
1833 the_controller = NULL;
1834
1835 INFO(dev, "unbind\n");
1836}
1837
1838/* wrap this driver around the specified pci device, but
1839 * don't respond over USB until a gadget driver binds to us.
1840 */
1841
1842static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1843{
1844 struct goku_udc *dev = NULL;
1845 unsigned long resource, len;
1846 void __iomem *base = NULL;
1847 int retval;
1848 char buf [8], *bufp;
1849
1850 /* if you want to support more than one controller in a system,
1851	 * usb_gadget_{register,unregister}_driver() must change.
1852 */
1853 if (the_controller) {
1854 WARN(dev, "ignoring %s\n", pci_name(pdev));
1855 return -EBUSY;
1856 }
1857 if (!pdev->irq) {
1858 printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
1859 retval = -ENODEV;
1860 goto done;
1861 }
1862
1863 /* alloc, and start init */
1864 dev = kmalloc (sizeof *dev, SLAB_KERNEL);
1865	if (dev == NULL) {
1866 pr_debug("enomem %s\n", pci_name(pdev));
1867 retval = -ENOMEM;
1868 goto done;
1869 }
1870
1871 memset(dev, 0, sizeof *dev);
1872 spin_lock_init(&dev->lock);
1873 dev->pdev = pdev;
1874 dev->gadget.ops = &goku_ops;
1875
1876 /* the "gadget" abstracts/virtualizes the controller */
1877 strcpy(dev->gadget.dev.bus_id, "gadget");
1878 dev->gadget.dev.parent = &pdev->dev;
1879 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
1880 dev->gadget.dev.release = gadget_release;
1881 dev->gadget.name = driver_name;
1882
1883 /* now all the pci goodies ... */
1884 retval = pci_enable_device(pdev);
1885 if (retval < 0) {
1886 DBG(dev, "can't enable, %d\n", retval);
1887 goto done;
1888 }
1889 dev->enabled = 1;
1890
1891 resource = pci_resource_start(pdev, 0);
1892 len = pci_resource_len(pdev, 0);
1893 if (!request_mem_region(resource, len, driver_name)) {
1894 DBG(dev, "controller already in use\n");
1895 retval = -EBUSY;
1896 goto done;
1897 }
1898 dev->got_region = 1;
1899
1900 base = ioremap_nocache(resource, len);
1901 if (base == NULL) {
1902 DBG(dev, "can't map memory\n");
1903 retval = -EFAULT;
1904 goto done;
1905 }
1906 dev->regs = (struct goku_udc_regs __iomem *) base;
1907
1908 pci_set_drvdata(pdev, dev);
1909 INFO(dev, "%s\n", driver_desc);
1910 INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
1911#ifndef __sparc__
1912 scnprintf(buf, sizeof buf, "%d", pdev->irq);
1913 bufp = buf;
1914#else
1915 bufp = __irq_itoa(pdev->irq);
1916#endif
1917 INFO(dev, "irq %s, pci mem %p\n", bufp, base);
1918
1919 /* init to known state, then setup irqs */
1920 udc_reset(dev);
1921 udc_reinit (dev);
1922 if (request_irq(pdev->irq, goku_irq, SA_SHIRQ/*|SA_SAMPLE_RANDOM*/,
1923 driver_name, dev) != 0) {
1924 DBG(dev, "request interrupt %s failed\n", bufp);
1925 retval = -EBUSY;
1926 goto done;
1927 }
1928 dev->got_irq = 1;
1929 if (use_dma)
1930 pci_set_master(pdev);
1931
1932
1933#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1934 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
1935#endif
1936
1937 /* done */
1938 the_controller = dev;
1939 device_register(&dev->gadget.dev);
1940
1941 return 0;
1942
1943done:
1944 if (dev)
1945 goku_remove (pdev);
1946 return retval;
1947}
1948
1949
1950/*-------------------------------------------------------------------------*/
1951
1952static struct pci_device_id pci_ids [] = { {
1953 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
1954 .class_mask = ~0,
1955 .vendor = 0x102f, /* Toshiba */
1956 .device = 0x0107, /* this UDC */
1957 .subvendor = PCI_ANY_ID,
1958 .subdevice = PCI_ANY_ID,
1959
1960}, { /* end: all zeroes */ }
1961};
1962MODULE_DEVICE_TABLE (pci, pci_ids);
1963
1964static struct pci_driver goku_pci_driver = {
1965 .name = (char *) driver_name,
1966 .id_table = pci_ids,
1967
1968 .probe = goku_probe,
1969 .remove = goku_remove,
1970
1971 /* FIXME add power management support */
1972};
1973
1974static int __init init (void)
1975{
1976 return pci_register_driver (&goku_pci_driver);
1977}
1978module_init (init);
1979
1980static void __exit cleanup (void)
1981{
1982 pci_unregister_driver (&goku_pci_driver);
1983}
1984module_exit (cleanup);
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
new file mode 100644
index 000000000000..ea8c8e58cabf
--- /dev/null
+++ b/drivers/usb/gadget/goku_udc.h
@@ -0,0 +1,290 @@
1/*
2 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
3 *
4 * Copyright (C) 2000-2002 Lineo
5 * by Stuart Lynne, Tom Rushworth, and Bruce Balden
6 * Copyright (C) 2002 Toshiba Corporation
7 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14/*
15 * PCI BAR 0 points to these registers.
16 */
17struct goku_udc_regs {
18 /* irq management */
19 u32 int_status; /* 0x000 */
20 u32 int_enable;
21#define INT_SUSPEND 0x00001 /* or resume */
22#define INT_USBRESET 0x00002
23#define INT_ENDPOINT0 0x00004
24#define INT_SETUP 0x00008
25#define INT_STATUS 0x00010
26#define INT_STATUSNAK 0x00020
27#define INT_EPxDATASET(n) (0x00020 << (n)) /* 0 < n < 4 */
28# define INT_EP1DATASET 0x00040
29# define INT_EP2DATASET 0x00080
30# define INT_EP3DATASET 0x00100
31#define INT_EPnNAK(n)		(0x00100 << (n))	/* 0 < n < 4 */
32# define INT_EP1NAK 0x00200
33# define INT_EP2NAK 0x00400
34# define INT_EP3NAK 0x00800
35#define INT_SOF 0x01000
36#define INT_ERR 0x02000
37#define INT_MSTWRSET 0x04000
38#define INT_MSTWREND 0x08000
39#define INT_MSTWRTMOUT 0x10000
40#define INT_MSTRDEND 0x20000
41#define INT_SYSERROR 0x40000
42#define INT_PWRDETECT 0x80000
43
44#define INT_DEVWIDE (INT_PWRDETECT|INT_SYSERROR/*|INT_ERR*/|INT_USBRESET|INT_SUSPEND)
45#define INT_EP0 (INT_SETUP|INT_ENDPOINT0/*|INT_STATUS*/|INT_STATUSNAK)
46
47 u32 dma_master;
48#define MST_EOPB_DIS 0x0800
49#define MST_EOPB_ENA 0x0400
50#define MST_TIMEOUT_DIS 0x0200
51#define MST_TIMEOUT_ENA 0x0100
52#define MST_RD_EOPB 0x0080 /* write-only */
53#define MST_RD_RESET 0x0040
54#define MST_WR_RESET 0x0020
55#define MST_RD_ENA 0x0004 /* 1:start, 0:ignore */
56#define MST_WR_ENA 0x0002 /* 1:start, 0:ignore */
57#define MST_CONNECTION 0x0001 /* 0 for ep1out/ep2in */
58
59#define MST_R_BITS (MST_EOPB_DIS|MST_EOPB_ENA \
60 |MST_RD_ENA|MST_RD_RESET)
61#define MST_W_BITS (MST_TIMEOUT_DIS|MST_TIMEOUT_ENA \
62 |MST_WR_ENA|MST_WR_RESET)
63#define MST_RW_BITS (MST_R_BITS|MST_W_BITS \
64 |MST_CONNECTION)
65
66/* these values assume (dma_master & MST_CONNECTION) == 0 */
67#define UDC_MSTWR_ENDPOINT 1
68#define UDC_MSTRD_ENDPOINT 2
69
70 /* dma master write */
71 u32 out_dma_start;
72 u32 out_dma_end;
73 u32 out_dma_current;
74
75 /* dma master read */
76 u32 in_dma_start;
77 u32 in_dma_end;
78 u32 in_dma_current;
79
80 u32 power_detect;
81#define PW_DETECT 0x04
82#define PW_RESETB 0x02
83#define PW_PULLUP 0x01
84
85 u8 _reserved0 [0x1d8];
86
87 /* endpoint registers */
88 u32 ep_fifo [4]; /* 0x200 */
89 u8 _reserved1 [0x10];
90 u32 ep_mode [4]; /* only 1-3 valid */
91 u8 _reserved2 [0x10];
92
93 u32 ep_status [4];
94#define EPxSTATUS_TOGGLE 0x40
95#define EPxSTATUS_SUSPEND 0x20
96#define EPxSTATUS_EP_MASK (0x07<<2)
97# define EPxSTATUS_EP_READY (0<<2)
98# define EPxSTATUS_EP_DATAIN (1<<2)
99# define EPxSTATUS_EP_FULL (2<<2)
100# define EPxSTATUS_EP_TX_ERR (3<<2)
101# define EPxSTATUS_EP_RX_ERR (4<<2)
102# define EPxSTATUS_EP_BUSY (5<<2)
103# define EPxSTATUS_EP_STALL (6<<2)
104# define EPxSTATUS_EP_INVALID (7<<2)
105#define EPxSTATUS_FIFO_DISABLE 0x02
106#define EPxSTATUS_STAGE_ERROR 0x01
107
108 u8 _reserved3 [0x10];
109 u32 EPxSizeLA[4];
110#define PACKET_ACTIVE (1<<7)
111#define DATASIZE 0x7f
112 u8 _reserved3a [0x10];
113 u32 EPxSizeLB[4]; /* only 1,2 valid */
114 u8 _reserved3b [0x10];
115 u32 EPxSizeHA[4]; /* only 1-3 valid */
116 u8 _reserved3c [0x10];
117 u32 EPxSizeHB[4]; /* only 1,2 valid */
118 u8 _reserved4[0x30];
119
120 /* SETUP packet contents */
121 u32 bRequestType; /* 0x300 */
122 u32 bRequest;
123 u32 wValueL;
124 u32 wValueH;
125 u32 wIndexL;
126 u32 wIndexH;
127 u32 wLengthL;
128 u32 wLengthH;
129
130 /* command interaction/handshaking */
131 u32 SetupRecv; /* 0x320 */
132 u32 CurrConfig;
133 u32 StdRequest;
134 u32 Request;
135 u32 DataSet;
136#define DATASET_A(epnum) (1<<(2*(epnum)))
137#define DATASET_B(epnum) (2<<(2*(epnum)))
138#define DATASET_AB(epnum) (3<<(2*(epnum)))
139 u8 _reserved5[4];
140
141 u32 UsbState;
142#define USBSTATE_CONFIGURED 0x04
143#define USBSTATE_ADDRESSED 0x02
144#define USBSTATE_DEFAULT 0x01
145
146 u32 EOP;
147
148 u32 Command; /* 0x340 */
149#define COMMAND_SETDATA0 2
150#define COMMAND_RESET 3
151#define COMMAND_STALL 4
152#define COMMAND_INVALID 5
153#define COMMAND_FIFO_DISABLE 7
154#define COMMAND_FIFO_ENABLE 8
155#define COMMAND_INIT_DESCRIPTOR 9
156#define COMMAND_FIFO_CLEAR 10 /* also stall */
157#define COMMAND_STALL_CLEAR 11
158#define COMMAND_EP(n) ((n) << 4)
159
160 u32 EPxSingle;
161 u8 _reserved6[4];
162 u32 EPxBCS;
163 u8 _reserved7[8];
164 u32 IntControl;
165#define ICONTROL_STATUSNAK 1
166 u8 _reserved8[4];
167
168 u32 reqmode; // 0x360 standard request mode, low 8 bits
169#define G_REQMODE_SET_INTF (1<<7)
170#define G_REQMODE_GET_INTF (1<<6)
171#define G_REQMODE_SET_CONF (1<<5)
172#define G_REQMODE_GET_CONF (1<<4)
173#define G_REQMODE_GET_DESC (1<<3)
174#define G_REQMODE_SET_FEAT (1<<2)
175#define G_REQMODE_CLEAR_FEAT (1<<1)
176#define G_REQMODE_GET_STATUS (1<<0)
177
178 u32 ReqMode;
179 u8 _reserved9[0x18];
180 u32 PortStatus; /* 0x380 */
181 u8 _reserved10[8];
182 u32 address;
183 u32 buff_test;
184 u8 _reserved11[4];
185 u32 UsbReady;
186 u8 _reserved12[4];
187 u32 SetDescStall; /* 0x3a0 */
188 u8 _reserved13[0x45c];
189
190 /* hardware could handle limited GET_DESCRIPTOR duties */
191#define DESC_LEN 0x80
192 u32 descriptors[DESC_LEN]; /* 0x800 */
193 u8 _reserved14[0x600];
194
195} __attribute__ ((packed));
196
197#define MAX_FIFO_SIZE 64
198#define MAX_EP0_SIZE 8 /* ep0 fifo is bigger, though */
199
200
201/*-------------------------------------------------------------------------*/
202
203/* DRIVER DATA STRUCTURES and UTILITIES */
204
205struct goku_ep {
206 struct usb_ep ep;
207 struct goku_udc *dev;
208 unsigned long irqs;
209
210 unsigned num:8,
211 dma:1,
212 is_in:1,
213 stopped:1;
214
215 /* analogous to a host-side qh */
216 struct list_head queue;
217 const struct usb_endpoint_descriptor *desc;
218
219 u32 __iomem *reg_fifo;
220 u32 __iomem *reg_mode;
221 u32 __iomem *reg_status;
222};
223
224struct goku_request {
225 struct usb_request req;
226 struct list_head queue;
227
228 unsigned mapped:1;
229};
230
231enum ep0state {
232 EP0_DISCONNECT, /* no host */
233 EP0_IDLE, /* between STATUS ack and SETUP report */
234 EP0_IN, EP0_OUT, /* data stage */
235 EP0_STATUS, /* status stage */
236 EP0_STALL, /* data or status stages */
237 EP0_SUSPEND, /* usb suspend */
238};
239
240struct goku_udc {
241 /* each pci device provides one gadget, several endpoints */
242 struct usb_gadget gadget;
243 spinlock_t lock;
244 struct goku_ep ep[4];
245 struct usb_gadget_driver *driver;
246
247 enum ep0state ep0state;
248 unsigned got_irq:1,
249 got_region:1,
250 req_config:1,
251 configured:1,
252 enabled:1;
253
254 /* pci state used to access those endpoints */
255 struct pci_dev *pdev;
256 struct goku_udc_regs __iomem *regs;
257 u32 int_enable;
258
259 /* statistics... */
260 unsigned long irqs;
261};
262
263/*-------------------------------------------------------------------------*/
264
265#define xprintk(dev,level,fmt,args...) \
266 printk(level "%s %s: " fmt , driver_name , \
267 pci_name(dev->pdev) , ## args)
268
269#ifdef DEBUG
270#define DBG(dev,fmt,args...) \
271 xprintk(dev , KERN_DEBUG , fmt , ## args)
272#else
273#define DBG(dev,fmt,args...) \
274 do { } while (0)
275#endif /* DEBUG */
276
277#ifdef VERBOSE
278#define VDBG DBG
279#else
280#define VDBG(dev,fmt,args...) \
281 do { } while (0)
282#endif /* VERBOSE */
283
284#define ERROR(dev,fmt,args...) \
285 xprintk(dev , KERN_ERR , fmt , ## args)
286#define WARN(dev,fmt,args...) \
287 xprintk(dev , KERN_WARNING , fmt , ## args)
288#define INFO(dev,fmt,args...) \
289 xprintk(dev , KERN_INFO , fmt , ## args)
290
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
new file mode 100644
index 000000000000..2cff67ccce45
--- /dev/null
+++ b/drivers/usb/gadget/inode.c
@@ -0,0 +1,2110 @@
1/*
2 * inode.c -- user mode filesystem api for usb gadget controllers
3 *
4 * Copyright (C) 2003-2004 David Brownell
5 * Copyright (C) 2003 Agilent Technologies
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22
23// #define DEBUG /* data to help fault diagnosis */
24// #define VERBOSE /* extra debug messages (success too) */
25
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/fs.h>
29#include <linux/pagemap.h>
30#include <linux/uts.h>
31#include <linux/wait.h>
32#include <linux/compiler.h>
33#include <asm/uaccess.h>
34#include <linux/slab.h>
35
36#include <linux/device.h>
37#include <linux/moduleparam.h>
38
39#include <linux/usb_gadgetfs.h>
40#include <linux/usb_gadget.h>
41
42
43/*
44 * The gadgetfs API maps each endpoint to a file descriptor so that you
45 * can use standard synchronous read/write calls for I/O. There's some
46 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
47 * drivers show how this works in practice. You can also use AIO to
48 * eliminate I/O gaps between requests, to help when streaming data.
49 *
50 * Key parts that must be USB-specific are protocols defining how the
51 * read/write operations relate to the hardware state machines. There
52 * are two types of files. One type is for the device, implementing ep0.
53 * The other type is for each IN or OUT endpoint. In both cases, the
54 * user mode driver must configure the hardware before using it.
55 *
56 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
57 * (by writing configuration and device descriptors). Afterwards it
58 * may serve as a source of device events, used to handle all control
59 * requests other than basic enumeration.
60 *
61 * - Then either immediately, or after a SET_CONFIGURATION control request,
62 * ep_config() is called when each /dev/gadget/ep* file is configured
63 * (by writing endpoint descriptors). Afterwards these files are used
64 * to write() IN data or to read() OUT data. To halt the endpoint, a
65 * "wrong direction" request is issued (like reading an IN endpoint).
66 *
67 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
68 * not possible on all hardware. For example, precise fault handling with
69 * respect to data left in endpoint fifos after aborted operations; or
70 * selective clearing of endpoint halts, to implement SET_INTERFACE.
71 */
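
To make the flow above concrete, here is a minimal userspace sketch of the first step: open the controller file and write the tagged configuration message (config plus device descriptors). The path /dev/gadget/net2280, the helper name setup_ep0(), the descriptor values, and the use of tag 0 for the device file (endpoint files, described further below, use tag 1) are illustrative assumptions; whether such a bare-bones descriptor set actually enumerates depends on dev_config()'s validation and on real vendor/product IDs being filled in.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* 9-byte configuration descriptor + 9-byte interface descriptor;
 * wTotalLength (bytes 2-3) covers both.
 */
static const uint8_t config_desc[] = {
	9, 0x02, 18, 0, 1, 1, 0, 0xc0, 1,	/* CONFIGURATION */
	9, 0x04, 0, 0, 0, 0xff, 0, 0, 0,	/* INTERFACE, vendor class */
};

/* 18-byte device descriptor; idVendor/idProduct deliberately left zero */
static const uint8_t device_desc[] = {
	18, 0x01, 0x00, 0x02, 0xff, 0, 0, 64,
	0x00, 0x00, 0x00, 0x00,			/* fill in real VID/PID */
	0x01, 0x00, 0, 0, 0, 1,
};

int setup_ep0(const char *chip_path)	/* e.g. "/dev/gadget/net2280" */
{
	uint8_t msg[4 + sizeof config_desc + sizeof device_desc];
	uint32_t tag = 0;		/* assumed device-file tag, host order */
	int fd = open(chip_path, O_RDWR);

	if (fd < 0)
		return -1;
	memcpy(msg, &tag, 4);
	memcpy(msg + 4, config_desc, sizeof config_desc);
	memcpy(msg + 4 + sizeof config_desc, device_desc, sizeof device_desc);
	if (write(fd, msg, sizeof msg) != (ssize_t) sizeof msg) {
		close(fd);
		return -1;
	}
	return fd;	/* keep it open; it now delivers device events */
}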
72
73#define DRIVER_DESC "USB Gadget filesystem"
74#define DRIVER_VERSION "24 Aug 2004"
75
76static const char driver_desc [] = DRIVER_DESC;
77static const char shortname [] = "gadgetfs";
78
79MODULE_DESCRIPTION (DRIVER_DESC);
80MODULE_AUTHOR ("David Brownell");
81MODULE_LICENSE ("GPL");
82
83
84/*----------------------------------------------------------------------*/
85
86#define GADGETFS_MAGIC 0xaee71ee7
87#define DMA_ADDR_INVALID (~(dma_addr_t)0)
88
89/* /dev/gadget/$CHIP represents ep0 and the whole device */
90enum ep0_state {
91	/* DISABLED is the initial state.
92 */
93 STATE_DEV_DISABLED = 0,
94
95 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
96 * ep0/device i/o modes and binding to the controller. Driver
97 * must always write descriptors to initialize the device, then
98 * the device becomes UNCONNECTED until enumeration.
99 */
100 STATE_OPENED,
101
102 /* From then on, ep0 fd is in either of two basic modes:
103 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
104 * - SETUP: read/write will transfer control data and succeed;
105 * or if "wrong direction", performs protocol stall
106 */
107 STATE_UNCONNECTED,
108 STATE_CONNECTED,
109 STATE_SETUP,
110
111 /* UNBOUND means the driver closed ep0, so the device won't be
112 * accessible again (DEV_DISABLED) until all fds are closed.
113 */
114 STATE_DEV_UNBOUND,
115};
116
117/* enough for the whole queue: most events invalidate others */
118#define N_EVENT 5
119
120struct dev_data {
121 spinlock_t lock;
122 atomic_t count;
123 enum ep0_state state;
124 struct usb_gadgetfs_event event [N_EVENT];
125 unsigned ev_next;
126 struct fasync_struct *fasync;
127 u8 current_config;
128
129 /* drivers reading ep0 MUST handle control requests (SETUP)
130 * reported that way; else the host will time out.
131 */
132 unsigned usermode_setup : 1,
133 setup_in : 1,
134 setup_can_stall : 1,
135 setup_out_ready : 1,
136 setup_out_error : 1,
137 setup_abort : 1;
138
139 /* the rest is basically write-once */
140 struct usb_config_descriptor *config, *hs_config;
141 struct usb_device_descriptor *dev;
142 struct usb_request *req;
143 struct usb_gadget *gadget;
144 struct list_head epfiles;
145 void *buf;
146 wait_queue_head_t wait;
147 struct super_block *sb;
148 struct dentry *dentry;
149
150 /* except this scratch i/o buffer for ep0 */
151 u8 rbuf [256];
152};
153
154static inline void get_dev (struct dev_data *data)
155{
156 atomic_inc (&data->count);
157}
158
159static void put_dev (struct dev_data *data)
160{
161 if (likely (!atomic_dec_and_test (&data->count)))
162 return;
163 /* needs no more cleanup */
164 BUG_ON (waitqueue_active (&data->wait));
165 kfree (data);
166}
167
168static struct dev_data *dev_new (void)
169{
170 struct dev_data *dev;
171
172 dev = kmalloc (sizeof *dev, GFP_KERNEL);
173 if (!dev)
174 return NULL;
175 memset (dev, 0, sizeof *dev);
176 dev->state = STATE_DEV_DISABLED;
177 atomic_set (&dev->count, 1);
178 spin_lock_init (&dev->lock);
179 INIT_LIST_HEAD (&dev->epfiles);
180 init_waitqueue_head (&dev->wait);
181 return dev;
182}
183
184/*----------------------------------------------------------------------*/
185
186/* other /dev/gadget/$ENDPOINT files represent endpoints */
187enum ep_state {
188 STATE_EP_DISABLED = 0,
189 STATE_EP_READY,
190 STATE_EP_DEFER_ENABLE,
191 STATE_EP_ENABLED,
192 STATE_EP_UNBOUND,
193};
194
195struct ep_data {
196 struct semaphore lock;
197 enum ep_state state;
198 atomic_t count;
199 struct dev_data *dev;
200 /* must hold dev->lock before accessing ep or req */
201 struct usb_ep *ep;
202 struct usb_request *req;
203 ssize_t status;
204 char name [16];
205 struct usb_endpoint_descriptor desc, hs_desc;
206 struct list_head epfiles;
207 wait_queue_head_t wait;
208 struct dentry *dentry;
209 struct inode *inode;
210};
211
212static inline void get_ep (struct ep_data *data)
213{
214 atomic_inc (&data->count);
215}
216
217static void put_ep (struct ep_data *data)
218{
219 if (likely (!atomic_dec_and_test (&data->count)))
220 return;
221 put_dev (data->dev);
222 /* needs no more cleanup */
223 BUG_ON (!list_empty (&data->epfiles));
224 BUG_ON (waitqueue_active (&data->wait));
225 BUG_ON (down_trylock (&data->lock) != 0);
226 kfree (data);
227}
228
229/*----------------------------------------------------------------------*/
230
231/* most "how to use the hardware" policy choices are in userspace:
232 * mapping endpoint roles (which the driver needs) to the capabilities
233 * which the usb controller has. most of those capabilities are exposed
234 * implicitly, starting with the driver name and then endpoint names.
235 */
236
237static const char *CHIP;
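
In practice a user mode driver discovers those endpoint names simply by listing the gadgetfs mount point once the controller file has been configured; a small hypothetical sketch follows (/dev/gadget is the usual mount point by convention, and names such as "ep1in" or "ep-a" are controller specific):

#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* list the endpoint files this controller exposes */
static void list_endpoints(void)
{
	DIR *dir = opendir("/dev/gadget");
	struct dirent *de;

	if (!dir)
		return;
	while ((de = readdir(dir)) != NULL) {
		if (strncmp(de->d_name, "ep", 2) != 0)
			continue;	/* skip the $CHIP file, "." and ".." */
		printf("endpoint file: /dev/gadget/%s\n", de->d_name);
	}
	closedir(dir);
}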
238
239/*----------------------------------------------------------------------*/
240
241/* NOTE: don't use dev_printk calls before binding to the gadget
242 * at the end of ep0 configuration, or after unbind.
243 */
244
245/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
246#define xprintk(d,level,fmt,args...) \
247 printk(level "%s: " fmt , shortname , ## args)
248
249#ifdef DEBUG
250#define DBG(dev,fmt,args...) \
251 xprintk(dev , KERN_DEBUG , fmt , ## args)
252#else
253#define DBG(dev,fmt,args...) \
254 do { } while (0)
255#endif /* DEBUG */
256
257#ifdef VERBOSE
258#define VDEBUG DBG
259#else
260#define VDEBUG(dev,fmt,args...) \
261 do { } while (0)
262#endif	/* VERBOSE */
263
264#define ERROR(dev,fmt,args...) \
265 xprintk(dev , KERN_ERR , fmt , ## args)
266#define WARN(dev,fmt,args...) \
267 xprintk(dev , KERN_WARNING , fmt , ## args)
268#define INFO(dev,fmt,args...) \
269 xprintk(dev , KERN_INFO , fmt , ## args)
270
271
272/*----------------------------------------------------------------------*/
273
274/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
275 *
276 * After opening, configure non-control endpoints. Then use normal
277 * stream read() and write() requests; and maybe ioctl() to get more
278 * precise FIFO status when recovering from cancelation.
279 */
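
For example, a hypothetical helper that pushes one buffer through an already configured IN endpoint file, falling back to the FIFO status ioctl when the transfer is cut short; GADGETFS_FIFO_STATUS comes from <linux/usb_gadgetfs.h>, the header this file already includes:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb_gadgetfs.h>

/* write() blocks until the host collects the IN data (or an error) */
static int send_in_data(int ep_fd, const void *data, size_t len)
{
	ssize_t n = write(ep_fd, data, len);

	if (n < 0) {
		/* e.g. interrupted/cancelled: ask how much is still queued */
		int fifo = ioctl(ep_fd, GADGETFS_FIFO_STATUS);

		if (fifo > 0)
			fprintf(stderr, "%d bytes still in fifo\n", fifo);
		return -1;
	}
	return 0;
}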
280
281static void epio_complete (struct usb_ep *ep, struct usb_request *req)
282{
283 struct ep_data *epdata = ep->driver_data;
284
285 if (!req->context)
286 return;
287 if (req->status)
288 epdata->status = req->status;
289 else
290 epdata->status = req->actual;
291 complete ((struct completion *)req->context);
292}
293
294/* grab the endpoint's semaphore, returning once the endpoint is enabled.
295 * still need dev->lock to use epdata->ep.
296 */
297static int
298get_ready_ep (unsigned f_flags, struct ep_data *epdata)
299{
300 int val;
301
302 if (f_flags & O_NONBLOCK) {
303 if (down_trylock (&epdata->lock) != 0)
304 goto nonblock;
305 if (epdata->state != STATE_EP_ENABLED) {
306 up (&epdata->lock);
307nonblock:
308 val = -EAGAIN;
309 } else
310 val = 0;
311 return val;
312 }
313
314 if ((val = down_interruptible (&epdata->lock)) < 0)
315 return val;
316newstate:
317 switch (epdata->state) {
318 case STATE_EP_ENABLED:
319 break;
320 case STATE_EP_DEFER_ENABLE:
321 DBG (epdata->dev, "%s wait for host\n", epdata->name);
322 if ((val = wait_event_interruptible (epdata->wait,
323 epdata->state != STATE_EP_DEFER_ENABLE
324 || epdata->dev->state == STATE_DEV_UNBOUND
325 )) < 0)
326 goto fail;
327 goto newstate;
328 // case STATE_EP_DISABLED: /* "can't happen" */
329 // case STATE_EP_READY: /* "can't happen" */
330 default: /* error! */
331 pr_debug ("%s: ep %p not available, state %d\n",
332 shortname, epdata, epdata->state);
333 // FALLTHROUGH
334 case STATE_EP_UNBOUND: /* clean disconnect */
335 val = -ENODEV;
336fail:
337 up (&epdata->lock);
338 }
339 return val;
340}
341
342static ssize_t
343ep_io (struct ep_data *epdata, void *buf, unsigned len)
344{
345 DECLARE_COMPLETION (done);
346 int value;
347
348 spin_lock_irq (&epdata->dev->lock);
349 if (likely (epdata->ep != NULL)) {
350 struct usb_request *req = epdata->req;
351
352 req->context = &done;
353 req->complete = epio_complete;
354 req->buf = buf;
355 req->length = len;
356 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
357 } else
358 value = -ENODEV;
359 spin_unlock_irq (&epdata->dev->lock);
360
361 if (likely (value == 0)) {
362 value = wait_event_interruptible (done.wait, done.done);
363 if (value != 0) {
364 spin_lock_irq (&epdata->dev->lock);
365 if (likely (epdata->ep != NULL)) {
366 DBG (epdata->dev, "%s i/o interrupted\n",
367 epdata->name);
368 usb_ep_dequeue (epdata->ep, epdata->req);
369 spin_unlock_irq (&epdata->dev->lock);
370
371 wait_event (done.wait, done.done);
372 if (epdata->status == -ECONNRESET)
373 epdata->status = -EINTR;
374 } else {
375 spin_unlock_irq (&epdata->dev->lock);
376
377 DBG (epdata->dev, "endpoint gone\n");
378 epdata->status = -ENODEV;
379 }
380 }
381 return epdata->status;
382 }
383 return value;
384}
385
386
387/* handle a synchronous OUT bulk/intr/iso transfer */
388static ssize_t
389ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
390{
391 struct ep_data *data = fd->private_data;
392 void *kbuf;
393 ssize_t value;
394
395 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
396 return value;
397
398 /* halt any endpoint by doing a "wrong direction" i/o call */
399 if (data->desc.bEndpointAddress & USB_DIR_IN) {
400		if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
401				== USB_ENDPOINT_XFER_ISOC) {
402			up (&data->lock);
			return -EINVAL;
		}
403 DBG (data->dev, "%s halt\n", data->name);
404 spin_lock_irq (&data->dev->lock);
405 if (likely (data->ep != NULL))
406 usb_ep_set_halt (data->ep);
407 spin_unlock_irq (&data->dev->lock);
408 up (&data->lock);
409 return -EBADMSG;
410 }
411
412 /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
413
414 value = -ENOMEM;
415 kbuf = kmalloc (len, SLAB_KERNEL);
416 if (unlikely (!kbuf))
417 goto free1;
418
419 value = ep_io (data, kbuf, len);
420 VDEBUG (data->dev, "%s read %d OUT, status %d\n",
421 data->name, len, value);
422 if (value >= 0 && copy_to_user (buf, kbuf, value))
423 value = -EFAULT;
424
425free1:
426 up (&data->lock);
427 kfree (kbuf);
428 return value;
429}
430
431/* handle a synchronous IN bulk/intr/iso transfer */
432static ssize_t
433ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
434{
435 struct ep_data *data = fd->private_data;
436 void *kbuf;
437 ssize_t value;
438
439 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
440 return value;
441
442 /* halt any endpoint by doing a "wrong direction" i/o call */
443 if (!(data->desc.bEndpointAddress & USB_DIR_IN)) {
444		if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
445				== USB_ENDPOINT_XFER_ISOC) {
446			up (&data->lock);
			return -EINVAL;
		}
447 DBG (data->dev, "%s halt\n", data->name);
448 spin_lock_irq (&data->dev->lock);
449 if (likely (data->ep != NULL))
450 usb_ep_set_halt (data->ep);
451 spin_unlock_irq (&data->dev->lock);
452 up (&data->lock);
453 return -EBADMSG;
454 }
455
456 /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
457
458 value = -ENOMEM;
459 kbuf = kmalloc (len, SLAB_KERNEL);
460 if (!kbuf)
461 goto free1;
462 if (copy_from_user (kbuf, buf, len)) {
463 value = -EFAULT;
464 goto free1;
465 }
466
467 value = ep_io (data, kbuf, len);
468 VDEBUG (data->dev, "%s write %d IN, status %d\n",
469 data->name, len, value);
470free1:
471 up (&data->lock);
472 kfree (kbuf);
473 return value;
474}
475
476static int
477ep_release (struct inode *inode, struct file *fd)
478{
479 struct ep_data *data = fd->private_data;
480
481 /* clean up if this can be reopened */
482 if (data->state != STATE_EP_UNBOUND) {
483 data->state = STATE_EP_DISABLED;
484 data->desc.bDescriptorType = 0;
485 data->hs_desc.bDescriptorType = 0;
486 }
487 put_ep (data);
488 return 0;
489}
490
491static int ep_ioctl (struct inode *inode, struct file *fd,
492 unsigned code, unsigned long value)
493{
494 struct ep_data *data = fd->private_data;
495 int status;
496
497 if ((status = get_ready_ep (fd->f_flags, data)) < 0)
498 return status;
499
500 spin_lock_irq (&data->dev->lock);
501 if (likely (data->ep != NULL)) {
502 switch (code) {
503 case GADGETFS_FIFO_STATUS:
504 status = usb_ep_fifo_status (data->ep);
505 break;
506 case GADGETFS_FIFO_FLUSH:
507 usb_ep_fifo_flush (data->ep);
508 break;
509 case GADGETFS_CLEAR_HALT:
510 status = usb_ep_clear_halt (data->ep);
511 break;
512 default:
513 status = -ENOTTY;
514 }
515 } else
516 status = -ENODEV;
517 spin_unlock_irq (&data->dev->lock);
518 up (&data->lock);
519 return status;
520}
521
522/*----------------------------------------------------------------------*/
523
524/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
525
526struct kiocb_priv {
527 struct usb_request *req;
528 struct ep_data *epdata;
529 void *buf;
530 char __user *ubuf;
531 unsigned actual;
532};
533
534static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
535{
536 struct kiocb_priv *priv = iocb->private;
537 struct ep_data *epdata;
538 int value;
539
540 local_irq_disable();
541 epdata = priv->epdata;
542 // spin_lock(&epdata->dev->lock);
543 kiocbSetCancelled(iocb);
544 if (likely(epdata && epdata->ep && priv->req))
545 value = usb_ep_dequeue (epdata->ep, priv->req);
546 else
547 value = -EINVAL;
548 // spin_unlock(&epdata->dev->lock);
549 local_irq_enable();
550
551 aio_put_req(iocb);
552 return value;
553}
554
555static ssize_t ep_aio_read_retry(struct kiocb *iocb)
556{
557 struct kiocb_priv *priv = iocb->private;
558 ssize_t status = priv->actual;
559
560 /* we "retry" to get the right mm context for this: */
561 status = copy_to_user(priv->ubuf, priv->buf, priv->actual);
562 if (unlikely(0 != status))
563 status = -EFAULT;
564 else
565 status = priv->actual;
566 kfree(priv->buf);
567 kfree(priv);
568 aio_put_req(iocb);
569 return status;
570}
571
572static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
573{
574 struct kiocb *iocb = req->context;
575 struct kiocb_priv *priv = iocb->private;
576 struct ep_data *epdata = priv->epdata;
577
578 /* lock against disconnect (and ideally, cancel) */
579 spin_lock(&epdata->dev->lock);
580 priv->req = NULL;
581 priv->epdata = NULL;
582 if (NULL == iocb->ki_retry
583 || unlikely(0 == req->actual)
584 || unlikely(kiocbIsCancelled(iocb))) {
585 kfree(req->buf);
586 kfree(priv);
587 iocb->private = NULL;
588 /* aio_complete() reports bytes-transferred _and_ faults */
589 if (unlikely(kiocbIsCancelled(iocb)))
590 aio_put_req(iocb);
591 else
592 aio_complete(iocb,
593 req->actual ? req->actual : req->status,
594 req->status);
595 } else {
596 /* retry() won't report both; so we hide some faults */
597 if (unlikely(0 != req->status))
598 DBG(epdata->dev, "%s fault %d len %d\n",
599 ep->name, req->status, req->actual);
600
601 priv->buf = req->buf;
602 priv->actual = req->actual;
603 kick_iocb(iocb);
604 }
605 spin_unlock(&epdata->dev->lock);
606
607 usb_ep_free_request(ep, req);
608 put_ep(epdata);
609}
610
611static ssize_t
612ep_aio_rwtail(
613 struct kiocb *iocb,
614 char *buf,
615 size_t len,
616 struct ep_data *epdata,
617 char __user *ubuf
618)
619{
620	struct kiocb_priv *priv;
621 struct usb_request *req;
622 ssize_t value;
623
624 priv = kmalloc(sizeof *priv, GFP_KERNEL);
625 if (!priv) {
626 value = -ENOMEM;
627fail:
628 kfree(buf);
629 return value;
630 }
631 iocb->private = priv;
632 priv->ubuf = ubuf;
633
634 value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
635 if (unlikely(value < 0)) {
636 kfree(priv);
637 goto fail;
638 }
639
640 iocb->ki_cancel = ep_aio_cancel;
641 get_ep(epdata);
642 priv->epdata = epdata;
643 priv->actual = 0;
644
645 /* each kiocb is coupled to one usb_request, but we can't
646 * allocate or submit those if the host disconnected.
647 */
648 spin_lock_irq(&epdata->dev->lock);
649 if (likely(epdata->ep)) {
650 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
651 if (likely(req)) {
652 priv->req = req;
653 req->buf = buf;
654 req->length = len;
655 req->complete = ep_aio_complete;
656 req->context = iocb;
657 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
658 if (unlikely(0 != value))
659 usb_ep_free_request(epdata->ep, req);
660 } else
661 value = -EAGAIN;
662 } else
663 value = -ENODEV;
664 spin_unlock_irq(&epdata->dev->lock);
665
666 up(&epdata->lock);
667
668 if (unlikely(value)) {
669 kfree(priv);
670 put_ep(epdata);
671 } else
672 value = -EIOCBQUEUED;
673 return value;
674}
675
676static ssize_t
677ep_aio_read(struct kiocb *iocb, char __user *ubuf, size_t len, loff_t o)
678{
679 struct ep_data *epdata = iocb->ki_filp->private_data;
680 char *buf;
681
682 if (unlikely(epdata->desc.bEndpointAddress & USB_DIR_IN))
683 return -EINVAL;
684 buf = kmalloc(len, GFP_KERNEL);
685 if (unlikely(!buf))
686 return -ENOMEM;
687 iocb->ki_retry = ep_aio_read_retry;
688 return ep_aio_rwtail(iocb, buf, len, epdata, ubuf);
689}
690
691static ssize_t
692ep_aio_write(struct kiocb *iocb, const char __user *ubuf, size_t len, loff_t o)
693{
694 struct ep_data *epdata = iocb->ki_filp->private_data;
695 char *buf;
696
697 if (unlikely(!(epdata->desc.bEndpointAddress & USB_DIR_IN)))
698 return -EINVAL;
699 buf = kmalloc(len, GFP_KERNEL);
700 if (unlikely(!buf))
701 return -ENOMEM;
702 if (unlikely(copy_from_user(buf, ubuf, len) != 0)) {
703 kfree(buf);
704 return -EFAULT;
705 }
706 return ep_aio_rwtail(iocb, buf, len, epdata, NULL);
707}
708
709/*----------------------------------------------------------------------*/
710
711/* used after endpoint configuration */
712static struct file_operations ep_io_operations = {
713 .owner = THIS_MODULE,
714 .llseek = no_llseek,
715
716 .read = ep_read,
717 .write = ep_write,
718 .ioctl = ep_ioctl,
719 .release = ep_release,
720
721 .aio_read = ep_aio_read,
722 .aio_write = ep_aio_write,
723};
724
725/* ENDPOINT INITIALIZATION
726 *
727 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
728 * status = write (fd, descriptors, sizeof descriptors)
729 *
730 * That write establishes the endpoint configuration, configuring
731 * the controller to process bulk, interrupt, or isochronous transfers
732 * at the right maxpacket size, and so on.
733 *
734 * The descriptors are message type 1, identified by a host order u32
735 * at the beginning of what's written. Descriptor order is: full/low
736 * speed descriptor, then optional high speed descriptor.
737 */
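
A hypothetical userspace counterpart of that write, built from raw descriptor bytes so no kernel structs are needed; the path and the choice of a bulk IN endpoint at address 0x81 are assumptions that must match what the controller really offers:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* bulk IN endpoint descriptors: full speed (64 bytes), high speed (512) */
static const uint8_t fs_bulk_in[7] = { 7, 0x05, 0x81, 0x02, 64, 0, 0 };
static const uint8_t hs_bulk_in[7] = { 7, 0x05, 0x81, 0x02, 0x00, 0x02, 0 };

static int configure_bulk_in(const char *path)	/* e.g. "/dev/gadget/ep1in" */
{
	uint8_t msg[4 + sizeof fs_bulk_in + sizeof hs_bulk_in];
	uint32_t tag = 1;		/* message type 1, host byte order */
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	memcpy(msg, &tag, 4);
	memcpy(msg + 4, fs_bulk_in, sizeof fs_bulk_in);
	memcpy(msg + 4 + sizeof fs_bulk_in, hs_bulk_in, sizeof hs_bulk_in);
	if (write(fd, msg, sizeof msg) != (ssize_t) sizeof msg) {
		close(fd);
		return -1;
	}
	return fd;		/* now usable for write() of IN data */
}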
738static ssize_t
739ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
740{
741 struct ep_data *data = fd->private_data;
742 struct usb_ep *ep;
743 u32 tag;
744 int value;
745
746 if ((value = down_interruptible (&data->lock)) < 0)
747 return value;
748
749 if (data->state != STATE_EP_READY) {
750 value = -EL2HLT;
751 goto fail;
752 }
753
754 value = len;
755 if (len < USB_DT_ENDPOINT_SIZE + 4)
756 goto fail0;
757
758 /* we might need to change message format someday */
759 if (copy_from_user (&tag, buf, 4)) {
760 goto fail1;
761 }
762 if (tag != 1) {
763 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
764 goto fail0;
765 }
766 buf += 4;
767 len -= 4;
768
769 /* NOTE: audio endpoint extensions not accepted here;
770 * just don't include the extra bytes.
771 */
772
773 /* full/low speed descriptor, then high speed */
774 if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
775 goto fail1;
776 }
777 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
778 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
779 goto fail0;
780 if (len != USB_DT_ENDPOINT_SIZE) {
781 if (len != 2 * USB_DT_ENDPOINT_SIZE)
782 goto fail0;
783 if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
784 USB_DT_ENDPOINT_SIZE)) {
785 goto fail1;
786 }
787 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
788 || data->hs_desc.bDescriptorType
789 != USB_DT_ENDPOINT) {
790 DBG(data->dev, "config %s, bad hs length or type\n",
791 data->name);
792 goto fail0;
793 }
794 }
795 value = len;
796
797 spin_lock_irq (&data->dev->lock);
798 if (data->dev->state == STATE_DEV_UNBOUND) {
799 value = -ENOENT;
800 goto gone;
801 } else if ((ep = data->ep) == NULL) {
802 value = -ENODEV;
803 goto gone;
804 }
805 switch (data->dev->gadget->speed) {
806 case USB_SPEED_LOW:
807 case USB_SPEED_FULL:
808 value = usb_ep_enable (ep, &data->desc);
809 if (value == 0)
810 data->state = STATE_EP_ENABLED;
811 break;
812#ifdef HIGHSPEED
813 case USB_SPEED_HIGH:
814 /* fails if caller didn't provide that descriptor... */
815 value = usb_ep_enable (ep, &data->hs_desc);
816 if (value == 0)
817 data->state = STATE_EP_ENABLED;
818 break;
819#endif
820 default:
821 DBG (data->dev, "unconnected, %s init deferred\n",
822 data->name);
823 data->state = STATE_EP_DEFER_ENABLE;
824 }
825 if (value == 0)
826 fd->f_op = &ep_io_operations;
827gone:
828 spin_unlock_irq (&data->dev->lock);
829 if (value < 0) {
830fail:
831 data->desc.bDescriptorType = 0;
832 data->hs_desc.bDescriptorType = 0;
833 }
834 up (&data->lock);
835 return value;
836fail0:
837 value = -EINVAL;
838 goto fail;
839fail1:
840 value = -EFAULT;
841 goto fail;
842}
843
844static int
845ep_open (struct inode *inode, struct file *fd)
846{
847 struct ep_data *data = inode->u.generic_ip;
848 int value = -EBUSY;
849
850 if (down_interruptible (&data->lock) != 0)
851 return -EINTR;
852 spin_lock_irq (&data->dev->lock);
853 if (data->dev->state == STATE_DEV_UNBOUND)
854 value = -ENOENT;
855 else if (data->state == STATE_EP_DISABLED) {
856 value = 0;
857 data->state = STATE_EP_READY;
858 get_ep (data);
859 fd->private_data = data;
860 VDEBUG (data->dev, "%s ready\n", data->name);
861 } else
862 DBG (data->dev, "%s state %d\n",
863 data->name, data->state);
864 spin_unlock_irq (&data->dev->lock);
865 up (&data->lock);
866 return value;
867}
868
869/* used before endpoint configuration */
870static struct file_operations ep_config_operations = {
871 .owner = THIS_MODULE,
872 .llseek = no_llseek,
873
874 .open = ep_open,
875 .write = ep_config,
876 .release = ep_release,
877};
878
879/*----------------------------------------------------------------------*/
880
881/* EP0 IMPLEMENTATION can be partly in userspace.
882 *
883 * Drivers that use this facility receive various events, including
884 * control requests the kernel doesn't handle. Drivers that don't
885 * use this facility may be too simple-minded for real applications.
886 */
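
A sketch of that userspace half, under the same hedged assumptions as the earlier examples: events are read from the ep0 fd one at a time, and a delegated SETUP is answered with write() for IN requests or read() otherwise (a zero length read() acks requests such as SET_CONFIGURATION). The header names reflect this kernel generation and, together with the assumption of a little-endian host when looking at wLength, are part of the sketch.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/usb_ch9.h>	/* struct usb_ctrlrequest, USB_DIR_IN */
#include <linux/usb_gadgetfs.h>

static void handle_setup(int ep0_fd, const struct usb_ctrlrequest *setup)
{
	if (setup->bRequestType & USB_DIR_IN) {
		uint8_t reply[64];
		size_t len = setup->wLength;	/* little-endian host assumed */

		if (len > sizeof reply)
			len = sizeof reply;
		memset(reply, 0, sizeof reply);
		write(ep0_fd, reply, len);	/* supply the IN data stage */
	} else if (setup->wLength == 0) {
		read(ep0_fd, NULL, 0);		/* ack the status stage */
	} else {
		uint8_t buf[256];
		size_t len = setup->wLength;

		if (len > sizeof buf)
			len = sizeof buf;
		read(ep0_fd, buf, len);		/* collect the OUT data stage */
	}
}

static void ep0_event_loop(int ep0_fd)
{
	struct usb_gadgetfs_event event;

	while (read(ep0_fd, &event, sizeof event) == sizeof event) {
		switch (event.type) {
		case GADGETFS_SETUP:
			handle_setup(ep0_fd, &event.u.setup);
			break;
		case GADGETFS_CONNECT:
		case GADGETFS_DISCONNECT:
		case GADGETFS_SUSPEND:
		default:
			break;			/* housekeeping only */
		}
	}
}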
887
888static inline void ep0_readable (struct dev_data *dev)
889{
890 wake_up (&dev->wait);
891 kill_fasync (&dev->fasync, SIGIO, POLL_IN);
892}
893
894static void clean_req (struct usb_ep *ep, struct usb_request *req)
895{
896 struct dev_data *dev = ep->driver_data;
897
898 if (req->buf != dev->rbuf) {
899 usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
900 req->buf = dev->rbuf;
901 req->dma = DMA_ADDR_INVALID;
902 }
903 req->complete = epio_complete;
904 dev->setup_out_ready = 0;
905}
906
907static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
908{
909 struct dev_data *dev = ep->driver_data;
910 int free = 1;
911
912 /* for control OUT, data must still get to userspace */
913 if (!dev->setup_in) {
914 dev->setup_out_error = (req->status != 0);
915 if (!dev->setup_out_error)
916 free = 0;
917 dev->setup_out_ready = 1;
918 ep0_readable (dev);
919 } else if (dev->state == STATE_SETUP)
920 dev->state = STATE_CONNECTED;
921
922 /* clean up as appropriate */
923	if (free && req->buf != dev->rbuf)
924 clean_req (ep, req);
925 req->complete = epio_complete;
926}
927
928static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
929{
930 struct dev_data *dev = ep->driver_data;
931
932 if (dev->setup_out_ready) {
933 DBG (dev, "ep0 request busy!\n");
934 return -EBUSY;
935 }
936 if (len > sizeof (dev->rbuf))
937 req->buf = usb_ep_alloc_buffer (ep, len, &req->dma, GFP_ATOMIC);
938 if (req->buf == 0) {
939 req->buf = dev->rbuf;
940 return -ENOMEM;
941 }
942 req->complete = ep0_complete;
943 req->length = len;
944 return 0;
945}
946
947static ssize_t
948ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
949{
950 struct dev_data *dev = fd->private_data;
951 ssize_t retval;
952 enum ep0_state state;
953
954 spin_lock_irq (&dev->lock);
955
956 /* report fd mode change before acting on it */
957 if (dev->setup_abort) {
958 dev->setup_abort = 0;
959 retval = -EIDRM;
960 goto done;
961 }
962
963 /* control DATA stage */
964 if ((state = dev->state) == STATE_SETUP) {
965
966 if (dev->setup_in) { /* stall IN */
967 VDEBUG(dev, "ep0in stall\n");
968 (void) usb_ep_set_halt (dev->gadget->ep0);
969 retval = -EL2HLT;
970 dev->state = STATE_CONNECTED;
971
972 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
973 struct usb_ep *ep = dev->gadget->ep0;
974 struct usb_request *req = dev->req;
975
976 if ((retval = setup_req (ep, req, 0)) == 0)
977 retval = usb_ep_queue (ep, req, GFP_ATOMIC);
978 dev->state = STATE_CONNECTED;
979
980 /* assume that was SET_CONFIGURATION */
981 if (dev->current_config) {
982 unsigned power;
983#ifdef HIGHSPEED
984 if (dev->gadget->speed == USB_SPEED_HIGH)
985 power = dev->hs_config->bMaxPower;
986 else
987#endif
988 power = dev->config->bMaxPower;
989 usb_gadget_vbus_draw(dev->gadget, 2 * power);
990 }
991
992 } else { /* collect OUT data */
993 if ((fd->f_flags & O_NONBLOCK) != 0
994 && !dev->setup_out_ready) {
995 retval = -EAGAIN;
996 goto done;
997 }
998 spin_unlock_irq (&dev->lock);
999 retval = wait_event_interruptible (dev->wait,
1000 dev->setup_out_ready != 0);
1001
1002 /* FIXME state could change from under us */
1003 spin_lock_irq (&dev->lock);
1004 if (retval)
1005 goto done;
1006 if (dev->setup_out_error)
1007 retval = -EIO;
1008 else {
1009 len = min (len, (size_t)dev->req->actual);
1010// FIXME don't call this with the spinlock held ...
1011				if (copy_to_user (buf, dev->req->buf, len))
1012 retval = -EFAULT;
1013 clean_req (dev->gadget->ep0, dev->req);
1014 /* NOTE userspace can't yet choose to stall */
1015 }
1016 }
1017 goto done;
1018 }
1019
1020 /* else normal: return event data */
1021 if (len < sizeof dev->event [0]) {
1022 retval = -EINVAL;
1023 goto done;
1024 }
1025 len -= len % sizeof (struct usb_gadgetfs_event);
1026 dev->usermode_setup = 1;
1027
1028scan:
1029 /* return queued events right away */
1030 if (dev->ev_next != 0) {
1031 unsigned i, n;
1032 int tmp = dev->ev_next;
1033
1034 len = min (len, tmp * sizeof (struct usb_gadgetfs_event));
1035 n = len / sizeof (struct usb_gadgetfs_event);
1036
1037 /* ep0 can't deliver events when STATE_SETUP */
1038 for (i = 0; i < n; i++) {
1039 if (dev->event [i].type == GADGETFS_SETUP) {
1040 len = n = i + 1;
1041 len *= sizeof (struct usb_gadgetfs_event);
1042 n = 0;
1043 break;
1044 }
1045 }
1046 spin_unlock_irq (&dev->lock);
1047 if (copy_to_user (buf, &dev->event, len))
1048 retval = -EFAULT;
1049 else
1050 retval = len;
1051 if (len > 0) {
1052 len /= sizeof (struct usb_gadgetfs_event);
1053
1054 /* NOTE this doesn't guard against broken drivers;
1055 * concurrent ep0 readers may lose events.
1056 */
1057 spin_lock_irq (&dev->lock);
1058 dev->ev_next -= len;
1059 if (dev->ev_next != 0)
1060 memmove (&dev->event, &dev->event [len],
1061 sizeof (struct usb_gadgetfs_event)
1062 * (tmp - len));
1063 if (n == 0)
1064 dev->state = STATE_SETUP;
1065 spin_unlock_irq (&dev->lock);
1066 }
1067 return retval;
1068 }
1069 if (fd->f_flags & O_NONBLOCK) {
1070 retval = -EAGAIN;
1071 goto done;
1072 }
1073
1074 switch (state) {
1075 default:
1076 DBG (dev, "fail %s, state %d\n", __FUNCTION__, state);
1077 retval = -ESRCH;
1078 break;
1079 case STATE_UNCONNECTED:
1080 case STATE_CONNECTED:
1081 spin_unlock_irq (&dev->lock);
1082 DBG (dev, "%s wait\n", __FUNCTION__);
1083
1084 /* wait for events */
1085 retval = wait_event_interruptible (dev->wait,
1086 dev->ev_next != 0);
1087 if (retval < 0)
1088 return retval;
1089 spin_lock_irq (&dev->lock);
1090 goto scan;
1091 }
1092
1093done:
1094 spin_unlock_irq (&dev->lock);
1095 return retval;
1096}
1097
1098static struct usb_gadgetfs_event *
1099next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1100{
1101 struct usb_gadgetfs_event *event;
1102 unsigned i;
1103
1104 switch (type) {
1105 /* these events purge the queue */
1106 case GADGETFS_DISCONNECT:
1107 if (dev->state == STATE_SETUP)
1108 dev->setup_abort = 1;
1109 // FALL THROUGH
1110 case GADGETFS_CONNECT:
1111 dev->ev_next = 0;
1112 break;
1113 case GADGETFS_SETUP: /* previous request timed out */
1114 case GADGETFS_SUSPEND: /* same effect */
1115 /* these events can't be repeated */
1116 for (i = 0; i != dev->ev_next; i++) {
1117 if (dev->event [i].type != type)
1118 continue;
1119 DBG (dev, "discard old event %d\n", type);
1120 dev->ev_next--;
1121 if (i == dev->ev_next)
1122 break;
1123 /* indices start at zero, for simplicity */
1124 memmove (&dev->event [i], &dev->event [i + 1],
1125 sizeof (struct usb_gadgetfs_event)
1126 * (dev->ev_next - i));
1127 }
1128 break;
1129 default:
1130 BUG ();
1131 }
1132 event = &dev->event [dev->ev_next++];
1133 BUG_ON (dev->ev_next > N_EVENT);
1134 VDEBUG (dev, "ev %d, next %d\n", type, dev->ev_next);
1135 memset (event, 0, sizeof *event);
1136 event->type = type;
1137 return event;
1138}
1139
1140static ssize_t
1141ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1142{
1143 struct dev_data *dev = fd->private_data;
1144 ssize_t retval = -ESRCH;
1145
1146 spin_lock_irq (&dev->lock);
1147
1148 /* report fd mode change before acting on it */
1149 if (dev->setup_abort) {
1150 dev->setup_abort = 0;
1151 retval = -EIDRM;
1152
1153 /* data and/or status stage for control request */
1154 } else if (dev->state == STATE_SETUP) {
1155
1156 /* IN DATA+STATUS caller makes len <= wLength */
1157 if (dev->setup_in) {
1158 retval = setup_req (dev->gadget->ep0, dev->req, len);
1159 if (retval == 0) {
1160 spin_unlock_irq (&dev->lock);
1161 if (copy_from_user (dev->req->buf, buf, len))
1162 retval = -EFAULT;
1163 else
1164 retval = usb_ep_queue (
1165 dev->gadget->ep0, dev->req,
1166 GFP_KERNEL);
1167 if (retval < 0) {
1168 spin_lock_irq (&dev->lock);
1169 clean_req (dev->gadget->ep0, dev->req);
1170 spin_unlock_irq (&dev->lock);
1171 } else
1172 retval = len;
1173
1174 return retval;
1175 }
1176
1177 /* can stall some OUT transfers */
1178 } else if (dev->setup_can_stall) {
1179 VDEBUG(dev, "ep0out stall\n");
1180 (void) usb_ep_set_halt (dev->gadget->ep0);
1181 retval = -EL2HLT;
1182 dev->state = STATE_CONNECTED;
1183 } else {
1184 DBG(dev, "bogus ep0out stall!\n");
1185 }
1186 } else
1187 DBG (dev, "fail %s, state %d\n", __FUNCTION__, dev->state);
1188
1189 spin_unlock_irq (&dev->lock);
1190 return retval;
1191}
1192
1193static int
1194ep0_fasync (int f, struct file *fd, int on)
1195{
1196 struct dev_data *dev = fd->private_data;
1197 // caller must F_SETOWN before signal delivery happens
1198 VDEBUG (dev, "%s %s\n", __FUNCTION__, on ? "on" : "off");
1199 return fasync_helper (f, fd, on, &dev->fasync);
1200}
1201
1202static struct usb_gadget_driver gadgetfs_driver;
1203
1204static int
1205dev_release (struct inode *inode, struct file *fd)
1206{
1207 struct dev_data *dev = fd->private_data;
1208
1209 /* closing ep0 === shutdown all */
1210
1211 usb_gadget_unregister_driver (&gadgetfs_driver);
1212
1213 /* at this point "good" hardware has disconnected the
1214 * device from USB; the host won't see it any more.
1215 * alternatively, all host requests will time out.
1216 */
1217
1218 fasync_helper (-1, fd, 0, &dev->fasync);
1219 kfree (dev->buf);
1220 dev->buf = NULL;
1221 put_dev (dev);
1222
1223 /* other endpoints were all decoupled from this device */
1224 dev->state = STATE_DEV_DISABLED;
1225 return 0;
1226}
1227
1228static int dev_ioctl (struct inode *inode, struct file *fd,
1229 unsigned code, unsigned long value)
1230{
1231 struct dev_data *dev = fd->private_data;
1232 struct usb_gadget *gadget = dev->gadget;
1233
1234 if (gadget->ops->ioctl)
1235 return gadget->ops->ioctl (gadget, code, value);
1236 return -ENOTTY;
1237}
1238
1239/* used after device configuration */
1240static struct file_operations ep0_io_operations = {
1241 .owner = THIS_MODULE,
1242 .llseek = no_llseek,
1243
1244 .read = ep0_read,
1245 .write = ep0_write,
1246 .fasync = ep0_fasync,
1247 // .poll = ep0_poll,
1248 .ioctl = dev_ioctl,
1249 .release = dev_release,
1250};
1251
1252/*----------------------------------------------------------------------*/
1253
1254/* The in-kernel gadget driver handles most ep0 issues, in particular
1255 * enumerating the single configuration (as provided from user space).
1256 *
1257 * Unrecognized ep0 requests may be handled in user space.
1258 */
1259
1260#ifdef HIGHSPEED
1261static void make_qualifier (struct dev_data *dev)
1262{
1263 struct usb_qualifier_descriptor qual;
1264 struct usb_device_descriptor *desc;
1265
1266 qual.bLength = sizeof qual;
1267 qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1268 qual.bcdUSB = __constant_cpu_to_le16 (0x0200);
1269
1270 desc = dev->dev;
1271 qual.bDeviceClass = desc->bDeviceClass;
1272 qual.bDeviceSubClass = desc->bDeviceSubClass;
1273 qual.bDeviceProtocol = desc->bDeviceProtocol;
1274
1275 /* assumes ep0 uses the same value for both speeds ... */
1276 qual.bMaxPacketSize0 = desc->bMaxPacketSize0;
1277
1278 qual.bNumConfigurations = 1;
1279 qual.bRESERVED = 0;
1280
1281 memcpy (dev->rbuf, &qual, sizeof qual);
1282}
1283#endif
1284
1285static int
1286config_buf (struct dev_data *dev, u8 type, unsigned index)
1287{
1288 int len;
1289#ifdef HIGHSPEED
1290 int hs;
1291#endif
1292
1293 /* only one configuration */
1294 if (index > 0)
1295 return -EINVAL;
1296
1297#ifdef HIGHSPEED
1298 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1299 if (type == USB_DT_OTHER_SPEED_CONFIG)
1300 hs = !hs;
1301 if (hs) {
1302 dev->req->buf = dev->hs_config;
1303 len = le16_to_cpup (&dev->hs_config->wTotalLength);
1304 } else
1305#endif
1306 {
1307 dev->req->buf = dev->config;
1308 len = le16_to_cpup (&dev->config->wTotalLength);
1309 }
1310 ((u8 *)dev->req->buf) [1] = type;
1311 return len;
1312}
1313
1314static int
1315gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1316{
1317 struct dev_data *dev = get_gadget_data (gadget);
1318 struct usb_request *req = dev->req;
1319 int value = -EOPNOTSUPP;
1320 struct usb_gadgetfs_event *event;
1321 u16 w_value = ctrl->wValue;
1322 u16 w_length = ctrl->wLength;
1323
1324 spin_lock (&dev->lock);
1325 dev->setup_abort = 0;
1326 if (dev->state == STATE_UNCONNECTED) {
1327 struct usb_ep *ep;
1328 struct ep_data *data;
1329
1330 dev->state = STATE_CONNECTED;
1331 dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
1332
1333#ifdef HIGHSPEED
1334 if (gadget->speed == USB_SPEED_HIGH && dev->hs_config == 0) {
1335 ERROR (dev, "no high speed config??\n");
1336 return -EINVAL;
1337 }
1338#endif /* HIGHSPEED */
1339
1340 INFO (dev, "connected\n");
1341 event = next_event (dev, GADGETFS_CONNECT);
1342 event->u.speed = gadget->speed;
1343 ep0_readable (dev);
1344
1345 list_for_each_entry (ep, &gadget->ep_list, ep_list) {
1346 data = ep->driver_data;
1347 /* ... down_trylock (&data->lock) ... */
1348 if (data->state != STATE_EP_DEFER_ENABLE)
1349 continue;
1350#ifdef HIGHSPEED
1351 if (gadget->speed == USB_SPEED_HIGH)
1352 value = usb_ep_enable (ep, &data->hs_desc);
1353 else
1354#endif /* HIGHSPEED */
1355 value = usb_ep_enable (ep, &data->desc);
1356 if (value) {
1357 ERROR (dev, "deferred %s enable --> %d\n",
1358 data->name, value);
1359 continue;
1360 }
1361 data->state = STATE_EP_ENABLED;
1362 wake_up (&data->wait);
1363 DBG (dev, "woke up %s waiters\n", data->name);
1364 }
1365
1366 /* host may have given up waiting for response. we can miss control
1367 * requests handled lower down (device/endpoint status and features);
1368 * then ep0_{read,write} will report the wrong status. controller
1369 * driver will have aborted pending i/o.
1370 */
1371 } else if (dev->state == STATE_SETUP)
1372 dev->setup_abort = 1;
1373
1374 req->buf = dev->rbuf;
1375 req->dma = DMA_ADDR_INVALID;
1376 req->context = NULL;
1377 value = -EOPNOTSUPP;
1378 switch (ctrl->bRequest) {
1379
1380 case USB_REQ_GET_DESCRIPTOR:
1381 if (ctrl->bRequestType != USB_DIR_IN)
1382 goto unrecognized;
1383 switch (w_value >> 8) {
1384
1385 case USB_DT_DEVICE:
1386 value = min (w_length, (u16) sizeof *dev->dev);
1387 req->buf = dev->dev;
1388 break;
1389#ifdef HIGHSPEED
1390 case USB_DT_DEVICE_QUALIFIER:
1391 if (!dev->hs_config)
1392 break;
1393 value = min (w_length, (u16)
1394 sizeof (struct usb_qualifier_descriptor));
1395 make_qualifier (dev);
1396 break;
1397 case USB_DT_OTHER_SPEED_CONFIG:
1398 // FALLTHROUGH
1399#endif
1400 case USB_DT_CONFIG:
1401 value = config_buf (dev,
1402 w_value >> 8,
1403 w_value & 0xff);
1404 if (value >= 0)
1405 value = min (w_length, (u16) value);
1406 break;
1407 case USB_DT_STRING:
1408 goto unrecognized;
1409
1410 default: // all others are errors
1411 break;
1412 }
1413 break;
1414
1415 /* currently one config, two speeds */
1416 case USB_REQ_SET_CONFIGURATION:
1417 if (ctrl->bRequestType != 0)
1418 break;
1419 if (0 == (u8) w_value) {
1420 value = 0;
1421 dev->current_config = 0;
1422 usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1423 // user mode expected to disable endpoints
1424 } else {
1425 u8 config, power;
1426#ifdef HIGHSPEED
1427 if (gadget->speed == USB_SPEED_HIGH) {
1428 config = dev->hs_config->bConfigurationValue;
1429 power = dev->hs_config->bMaxPower;
1430 } else
1431#endif
1432 {
1433 config = dev->config->bConfigurationValue;
1434 power = dev->config->bMaxPower;
1435 }
1436
1437 if (config == (u8) w_value) {
1438 value = 0;
1439 dev->current_config = config;
1440 usb_gadget_vbus_draw(gadget, 2 * power);
1441 }
1442 }
1443
1444 /* report SET_CONFIGURATION like any other control request,
1445 * except that usermode may not stall this. the next
1446		 * request mustn't be allowed to start until this finishes:
1447 * endpoints and threads set up, etc.
1448 *
1449 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1450		 * has bad/racy automagic that prevents synchronizing here.
1451 * even kernel mode drivers often miss them.
1452 */
1453 if (value == 0) {
1454 INFO (dev, "configuration #%d\n", dev->current_config);
1455 if (dev->usermode_setup) {
1456 dev->setup_can_stall = 0;
1457 goto delegate;
1458 }
1459 }
1460 break;
1461
1462#ifndef CONFIG_USB_GADGETFS_PXA2XX
1463 /* PXA automagically handles this request too */
1464 case USB_REQ_GET_CONFIGURATION:
1465 if (ctrl->bRequestType != 0x80)
1466 break;
1467 *(u8 *)req->buf = dev->current_config;
1468 value = min (w_length, (u16) 1);
1469 break;
1470#endif
1471
1472 default:
1473unrecognized:
1474 VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1475 dev->usermode_setup ? "delegate" : "fail",
1476 ctrl->bRequestType, ctrl->bRequest,
1477 w_value, le16_to_cpu(ctrl->wIndex), w_length);
1478
1479 /* if there's an ep0 reader, don't stall */
1480 if (dev->usermode_setup) {
1481 dev->setup_can_stall = 1;
1482delegate:
1483 dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1484 ? 1 : 0;
1485 dev->setup_out_ready = 0;
1486 dev->setup_out_error = 0;
1487 value = 0;
1488
1489 /* read DATA stage for OUT right away */
1490 if (unlikely (!dev->setup_in && w_length)) {
1491 value = setup_req (gadget->ep0, dev->req,
1492 w_length);
1493 if (value < 0)
1494 break;
1495 value = usb_ep_queue (gadget->ep0, dev->req,
1496 GFP_ATOMIC);
1497 if (value < 0) {
1498 clean_req (gadget->ep0, dev->req);
1499 break;
1500 }
1501
1502 /* we can't currently stall these */
1503 dev->setup_can_stall = 0;
1504 }
1505
1506 /* state changes when reader collects event */
1507 event = next_event (dev, GADGETFS_SETUP);
1508 event->u.setup = *ctrl;
1509 ep0_readable (dev);
1510 spin_unlock (&dev->lock);
1511 return 0;
1512 }
1513 }
1514
1515 /* proceed with data transfer and status phases? */
1516 if (value >= 0 && dev->state != STATE_SETUP) {
1517 req->length = value;
1518 req->zero = value < w_length;
1519 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
1520 if (value < 0) {
1521 DBG (dev, "ep_queue --> %d\n", value);
1522 req->status = 0;
1523 }
1524 }
1525
1526 /* device stalls when value < 0 */
1527 spin_unlock (&dev->lock);
1528 return value;
1529}
1530
1531static void destroy_ep_files (struct dev_data *dev)
1532{
1533 struct list_head *entry, *tmp;
1534
1535 DBG (dev, "%s %d\n", __FUNCTION__, dev->state);
1536
1537 /* dev->state must prevent interference */
1538restart:
1539 spin_lock_irq (&dev->lock);
1540 list_for_each_safe (entry, tmp, &dev->epfiles) {
1541 struct ep_data *ep;
1542 struct inode *parent;
1543 struct dentry *dentry;
1544
1545 /* break link to FS */
1546 ep = list_entry (entry, struct ep_data, epfiles);
1547 list_del_init (&ep->epfiles);
1548 dentry = ep->dentry;
1549 ep->dentry = NULL;
1550 parent = dentry->d_parent->d_inode;
1551
1552 /* break link to controller */
1553 if (ep->state == STATE_EP_ENABLED)
1554 (void) usb_ep_disable (ep->ep);
1555 ep->state = STATE_EP_UNBOUND;
1556 usb_ep_free_request (ep->ep, ep->req);
1557 ep->ep = NULL;
1558 wake_up (&ep->wait);
1559 put_ep (ep);
1560
1561 spin_unlock_irq (&dev->lock);
1562
1563 /* break link to dcache */
1564 down (&parent->i_sem);
1565 d_delete (dentry);
1566 dput (dentry);
1567 up (&parent->i_sem);
1568
1569 /* fds may still be open */
1570 goto restart;
1571 }
1572 spin_unlock_irq (&dev->lock);
1573}
1574
1575
1576static struct inode *
1577gadgetfs_create_file (struct super_block *sb, char const *name,
1578 void *data, struct file_operations *fops,
1579 struct dentry **dentry_p);
1580
1581static int activate_ep_files (struct dev_data *dev)
1582{
1583 struct usb_ep *ep;
1584
1585 gadget_for_each_ep (ep, dev->gadget) {
1586 struct ep_data *data;
1587
1588 data = kmalloc (sizeof *data, GFP_KERNEL);
1589 if (!data)
1590 goto enomem;
1591		memset (data, 0, sizeof *data);
1592 data->state = STATE_EP_DISABLED;
1593 init_MUTEX (&data->lock);
1594 init_waitqueue_head (&data->wait);
1595
1596 strncpy (data->name, ep->name, sizeof (data->name) - 1);
1597 atomic_set (&data->count, 1);
1598 data->dev = dev;
1599 get_dev (dev);
1600
1601 data->ep = ep;
1602 ep->driver_data = data;
1603
1604 data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1605 if (!data->req)
1606 goto enomem;
1607
1608 data->inode = gadgetfs_create_file (dev->sb, data->name,
1609 data, &ep_config_operations,
1610 &data->dentry);
1611 if (!data->inode) {
1612 kfree (data);
1613 goto enomem;
1614 }
1615 list_add_tail (&data->epfiles, &dev->epfiles);
1616 }
1617 return 0;
1618
1619enomem:
1620 DBG (dev, "%s enomem\n", __FUNCTION__);
1621 destroy_ep_files (dev);
1622 return -ENOMEM;
1623}
1624
1625static void
1626gadgetfs_unbind (struct usb_gadget *gadget)
1627{
1628 struct dev_data *dev = get_gadget_data (gadget);
1629
1630 DBG (dev, "%s\n", __FUNCTION__);
1631
1632 spin_lock_irq (&dev->lock);
1633 dev->state = STATE_DEV_UNBOUND;
1634 spin_unlock_irq (&dev->lock);
1635
1636 destroy_ep_files (dev);
1637 gadget->ep0->driver_data = NULL;
1638 set_gadget_data (gadget, NULL);
1639
1640 /* we've already been disconnected ... no i/o is active */
1641 if (dev->req)
1642 usb_ep_free_request (gadget->ep0, dev->req);
1643 DBG (dev, "%s done\n", __FUNCTION__);
1644 put_dev (dev);
1645}
1646
1647static struct dev_data *the_device;
1648
1649static int
1650gadgetfs_bind (struct usb_gadget *gadget)
1651{
1652 struct dev_data *dev = the_device;
1653
1654 if (!dev)
1655 return -ESRCH;
1656 if (0 != strcmp (CHIP, gadget->name)) {
1657 printk (KERN_ERR "%s expected %s controller not %s\n",
1658 shortname, CHIP, gadget->name);
1659 return -ENODEV;
1660 }
1661
1662 set_gadget_data (gadget, dev);
1663 dev->gadget = gadget;
1664 gadget->ep0->driver_data = dev;
1665 dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
1666
1667 /* preallocate control response and buffer */
1668 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1669 if (!dev->req)
1670 goto enomem;
1671 dev->req->context = NULL;
1672 dev->req->complete = epio_complete;
1673
1674 if (activate_ep_files (dev) < 0)
1675 goto enomem;
1676
1677 INFO (dev, "bound to %s driver\n", gadget->name);
1678 dev->state = STATE_UNCONNECTED;
1679 get_dev (dev);
1680 return 0;
1681
1682enomem:
1683 gadgetfs_unbind (gadget);
1684 return -ENOMEM;
1685}
1686
1687static void
1688gadgetfs_disconnect (struct usb_gadget *gadget)
1689{
1690 struct dev_data *dev = get_gadget_data (gadget);
1691
1692 if (dev->state == STATE_UNCONNECTED) {
1693 DBG (dev, "already unconnected\n");
1694 return;
1695 }
1696 dev->state = STATE_UNCONNECTED;
1697
1698 INFO (dev, "disconnected\n");
1699 spin_lock (&dev->lock);
1700 next_event (dev, GADGETFS_DISCONNECT);
1701 ep0_readable (dev);
1702 spin_unlock (&dev->lock);
1703}
1704
1705static void
1706gadgetfs_suspend (struct usb_gadget *gadget)
1707{
1708 struct dev_data *dev = get_gadget_data (gadget);
1709
1710 INFO (dev, "suspended from state %d\n", dev->state);
1711 spin_lock (&dev->lock);
1712 switch (dev->state) {
1713 case STATE_SETUP: // VERY odd... host died??
1714 case STATE_CONNECTED:
1715 case STATE_UNCONNECTED:
1716 next_event (dev, GADGETFS_SUSPEND);
1717 ep0_readable (dev);
1718 /* FALLTHROUGH */
1719 default:
1720 break;
1721 }
1722 spin_unlock (&dev->lock);
1723}
1724
1725static struct usb_gadget_driver gadgetfs_driver = {
1726#ifdef HIGHSPEED
1727 .speed = USB_SPEED_HIGH,
1728#else
1729 .speed = USB_SPEED_FULL,
1730#endif
1731 .function = (char *) driver_desc,
1732 .bind = gadgetfs_bind,
1733 .unbind = gadgetfs_unbind,
1734 .setup = gadgetfs_setup,
1735 .disconnect = gadgetfs_disconnect,
1736 .suspend = gadgetfs_suspend,
1737
1738 .driver = {
1739 .name = (char *) shortname,
1740 // .shutdown = ...
1741 // .suspend = ...
1742 // .resume = ...
1743 },
1744};
1745
1746/*----------------------------------------------------------------------*/
1747
1748static void gadgetfs_nop(struct usb_gadget *arg) { }
1749
1750static int gadgetfs_probe (struct usb_gadget *gadget)
1751{
1752 CHIP = gadget->name;
1753 return -EISNAM;
1754}
1755
1756static struct usb_gadget_driver probe_driver = {
1757 .speed = USB_SPEED_HIGH,
1758 .bind = gadgetfs_probe,
1759 .unbind = gadgetfs_nop,
1760 .setup = (void *)gadgetfs_nop,
1761 .disconnect = gadgetfs_nop,
1762 .driver = {
1763 .name = "nop",
1764 },
1765};
1766
1767
1768/* DEVICE INITIALIZATION
1769 *
1770 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1771 * status = write (fd, descriptors, sizeof descriptors)
1772 *
1773 * That write establishes the device configuration, so the kernel can
1774 * bind to the controller ... guaranteeing it can handle enumeration
1775 * at all necessary speeds. Descriptor order is:
1776 *
1777 * . message tag (u32, host order) ... for now, must be zero; it
1778 * would change to support features like multi-config devices
1779 * . full/low speed config ... all wTotalLength bytes (with interface,
1780 * class, altsetting, endpoint, and other descriptors)
1781 * . high speed config ... all descriptors, for high speed operation;
1782 * this one's optional except for high-speed hardware
1783 * . device descriptor
1784 *
1785 * Endpoints are not yet enabled. Drivers may want to immediately
1786 * initialize them, using the /dev/gadget/ep* files that are available
1787 * as soon as the kernel sees the configuration, or they can wait
1788 * until device configuration and interface altsetting changes create
1789 * the need to configure (or unconfigure) them.
1790 *
1791 * After initialization, the device stays active for as long as that
1792 * $CHIP file is open. Events may then be read from that descriptor,
1793 * such as configuration notifications. More complex drivers will handle
1794 * some control requests in user space.
1795 */
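/* Illustrative sketch only (not part of this driver): user space might issue
 * the configuration write described above roughly as follows.  The fs_config,
 * hs_config, and device_desc arrays are assumptions for the example, and all
 * error handling is omitted.
 *
 *	char buf [4096], *cp = buf;
 *	u32 tag = 0;		// message tag, must currently be zero
 *	int fd;
 *
 *	memcpy (cp, &tag, 4), cp += 4;
 *	memcpy (cp, fs_config, sizeof fs_config), cp += sizeof fs_config;
 *	memcpy (cp, hs_config, sizeof hs_config), cp += sizeof hs_config;
 *	memcpy (cp, &device_desc, sizeof device_desc), cp += sizeof device_desc;
 *
 *	fd = open ("/dev/gadget/$CHIP", O_RDWR);
 *	if (write (fd, buf, cp - buf) < 0)
 *		perror ("gadgetfs config");
 *
 *	// from here on, read() on this fd returns the events queued by the
 *	// code above (GADGETFS_SETUP, GADGETFS_DISCONNECT, GADGETFS_SUSPEND, ...)
 */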
1796
1797static int is_valid_config (struct usb_config_descriptor *config)
1798{
1799 return config->bDescriptorType == USB_DT_CONFIG
1800 && config->bLength == USB_DT_CONFIG_SIZE
1801 && config->bConfigurationValue != 0
1802 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1803 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1804 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1805 /* FIXME check lengths: walk to end */
1806}
1807
1808static ssize_t
1809dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1810{
1811 struct dev_data *dev = fd->private_data;
1812 ssize_t value = len, length = len;
1813 unsigned total;
1814 u32 tag;
1815 char *kbuf;
1816
1817 if (dev->state != STATE_OPENED)
1818 return -EEXIST;
1819
1820 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1821 return -EINVAL;
1822
1823 /* we might need to change message format someday */
1824 if (copy_from_user (&tag, buf, 4))
1825 return -EFAULT;
1826 if (tag != 0)
1827 return -EINVAL;
1828 buf += 4;
1829 length -= 4;
1830
1831 kbuf = kmalloc (length, SLAB_KERNEL);
1832 if (!kbuf)
1833 return -ENOMEM;
1834 if (copy_from_user (kbuf, buf, length)) {
1835 kfree (kbuf);
1836 return -EFAULT;
1837 }
1838
1839 spin_lock_irq (&dev->lock);
1840 value = -EINVAL;
1841 if (dev->buf)
1842 goto fail;
1843 dev->buf = kbuf;
1844
1845 /* full or low speed config */
1846 dev->config = (void *) kbuf;
1847 total = le16_to_cpup (&dev->config->wTotalLength);
1848 if (!is_valid_config (dev->config) || total >= length)
1849 goto fail;
1850 kbuf += total;
1851 length -= total;
1852
1853 /* optional high speed config */
1854 if (kbuf [1] == USB_DT_CONFIG) {
1855 dev->hs_config = (void *) kbuf;
1856 total = le16_to_cpup (&dev->hs_config->wTotalLength);
1857 if (!is_valid_config (dev->hs_config) || total >= length)
1858 goto fail;
1859 kbuf += total;
1860 length -= total;
1861 }
1862
1863 /* could support multiple configs, using another encoding! */
1864
1865 /* device descriptor (tweaked for paranoia) */
1866 if (length != USB_DT_DEVICE_SIZE)
1867 goto fail;
1868 dev->dev = (void *)kbuf;
1869 if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1870 || dev->dev->bDescriptorType != USB_DT_DEVICE
1871 || dev->dev->bNumConfigurations != 1)
1872 goto fail;
1873 dev->dev->bNumConfigurations = 1;
1874 dev->dev->bcdUSB = __constant_cpu_to_le16 (0x0200);
1875
1876 /* triggers gadgetfs_bind(); then we can enumerate. */
1877 spin_unlock_irq (&dev->lock);
1878 value = usb_gadget_register_driver (&gadgetfs_driver);
1879 if (value != 0) {
1880 kfree (dev->buf);
1881 dev->buf = NULL;
1882 } else {
1883 /* at this point "good" hardware has for the first time
1884		 * let the USB host see us. alternatively, if users
1885 * unplug/replug that will clear all the error state.
1886 *
1887 * note: everything running before here was guaranteed
1888 * to choke driver model style diagnostics. from here
1889 * on, they can work ... except in cleanup paths that
1890 * kick in after the ep0 descriptor is closed.
1891 */
1892 fd->f_op = &ep0_io_operations;
1893 value = len;
1894 }
1895 return value;
1896
1897fail:
1898 spin_unlock_irq (&dev->lock);
1899 pr_debug ("%s: %s fail %Zd, %p\n", shortname, __FUNCTION__, value, dev);
1900 kfree (dev->buf);
1901 dev->buf = NULL;
1902 return value;
1903}
1904
1905static int
1906dev_open (struct inode *inode, struct file *fd)
1907{
1908 struct dev_data *dev = inode->u.generic_ip;
1909 int value = -EBUSY;
1910
1911 if (dev->state == STATE_DEV_DISABLED) {
1912 dev->ev_next = 0;
1913 dev->state = STATE_OPENED;
1914 fd->private_data = dev;
1915 get_dev (dev);
1916 value = 0;
1917 }
1918 return value;
1919}
1920
1921static struct file_operations dev_init_operations = {
1922 .owner = THIS_MODULE,
1923 .llseek = no_llseek,
1924
1925 .open = dev_open,
1926 .write = dev_config,
1927 .fasync = ep0_fasync,
1928 .ioctl = dev_ioctl,
1929 .release = dev_release,
1930};
1931
1932/*----------------------------------------------------------------------*/
1933
1934/* FILESYSTEM AND SUPERBLOCK OPERATIONS
1935 *
1936 * Mounting the filesystem creates a controller file, used first for
1937 * device configuration then later for event monitoring.
1938 */
1939
1940
1941/* FIXME PAM etc could set this security policy without mount options
1942 * if epfiles inherited ownership and permissions from ep0 ...
1943 */
1944
1945static unsigned default_uid;
1946static unsigned default_gid;
1947static unsigned default_perm = S_IRUSR | S_IWUSR;
1948
1949module_param (default_uid, uint, 0644);
1950module_param (default_gid, uint, 0644);
1951module_param (default_perm, uint, 0644);
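/* For example (illustrative only, assuming the module is built as "gadgetfs"):
 *
 *	modprobe gadgetfs default_uid=1000 default_gid=1000 default_perm=0660
 *
 * would create the ep0 and endpoint files owned by uid/gid 1000 with
 * mode 0660, instead of the default root-only 0600 access.
 */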
1952
1953
1954static struct inode *
1955gadgetfs_make_inode (struct super_block *sb,
1956 void *data, struct file_operations *fops,
1957 int mode)
1958{
1959 struct inode *inode = new_inode (sb);
1960
1961 if (inode) {
1962 inode->i_mode = mode;
1963 inode->i_uid = default_uid;
1964 inode->i_gid = default_gid;
1965 inode->i_blksize = PAGE_CACHE_SIZE;
1966 inode->i_blocks = 0;
1967 inode->i_atime = inode->i_mtime = inode->i_ctime
1968 = CURRENT_TIME;
1969 inode->u.generic_ip = data;
1970 inode->i_fop = fops;
1971 }
1972 return inode;
1973}
1974
1975/* creates a file in the fs root directory, so it's non-renamable and
1976 * non-linkable; inode and dentry stay paired until device reconfig.
1977 */
1978static struct inode *
1979gadgetfs_create_file (struct super_block *sb, char const *name,
1980 void *data, struct file_operations *fops,
1981 struct dentry **dentry_p)
1982{
1983 struct dentry *dentry;
1984 struct inode *inode;
1985
1986 dentry = d_alloc_name(sb->s_root, name);
1987 if (!dentry)
1988 return NULL;
1989
1990 inode = gadgetfs_make_inode (sb, data, fops,
1991 S_IFREG | (default_perm & S_IRWXUGO));
1992 if (!inode) {
1993 dput(dentry);
1994 return NULL;
1995 }
1996 d_add (dentry, inode);
1997 *dentry_p = dentry;
1998 return inode;
1999}
2000
2001static struct super_operations gadget_fs_operations = {
2002 .statfs = simple_statfs,
2003 .drop_inode = generic_delete_inode,
2004};
2005
2006static int
2007gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2008{
2009 struct inode *inode;
2010 struct dentry *d;
2011 struct dev_data *dev;
2012
2013 if (the_device)
2014 return -ESRCH;
2015
2016 /* fake probe to determine $CHIP */
2017 (void) usb_gadget_register_driver (&probe_driver);
2018 if (!CHIP)
2019 return -ENODEV;
2020
2021 /* superblock */
2022 sb->s_blocksize = PAGE_CACHE_SIZE;
2023 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2024 sb->s_magic = GADGETFS_MAGIC;
2025 sb->s_op = &gadget_fs_operations;
2026 sb->s_time_gran = 1;
2027
2028 /* root inode */
2029 inode = gadgetfs_make_inode (sb,
2030 NULL, &simple_dir_operations,
2031 S_IFDIR | S_IRUGO | S_IXUGO);
2032 if (!inode)
2033 return -ENOMEM;
2034 inode->i_op = &simple_dir_inode_operations;
2035 if (!(d = d_alloc_root (inode))) {
2036 iput (inode);
2037 return -ENOMEM;
2038 }
2039 sb->s_root = d;
2040
2041 /* the ep0 file is named after the controller we expect;
2042 * user mode code can use it for sanity checks, like we do.
2043 */
2044 dev = dev_new ();
2045 if (!dev)
2046 return -ENOMEM;
2047
2048 dev->sb = sb;
2049 if (!(inode = gadgetfs_create_file (sb, CHIP,
2050 dev, &dev_init_operations,
2051 &dev->dentry))) {
2052 put_dev(dev);
2053 return -ENOMEM;
2054 }
2055
2056 /* other endpoint files are available after hardware setup,
2057 * from binding to a controller.
2058 */
2059 the_device = dev;
2060 return 0;
2061}
2062
2063/* "mount -t gadgetfs path /dev/gadget" ends up here */
2064static struct super_block *
2065gadgetfs_get_sb (struct file_system_type *t, int flags,
2066 const char *path, void *opts)
2067{
2068 return get_sb_single (t, flags, opts, gadgetfs_fill_super);
2069}
2070
2071static void
2072gadgetfs_kill_sb (struct super_block *sb)
2073{
2074 kill_litter_super (sb);
2075 if (the_device) {
2076 put_dev (the_device);
2077 the_device = NULL;
2078 }
2079}
2080
2081/*----------------------------------------------------------------------*/
2082
2083static struct file_system_type gadgetfs_type = {
2084 .owner = THIS_MODULE,
2085 .name = shortname,
2086 .get_sb = gadgetfs_get_sb,
2087 .kill_sb = gadgetfs_kill_sb,
2088};
2089
2090/*----------------------------------------------------------------------*/
2091
2092static int __init init (void)
2093{
2094 int status;
2095
2096 status = register_filesystem (&gadgetfs_type);
2097 if (status == 0)
2098 pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2099 shortname, driver_desc);
2100 return status;
2101}
2102module_init (init);
2103
2104static void __exit cleanup (void)
2105{
2106 pr_debug ("unregister %s\n", shortname);
2107 unregister_filesystem (&gadgetfs_type);
2108}
2109module_exit (cleanup);
2110
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
new file mode 100644
index 000000000000..0def9f70e889
--- /dev/null
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -0,0 +1,2167 @@
1/*
2 * linux/drivers/usb/gadget/lh7a40x_udc.c
3 * Sharp LH7A40x on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2004 Mikko Lahteenmaki, Nordic ID
6 * Copyright (C) 2004 Bo Henriksen, Nordic ID
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include "lh7a40x_udc.h"
25
26//#define DEBUG printk
27//#define DEBUG_EP0 printk
28//#define DEBUG_SETUP printk
29
30#ifndef DEBUG_EP0
31# define DEBUG_EP0(fmt,args...)
32#endif
33#ifndef DEBUG_SETUP
34# define DEBUG_SETUP(fmt,args...)
35#endif
36#ifndef DEBUG
37# define NO_STATES
38# define DEBUG(fmt,args...)
39#endif
40
41#define DRIVER_DESC "LH7A40x USB Device Controller"
42#define DRIVER_VERSION __DATE__
43
44#ifndef _BIT			/* FIXME - what happened to _BIT in 2.6.7bk18? */
45#define _BIT(x) (1<<(x))
46#endif
47
48struct lh7a40x_udc *the_controller;
49
50static const char driver_name[] = "lh7a40x_udc";
51static const char driver_desc[] = DRIVER_DESC;
52static const char ep0name[] = "ep0-control";
53
54/*
55  Local definitions.
56*/
57
58#ifndef NO_STATES
59static char *state_names[] = {
60 "WAIT_FOR_SETUP",
61 "DATA_STATE_XMIT",
62 "DATA_STATE_NEED_ZLP",
63 "WAIT_FOR_OUT_STATUS",
64 "DATA_STATE_RECV"
65};
66#endif
67
68/*
69 Local declarations.
70*/
71static int lh7a40x_ep_enable(struct usb_ep *ep,
72 const struct usb_endpoint_descriptor *);
73static int lh7a40x_ep_disable(struct usb_ep *ep);
74static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, int);
75static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
76static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *,
77 int);
78static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t,
79 unsigned);
80static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, int);
81static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
82static int lh7a40x_set_halt(struct usb_ep *ep, int);
83static int lh7a40x_fifo_status(struct usb_ep *ep);
84static int lh7a40x_fifo_status(struct usb_ep *ep);
85static void lh7a40x_fifo_flush(struct usb_ep *ep);
86static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep);
87static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr);
88
89static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req,
90 int status);
91static void pio_irq_enable(int bEndpointAddress);
92static void pio_irq_disable(int bEndpointAddress);
93static void stop_activity(struct lh7a40x_udc *dev,
94 struct usb_gadget_driver *driver);
95static void flush(struct lh7a40x_ep *ep);
96static void udc_enable(struct lh7a40x_udc *dev);
97static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address);
98
99static struct usb_ep_ops lh7a40x_ep_ops = {
100 .enable = lh7a40x_ep_enable,
101 .disable = lh7a40x_ep_disable,
102
103 .alloc_request = lh7a40x_alloc_request,
104 .free_request = lh7a40x_free_request,
105
106 .alloc_buffer = lh7a40x_alloc_buffer,
107 .free_buffer = lh7a40x_free_buffer,
108
109 .queue = lh7a40x_queue,
110 .dequeue = lh7a40x_dequeue,
111
112 .set_halt = lh7a40x_set_halt,
113 .fifo_status = lh7a40x_fifo_status,
114 .fifo_flush = lh7a40x_fifo_flush,
115};
116
117/* Inline code */
118
119static __inline__ int write_packet(struct lh7a40x_ep *ep,
120 struct lh7a40x_request *req, int max)
121{
122 u8 *buf;
123 int length, count;
124 volatile u32 *fifo = (volatile u32 *)ep->fifo;
125
126 buf = req->req.buf + req->req.actual;
127 prefetch(buf);
128
129 length = req->req.length - req->req.actual;
130 length = min(length, max);
131 req->req.actual += length;
132
133 DEBUG("Write %d (max %d), fifo %p\n", length, max, fifo);
134
135 count = length;
136 while (count--) {
137 *fifo = *buf++;
138 }
139
140 return length;
141}
142
143static __inline__ void usb_set_index(u32 ep)
144{
145 *(volatile u32 *)io_p2v(USB_INDEX) = ep;
146}
147
148static __inline__ u32 usb_read(u32 port)
149{
150 return *(volatile u32 *)io_p2v(port);
151}
152
153static __inline__ void usb_write(u32 val, u32 port)
154{
155 *(volatile u32 *)io_p2v(port) = val;
156}
157
158static __inline__ void usb_set(u32 val, u32 port)
159{
160 volatile u32 *ioport = (volatile u32 *)io_p2v(port);
161 u32 after = (*ioport) | val;
162 *ioport = after;
163}
164
165static __inline__ void usb_clear(u32 val, u32 port)
166{
167 volatile u32 *ioport = (volatile u32 *)io_p2v(port);
168 u32 after = (*ioport) & ~val;
169 *ioport = after;
170}
171
172/*-------------------------------------------------------------------------*/
173
174#define GPIO_PORTC_DR (0x80000E08)
175#define GPIO_PORTC_DDR (0x80000E18)
176#define GPIO_PORTC_PDR (0x80000E70)
177
178/* get port C pin data register */
179#define get_portc_pdr(bit) ((usb_read(GPIO_PORTC_PDR) & _BIT(bit)) != 0)
180/* get port C data direction register */
181#define get_portc_ddr(bit) ((usb_read(GPIO_PORTC_DDR) & _BIT(bit)) != 0)
182/* set port C data register */
183#define set_portc_dr(bit, val) (val ? usb_set(_BIT(bit), GPIO_PORTC_DR) : usb_clear(_BIT(bit), GPIO_PORTC_DR))
184/* set port C data direction register */
185#define set_portc_ddr(bit, val) (val ? usb_set(_BIT(bit), GPIO_PORTC_DDR) : usb_clear(_BIT(bit), GPIO_PORTC_DDR))
186
187/*
188 * LPD7A404 GPIO's:
189 * Port C bit 1 = USB Port 1 Power Enable
190 * Port C bit 2 = USB Port 1 Data Carrier Detect
191 */
192#define is_usb_connected() get_portc_pdr(2)
193
194#ifdef CONFIG_USB_GADGET_DEBUG_FILES
195
196static const char proc_node_name[] = "driver/udc";
197
198static int
199udc_proc_read(char *page, char **start, off_t off, int count,
200 int *eof, void *_dev)
201{
202 char *buf = page;
203 struct lh7a40x_udc *dev = _dev;
204 char *next = buf;
205 unsigned size = count;
206 unsigned long flags;
207 int t;
208
209 if (off != 0)
210 return 0;
211
212 local_irq_save(flags);
213
214 /* basic device status */
215 t = scnprintf(next, size,
216 DRIVER_DESC "\n"
217 "%s version: %s\n"
218 "Gadget driver: %s\n"
219 "Host: %s\n\n",
220 driver_name, DRIVER_VERSION,
221 dev->driver ? dev->driver->driver.name : "(none)",
222 is_usb_connected()? "full speed" : "disconnected");
223 size -= t;
224 next += t;
225
226 t = scnprintf(next, size,
227 "GPIO:\n"
228 " Port C bit 1: %d, dir %d\n"
229 " Port C bit 2: %d, dir %d\n\n",
230 get_portc_pdr(1), get_portc_ddr(1),
231 get_portc_pdr(2), get_portc_ddr(2)
232 );
233 size -= t;
234 next += t;
235
236 t = scnprintf(next, size,
237 "DCP pullup: %d\n\n",
238 (usb_read(USB_PM) & PM_USB_DCP) != 0);
239 size -= t;
240 next += t;
241
242 local_irq_restore(flags);
243 *eof = 1;
244 return count - size;
245}
246
247#define create_proc_files() create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
248#define remove_proc_files() remove_proc_entry(proc_node_name, NULL)
249
250#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
251
252#define create_proc_files() do {} while (0)
253#define remove_proc_files() do {} while (0)
254
255#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
256
257/*
258 * udc_disable - disable USB device controller
259 */
260static void udc_disable(struct lh7a40x_udc *dev)
261{
262 DEBUG("%s, %p\n", __FUNCTION__, dev);
263
264 udc_set_address(dev, 0);
265
266 /* Disable interrupts */
267 usb_write(0, USB_IN_INT_EN);
268 usb_write(0, USB_OUT_INT_EN);
269 usb_write(0, USB_INT_EN);
270
271 /* Disable the USB */
272 usb_write(0, USB_PM);
273
274#ifdef CONFIG_ARCH_LH7A404
275 /* Disable USB power */
276 set_portc_dr(1, 0);
277#endif
278
279 /* if hardware supports it, disconnect from usb */
280 /* make_usb_disappear(); */
281
282 dev->ep0state = WAIT_FOR_SETUP;
283 dev->gadget.speed = USB_SPEED_UNKNOWN;
284 dev->usb_address = 0;
285}
286
287/*
288 * udc_reinit - initialize software state
289 */
290static void udc_reinit(struct lh7a40x_udc *dev)
291{
292 u32 i;
293
294 DEBUG("%s, %p\n", __FUNCTION__, dev);
295
296 /* device/ep0 records init */
297 INIT_LIST_HEAD(&dev->gadget.ep_list);
298 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
299 dev->ep0state = WAIT_FOR_SETUP;
300
301 /* basic endpoint records init */
302 for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
303 struct lh7a40x_ep *ep = &dev->ep[i];
304
305 if (i != 0)
306 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
307
308 ep->desc = 0;
309 ep->stopped = 0;
310 INIT_LIST_HEAD(&ep->queue);
311 ep->pio_irqs = 0;
312 }
313
314 /* the rest was statically initialized, and is read-only */
315}
316
317#define BYTES2MAXP(x) (x / 8)
318#define MAXP2BYTES(x) (x * 8)
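/* worked example: a 64-byte bulk endpoint is programmed as BYTES2MAXP(64) == 8,
 * and MAXP2BYTES(8) == 64 recovers the byte count (MAXP counts 8-byte units).
 */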
319
320/* until it's enabled, this UDC should be completely invisible
321 * to any USB host.
322 */
323static void udc_enable(struct lh7a40x_udc *dev)
324{
325 int ep;
326
327 DEBUG("%s, %p\n", __FUNCTION__, dev);
328
329 dev->gadget.speed = USB_SPEED_UNKNOWN;
330
331#ifdef CONFIG_ARCH_LH7A404
332 /* Set Port C bit 1 & 2 as output */
333 set_portc_ddr(1, 1);
334 set_portc_ddr(2, 1);
335
336 /* Enable USB power */
337 set_portc_dr(1, 0);
338#endif
339
340 /*
341	 * Cf. Chapter 18.1.3.1, Initializing the USB
342 */
343
344 /* Disable the USB */
345 usb_clear(PM_USB_ENABLE, USB_PM);
346
347 /* Reset APB & I/O sides of the USB */
348 usb_set(USB_RESET_APB | USB_RESET_IO, USB_RESET);
349 mdelay(5);
350 usb_clear(USB_RESET_APB | USB_RESET_IO, USB_RESET);
351
352 /* Set MAXP values for each */
353 for (ep = 0; ep < UDC_MAX_ENDPOINTS; ep++) {
354 struct lh7a40x_ep *ep_reg = &dev->ep[ep];
355 u32 csr;
356
357 usb_set_index(ep);
358
359 switch (ep_reg->ep_type) {
360 case ep_bulk_in:
361 case ep_interrupt:
362 usb_clear(USB_IN_CSR2_USB_DMA_EN | USB_IN_CSR2_AUTO_SET,
363 ep_reg->csr2);
364 /* Fall through */
365 case ep_control:
366 usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)),
367 USB_IN_MAXP);
368 break;
369 case ep_bulk_out:
370 usb_clear(USB_OUT_CSR2_USB_DMA_EN |
371 USB_OUT_CSR2_AUTO_CLR, ep_reg->csr2);
372 usb_write(BYTES2MAXP(ep_maxpacket(ep_reg)),
373 USB_OUT_MAXP);
374 break;
375 }
376
377 /* Read & Write CSR1, just in case */
378 csr = usb_read(ep_reg->csr1);
379 usb_write(csr, ep_reg->csr1);
380
381 flush(ep_reg);
382 }
383
384 /* Disable interrupts */
385 usb_write(0, USB_IN_INT_EN);
386 usb_write(0, USB_OUT_INT_EN);
387 usb_write(0, USB_INT_EN);
388
389 /* Enable interrupts */
390 usb_set(USB_IN_INT_EP0, USB_IN_INT_EN);
391 usb_set(USB_INT_RESET_INT | USB_INT_RESUME_INT, USB_INT_EN);
392	/* Don't enable the rest of the interrupts */
393 /* usb_set(USB_IN_INT_EP3 | USB_IN_INT_EP1 | USB_IN_INT_EP0, USB_IN_INT_EN);
394 usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN); */
395
396 /* Enable SUSPEND */
397 usb_set(PM_ENABLE_SUSPEND, USB_PM);
398
399 /* Enable the USB */
400 usb_set(PM_USB_ENABLE, USB_PM);
401
402#ifdef CONFIG_ARCH_LH7A404
403 /* NOTE: DOES NOT WORK! */
404 /* Let host detect UDC:
405 * Software must write a 0 to the PMR:DCP_CTRL bit to turn this
406 * transistor on and pull the USBDP pin HIGH.
407 */
408 /* usb_clear(PM_USB_DCP, USB_PM);
409 usb_set(PM_USB_DCP, USB_PM); */
410#endif
411}
412
413/*
414 Register entry point for the peripheral controller driver.
415*/
416int usb_gadget_register_driver(struct usb_gadget_driver *driver)
417{
418 struct lh7a40x_udc *dev = the_controller;
419 int retval;
420
421 DEBUG("%s: %s\n", __FUNCTION__, driver->driver.name);
422
423 if (!driver
424 || driver->speed != USB_SPEED_FULL
425 || !driver->bind
426 || !driver->unbind || !driver->disconnect || !driver->setup)
427 return -EINVAL;
428 if (!dev)
429 return -ENODEV;
430 if (dev->driver)
431 return -EBUSY;
432
433 /* first hook up the driver ... */
434 dev->driver = driver;
435 dev->gadget.dev.driver = &driver->driver;
436
437 device_add(&dev->gadget.dev);
438 retval = driver->bind(&dev->gadget);
439 if (retval) {
440 printk("%s: bind to driver %s --> error %d\n", dev->gadget.name,
441 driver->driver.name, retval);
442 device_del(&dev->gadget.dev);
443
444 dev->driver = 0;
445 dev->gadget.dev.driver = 0;
446 return retval;
447 }
448
449 /* ... then enable host detection and ep0; and we're ready
450 * for set_configuration as well as eventual disconnect.
451 * NOTE: this shouldn't power up until later.
452 */
453 printk("%s: registered gadget driver '%s'\n", dev->gadget.name,
454 driver->driver.name);
455
456 udc_enable(dev);
457
458 return 0;
459}
460
461EXPORT_SYMBOL(usb_gadget_register_driver);
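/* A minimal sketch of what a gadget driver must pass in to satisfy the checks
 * above; the example_* names are hypothetical, not part of this driver:
 *
 *	static struct usb_gadget_driver example_driver = {
 *		.speed		= USB_SPEED_FULL,	// this UDC is full speed only
 *		.bind		= example_bind,
 *		.unbind		= example_unbind,
 *		.setup		= example_setup,
 *		.disconnect	= example_disconnect,
 *		.driver		= { .name = "example_gadget" },
 *	};
 *
 *	status = usb_gadget_register_driver (&example_driver);
 */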
462
463/*
464 Unregister entry point for the peripheral controller driver.
465*/
466int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
467{
468 struct lh7a40x_udc *dev = the_controller;
469 unsigned long flags;
470
471 if (!dev)
472 return -ENODEV;
473 if (!driver || driver != dev->driver)
474 return -EINVAL;
475
476 spin_lock_irqsave(&dev->lock, flags);
477 dev->driver = 0;
478 stop_activity(dev, driver);
479 spin_unlock_irqrestore(&dev->lock, flags);
480
481 driver->unbind(&dev->gadget);
482 device_del(&dev->gadget.dev);
483
484 udc_disable(dev);
485
486 DEBUG("unregistered gadget driver '%s'\n", driver->driver.name);
487 return 0;
488}
489
490EXPORT_SYMBOL(usb_gadget_unregister_driver);
491
492/*-------------------------------------------------------------------------*/
493
494/** Write request to FIFO (max write == maxp size)
495 * Return: 0 = still running, 1 = completed, negative = errno
496 * NOTE: INDEX register must be set for EP
497 */
498static int write_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
499{
500 u32 max;
501 u32 csr;
502
503 max = le16_to_cpu(ep->desc->wMaxPacketSize);
504
505 csr = usb_read(ep->csr1);
506 DEBUG("CSR: %x %d\n", csr, csr & USB_IN_CSR1_FIFO_NOT_EMPTY);
507
508 if (!(csr & USB_IN_CSR1_FIFO_NOT_EMPTY)) {
509 unsigned count;
510 int is_last, is_short;
511
512 count = write_packet(ep, req, max);
513 usb_set(USB_IN_CSR1_IN_PKT_RDY, ep->csr1);
514
515 /* last packet is usually short (or a zlp) */
516 if (unlikely(count != max))
517 is_last = is_short = 1;
518 else {
519 if (likely(req->req.length != req->req.actual)
520 || req->req.zero)
521 is_last = 0;
522 else
523 is_last = 1;
524 /* interrupt/iso maxpacket may not fill the fifo */
525 is_short = unlikely(max < ep_maxpacket(ep));
526 }
527
528 DEBUG("%s: wrote %s %d bytes%s%s %d left %p\n", __FUNCTION__,
529 ep->ep.name, count,
530 is_last ? "/L" : "", is_short ? "/S" : "",
531 req->req.length - req->req.actual, req);
532
533 /* requests complete when all IN data is in the FIFO */
534 if (is_last) {
535 done(ep, req, 0);
536 if (list_empty(&ep->queue)) {
537 pio_irq_disable(ep_index(ep));
538 }
539 return 1;
540 }
541 } else {
542 DEBUG("Hmm.. %d ep FIFO is not empty!\n", ep_index(ep));
543 }
544
545 return 0;
546}
547
548/** Read from FIFO into request (max read == bytes in fifo)
549 * Return: 0 = still running, 1 = completed, negative = errno
550 * NOTE: INDEX register must be set for EP
551 */
552static int read_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
553{
554 u32 csr;
555 u8 *buf;
556 unsigned bufferspace, count, is_short;
557 volatile u32 *fifo = (volatile u32 *)ep->fifo;
558
559 /* make sure there's a packet in the FIFO. */
560 csr = usb_read(ep->csr1);
561 if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY)) {
562 DEBUG("%s: Packet NOT ready!\n", __FUNCTION__);
563 return -EINVAL;
564 }
565
566 buf = req->req.buf + req->req.actual;
567 prefetchw(buf);
568 bufferspace = req->req.length - req->req.actual;
569
570 /* read all bytes from this packet */
571 count = usb_read(USB_OUT_FIFO_WC1);
572 req->req.actual += min(count, bufferspace);
573
574 is_short = (count < ep->ep.maxpacket);
575 DEBUG("read %s %02x, %d bytes%s req %p %d/%d\n",
576 ep->ep.name, csr, count,
577 is_short ? "/S" : "", req, req->req.actual, req->req.length);
578
579 while (likely(count-- != 0)) {
580 u8 byte = (u8) (*fifo & 0xff);
581
582 if (unlikely(bufferspace == 0)) {
583 /* this happens when the driver's buffer
584 * is smaller than what the host sent.
585 * discard the extra data.
586 */
587 if (req->req.status != -EOVERFLOW)
588 printk("%s overflow %d\n", ep->ep.name, count);
589 req->req.status = -EOVERFLOW;
590 } else {
591 *buf++ = byte;
592 bufferspace--;
593 }
594 }
595
596 usb_clear(USB_OUT_CSR1_OUT_PKT_RDY, ep->csr1);
597
598 /* completion */
599 if (is_short || req->req.actual == req->req.length) {
600 done(ep, req, 0);
601 usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);
602
603 if (list_empty(&ep->queue))
604 pio_irq_disable(ep_index(ep));
605 return 1;
606 }
607
608 /* finished that packet. the next one may be waiting... */
609 return 0;
610}
611
612/*
613 * done - retire a request; caller blocked irqs
614 * INDEX register is preserved to keep same
615 */
616static void done(struct lh7a40x_ep *ep, struct lh7a40x_request *req, int status)
617{
618 unsigned int stopped = ep->stopped;
619 u32 index;
620
621 DEBUG("%s, %p\n", __FUNCTION__, ep);
622 list_del_init(&req->queue);
623
624 if (likely(req->req.status == -EINPROGRESS))
625 req->req.status = status;
626 else
627 status = req->req.status;
628
629 if (status && status != -ESHUTDOWN)
630 DEBUG("complete %s req %p stat %d len %u/%u\n",
631 ep->ep.name, &req->req, status,
632 req->req.actual, req->req.length);
633
634 /* don't modify queue heads during completion callback */
635 ep->stopped = 1;
636 /* Read current index (completion may modify it) */
637 index = usb_read(USB_INDEX);
638
639 spin_unlock(&ep->dev->lock);
640 req->req.complete(&ep->ep, &req->req);
641 spin_lock(&ep->dev->lock);
642
643 /* Restore index */
644 usb_set_index(index);
645 ep->stopped = stopped;
646}
647
648/** Enable EP interrupt */
649static void pio_irq_enable(int ep)
650{
651 DEBUG("%s: %d\n", __FUNCTION__, ep);
652
653 switch (ep) {
654 case 1:
655 usb_set(USB_IN_INT_EP1, USB_IN_INT_EN);
656 break;
657 case 2:
658 usb_set(USB_OUT_INT_EP2, USB_OUT_INT_EN);
659 break;
660 case 3:
661 usb_set(USB_IN_INT_EP3, USB_IN_INT_EN);
662 break;
663 default:
664 DEBUG("Unknown endpoint: %d\n", ep);
665 break;
666 }
667}
668
669/** Disable EP interrupt */
670static void pio_irq_disable(int ep)
671{
672 DEBUG("%s: %d\n", __FUNCTION__, ep);
673
674 switch (ep) {
675 case 1:
676 usb_clear(USB_IN_INT_EP1, USB_IN_INT_EN);
677 break;
678 case 2:
679 usb_clear(USB_OUT_INT_EP2, USB_OUT_INT_EN);
680 break;
681 case 3:
682 usb_clear(USB_IN_INT_EP3, USB_IN_INT_EN);
683 break;
684 default:
685 DEBUG("Unknown endpoint: %d\n", ep);
686 break;
687 }
688}
689
690/*
691 * nuke - dequeue ALL requests
692 */
693void nuke(struct lh7a40x_ep *ep, int status)
694{
695 struct lh7a40x_request *req;
696
697 DEBUG("%s, %p\n", __FUNCTION__, ep);
698
699 /* Flush FIFO */
700 flush(ep);
701
702 /* called with irqs blocked */
703 while (!list_empty(&ep->queue)) {
704 req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
705 done(ep, req, status);
706 }
707
708	/* Disable IRQ if EP is enabled (has descriptor) */
709 if (ep->desc)
710 pio_irq_disable(ep_index(ep));
711}
712
713/*
714void nuke_all(struct lh7a40x_udc *dev)
715{
716 int n;
717 for(n=0; n<UDC_MAX_ENDPOINTS; n++) {
718 struct lh7a40x_ep *ep = &dev->ep[n];
719 usb_set_index(n);
720 nuke(ep, 0);
721 }
722}*/
723
724/*
725static void flush_all(struct lh7a40x_udc *dev)
726{
727 int n;
728 for (n = 0; n < UDC_MAX_ENDPOINTS; n++)
729 {
730 struct lh7a40x_ep *ep = &dev->ep[n];
731 flush(ep);
732 }
733}
734*/
735
736/** Flush EP
737 * NOTE: INDEX register must be set before this call
738 */
739static void flush(struct lh7a40x_ep *ep)
740{
741 DEBUG("%s, %p\n", __FUNCTION__, ep);
742
743 switch (ep->ep_type) {
744 case ep_control:
745 /* check, by implication c.f. 15.1.2.11 */
746 break;
747
748 case ep_bulk_in:
749 case ep_interrupt:
750 /* if(csr & USB_IN_CSR1_IN_PKT_RDY) */
751 usb_set(USB_IN_CSR1_FIFO_FLUSH, ep->csr1);
752 break;
753
754 case ep_bulk_out:
755 /* if(csr & USB_OUT_CSR1_OUT_PKT_RDY) */
756 usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);
757 break;
758 }
759}
760
761/**
762 * lh7a40x_in_epn - handle IN interrupt
763 */
764static void lh7a40x_in_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
765{
766 u32 csr;
767 struct lh7a40x_ep *ep = &dev->ep[ep_idx];
768 struct lh7a40x_request *req;
769
770 usb_set_index(ep_idx);
771
772 csr = usb_read(ep->csr1);
773 DEBUG("%s: %d, csr %x\n", __FUNCTION__, ep_idx, csr);
774
775 if (csr & USB_IN_CSR1_SENT_STALL) {
776 DEBUG("USB_IN_CSR1_SENT_STALL\n");
777 usb_set(USB_IN_CSR1_SENT_STALL /*|USB_IN_CSR1_SEND_STALL */ ,
778 ep->csr1);
779 return;
780 }
781
782 if (!ep->desc) {
783 DEBUG("%s: NO EP DESC\n", __FUNCTION__);
784 return;
785 }
786
787 if (list_empty(&ep->queue))
788 req = 0;
789 else
790 req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
791
792 DEBUG("req: %p\n", req);
793
794 if (!req)
795 return;
796
797 write_fifo(ep, req);
798}
799
800/* ********************************************************************************************* */
801/* Bulk OUT (recv)
802 */
803
804static void lh7a40x_out_epn(struct lh7a40x_udc *dev, u32 ep_idx, u32 intr)
805{
806 struct lh7a40x_ep *ep = &dev->ep[ep_idx];
807 struct lh7a40x_request *req;
808
809 DEBUG("%s: %d\n", __FUNCTION__, ep_idx);
810
811 usb_set_index(ep_idx);
812
813 if (ep->desc) {
814 u32 csr;
815 csr = usb_read(ep->csr1);
816
817 while ((csr =
818 usb_read(ep->
819 csr1)) & (USB_OUT_CSR1_OUT_PKT_RDY |
820 USB_OUT_CSR1_SENT_STALL)) {
821 DEBUG("%s: %x\n", __FUNCTION__, csr);
822
823 if (csr & USB_OUT_CSR1_SENT_STALL) {
824 DEBUG("%s: stall sent, flush fifo\n",
825 __FUNCTION__);
826 /* usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1); */
827 flush(ep);
828 } else if (csr & USB_OUT_CSR1_OUT_PKT_RDY) {
829 if (list_empty(&ep->queue))
830 req = 0;
831 else
832 req =
833 list_entry(ep->queue.next,
834 struct lh7a40x_request,
835 queue);
836
837 if (!req) {
838 printk("%s: NULL REQ %d\n",
839 __FUNCTION__, ep_idx);
840 flush(ep);
841 break;
842 } else {
843 read_fifo(ep, req);
844 }
845 }
846
847 }
848
849 } else {
850 /* Throw packet away.. */
851 printk("%s: No descriptor?!?\n", __FUNCTION__);
852 flush(ep);
853 }
854}
855
856static void stop_activity(struct lh7a40x_udc *dev,
857 struct usb_gadget_driver *driver)
858{
859 int i;
860
861 /* don't disconnect drivers more than once */
862 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
863 driver = 0;
864 dev->gadget.speed = USB_SPEED_UNKNOWN;
865
866 /* prevent new request submissions, kill any outstanding requests */
867 for (i = 0; i < UDC_MAX_ENDPOINTS; i++) {
868 struct lh7a40x_ep *ep = &dev->ep[i];
869 ep->stopped = 1;
870
871 usb_set_index(i);
872 nuke(ep, -ESHUTDOWN);
873 }
874
875 /* report disconnect; the driver is already quiesced */
876 if (driver) {
877 spin_unlock(&dev->lock);
878 driver->disconnect(&dev->gadget);
879 spin_lock(&dev->lock);
880 }
881
882 /* re-init driver-visible data structures */
883 udc_reinit(dev);
884}
885
886/** Handle USB RESET interrupt
887 */
888static void lh7a40x_reset_intr(struct lh7a40x_udc *dev)
889{
890#if 0 /* def CONFIG_ARCH_LH7A404 */
891	/* Does not always work... */
892
893 DEBUG("%s: %d\n", __FUNCTION__, dev->usb_address);
894
895 if (!dev->usb_address) {
896 /*usb_set(USB_RESET_IO, USB_RESET);
897 mdelay(5);
898 usb_clear(USB_RESET_IO, USB_RESET); */
899 return;
900 }
901 /* Put the USB controller into reset. */
902 usb_set(USB_RESET_IO, USB_RESET);
903
904 /* Set Device ID to 0 */
905 udc_set_address(dev, 0);
906
907 /* Let PLL2 settle down */
908 mdelay(5);
909
910 /* Release the USB controller from reset */
911 usb_clear(USB_RESET_IO, USB_RESET);
912
913 /* Re-enable UDC */
914 udc_enable(dev);
915
916#endif
917 dev->gadget.speed = USB_SPEED_FULL;
918}
919
920/*
921 * lh7a40x usb client interrupt handler.
922 */
923static irqreturn_t lh7a40x_udc_irq(int irq, void *_dev, struct pt_regs *r)
924{
925 struct lh7a40x_udc *dev = _dev;
926
927 DEBUG("\n\n");
928
929 spin_lock(&dev->lock);
930
931 for (;;) {
932 u32 intr_in = usb_read(USB_IN_INT);
933 u32 intr_out = usb_read(USB_OUT_INT);
934 u32 intr_int = usb_read(USB_INT);
935
936 /* Test also against enable bits.. (lh7a40x errata).. Sigh.. */
937 u32 in_en = usb_read(USB_IN_INT_EN);
938 u32 out_en = usb_read(USB_OUT_INT_EN);
939
940 if (!intr_out && !intr_in && !intr_int)
941 break;
942
943 DEBUG("%s (on state %s)\n", __FUNCTION__,
944 state_names[dev->ep0state]);
945 DEBUG("intr_out = %x\n", intr_out);
946 DEBUG("intr_in = %x\n", intr_in);
947 DEBUG("intr_int = %x\n", intr_int);
948
949 if (intr_in) {
950 usb_write(intr_in, USB_IN_INT);
951
952 if ((intr_in & USB_IN_INT_EP1)
953 && (in_en & USB_IN_INT_EP1)) {
954 DEBUG("USB_IN_INT_EP1\n");
955 lh7a40x_in_epn(dev, 1, intr_in);
956 }
957 if ((intr_in & USB_IN_INT_EP3)
958 && (in_en & USB_IN_INT_EP3)) {
959 DEBUG("USB_IN_INT_EP3\n");
960 lh7a40x_in_epn(dev, 3, intr_in);
961 }
962 if (intr_in & USB_IN_INT_EP0) {
963 DEBUG("USB_IN_INT_EP0 (control)\n");
964 lh7a40x_handle_ep0(dev, intr_in);
965 }
966 }
967
968 if (intr_out) {
969 usb_write(intr_out, USB_OUT_INT);
970
971 if ((intr_out & USB_OUT_INT_EP2)
972 && (out_en & USB_OUT_INT_EP2)) {
973 DEBUG("USB_OUT_INT_EP2\n");
974 lh7a40x_out_epn(dev, 2, intr_out);
975 }
976 }
977
978 if (intr_int) {
979 usb_write(intr_int, USB_INT);
980
981 if (intr_int & USB_INT_RESET_INT) {
982 lh7a40x_reset_intr(dev);
983 }
984
985 if (intr_int & USB_INT_RESUME_INT) {
986 DEBUG("USB resume\n");
987
988 if (dev->gadget.speed != USB_SPEED_UNKNOWN
989 && dev->driver
990 && dev->driver->resume
991 && is_usb_connected()) {
992 dev->driver->resume(&dev->gadget);
993 }
994 }
995
996 if (intr_int & USB_INT_SUSPEND_INT) {
997 DEBUG("USB suspend%s\n",
998 is_usb_connected()? "" : "+disconnect");
999 if (!is_usb_connected()) {
1000 stop_activity(dev, dev->driver);
1001 } else if (dev->gadget.speed !=
1002 USB_SPEED_UNKNOWN && dev->driver
1003 && dev->driver->suspend) {
1004 dev->driver->suspend(&dev->gadget);
1005 }
1006 }
1007
1008 }
1009 }
1010
1011 spin_unlock(&dev->lock);
1012
1013 return IRQ_HANDLED;
1014}
1015
1016static int lh7a40x_ep_enable(struct usb_ep *_ep,
1017 const struct usb_endpoint_descriptor *desc)
1018{
1019 struct lh7a40x_ep *ep;
1020 struct lh7a40x_udc *dev;
1021 unsigned long flags;
1022
1023 DEBUG("%s, %p\n", __FUNCTION__, _ep);
1024
1025 ep = container_of(_ep, struct lh7a40x_ep, ep);
1026 if (!_ep || !desc || ep->desc || _ep->name == ep0name
1027 || desc->bDescriptorType != USB_DT_ENDPOINT
1028 || ep->bEndpointAddress != desc->bEndpointAddress
1029 || ep_maxpacket(ep) < le16_to_cpu(desc->wMaxPacketSize)) {
1030 DEBUG("%s, bad ep or descriptor\n", __FUNCTION__);
1031 return -EINVAL;
1032 }
1033
1034 /* xfer types must match, except that interrupt ~= bulk */
1035 if (ep->bmAttributes != desc->bmAttributes
1036 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
1037 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
1038 DEBUG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
1039 return -EINVAL;
1040 }
1041
1042 /* hardware _could_ do smaller, but driver doesn't */
1043 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
1044 && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(ep))
1045 || !desc->wMaxPacketSize) {
1046 DEBUG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
1047 return -ERANGE;
1048 }
1049
1050 dev = ep->dev;
1051 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
1052 DEBUG("%s, bogus device state\n", __FUNCTION__);
1053 return -ESHUTDOWN;
1054 }
1055
1056 spin_lock_irqsave(&ep->dev->lock, flags);
1057
1058 ep->stopped = 0;
1059 ep->desc = desc;
1060 ep->pio_irqs = 0;
1061 ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
1062
1063 /* Reset halt state (does flush) */
1064 lh7a40x_set_halt(_ep, 0);
1065
1066 spin_unlock_irqrestore(&ep->dev->lock, flags);
1067
1068 DEBUG("%s: enabled %s\n", __FUNCTION__, _ep->name);
1069 return 0;
1070}
1071
1072/** Disable EP
1073 * NOTE: Sets INDEX register
1074 */
1075static int lh7a40x_ep_disable(struct usb_ep *_ep)
1076{
1077 struct lh7a40x_ep *ep;
1078 unsigned long flags;
1079
1080 DEBUG("%s, %p\n", __FUNCTION__, _ep);
1081
1082 ep = container_of(_ep, struct lh7a40x_ep, ep);
1083 if (!_ep || !ep->desc) {
1084 DEBUG("%s, %s not enabled\n", __FUNCTION__,
1085 _ep ? ep->ep.name : NULL);
1086 return -EINVAL;
1087 }
1088
1089 spin_lock_irqsave(&ep->dev->lock, flags);
1090
1091 usb_set_index(ep_index(ep));
1092
1093 /* Nuke all pending requests (does flush) */
1094 nuke(ep, -ESHUTDOWN);
1095
1096 /* Disable ep IRQ */
1097 pio_irq_disable(ep_index(ep));
1098
1099 ep->desc = 0;
1100 ep->stopped = 1;
1101
1102 spin_unlock_irqrestore(&ep->dev->lock, flags);
1103
1104 DEBUG("%s: disabled %s\n", __FUNCTION__, _ep->name);
1105 return 0;
1106}
1107
1108static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep,
1109 int gfp_flags)
1110{
1111 struct lh7a40x_request *req;
1112
1113 DEBUG("%s, %p\n", __FUNCTION__, ep);
1114
1115 req = kmalloc(sizeof *req, gfp_flags);
1116 if (!req)
1117 return 0;
1118
1119 memset(req, 0, sizeof *req);
1120 INIT_LIST_HEAD(&req->queue);
1121
1122 return &req->req;
1123}
1124
1125static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
1126{
1127 struct lh7a40x_request *req;
1128
1129 DEBUG("%s, %p\n", __FUNCTION__, ep);
1130
1131 req = container_of(_req, struct lh7a40x_request, req);
1132 WARN_ON(!list_empty(&req->queue));
1133 kfree(req);
1134}
1135
1136static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes,
1137 dma_addr_t * dma, int gfp_flags)
1138{
1139 char *retval;
1140
1141 DEBUG("%s (%p, %d, %d)\n", __FUNCTION__, ep, bytes, gfp_flags);
1142
1143 retval = kmalloc(bytes, gfp_flags & ~(__GFP_DMA | __GFP_HIGHMEM));
1144 if (retval)
1145 *dma = virt_to_bus(retval);
1146 return retval;
1147}
1148
1149static void lh7a40x_free_buffer(struct usb_ep *ep, void *buf, dma_addr_t dma,
1150 unsigned bytes)
1151{
1152 DEBUG("%s, %p\n", __FUNCTION__, ep);
1153 kfree(buf);
1154}
1155
1156/** Queue one request
1157 * Kickstart transfer if needed
1158 * NOTE: Sets INDEX register
1159 */
1160static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req,
1161 int gfp_flags)
1162{
1163 struct lh7a40x_request *req;
1164 struct lh7a40x_ep *ep;
1165 struct lh7a40x_udc *dev;
1166 unsigned long flags;
1167
1168 DEBUG("\n\n\n%s, %p\n", __FUNCTION__, _ep);
1169
1170 req = container_of(_req, struct lh7a40x_request, req);
1171 if (unlikely
1172 (!_req || !_req->complete || !_req->buf
1173 || !list_empty(&req->queue))) {
1174 DEBUG("%s, bad params\n", __FUNCTION__);
1175 return -EINVAL;
1176 }
1177
1178 ep = container_of(_ep, struct lh7a40x_ep, ep);
1179 if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
1180 DEBUG("%s, bad ep\n", __FUNCTION__);
1181 return -EINVAL;
1182 }
1183
1184 dev = ep->dev;
1185 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
1186 DEBUG("%s, bogus device state %p\n", __FUNCTION__, dev->driver);
1187 return -ESHUTDOWN;
1188 }
1189
1190 DEBUG("%s queue req %p, len %d buf %p\n", _ep->name, _req, _req->length,
1191 _req->buf);
1192
1193 spin_lock_irqsave(&dev->lock, flags);
1194
1195 _req->status = -EINPROGRESS;
1196 _req->actual = 0;
1197
1198 /* kickstart this i/o queue? */
1199 DEBUG("Add to %d Q %d %d\n", ep_index(ep), list_empty(&ep->queue),
1200 ep->stopped);
1201 if (list_empty(&ep->queue) && likely(!ep->stopped)) {
1202 u32 csr;
1203
1204 if (unlikely(ep_index(ep) == 0)) {
1205 /* EP0 */
1206 list_add_tail(&req->queue, &ep->queue);
1207 lh7a40x_ep0_kick(dev, ep);
1208 req = 0;
1209 } else if (ep_is_in(ep)) {
1210 /* EP1 & EP3 */
1211 usb_set_index(ep_index(ep));
1212 csr = usb_read(ep->csr1);
1213 pio_irq_enable(ep_index(ep));
1214 if ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY) == 0) {
1215 if (write_fifo(ep, req) == 1)
1216 req = 0;
1217 }
1218 } else {
1219 /* EP2 */
1220 usb_set_index(ep_index(ep));
1221 csr = usb_read(ep->csr1);
1222 pio_irq_enable(ep_index(ep));
1223 if (!(csr & USB_OUT_CSR1_FIFO_FULL)) {
1224 if (read_fifo(ep, req) == 1)
1225 req = 0;
1226 }
1227 }
1228 }
1229
1230 /* pio or dma irq handler advances the queue. */
1231 if (likely(req != 0))
1232 list_add_tail(&req->queue, &ep->queue);
1233
1234 spin_unlock_irqrestore(&dev->lock, flags);
1235
1236 return 0;
1237}
1238
1239/* dequeue JUST ONE request */
1240static int lh7a40x_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1241{
1242 struct lh7a40x_ep *ep;
1243 struct lh7a40x_request *req;
1244 unsigned long flags;
1245
1246 DEBUG("%s, %p\n", __FUNCTION__, _ep);
1247
1248 ep = container_of(_ep, struct lh7a40x_ep, ep);
1249 if (!_ep || ep->ep.name == ep0name)
1250 return -EINVAL;
1251
1252 spin_lock_irqsave(&ep->dev->lock, flags);
1253
1254 /* make sure it's actually queued on this endpoint */
1255 list_for_each_entry(req, &ep->queue, queue) {
1256 if (&req->req == _req)
1257 break;
1258 }
1259 if (&req->req != _req) {
1260 spin_unlock_irqrestore(&ep->dev->lock, flags);
1261 return -EINVAL;
1262 }
1263
1264 done(ep, req, -ECONNRESET);
1265
1266 spin_unlock_irqrestore(&ep->dev->lock, flags);
1267 return 0;
1268}
1269
1270/** Halt specific EP
1271 * Return 0 if success
1272 * NOTE: Sets INDEX register to EP !
1273 */
1274static int lh7a40x_set_halt(struct usb_ep *_ep, int value)
1275{
1276 struct lh7a40x_ep *ep;
1277 unsigned long flags;
1278
1279 ep = container_of(_ep, struct lh7a40x_ep, ep);
1280 if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
1281 DEBUG("%s, bad ep\n", __FUNCTION__);
1282 return -EINVAL;
1283 }
1284
1285 usb_set_index(ep_index(ep));
1286
1287 DEBUG("%s, ep %d, val %d\n", __FUNCTION__, ep_index(ep), value);
1288
1289 spin_lock_irqsave(&ep->dev->lock, flags);
1290
1291 if (ep_index(ep) == 0) {
1292 /* EP0 */
1293 usb_set(EP0_SEND_STALL, ep->csr1);
1294 } else if (ep_is_in(ep)) {
1295 u32 csr = usb_read(ep->csr1);
1296 if (value && ((csr & USB_IN_CSR1_FIFO_NOT_EMPTY)
1297 || !list_empty(&ep->queue))) {
1298 /*
1299 * Attempts to halt IN endpoints will fail (returning -EAGAIN)
1300 * if any transfer requests are still queued, or if the controller
1301			 * FIFO still holds bytes that the host hasn't collected.
1302 */
1303 spin_unlock_irqrestore(&ep->dev->lock, flags);
1304 DEBUG
1305 ("Attempt to halt IN endpoint failed (returning -EAGAIN) %d %d\n",
1306 (csr & USB_IN_CSR1_FIFO_NOT_EMPTY),
1307 !list_empty(&ep->queue));
1308 return -EAGAIN;
1309 }
1310 flush(ep);
1311 if (value)
1312 usb_set(USB_IN_CSR1_SEND_STALL, ep->csr1);
1313 else {
1314 usb_clear(USB_IN_CSR1_SEND_STALL, ep->csr1);
1315 usb_set(USB_IN_CSR1_CLR_DATA_TOGGLE, ep->csr1);
1316 }
1317
1318 } else {
1319
1320 flush(ep);
1321 if (value)
1322 usb_set(USB_OUT_CSR1_SEND_STALL, ep->csr1);
1323 else {
1324 usb_clear(USB_OUT_CSR1_SEND_STALL, ep->csr1);
1325 usb_set(USB_OUT_CSR1_CLR_DATA_REG, ep->csr1);
1326 }
1327 }
1328
1329 if (value) {
1330 ep->stopped = 1;
1331 } else {
1332 ep->stopped = 0;
1333 }
1334
1335 spin_unlock_irqrestore(&ep->dev->lock, flags);
1336
1337 DEBUG("%s %s halted\n", _ep->name, value == 0 ? "NOT" : "IS");
1338
1339 return 0;
1340}
1341
1342/** Return bytes in EP FIFO
1343 * NOTE: Sets INDEX register to EP
1344 */
1345static int lh7a40x_fifo_status(struct usb_ep *_ep)
1346{
1347 u32 csr;
1348 int count = 0;
1349 struct lh7a40x_ep *ep;
1350
1351 ep = container_of(_ep, struct lh7a40x_ep, ep);
1352 if (!_ep) {
1353 DEBUG("%s, bad ep\n", __FUNCTION__);
1354 return -ENODEV;
1355 }
1356
1357 DEBUG("%s, %d\n", __FUNCTION__, ep_index(ep));
1358
1359 /* LPD can't report unclaimed bytes from IN fifos */
1360 if (ep_is_in(ep))
1361 return -EOPNOTSUPP;
1362
1363 usb_set_index(ep_index(ep));
1364
1365 csr = usb_read(ep->csr1);
1366 if (ep->dev->gadget.speed != USB_SPEED_UNKNOWN ||
1367 csr & USB_OUT_CSR1_OUT_PKT_RDY) {
1368 count = usb_read(USB_OUT_FIFO_WC1);
1369 }
1370
1371 return count;
1372}
1373
1374/** Flush EP FIFO
1375 * NOTE: Sets INDEX register to EP
1376 */
1377static void lh7a40x_fifo_flush(struct usb_ep *_ep)
1378{
1379 struct lh7a40x_ep *ep;
1380
1381 ep = container_of(_ep, struct lh7a40x_ep, ep);
1382 if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
1383 DEBUG("%s, bad ep\n", __FUNCTION__);
1384 return;
1385 }
1386
1387 usb_set_index(ep_index(ep));
1388 flush(ep);
1389}
1390
1391/****************************************************************/
1392/* End Point 0 related functions */
1393/****************************************************************/
1394
1395/* return: 0 = still running, 1 = completed, negative = errno */
1396static int write_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
1397{
1398 u32 max;
1399 unsigned count;
1400 int is_last;
1401
1402 max = ep_maxpacket(ep);
1403
1404 DEBUG_EP0("%s\n", __FUNCTION__);
1405
1406 count = write_packet(ep, req, max);
1407
1408 /* last packet is usually short (or a zlp) */
1409 if (unlikely(count != max))
1410 is_last = 1;
1411 else {
1412 if (likely(req->req.length != req->req.actual) || req->req.zero)
1413 is_last = 0;
1414 else
1415 is_last = 1;
1416 }
1417
1418 DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __FUNCTION__,
1419 ep->ep.name, count,
1420 is_last ? "/L" : "", req->req.length - req->req.actual, req);
1421
1422 /* requests complete when all IN data is in the FIFO */
1423 if (is_last) {
1424 done(ep, req, 0);
1425 return 1;
1426 }
1427
1428 return 0;
1429}
1430
1431static __inline__ int lh7a40x_fifo_read(struct lh7a40x_ep *ep,
1432 unsigned char *cp, int max)
1433{
1434 int bytes;
1435 int count = usb_read(USB_OUT_FIFO_WC1);
1436 volatile u32 *fifo = (volatile u32 *)ep->fifo;
1437
1438 if (count > max)
1439 count = max;
1440 bytes = count;
1441 while (count--)
1442 *cp++ = *fifo & 0xFF;
1443 return bytes;
1444}
1445
1446static __inline__ void lh7a40x_fifo_write(struct lh7a40x_ep *ep,
1447 unsigned char *cp, int count)
1448{
1449 volatile u32 *fifo = (volatile u32 *)ep->fifo;
1450 DEBUG_EP0("fifo_write: %d %d\n", ep_index(ep), count);
1451 while (count--)
1452 *fifo = *cp++;
1453}
1454
1455static int read_fifo_ep0(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
1456{
1457 u32 csr;
1458 u8 *buf;
1459 unsigned bufferspace, count, is_short;
1460 volatile u32 *fifo = (volatile u32 *)ep->fifo;
1461
1462 DEBUG_EP0("%s\n", __FUNCTION__);
1463
1464 csr = usb_read(USB_EP0_CSR);
1465 if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY))
1466 return 0;
1467
1468 buf = req->req.buf + req->req.actual;
1469 prefetchw(buf);
1470 bufferspace = req->req.length - req->req.actual;
1471
1472 /* read all bytes from this packet */
1473 if (likely(csr & EP0_OUT_PKT_RDY)) {
1474 count = usb_read(USB_OUT_FIFO_WC1);
1475 req->req.actual += min(count, bufferspace);
1476 } else /* zlp */
1477 count = 0;
1478
1479 is_short = (count < ep->ep.maxpacket);
1480 DEBUG_EP0("read %s %02x, %d bytes%s req %p %d/%d\n",
1481 ep->ep.name, csr, count,
1482 is_short ? "/S" : "", req, req->req.actual, req->req.length);
1483
1484 while (likely(count-- != 0)) {
1485 u8 byte = (u8) (*fifo & 0xff);
1486
1487 if (unlikely(bufferspace == 0)) {
1488 /* this happens when the driver's buffer
1489 * is smaller than what the host sent.
1490 * discard the extra data.
1491 */
1492 if (req->req.status != -EOVERFLOW)
1493 DEBUG_EP0("%s overflow %d\n", ep->ep.name,
1494 count);
1495 req->req.status = -EOVERFLOW;
1496 } else {
1497 *buf++ = byte;
1498 bufferspace--;
1499 }
1500 }
1501
1502 /* completion */
1503 if (is_short || req->req.actual == req->req.length) {
1504 done(ep, req, 0);
1505 return 1;
1506 }
1507
1508 /* finished that packet. the next one may be waiting... */
1509 return 0;
1510}
1511
1512/**
1513 * udc_set_address - set the USB address for this device
1514 * @address: address assigned by the host in the SET_ADDRESS request
1515 *
1516 * Called from control endpoint function after it decodes a set address setup packet.
1517 */
1518static void udc_set_address(struct lh7a40x_udc *dev, unsigned char address)
1519{
1520 DEBUG_EP0("%s: %d\n", __FUNCTION__, address);
1521 /* c.f. 15.1.2.2 Table 15-4 address will be used after DATA_END is set */
1522 dev->usb_address = address;
1523 usb_set((address & USB_FA_FUNCTION_ADDR), USB_FA);
1524 usb_set(USB_FA_ADDR_UPDATE | (address & USB_FA_FUNCTION_ADDR), USB_FA);
1525 /* usb_read(USB_FA); */
1526}
1527
1528/*
1529 * DATA_STATE_RECV (OUT_PKT_RDY)
1530 * - if error
1531 *	set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
1532 * - else
1533 *	set EP0_CLR_OUT bit
1534 *	if last, also set EP0_DATA_END bit
1535 */
1536static void lh7a40x_ep0_out(struct lh7a40x_udc *dev, u32 csr)
1537{
1538 struct lh7a40x_request *req;
1539 struct lh7a40x_ep *ep = &dev->ep[0];
1540 int ret;
1541
1542 DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
1543
1544 if (list_empty(&ep->queue))
1545 req = 0;
1546 else
1547 req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
1548
1549 if (req) {
1550
1551 if (req->req.length == 0) {
1552 DEBUG_EP0("ZERO LENGTH OUT!\n");
1553 usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
1554 dev->ep0state = WAIT_FOR_SETUP;
1555 return;
1556 }
1557 ret = read_fifo_ep0(ep, req);
1558 if (ret) {
1559 /* Done! */
1560 DEBUG_EP0("%s: finished, waiting for status\n",
1561 __FUNCTION__);
1562
1563 usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
1564 dev->ep0state = WAIT_FOR_SETUP;
1565 } else {
1566 /* Not done yet.. */
1567 DEBUG_EP0("%s: not finished\n", __FUNCTION__);
1568 usb_set(EP0_CLR_OUT, USB_EP0_CSR);
1569 }
1570 } else {
1571 DEBUG_EP0("NO REQ??!\n");
1572 }
1573}
1574
1575/*
1576 * DATA_STATE_XMIT
1577 */
1578static int lh7a40x_ep0_in(struct lh7a40x_udc *dev, u32 csr)
1579{
1580 struct lh7a40x_request *req;
1581 struct lh7a40x_ep *ep = &dev->ep[0];
1582 int ret, need_zlp = 0;
1583
1584 DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
1585
1586 if (list_empty(&ep->queue))
1587 req = 0;
1588 else
1589 req = list_entry(ep->queue.next, struct lh7a40x_request, queue);
1590
1591 if (!req) {
1592 DEBUG_EP0("%s: NULL REQ\n", __FUNCTION__);
1593 return 0;
1594 }
1595
1596 if (req->req.length == 0) {
1597
1598 usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
1599 dev->ep0state = WAIT_FOR_SETUP;
1600 return 1;
1601 }
1602
1603 if (req->req.length - req->req.actual == EP0_PACKETSIZE) {
1604		/* The next write will end exactly on a packet boundary, */
1605		/* so a zero-length packet must follow it. */
1606 need_zlp = 1;
1607 }
1608
1609 ret = write_fifo_ep0(ep, req);
1610
1611 if (ret == 1 && !need_zlp) {
1612 /* Last packet */
1613 DEBUG_EP0("%s: finished, waiting for status\n", __FUNCTION__);
1614
1615 usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
1616 dev->ep0state = WAIT_FOR_SETUP;
1617 } else {
1618 DEBUG_EP0("%s: not finished\n", __FUNCTION__);
1619 usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
1620 }
1621
1622 if (need_zlp) {
1623 DEBUG_EP0("%s: Need ZLP!\n", __FUNCTION__);
1624 usb_set(EP0_IN_PKT_RDY, USB_EP0_CSR);
1625 dev->ep0state = DATA_STATE_NEED_ZLP;
1626 }
1627
1628 return 1;
1629}
1630
1631static int lh7a40x_handle_get_status(struct lh7a40x_udc *dev,
1632 struct usb_ctrlrequest *ctrl)
1633{
1634 struct lh7a40x_ep *ep0 = &dev->ep[0];
1635 struct lh7a40x_ep *qep;
1636 int reqtype = (ctrl->bRequestType & USB_RECIP_MASK);
1637 u16 val = 0;
1638
1639 if (reqtype == USB_RECIP_INTERFACE) {
1640		/* Interface recipients are not handled specifically here.
1641		 * Per the USB spec the reply to an interface GET_STATUS is
1642		 * simply a zero status word, so leave val = 0.
1643		 */
1644 DEBUG_SETUP("GET_STATUS: USB_RECIP_INTERFACE\n");
1645 } else if (reqtype == USB_RECIP_DEVICE) {
1646 DEBUG_SETUP("GET_STATUS: USB_RECIP_DEVICE\n");
1647 val |= (1 << 0); /* Self powered */
1648 /*val |= (1<<1); *//* Remote wakeup */
1649 } else if (reqtype == USB_RECIP_ENDPOINT) {
1650 int ep_num = (ctrl->wIndex & ~USB_DIR_IN);
1651
1652 DEBUG_SETUP
1653 ("GET_STATUS: USB_RECIP_ENDPOINT (%d), ctrl->wLength = %d\n",
1654 ep_num, ctrl->wLength);
1655
1656 if (ctrl->wLength > 2 || ep_num > 3)
1657 return -EOPNOTSUPP;
1658
1659 qep = &dev->ep[ep_num];
1660 if (ep_is_in(qep) != ((ctrl->wIndex & USB_DIR_IN) ? 1 : 0)
1661 && ep_index(qep) != 0) {
1662 return -EOPNOTSUPP;
1663 }
1664
1665 usb_set_index(ep_index(qep));
1666
1667 /* Return status on next IN token */
1668 switch (qep->ep_type) {
1669 case ep_control:
1670 val =
1671 (usb_read(qep->csr1) & EP0_SEND_STALL) ==
1672 EP0_SEND_STALL;
1673 break;
1674 case ep_bulk_in:
1675 case ep_interrupt:
1676 val =
1677 (usb_read(qep->csr1) & USB_IN_CSR1_SEND_STALL) ==
1678 USB_IN_CSR1_SEND_STALL;
1679 break;
1680 case ep_bulk_out:
1681 val =
1682 (usb_read(qep->csr1) & USB_OUT_CSR1_SEND_STALL) ==
1683 USB_OUT_CSR1_SEND_STALL;
1684 break;
1685 }
1686
1687 /* Back to EP0 index */
1688 usb_set_index(0);
1689
1690 DEBUG_SETUP("GET_STATUS, ep: %d (%x), val = %d\n", ep_num,
1691 ctrl->wIndex, val);
1692 } else {
1693 DEBUG_SETUP("Unknown REQ TYPE: %d\n", reqtype);
1694 return -EOPNOTSUPP;
1695 }
1696
1697 /* Clear "out packet ready" */
1698 usb_set((EP0_CLR_OUT), USB_EP0_CSR);
1699 /* Put status to FIFO */
1700 lh7a40x_fifo_write(ep0, (u8 *) & val, sizeof(val));
1701 /* Issue "In packet ready" */
1702 usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
1703
1704 return 0;
1705}
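The status handler above queues a two-byte status word by writing val straight into the EP0 FIFO one byte at a time, which yields the little-endian layout the host expects as long as the CPU stores the low byte first (as it does on this platform). A minimal sketch of building the same reply with the byte order spelled out (hypothetical helper, not part of this driver):

#include <linux/types.h>

/* Hypothetical: serialize a GET_STATUS reply in wire order.
 * Bit 0 is self-powered (device recipient) or halted (endpoint
 * recipient); the low byte must go out first.
 */
static void build_get_status_reply(u16 status_bits, u8 reply[2])
{
	reply[0] = status_bits & 0xff;
	reply[1] = (status_bits >> 8) & 0xff;
}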
1706
1707/*
1708 * WAIT_FOR_SETUP (OUT_PKT_RDY)
1709 * - read data packet from EP0 FIFO
1710 * - decode command
1711 * - if error
1712 * set EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL bits
1713 * - else
1714 * set EP0_CLR_OUT | EP0_DATA_END bits
1715 */
1716static void lh7a40x_ep0_setup(struct lh7a40x_udc *dev, u32 csr)
1717{
1718 struct lh7a40x_ep *ep = &dev->ep[0];
1719 struct usb_ctrlrequest ctrl;
1720 int i, bytes, is_in;
1721
1722 DEBUG_SETUP("%s: %x\n", __FUNCTION__, csr);
1723
1724 /* Nuke all previous transfers */
1725 nuke(ep, -EPROTO);
1726
1727 /* read control req from fifo (8 bytes) */
1728 bytes = lh7a40x_fifo_read(ep, (unsigned char *)&ctrl, 8);
1729
1730 DEBUG_SETUP("Read CTRL REQ %d bytes\n", bytes);
1731 DEBUG_SETUP("CTRL.bRequestType = %d (is_in %d)\n", ctrl.bRequestType,
1732 ctrl.bRequestType == USB_DIR_IN);
1733 DEBUG_SETUP("CTRL.bRequest = %d\n", ctrl.bRequest);
1734 DEBUG_SETUP("CTRL.wLength = %d\n", ctrl.wLength);
1735 DEBUG_SETUP("CTRL.wValue = %d (%d)\n", ctrl.wValue, ctrl.wValue >> 8);
1736 DEBUG_SETUP("CTRL.wIndex = %d\n", ctrl.wIndex);
1737
1738 /* Set direction of EP0 */
1739 if (likely(ctrl.bRequestType & USB_DIR_IN)) {
1740 ep->bEndpointAddress |= USB_DIR_IN;
1741 is_in = 1;
1742 } else {
1743 ep->bEndpointAddress &= ~USB_DIR_IN;
1744 is_in = 0;
1745 }
1746
1747 dev->req_pending = 1;
1748
1749 /* Handle some SETUP packets ourselves */
1750 switch (ctrl.bRequest) {
1751 case USB_REQ_SET_ADDRESS:
1752 if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
1753 break;
1754
1755 DEBUG_SETUP("USB_REQ_SET_ADDRESS (%d)\n", ctrl.wValue);
1756 udc_set_address(dev, ctrl.wValue);
1757 usb_set((EP0_CLR_OUT | EP0_DATA_END), USB_EP0_CSR);
1758 return;
1759
1760 case USB_REQ_GET_STATUS:{
1761 if (lh7a40x_handle_get_status(dev, &ctrl) == 0)
1762 return;
1763
1764 case USB_REQ_CLEAR_FEATURE:
1765 case USB_REQ_SET_FEATURE:
1766 if (ctrl.bRequestType == USB_RECIP_ENDPOINT) {
1767 struct lh7a40x_ep *qep;
1768 int ep_num = (ctrl.wIndex & 0x0f);
1769
1770 /* Support only HALT feature */
1771 if (ctrl.wValue != 0 || ctrl.wLength != 0
1772 || ep_num > 3 || ep_num < 1)
1773 break;
1774
1775 qep = &dev->ep[ep_num];
1776 if (ctrl.bRequest == USB_REQ_SET_FEATURE) {
1777 DEBUG_SETUP("SET_FEATURE (%d)\n",
1778 ep_num);
1779 lh7a40x_set_halt(&qep->ep, 1);
1780 } else {
1781 DEBUG_SETUP("CLR_FEATURE (%d)\n",
1782 ep_num);
1783 lh7a40x_set_halt(&qep->ep, 0);
1784 }
1785 usb_set_index(0);
1786
1787 /* Reply with a ZLP on next IN token */
1788 usb_set((EP0_CLR_OUT | EP0_DATA_END),
1789 USB_EP0_CSR);
1790 return;
1791 }
1792 break;
1793 }
1794
1795 default:
1796 break;
1797 }
1798
1799 if (likely(dev->driver)) {
1800		/* device-to-host (IN) or no-data setup command: process immediately */
1801 spin_unlock(&dev->lock);
1802 i = dev->driver->setup(&dev->gadget, &ctrl);
1803 spin_lock(&dev->lock);
1804
1805 if (i < 0) {
1806 /* setup processing failed, force stall */
1807 DEBUG_SETUP
1808 (" --> ERROR: gadget setup FAILED (stalling), setup returned %d\n",
1809 i);
1810 usb_set_index(0);
1811 usb_set((EP0_CLR_OUT | EP0_DATA_END | EP0_SEND_STALL),
1812 USB_EP0_CSR);
1813
1814 /* ep->stopped = 1; */
1815 dev->ep0state = WAIT_FOR_SETUP;
1816 }
1817 }
1818}
1819
1820/*
1821 * DATA_STATE_NEED_ZLP
1822 */
1823static void lh7a40x_ep0_in_zlp(struct lh7a40x_udc *dev, u32 csr)
1824{
1825 DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
1826
1827 /* c.f. Table 15-14 */
1828 usb_set((EP0_IN_PKT_RDY | EP0_DATA_END), USB_EP0_CSR);
1829 dev->ep0state = WAIT_FOR_SETUP;
1830}
1831
1832/*
1833 * handle ep0 interrupt
1834 */
1835static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr)
1836{
1837 struct lh7a40x_ep *ep = &dev->ep[0];
1838 u32 csr;
1839
1840 /* Set index 0 */
1841 usb_set_index(0);
1842 csr = usb_read(USB_EP0_CSR);
1843
1844 DEBUG_EP0("%s: csr = %x\n", __FUNCTION__, csr);
1845
1846 /*
1847	 * For an overview of what we should be doing here, see Chapter
1848	 * 18.1.2.4.  We follow that outline, modified by our own global
1849	 * state indication, which hints at what we expect to be
1850	 * happening.
1851 */
1852
1853 /*
1854 * if SENT_STALL is set
1855 * - clear the SENT_STALL bit
1856 */
1857 if (csr & EP0_SENT_STALL) {
1858 DEBUG_EP0("%s: EP0_SENT_STALL is set: %x\n", __FUNCTION__, csr);
1859 usb_clear((EP0_SENT_STALL | EP0_SEND_STALL), USB_EP0_CSR);
1860 nuke(ep, -ECONNABORTED);
1861 dev->ep0state = WAIT_FOR_SETUP;
1862 return;
1863 }
1864
1865 /*
1866 * if a transfer is in progress && IN_PKT_RDY and OUT_PKT_RDY are clear
1867 * - fill EP0 FIFO
1868 * - if last packet
1869 * - set IN_PKT_RDY | DATA_END
1870 * - else
1871 * set IN_PKT_RDY
1872 */
1873 if (!(csr & (EP0_IN_PKT_RDY | EP0_OUT_PKT_RDY))) {
1874 DEBUG_EP0("%s: IN_PKT_RDY and OUT_PKT_RDY are clear\n",
1875 __FUNCTION__);
1876
1877 switch (dev->ep0state) {
1878 case DATA_STATE_XMIT:
1879 DEBUG_EP0("continue with DATA_STATE_XMIT\n");
1880 lh7a40x_ep0_in(dev, csr);
1881 return;
1882 case DATA_STATE_NEED_ZLP:
1883 DEBUG_EP0("continue with DATA_STATE_NEED_ZLP\n");
1884 lh7a40x_ep0_in_zlp(dev, csr);
1885 return;
1886 default:
1887 /* Stall? */
1888 DEBUG_EP0("Odd state!! state = %s\n",
1889 state_names[dev->ep0state]);
1890 dev->ep0state = WAIT_FOR_SETUP;
1891 /* nuke(ep, 0); */
1892 /* usb_set(EP0_SEND_STALL, ep->csr1); */
1893 break;
1894 }
1895 }
1896
1897 /*
1898 * if SETUP_END is set
1899 * - abort the last transfer
1900 * - set SERVICED_SETUP_END_BIT
1901 */
1902 if (csr & EP0_SETUP_END) {
1903 DEBUG_EP0("%s: EP0_SETUP_END is set: %x\n", __FUNCTION__, csr);
1904
1905 usb_set(EP0_CLR_SETUP_END, USB_EP0_CSR);
1906
1907 nuke(ep, 0);
1908 dev->ep0state = WAIT_FOR_SETUP;
1909 }
1910
1911 /*
1912 * if EP0_OUT_PKT_RDY is set
1913 * - read data packet from EP0 FIFO
1914 * - decode command
1915 * - if error
1916 * set SERVICED_OUT_PKT_RDY | DATA_END bits | SEND_STALL
1917 * - else
1918 * set SERVICED_OUT_PKT_RDY | DATA_END bits
1919 */
1920 if (csr & EP0_OUT_PKT_RDY) {
1921
1922 DEBUG_EP0("%s: EP0_OUT_PKT_RDY is set: %x\n", __FUNCTION__,
1923 csr);
1924
1925 switch (dev->ep0state) {
1926 case WAIT_FOR_SETUP:
1927 DEBUG_EP0("WAIT_FOR_SETUP\n");
1928 lh7a40x_ep0_setup(dev, csr);
1929 break;
1930
1931 case DATA_STATE_RECV:
1932 DEBUG_EP0("DATA_STATE_RECV\n");
1933 lh7a40x_ep0_out(dev, csr);
1934 break;
1935
1936 default:
1937 /* send stall? */
1938 DEBUG_EP0("strange state!! 2. send stall? state = %d\n",
1939 dev->ep0state);
1940 break;
1941 }
1942 }
1943}
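For orientation, the ep0 state values consulted above map onto the handlers as follows (summarized from the code above; DATA_STATE_XMIT and DATA_STATE_RECV are entered from lh7a40x_ep0_kick() once the gadget driver queues the data stage):

/*
 * WAIT_FOR_SETUP       -> lh7a40x_ep0_setup()  on OUT_PKT_RDY
 * DATA_STATE_XMIT      -> lh7a40x_ep0_in()     -> WAIT_FOR_SETUP, or
 *                                                 DATA_STATE_NEED_ZLP
 * DATA_STATE_NEED_ZLP  -> lh7a40x_ep0_in_zlp() -> WAIT_FOR_SETUP
 * DATA_STATE_RECV      -> lh7a40x_ep0_out()    -> WAIT_FOR_SETUP when done
 */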
1944
1945static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep)
1946{
1947 u32 csr;
1948
1949 usb_set_index(0);
1950 csr = usb_read(USB_EP0_CSR);
1951
1952 DEBUG_EP0("%s: %x\n", __FUNCTION__, csr);
1953
1954 /* Clear "out packet ready" */
1955 usb_set(EP0_CLR_OUT, USB_EP0_CSR);
1956
1957 if (ep_is_in(ep)) {
1958 dev->ep0state = DATA_STATE_XMIT;
1959 lh7a40x_ep0_in(dev, csr);
1960 } else {
1961 dev->ep0state = DATA_STATE_RECV;
1962 lh7a40x_ep0_out(dev, csr);
1963 }
1964}
1965
1966/* ---------------------------------------------------------------------------
1967 * device-scoped parts of the api to the usb controller hardware
1968 * ---------------------------------------------------------------------------
1969 */
1970
1971static int lh7a40x_udc_get_frame(struct usb_gadget *_gadget)
1972{
1973 u32 frame1 = usb_read(USB_FRM_NUM1); /* Least significant 8 bits */
1974 u32 frame2 = usb_read(USB_FRM_NUM2); /* Most significant 3 bits */
1975 DEBUG("%s, %p\n", __FUNCTION__, _gadget);
1976 return ((frame2 & 0x07) << 8) | (frame1 & 0xff);
1977}
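The two reads above merge into the 11-bit USB frame number. For example (sample values, not taken from hardware): USB_FRM_NUM2 = 0x05 and USB_FRM_NUM1 = 0xA3 give ((0x05 & 0x07) << 8) | 0xA3 = 0x5A3, i.e. frame 1443.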
1978
1979static int lh7a40x_udc_wakeup(struct usb_gadget *_gadget)
1980{
1981 /* host may not have enabled remote wakeup */
1982 /*if ((UDCCS0 & UDCCS0_DRWF) == 0)
1983 return -EHOSTUNREACH;
1984 udc_set_mask_UDCCR(UDCCR_RSM); */
1985 return -ENOTSUPP;
1986}
1987
1988static const struct usb_gadget_ops lh7a40x_udc_ops = {
1989 .get_frame = lh7a40x_udc_get_frame,
1990 .wakeup = lh7a40x_udc_wakeup,
1991 /* current versions must always be self-powered */
1992};
1993
1994static void nop_release(struct device *dev)
1995{
1996 DEBUG("%s %s\n", __FUNCTION__, dev->bus_id);
1997}
1998
1999static struct lh7a40x_udc memory = {
2000 .usb_address = 0,
2001
2002 .gadget = {
2003 .ops = &lh7a40x_udc_ops,
2004 .ep0 = &memory.ep[0].ep,
2005 .name = driver_name,
2006 .dev = {
2007 .bus_id = "gadget",
2008 .release = nop_release,
2009 },
2010 },
2011
2012 /* control endpoint */
2013 .ep[0] = {
2014 .ep = {
2015 .name = ep0name,
2016 .ops = &lh7a40x_ep_ops,
2017 .maxpacket = EP0_PACKETSIZE,
2018 },
2019 .dev = &memory,
2020
2021 .bEndpointAddress = 0,
2022 .bmAttributes = 0,
2023
2024 .ep_type = ep_control,
2025 .fifo = io_p2v(USB_EP0_FIFO),
2026 .csr1 = USB_EP0_CSR,
2027 .csr2 = USB_EP0_CSR,
2028 },
2029
2030 /* first group of endpoints */
2031 .ep[1] = {
2032 .ep = {
2033 .name = "ep1in-bulk",
2034 .ops = &lh7a40x_ep_ops,
2035 .maxpacket = 64,
2036 },
2037 .dev = &memory,
2038
2039 .bEndpointAddress = USB_DIR_IN | 1,
2040 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2041
2042 .ep_type = ep_bulk_in,
2043 .fifo = io_p2v(USB_EP1_FIFO),
2044 .csr1 = USB_IN_CSR1,
2045 .csr2 = USB_IN_CSR2,
2046 },
2047
2048 .ep[2] = {
2049 .ep = {
2050 .name = "ep2out-bulk",
2051 .ops = &lh7a40x_ep_ops,
2052 .maxpacket = 64,
2053 },
2054 .dev = &memory,
2055
2056 .bEndpointAddress = 2,
2057 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2058
2059 .ep_type = ep_bulk_out,
2060 .fifo = io_p2v(USB_EP2_FIFO),
2061 .csr1 = USB_OUT_CSR1,
2062 .csr2 = USB_OUT_CSR2,
2063 },
2064
2065 .ep[3] = {
2066 .ep = {
2067 .name = "ep3in-int",
2068 .ops = &lh7a40x_ep_ops,
2069 .maxpacket = 64,
2070 },
2071 .dev = &memory,
2072
2073 .bEndpointAddress = USB_DIR_IN | 3,
2074 .bmAttributes = USB_ENDPOINT_XFER_INT,
2075
2076 .ep_type = ep_interrupt,
2077 .fifo = io_p2v(USB_EP3_FIFO),
2078 .csr1 = USB_IN_CSR1,
2079 .csr2 = USB_IN_CSR2,
2080 },
2081};
2082
2083/*
2084 * probe - binds to the platform device
2085 */
2086static int lh7a40x_udc_probe(struct device *_dev)
2087{
2088 struct lh7a40x_udc *dev = &memory;
2089 int retval;
2090
2091 DEBUG("%s: %p\n", __FUNCTION__, _dev);
2092
2093 spin_lock_init(&dev->lock);
2094 dev->dev = _dev;
2095
2096 device_initialize(&dev->gadget.dev);
2097 dev->gadget.dev.parent = _dev;
2098
2099 the_controller = dev;
2100 dev_set_drvdata(_dev, dev);
2101
2102 udc_disable(dev);
2103 udc_reinit(dev);
2104
2105 /* irq setup after old hardware state is cleaned up */
2106 retval =
2107 request_irq(IRQ_USBINTR, lh7a40x_udc_irq, SA_INTERRUPT, driver_name,
2108 dev);
2109 if (retval != 0) {
2110 DEBUG(KERN_ERR "%s: can't get irq %i, err %d\n", driver_name,
2111 IRQ_USBINTR, retval);
2112 return -EBUSY;
2113 }
2114
2115 create_proc_files();
2116
2117 return retval;
2118}
2119
2120static int lh7a40x_udc_remove(struct device *_dev)
2121{
2122 struct lh7a40x_udc *dev = _dev->driver_data;
2123
2124 DEBUG("%s: %p\n", __FUNCTION__, dev);
2125
2126 udc_disable(dev);
2127 remove_proc_files();
2128 usb_gadget_unregister_driver(dev->driver);
2129
2130 free_irq(IRQ_USBINTR, dev);
2131
2132 dev_set_drvdata(_dev, 0);
2133
2134 the_controller = 0;
2135
2136 return 0;
2137}
2138
2139/*-------------------------------------------------------------------------*/
2140
2141static struct device_driver udc_driver = {
2142 .name = (char *)driver_name,
2143 .bus = &platform_bus_type,
2144 .probe = lh7a40x_udc_probe,
2145 .remove = lh7a40x_udc_remove
2146 /* FIXME power management support */
2147 /* .suspend = ... disable UDC */
2148 /* .resume = ... re-enable UDC */
2149};
2150
2151static int __init udc_init(void)
2152{
2153 DEBUG("%s: %s version %s\n", __FUNCTION__, driver_name, DRIVER_VERSION);
2154 return driver_register(&udc_driver);
2155}
2156
2157static void __exit udc_exit(void)
2158{
2159 driver_unregister(&udc_driver);
2160}
2161
2162module_init(udc_init);
2163module_exit(udc_exit);
2164
2165MODULE_DESCRIPTION(DRIVER_DESC);
2166MODULE_AUTHOR("Mikko Lahteenmaki, Bo Henriksen");
2167MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
new file mode 100644
index 000000000000..1bb455c045a9
--- /dev/null
+++ b/drivers/usb/gadget/lh7a40x_udc.h
@@ -0,0 +1,261 @@
1/*
2 * linux/drivers/usb/gadget/lh7a40x_udc.h
3 * Sharp LH7A40x on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2004 Mikko Lahteenmaki, Nordic ID
6 * Copyright (C) 2004 Bo Henriksen, Nordic ID
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#ifndef __LH7A40X_H_
25#define __LH7A40X_H_
26
27#include <linux/config.h>
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/ioport.h>
31#include <linux/types.h>
32#include <linux/version.h>
33#include <linux/errno.h>
34#include <linux/delay.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/init.h>
38#include <linux/timer.h>
39#include <linux/list.h>
40#include <linux/interrupt.h>
41#include <linux/proc_fs.h>
42#include <linux/mm.h>
43#include <linux/device.h>
44#include <linux/dma-mapping.h>
45
46#include <asm/byteorder.h>
47#include <asm/dma.h>
48#include <asm/io.h>
49#include <asm/irq.h>
50#include <asm/system.h>
51#include <asm/unaligned.h>
52#include <asm/hardware.h>
53
54#include <linux/usb_ch9.h>
55#include <linux/usb_gadget.h>
56
57/*
58 * Memory map
59 */
60
61#define USB_FA 0x80000200 // function address register
62#define USB_PM 0x80000204 // power management register
63
64#define USB_IN_INT 0x80000208 // IN interrupt register bank (EP0-EP3)
65#define USB_OUT_INT 0x80000210 // OUT interrupt register bank (EP2)
66#define USB_INT 0x80000218 // interrupt register bank
67
68#define USB_IN_INT_EN 0x8000021C // IN interrupt enable register bank
69#define USB_OUT_INT_EN 0x80000224 // OUT interrupt enable register bank
70#define USB_INT_EN 0x8000022C // USB interrupt enable register bank
71
72#define USB_FRM_NUM1 0x80000230 // Frame number1 register
73#define USB_FRM_NUM2 0x80000234 // Frame number2 register
74#define USB_INDEX 0x80000238 // index register
75
76#define USB_IN_MAXP 0x80000240 // IN MAXP register
77#define USB_IN_CSR1 0x80000244 // IN CSR1 register/EP0 CSR register
78#define USB_EP0_CSR 0x80000244 // IN CSR1 register/EP0 CSR register
79#define USB_IN_CSR2 0x80000248 // IN CSR2 register
80#define USB_OUT_MAXP 0x8000024C // OUT MAXP register
81
82#define USB_OUT_CSR1 0x80000250 // OUT CSR1 register
83#define USB_OUT_CSR2 0x80000254 // OUT CSR2 register
84#define USB_OUT_FIFO_WC1 0x80000258 // OUT FIFO write count1 register
85#define USB_OUT_FIFO_WC2 0x8000025C // OUT FIFO write count2 register
86
87#define USB_RESET 0x8000044C // USB reset register
88
89#define USB_EP0_FIFO 0x80000280
90#define USB_EP1_FIFO 0x80000284
91#define USB_EP2_FIFO 0x80000288
92#define USB_EP3_FIFO 0x8000028c
93
94/*
95 * USB reset register
96 */
97#define USB_RESET_APB (1<<1) //resets USB APB control side WRITE
98#define USB_RESET_IO (1<<0) //resets USB IO side WRITE
99
100/*
101 * USB function address register
102 */
103#define USB_FA_ADDR_UPDATE (1<<7)
104#define USB_FA_FUNCTION_ADDR (0x7F)
105
106/*
107 * Power Management register
108 */
109#define PM_USB_DCP (1<<5)
110#define PM_USB_ENABLE (1<<4)
111#define PM_USB_RESET (1<<3)
112#define PM_UC_RESUME (1<<2)
113#define PM_SUSPEND_MODE (1<<1)
114#define PM_ENABLE_SUSPEND (1<<0)
115
116/*
117 * IN interrupt register
118 */
119#define USB_IN_INT_EP3 (1<<3)
120#define USB_IN_INT_EP1 (1<<1)
121#define USB_IN_INT_EP0 (1<<0)
122
123/*
124 * OUT interrupt register
125 */
126#define USB_OUT_INT_EP2 (1<<2)
127
128/*
129 * USB interrupt register
130 */
131#define USB_INT_RESET_INT (1<<2)
132#define USB_INT_RESUME_INT (1<<1)
133#define USB_INT_SUSPEND_INT (1<<0)
134
135/*
136 * USB interrupt enable register
137 */
138#define USB_INT_EN_USB_RESET_INTER (1<<2)
139#define USB_INT_EN_RESUME_INTER (1<<1)
140#define USB_INT_EN_SUSPEND_INTER (1<<0)
141
142/*
143 * INCSR1 register
144 */
145#define USB_IN_CSR1_CLR_DATA_TOGGLE (1<<6)
146#define USB_IN_CSR1_SENT_STALL (1<<5)
147#define USB_IN_CSR1_SEND_STALL (1<<4)
148#define USB_IN_CSR1_FIFO_FLUSH (1<<3)
149#define USB_IN_CSR1_FIFO_NOT_EMPTY (1<<1)
150#define USB_IN_CSR1_IN_PKT_RDY (1<<0)
151
152/*
153 * INCSR2 register
154 */
155#define USB_IN_CSR2_AUTO_SET (1<<7)
156#define USB_IN_CSR2_USB_DMA_EN (1<<4)
157
158/*
159 * OUT CSR1 register
160 */
161#define USB_OUT_CSR1_CLR_DATA_REG (1<<7)
162#define USB_OUT_CSR1_SENT_STALL (1<<6)
163#define USB_OUT_CSR1_SEND_STALL (1<<5)
164#define USB_OUT_CSR1_FIFO_FLUSH (1<<4)
165#define USB_OUT_CSR1_FIFO_FULL (1<<1)
166#define USB_OUT_CSR1_OUT_PKT_RDY (1<<0)
167
168/*
169 * OUT CSR2 register
170 */
171#define USB_OUT_CSR2_AUTO_CLR (1<<7)
172#define USB_OUT_CSR2_USB_DMA_EN (1<<4)
173
174/*
175 * EP0 CSR
176 */
177#define EP0_CLR_SETUP_END (1<<7) /* Clear "Setup Ends" Bit (w) */
178#define EP0_CLR_OUT (1<<6) /* Clear "Out packet ready" Bit (w) */
179#define EP0_SEND_STALL (1<<5) /* Send STALL Handshake (rw) */
180#define EP0_SETUP_END (1<<4) /* Setup Ends (r) */
181
182#define EP0_DATA_END (1<<3) /* Data end (rw) */
183#define EP0_SENT_STALL (1<<2) /* Sent Stall Handshake (r) */
184#define EP0_IN_PKT_RDY (1<<1) /* In packet ready (rw) */
185#define EP0_OUT_PKT_RDY (1<<0) /* Out packet ready (r) */
186
187/* general CSR */
188#define OUT_PKT_RDY (1<<0)
189#define IN_PKT_RDY (1<<0)
190
191/*
192 * IN/OUT MAXP register
193 */
194#define USB_OUT_MAXP_MAXP (0xF)
195#define USB_IN_MAXP_MAXP (0xF)
196
197// Max packet size
198//#define EP0_PACKETSIZE 0x10
199#define EP0_PACKETSIZE 0x8
200#define EP0_MAXPACKETSIZE 0x10
201
202#define UDC_MAX_ENDPOINTS 4
203
204#define WAIT_FOR_SETUP 0
205#define DATA_STATE_XMIT 1
206#define DATA_STATE_NEED_ZLP 2
207#define WAIT_FOR_OUT_STATUS 3
208#define DATA_STATE_RECV 4
209
210/* ********************************************************************************************* */
211/* IO
212 */
213
214typedef enum ep_type {
215 ep_control, ep_bulk_in, ep_bulk_out, ep_interrupt
216} ep_type_t;
217
218struct lh7a40x_ep {
219 struct usb_ep ep;
220 struct lh7a40x_udc *dev;
221
222 const struct usb_endpoint_descriptor *desc;
223 struct list_head queue;
224 unsigned long pio_irqs;
225
226 u8 stopped;
227 u8 bEndpointAddress;
228 u8 bmAttributes;
229
230 ep_type_t ep_type;
231 u32 fifo;
232 u32 csr1;
233 u32 csr2;
234};
235
236struct lh7a40x_request {
237 struct usb_request req;
238 struct list_head queue;
239};
240
241struct lh7a40x_udc {
242 struct usb_gadget gadget;
243 struct usb_gadget_driver *driver;
244 struct device *dev;
245 spinlock_t lock;
246
247 int ep0state;
248 struct lh7a40x_ep ep[UDC_MAX_ENDPOINTS];
249
250 unsigned char usb_address;
251
252 unsigned req_pending:1, req_std:1, req_config:1;
253};
254
255extern struct lh7a40x_udc *the_controller;
256
257#define ep_is_in(EP) (((EP)->bEndpointAddress&USB_DIR_IN)==USB_DIR_IN)
258#define ep_index(EP) ((EP)->bEndpointAddress&0xF)
259#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
260
261#endif
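The ep_is_in()/ep_index()/ep_maxpacket() helpers above are all that is needed to pick per-direction registers or bits for an endpoint. A small usage sketch (hypothetical function, written against the constants defined in this header):

static u32 ep_stall_bit(struct lh7a40x_ep *ep)
{
	/* EP0 has its own CSR layout; other endpoints differ by direction */
	if (ep_index(ep) == 0)
		return EP0_SEND_STALL;
	return ep_is_in(ep) ? USB_IN_CSR1_SEND_STALL
			    : USB_OUT_CSR1_SEND_STALL;
}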
diff --git a/drivers/usb/gadget/ndis.h b/drivers/usb/gadget/ndis.h
new file mode 100644
index 000000000000..c553bbf68cab
--- /dev/null
+++ b/drivers/usb/gadget/ndis.h
@@ -0,0 +1,217 @@
1/*
2 * ndis.h
3 *
4 * ntddndis.h modified by Benedikt Spranger <b.spranger@pengutronix.de>
5 *
6 * Thanks to the cygwin development team,
7 * especially to Casper S. Hornstrup <chorns@users.sourceforge.net>
8 *
9 * THIS SOFTWARE IS NOT COPYRIGHTED
10 *
11 * This source code is offered for use in the public domain. You may
12 * use, modify or distribute it freely.
13 *
14 * This code is distributed in the hope that it will be useful but
15 * WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESS OR IMPLIED ARE HEREBY
16 * DISCLAIMED. This includes but is not limited to warranties of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
18 *
19 */
20
21#ifndef _LINUX_NDIS_H
22#define _LINUX_NDIS_H
23
24
25#define NDIS_STATUS_MULTICAST_FULL 0xC0010009
26#define NDIS_STATUS_MULTICAST_EXISTS 0xC001000A
27#define NDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B
28
29enum NDIS_DEVICE_POWER_STATE {
30 NdisDeviceStateUnspecified = 0,
31 NdisDeviceStateD0,
32 NdisDeviceStateD1,
33 NdisDeviceStateD2,
34 NdisDeviceStateD3,
35 NdisDeviceStateMaximum
36};
37
38struct NDIS_PM_WAKE_UP_CAPABILITIES {
39 enum NDIS_DEVICE_POWER_STATE MinMagicPacketWakeUp;
40 enum NDIS_DEVICE_POWER_STATE MinPatternWakeUp;
41 enum NDIS_DEVICE_POWER_STATE MinLinkChangeWakeUp;
42};
43
44/* NDIS_PNP_CAPABILITIES.Flags constants */
45#define NDIS_DEVICE_WAKE_UP_ENABLE 0x00000001
46#define NDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE 0x00000002
47#define NDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE 0x00000004
48
49struct NDIS_PNP_CAPABILITIES {
50 u32 Flags;
51 struct NDIS_PM_WAKE_UP_CAPABILITIES WakeUpCapabilities;
52};
53
54struct NDIS_PM_PACKET_PATTERN {
55 u32 Priority;
56 u32 Reserved;
57 u32 MaskSize;
58 u32 PatternOffset;
59 u32 PatternSize;
60 u32 PatternFlags;
61};
62
63
64/* Required Object IDs (OIDs) */
65#define OID_GEN_SUPPORTED_LIST 0x00010101
66#define OID_GEN_HARDWARE_STATUS 0x00010102
67#define OID_GEN_MEDIA_SUPPORTED 0x00010103
68#define OID_GEN_MEDIA_IN_USE 0x00010104
69#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
70#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
71#define OID_GEN_LINK_SPEED 0x00010107
72#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
73#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
74#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
75#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
76#define OID_GEN_VENDOR_ID 0x0001010C
77#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D
78#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
79#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
80#define OID_GEN_DRIVER_VERSION 0x00010110
81#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
82#define OID_GEN_PROTOCOL_OPTIONS 0x00010112
83#define OID_GEN_MAC_OPTIONS 0x00010113
84#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
85#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
86#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
87#define OID_GEN_SUPPORTED_GUIDS 0x00010117
88#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
89#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
90#define OID_GEN_MACHINE_NAME 0x0001021A
91#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
92#define OID_GEN_VLAN_ID 0x0001021C
93
94/* Optional OIDs */
95#define OID_GEN_MEDIA_CAPABILITIES 0x00010201
96#define OID_GEN_PHYSICAL_MEDIUM 0x00010202
97
98/* Required statistics OIDs */
99#define OID_GEN_XMIT_OK 0x00020101
100#define OID_GEN_RCV_OK 0x00020102
101#define OID_GEN_XMIT_ERROR 0x00020103
102#define OID_GEN_RCV_ERROR 0x00020104
103#define OID_GEN_RCV_NO_BUFFER 0x00020105
104
105/* Optional statistics OIDs */
106#define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
107#define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
108#define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
109#define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
110#define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
111#define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
112#define OID_GEN_DIRECTED_BYTES_RCV 0x00020207
113#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
114#define OID_GEN_MULTICAST_BYTES_RCV 0x00020209
115#define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
116#define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
117#define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
118#define OID_GEN_RCV_CRC_ERROR 0x0002020D
119#define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
120#define OID_GEN_GET_TIME_CAPS 0x0002020F
121#define OID_GEN_GET_NETCARD_TIME 0x00020210
122#define OID_GEN_NETCARD_LOAD 0x00020211
123#define OID_GEN_DEVICE_PROFILE 0x00020212
124#define OID_GEN_INIT_TIME_MS 0x00020213
125#define OID_GEN_RESET_COUNTS 0x00020214
126#define OID_GEN_MEDIA_SENSE_COUNTS 0x00020215
127#define OID_GEN_FRIENDLY_NAME 0x00020216
128#define OID_GEN_MINIPORT_INFO 0x00020217
129#define OID_GEN_RESET_VERIFY_PARAMETERS 0x00020218
130
131/* IEEE 802.3 (Ethernet) OIDs */
132#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
133
134#define OID_802_3_PERMANENT_ADDRESS 0x01010101
135#define OID_802_3_CURRENT_ADDRESS 0x01010102
136#define OID_802_3_MULTICAST_LIST 0x01010103
137#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
138#define OID_802_3_MAC_OPTIONS 0x01010105
139#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
140#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
141#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
142#define OID_802_3_XMIT_DEFERRED 0x01020201
143#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
144#define OID_802_3_RCV_OVERRUN 0x01020203
145#define OID_802_3_XMIT_UNDERRUN 0x01020204
146#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
147#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
148#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
149
150/* OID_GEN_MINIPORT_INFO constants */
151#define NDIS_MINIPORT_BUS_MASTER 0x00000001
152#define NDIS_MINIPORT_WDM_DRIVER 0x00000002
153#define NDIS_MINIPORT_SG_LIST 0x00000004
154#define NDIS_MINIPORT_SUPPORTS_MEDIA_QUERY 0x00000008
155#define NDIS_MINIPORT_INDICATES_PACKETS 0x00000010
156#define NDIS_MINIPORT_IGNORE_PACKET_QUEUE 0x00000020
157#define NDIS_MINIPORT_IGNORE_REQUEST_QUEUE 0x00000040
158#define NDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS 0x00000080
159#define NDIS_MINIPORT_INTERMEDIATE_DRIVER 0x00000100
160#define NDIS_MINIPORT_IS_NDIS_5 0x00000200
161#define NDIS_MINIPORT_IS_CO 0x00000400
162#define NDIS_MINIPORT_DESERIALIZE 0x00000800
163#define NDIS_MINIPORT_REQUIRES_MEDIA_POLLING 0x00001000
164#define NDIS_MINIPORT_SUPPORTS_MEDIA_SENSE 0x00002000
165#define NDIS_MINIPORT_NETBOOT_CARD 0x00004000
166#define NDIS_MINIPORT_PM_SUPPORTED 0x00008000
167#define NDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00010000
168#define NDIS_MINIPORT_USES_SAFE_BUFFER_APIS 0x00020000
169#define NDIS_MINIPORT_HIDDEN 0x00040000
170#define NDIS_MINIPORT_SWENUM 0x00080000
171#define NDIS_MINIPORT_SURPRISE_REMOVE_OK 0x00100000
172#define NDIS_MINIPORT_NO_HALT_ON_SUSPEND 0x00200000
173#define NDIS_MINIPORT_HARDWARE_DEVICE 0x00400000
174#define NDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS 0x00800000
175#define NDIS_MINIPORT_64BITS_DMA 0x01000000
176
177#define NDIS_MEDIUM_802_3 0x00000000
178#define NDIS_MEDIUM_802_5 0x00000001
179#define NDIS_MEDIUM_FDDI 0x00000002
180#define NDIS_MEDIUM_WAN 0x00000003
181#define NDIS_MEDIUM_LOCAL_TALK 0x00000004
182#define NDIS_MEDIUM_DIX 0x00000005
183#define NDIS_MEDIUM_ARCENT_RAW 0x00000006
184#define NDIS_MEDIUM_ARCENT_878_2 0x00000007
185#define NDIS_MEDIUM_ATM 0x00000008
186#define NDIS_MEDIUM_WIRELESS_LAN 0x00000009
187#define NDIS_MEDIUM_IRDA 0x0000000A
188#define NDIS_MEDIUM_BPC 0x0000000B
189#define NDIS_MEDIUM_CO_WAN 0x0000000C
190#define NDIS_MEDIUM_1394 0x0000000D
191
192#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
193#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
194#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
195#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
196#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
197#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
198#define NDIS_PACKET_TYPE_SMT 0x00000040
199#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
200#define NDIS_PACKET_TYPE_GROUP 0x00000100
201#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200
202#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
203#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
204
205#define NDIS_MEDIA_STATE_CONNECTED 0x00000000
206#define NDIS_MEDIA_STATE_DISCONNECTED 0x00000001
207
208#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001
209#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002
210#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004
211#define NDIS_MAC_OPTION_NO_LOOPBACK 0x00000008
212#define NDIS_MAC_OPTION_FULL_DUPLEX 0x00000010
213#define NDIS_MAC_OPTION_EOTX_INDICATION 0x00000020
214#define NDIS_MAC_OPTION_8021P_PRIORITY 0x00000040
215#define NDIS_MAC_OPTION_RESERVED 0x80000000
216
217#endif /* _LINUX_NDIS_H */
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
new file mode 100644
index 000000000000..e5457f2026cc
--- /dev/null
+++ b/drivers/usb/gadget/net2280.c
@@ -0,0 +1,2967 @@
1/*
2 * Driver for the PLX NET2280 USB device controller.
3 * Specs and errata are available from <http://www.plxtech.com>.
4 *
5 * PLX Technology Inc. (formerly NetChip Technology) supported the
6 * development of this driver.
7 *
8 *
9 * CODE STATUS HIGHLIGHTS
10 *
11 * This driver should work well with most "gadget" drivers, including
12 * the File Storage, Serial, and Ethernet/RNDIS gadget drivers
13 * as well as Gadget Zero and Gadgetfs.
14 *
15 * DMA is enabled by default. Drivers using transfer queues might use
16 * DMA chaining to remove IRQ latencies between transfers. (Except when
17 * short OUT transfers happen.) Drivers can use the req->no_interrupt
18 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
19 * and DMA chaining is enabled.
20 *
21 * Note that almost all the errata workarounds here are only needed for
22 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
23 */
24
25/*
26 * Copyright (C) 2003 David Brownell
27 * Copyright (C) 2003-2005 PLX Technology, Inc.
28 *
29 * This program is free software; you can redistribute it and/or modify
30 * it under the terms of the GNU General Public License as published by
31 * the Free Software Foundation; either version 2 of the License, or
32 * (at your option) any later version.
33 *
34 * This program is distributed in the hope that it will be useful,
35 * but WITHOUT ANY WARRANTY; without even the implied warranty of
36 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
37 * GNU General Public License for more details.
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43
44#undef DEBUG /* messages on error and most fault paths */
45#undef VERBOSE /* extra debug messages (success too) */
46
47#include <linux/config.h>
48#include <linux/module.h>
49#include <linux/pci.h>
50#include <linux/kernel.h>
51#include <linux/delay.h>
52#include <linux/ioport.h>
53#include <linux/sched.h>
54#include <linux/slab.h>
55#include <linux/smp_lock.h>
56#include <linux/errno.h>
57#include <linux/init.h>
58#include <linux/timer.h>
59#include <linux/list.h>
60#include <linux/interrupt.h>
61#include <linux/moduleparam.h>
62#include <linux/device.h>
63#include <linux/usb_ch9.h>
64#include <linux/usb_gadget.h>
65
66#include <asm/byteorder.h>
67#include <asm/io.h>
68#include <asm/irq.h>
69#include <asm/system.h>
70#include <asm/unaligned.h>
71
72
73#define DRIVER_DESC "PLX NET2280 USB Peripheral Controller"
74#define DRIVER_VERSION "2005 Feb 03"
75
76#define DMA_ADDR_INVALID (~(dma_addr_t)0)
77#define EP_DONTUSE 13 /* nonzero */
78
79#define USE_RDK_LEDS /* GPIO pins control three LEDs */
80
81
82static const char driver_name [] = "net2280";
83static const char driver_desc [] = DRIVER_DESC;
84
85static const char ep0name [] = "ep0";
86static const char *ep_name [] = {
87 ep0name,
88 "ep-a", "ep-b", "ep-c", "ep-d",
89 "ep-e", "ep-f",
90};
91
92/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
93 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
94 *
95 * The net2280 DMA engines are not tightly integrated with their FIFOs;
96 * not all cases are (yet) handled well in this driver or the silicon.
97 * Some gadget drivers work better with the dma support here than others.
98 * These two parameters let you use PIO or more aggressive DMA.
99 */
100static int use_dma = 1;
101static int use_dma_chaining = 0;
102
103/* "modprobe net2280 use_dma=n" etc */
104module_param (use_dma, bool, S_IRUGO);
105module_param (use_dma_chaining, bool, S_IRUGO);
106
107
108/* mode 0 == ep-{a,b,c,d} 1K fifo each
109 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
110 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
111 */
112static ushort fifo_mode = 0;
113
114/* "modprobe net2280 fifo_mode=1" etc */
115module_param (fifo_mode, ushort, 0644);
116
117/* enable_suspend -- When enabled, the driver will respond to
118 * USB suspend requests by powering down the NET2280. Otherwise,
119 * USB suspend requests will be ignored. This is acceptable for
120 * self-powered devices, and helps avoid some quirks.
121 */
122static int enable_suspend = 0;
123
124/* "modprobe net2280 enable_suspend=1" etc */
125module_param (enable_suspend, bool, S_IRUGO);
126
127
128#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
129
130#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
131static char *type_string (u8 bmAttributes)
132{
133 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
134 case USB_ENDPOINT_XFER_BULK: return "bulk";
135 case USB_ENDPOINT_XFER_ISOC: return "iso";
136 case USB_ENDPOINT_XFER_INT: return "intr";
137	}
138 return "control";
139}
140#endif
141
142#include "net2280.h"
143
144#define valid_bit __constant_cpu_to_le32 (1 << VALID_BIT)
145#define dma_done_ie __constant_cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
146
147/*-------------------------------------------------------------------------*/
148
149static int
150net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
151{
152 struct net2280 *dev;
153 struct net2280_ep *ep;
154 u32 max, tmp;
155 unsigned long flags;
156
157 ep = container_of (_ep, struct net2280_ep, ep);
158 if (!_ep || !desc || ep->desc || _ep->name == ep0name
159 || desc->bDescriptorType != USB_DT_ENDPOINT)
160 return -EINVAL;
161 dev = ep->dev;
162 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
163 return -ESHUTDOWN;
164
165 /* erratum 0119 workaround ties up an endpoint number */
166 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
167 return -EDOM;
168
169 /* sanity check ep-e/ep-f since their fifos are small */
170 max = le16_to_cpu (desc->wMaxPacketSize) & 0x1fff;
171 if (ep->num > 4 && max > 64)
172 return -ERANGE;
173
174 spin_lock_irqsave (&dev->lock, flags);
175 _ep->maxpacket = max & 0x7ff;
176 ep->desc = desc;
177
178 /* ep_reset() has already been called */
179 ep->stopped = 0;
180 ep->out_overflow = 0;
181
182 /* set speed-dependent max packet; may kick in high bandwidth */
183 set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);
184
185 /* FIFO lines can't go to different packets. PIO is ok, so
186 * use it instead of troublesome (non-bulk) multi-packet DMA.
187 */
188 if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
189 DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
190 ep->ep.name, ep->ep.maxpacket);
191 ep->dma = NULL;
192 }
193
194 /* set type, direction, address; reset fifo counters */
195 writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
196 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
197 if (tmp == USB_ENDPOINT_XFER_INT) {
198 /* erratum 0105 workaround prevents hs NYET */
199 if (dev->chiprev == 0100
200 && dev->gadget.speed == USB_SPEED_HIGH
201 && !(desc->bEndpointAddress & USB_DIR_IN))
202 writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
203 &ep->regs->ep_rsp);
204 } else if (tmp == USB_ENDPOINT_XFER_BULK) {
205 /* catch some particularly blatant driver bugs */
206 if ((dev->gadget.speed == USB_SPEED_HIGH
207 && max != 512)
208 || (dev->gadget.speed == USB_SPEED_FULL
209 && max > 64)) {
210 spin_unlock_irqrestore (&dev->lock, flags);
211 return -ERANGE;
212 }
213 }
214 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
215 tmp <<= ENDPOINT_TYPE;
216 tmp |= desc->bEndpointAddress;
217 tmp |= (4 << ENDPOINT_BYTE_COUNT); /* default full fifo lines */
218 tmp |= 1 << ENDPOINT_ENABLE;
219 wmb ();
220
221 /* for OUT transfers, block the rx fifo until a read is posted */
222 ep->is_in = (tmp & USB_DIR_IN) != 0;
223 if (!ep->is_in)
224 writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
225
226 writel (tmp, &ep->regs->ep_cfg);
227
228 /* enable irqs */
229 if (!ep->dma) { /* pio, per-packet */
230 tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
231 writel (tmp, &dev->regs->pciirqenb0);
232
233 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
234 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
235 | readl (&ep->regs->ep_irqenb);
236 writel (tmp, &ep->regs->ep_irqenb);
237 } else { /* dma, per-request */
238 tmp = (1 << (8 + ep->num)); /* completion */
239 tmp |= readl (&dev->regs->pciirqenb1);
240 writel (tmp, &dev->regs->pciirqenb1);
241
242 /* for short OUT transfers, dma completions can't
243 * advance the queue; do it pio-style, by hand.
244 * NOTE erratum 0112 workaround #2
245 */
246 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
247 tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
248 writel (tmp, &ep->regs->ep_irqenb);
249
250 tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
251 writel (tmp, &dev->regs->pciirqenb0);
252 }
253 }
254
255 tmp = desc->bEndpointAddress;
256 DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
257 _ep->name, tmp & 0x0f, DIR_STRING (tmp),
258 type_string (desc->bmAttributes),
259 ep->dma ? "dma" : "pio", max);
260
261 /* pci writes may still be posted */
262 spin_unlock_irqrestore (&dev->lock, flags);
263 return 0;
264}
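The checks above enforce the usual bulk packet-size rules (512 bytes only at high speed, at most 64 at full speed) and keep endpoint number 13 reserved for the erratum 0119 workaround. For illustration, a descriptor that would pass them at high speed (hypothetical values, not taken from any gadget driver here):

static const struct usb_endpoint_descriptor hs_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN | 1,	/* anything but EP_DONTUSE */
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(512),
};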
265
266static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
267{
268 u32 result;
269
270 do {
271 result = readl (ptr);
272 if (result == ~(u32)0) /* "device unplugged" */
273 return -ENODEV;
274 result &= mask;
275 if (result == done)
276 return 0;
277 udelay (1);
278 usec--;
279 } while (usec > 0);
280 return -ETIMEDOUT;
281}
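handshake() above polls a register once per microsecond until the masked bits reach the expected value, treating an all-ones read as a disconnected device. A typical call, as used further down in this file to wait for a DMA channel to stop:

	/* wait up to 50 us for DMA_ENABLE to clear */
	handshake(&dma->dmactl, (1 << DMA_ENABLE), 0, 50);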
282
283static struct usb_ep_ops net2280_ep_ops;
284
285static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
286{
287 u32 tmp;
288
289 ep->desc = NULL;
290 INIT_LIST_HEAD (&ep->queue);
291
292 ep->ep.maxpacket = ~0;
293 ep->ep.ops = &net2280_ep_ops;
294
295 /* disable the dma, irqs, endpoint... */
296 if (ep->dma) {
297 writel (0, &ep->dma->dmactl);
298 writel ( (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
299 | (1 << DMA_TRANSACTION_DONE_INTERRUPT)
300 | (1 << DMA_ABORT)
301 , &ep->dma->dmastat);
302
303 tmp = readl (&regs->pciirqenb0);
304 tmp &= ~(1 << ep->num);
305 writel (tmp, &regs->pciirqenb0);
306 } else {
307 tmp = readl (&regs->pciirqenb1);
308 tmp &= ~(1 << (8 + ep->num)); /* completion */
309 writel (tmp, &regs->pciirqenb1);
310 }
311 writel (0, &ep->regs->ep_irqenb);
312
313 /* init to our chosen defaults, notably so that we NAK OUT
314 * packets until the driver queues a read (+note erratum 0112)
315 */
316 tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
317 | (1 << SET_NAK_OUT_PACKETS)
318 | (1 << CLEAR_EP_HIDE_STATUS_PHASE)
319 | (1 << CLEAR_INTERRUPT_MODE);
320
321 if (ep->num != 0) {
322 tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
323 | (1 << CLEAR_ENDPOINT_HALT);
324 }
325 writel (tmp, &ep->regs->ep_rsp);
326
327 /* scrub most status bits, and flush any fifo state */
328 writel ( (1 << TIMEOUT)
329 | (1 << USB_STALL_SENT)
330 | (1 << USB_IN_NAK_SENT)
331 | (1 << USB_IN_ACK_RCVD)
332 | (1 << USB_OUT_PING_NAK_SENT)
333 | (1 << USB_OUT_ACK_SENT)
334 | (1 << FIFO_OVERFLOW)
335 | (1 << FIFO_UNDERFLOW)
336 | (1 << FIFO_FLUSH)
337 | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
338 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
339 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
340 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
341 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
342 | (1 << DATA_IN_TOKEN_INTERRUPT)
343 , &ep->regs->ep_stat);
344
345 /* fifo size is handled separately */
346}
347
348static void nuke (struct net2280_ep *);
349
350static int net2280_disable (struct usb_ep *_ep)
351{
352 struct net2280_ep *ep;
353 unsigned long flags;
354
355 ep = container_of (_ep, struct net2280_ep, ep);
356 if (!_ep || !ep->desc || _ep->name == ep0name)
357 return -EINVAL;
358
359 spin_lock_irqsave (&ep->dev->lock, flags);
360 nuke (ep);
361 ep_reset (ep->dev->regs, ep);
362
363 VDEBUG (ep->dev, "disabled %s %s\n",
364 ep->dma ? "dma" : "pio", _ep->name);
365
366 /* synch memory views with the device */
367 (void) readl (&ep->regs->ep_cfg);
368
369 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
370 ep->dma = &ep->dev->dma [ep->num - 1];
371
372 spin_unlock_irqrestore (&ep->dev->lock, flags);
373 return 0;
374}
375
376/*-------------------------------------------------------------------------*/
377
378static struct usb_request *
379net2280_alloc_request (struct usb_ep *_ep, int gfp_flags)
380{
381 struct net2280_ep *ep;
382 struct net2280_request *req;
383
384 if (!_ep)
385 return NULL;
386 ep = container_of (_ep, struct net2280_ep, ep);
387
388 req = kmalloc (sizeof *req, gfp_flags);
389 if (!req)
390 return NULL;
391
392 memset (req, 0, sizeof *req);
393 req->req.dma = DMA_ADDR_INVALID;
394 INIT_LIST_HEAD (&req->queue);
395
396 /* this dma descriptor may be swapped with the previous dummy */
397 if (ep->dma) {
398 struct net2280_dma *td;
399
400 td = pci_pool_alloc (ep->dev->requests, gfp_flags,
401 &req->td_dma);
402 if (!td) {
403 kfree (req);
404 return NULL;
405 }
406 td->dmacount = 0; /* not VALID */
407 td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
408 td->dmadesc = td->dmaaddr;
409 req->td = td;
410 }
411 return &req->req;
412}
413
414static void
415net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
416{
417 struct net2280_ep *ep;
418 struct net2280_request *req;
419
420 ep = container_of (_ep, struct net2280_ep, ep);
421 if (!_ep || !_req)
422 return;
423
424 req = container_of (_req, struct net2280_request, req);
425 WARN_ON (!list_empty (&req->queue));
426 if (req->td)
427 pci_pool_free (ep->dev->requests, req->td, req->td_dma);
428 kfree (req);
429}
430
431/*-------------------------------------------------------------------------*/
432
433#undef USE_KMALLOC
434
435/* many common platforms have dma-coherent caches, which means that it's
436 * safe to use kmalloc() memory for all i/o buffers without using any
437 * cache flushing calls. (unless you're trying to share cache lines
438 * between dma and non-dma activities, which is a slow idea in any case.)
439 *
440 * other platforms need more care, with 2.5 having a moderately general
441 * solution (which falls down for allocations smaller than one page)
442 * that improves significantly on the 2.4 PCI allocators by removing
443 * the restriction that memory never be freed in_interrupt().
444 */
445#if defined(CONFIG_X86)
446#define USE_KMALLOC
447
448#elif defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
449#define USE_KMALLOC
450
451#elif defined(CONFIG_MIPS) && !defined(CONFIG_NONCOHERENT_IO)
452#define USE_KMALLOC
453
454/* FIXME there are other cases, including an x86-64 one ... */
455#endif
456
457/* allocating buffers this way eliminates dma mapping overhead, which
458 * on some platforms will mean eliminating a per-io buffer copy. with
459 * some kinds of system caches, further tweaks may still be needed.
460 */
461static void *
462net2280_alloc_buffer (
463 struct usb_ep *_ep,
464 unsigned bytes,
465 dma_addr_t *dma,
466 int gfp_flags
467)
468{
469 void *retval;
470 struct net2280_ep *ep;
471
472 ep = container_of (_ep, struct net2280_ep, ep);
473 if (!_ep)
474 return NULL;
475 *dma = DMA_ADDR_INVALID;
476
477#if defined(USE_KMALLOC)
478 retval = kmalloc(bytes, gfp_flags);
479 if (retval)
480 *dma = virt_to_phys(retval);
481#else
482 if (ep->dma) {
483 /* the main problem with this call is that it wastes memory
484 * on typical 1/N page allocations: it allocates 1-N pages.
485 */
486#warning Using dma_alloc_coherent even with buffers smaller than a page.
487 retval = dma_alloc_coherent(&ep->dev->pdev->dev,
488 bytes, dma, gfp_flags);
489 } else
490 retval = kmalloc(bytes, gfp_flags);
491#endif
492 return retval;
493}
494
495static void
496net2280_free_buffer (
497 struct usb_ep *_ep,
498 void *buf,
499 dma_addr_t dma,
500 unsigned bytes
501) {
502 /* free memory into the right allocator */
503#ifndef USE_KMALLOC
504 if (dma != DMA_ADDR_INVALID) {
505 struct net2280_ep *ep;
506
507 ep = container_of(_ep, struct net2280_ep, ep);
508 if (!_ep)
509 return;
510 dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
511 } else
512#endif
513 kfree (buf);
514}
515
516/*-------------------------------------------------------------------------*/
517
518/* load a packet into the fifo we use for usb IN transfers.
519 * works for all endpoints.
520 *
521 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
522 * at a time, but this code is simpler because it knows it only writes
523 * one packet. ep-a..ep-d should use dma instead.
524 */
525static void
526write_fifo (struct net2280_ep *ep, struct usb_request *req)
527{
528 struct net2280_ep_regs __iomem *regs = ep->regs;
529 u8 *buf;
530 u32 tmp;
531 unsigned count, total;
532
533 /* INVARIANT: fifo is currently empty. (testable) */
534
535 if (req) {
536 buf = req->buf + req->actual;
537 prefetch (buf);
538 total = req->length - req->actual;
539 } else {
540 total = 0;
541 buf = NULL;
542 }
543
544 /* write just one packet at a time */
545 count = ep->ep.maxpacket;
546 if (count > total) /* min() cannot be used on a bitfield */
547 count = total;
548
549 VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
550 ep->ep.name, count,
551 (count != ep->ep.maxpacket) ? " (short)" : "",
552 req);
553 while (count >= 4) {
554 /* NOTE be careful if you try to align these. fifo lines
555 * should normally be full (4 bytes) and successive partial
556 * lines are ok only in certain cases.
557 */
558 tmp = get_unaligned ((u32 *)buf);
559 cpu_to_le32s (&tmp);
560 writel (tmp, &regs->ep_data);
561 buf += 4;
562 count -= 4;
563 }
564
565 /* last fifo entry is "short" unless we wrote a full packet.
566 * also explicitly validate last word in (periodic) transfers
567 * when maxpacket is not a multiple of 4 bytes.
568 */
569 if (count || total < ep->ep.maxpacket) {
570 tmp = count ? get_unaligned ((u32 *)buf) : count;
571 cpu_to_le32s (&tmp);
572 set_fifo_bytecount (ep, count & 0x03);
573 writel (tmp, &regs->ep_data);
574 }
575
576 /* pci writes may still be posted */
577}
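As a worked example of the loop above: writing a 7-byte packet produces one full 32-bit FIFO write, then set_fifo_bytecount(ep, 3) followed by one more write, where the byte count tells the controller that only three bytes of that last line are valid.

	/* 7 bytes: 7 / 4 = 1 full fifo line (one writel), 7 % 4 = 3 trailing
	 * bytes validated via set_fifo_bytecount(ep, 3) plus one more writel
	 */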
578
579/* work around erratum 0106: PCI and USB race over the OUT fifo.
580 * caller guarantees chiprev 0100, out endpoint is NAKing, and
581 * there's no real data in the fifo.
582 *
583 * NOTE: also used in cases where that erratum doesn't apply:
584 * where the host wrote "too much" data to us.
585 */
586static void out_flush (struct net2280_ep *ep)
587{
588 u32 __iomem *statp;
589 u32 tmp;
590
591 ASSERT_OUT_NAKING (ep);
592
593 statp = &ep->regs->ep_stat;
594 writel ( (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
595 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
596 , statp);
597 writel ((1 << FIFO_FLUSH), statp);
598 mb ();
599 tmp = readl (statp);
600 if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
601 /* high speed did bulk NYET; fifo isn't filling */
602 && ep->dev->gadget.speed == USB_SPEED_FULL) {
603 unsigned usec;
604
605 usec = 50; /* 64 byte bulk/interrupt */
606 handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
607 (1 << USB_OUT_PING_NAK_SENT), usec);
608 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
609 }
610}
611
612/* unload packet(s) from the fifo we use for usb OUT transfers.
613 * returns true iff the request completed, because of short packet
614 * or the request buffer having filled with full packets.
615 *
616 * for ep-a..ep-d this will read multiple packets out when they
617 * have been accepted.
618 */
619static int
620read_fifo (struct net2280_ep *ep, struct net2280_request *req)
621{
622 struct net2280_ep_regs __iomem *regs = ep->regs;
623 u8 *buf = req->req.buf + req->req.actual;
624 unsigned count, tmp, is_short;
625 unsigned cleanup = 0, prevent = 0;
626
627 /* erratum 0106 ... packets coming in during fifo reads might
628 * be incompletely rejected. not all cases have workarounds.
629 */
630 if (ep->dev->chiprev == 0x0100
631 && ep->dev->gadget.speed == USB_SPEED_FULL) {
632 udelay (1);
633 tmp = readl (&ep->regs->ep_stat);
634 if ((tmp & (1 << NAK_OUT_PACKETS)))
635 cleanup = 1;
636 else if ((tmp & (1 << FIFO_FULL))) {
637 start_out_naking (ep);
638 prevent = 1;
639 }
640 /* else: hope we don't see the problem */
641 }
642
643 /* never overflow the rx buffer. the fifo reads packets until
644 * it sees a short one; we might not be ready for them all.
645 */
646 prefetchw (buf);
647 count = readl (&regs->ep_avail);
648 if (unlikely (count == 0)) {
649 udelay (1);
650 tmp = readl (&ep->regs->ep_stat);
651 count = readl (&regs->ep_avail);
652 /* handled that data already? */
653 if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
654 return 0;
655 }
656
657 tmp = req->req.length - req->req.actual;
658 if (count > tmp) {
659 /* as with DMA, data overflow gets flushed */
660 if ((tmp % ep->ep.maxpacket) != 0) {
661 ERROR (ep->dev,
662 "%s out fifo %d bytes, expected %d\n",
663 ep->ep.name, count, tmp);
664 req->req.status = -EOVERFLOW;
665 cleanup = 1;
666 /* NAK_OUT_PACKETS will be set, so flushing is safe;
667 * the next read will start with the next packet
668 */
669 } /* else it's a ZLP, no worries */
670 count = tmp;
671 }
672 req->req.actual += count;
673
674 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
675
676 VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
677 ep->ep.name, count, is_short ? " (short)" : "",
678 cleanup ? " flush" : "", prevent ? " nak" : "",
679 req, req->req.actual, req->req.length);
680
681 while (count >= 4) {
682 tmp = readl (&regs->ep_data);
683 cpu_to_le32s (&tmp);
684 put_unaligned (tmp, (u32 *)buf);
685 buf += 4;
686 count -= 4;
687 }
688 if (count) {
689 tmp = readl (&regs->ep_data);
690 /* LE conversion is implicit here: */
691 do {
692 *buf++ = (u8) tmp;
693 tmp >>= 8;
694 } while (--count);
695 }
696 if (cleanup)
697 out_flush (ep);
698 if (prevent) {
699 writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
700 (void) readl (&ep->regs->ep_rsp);
701 }
702
703 return is_short || ((req->req.actual == req->req.length)
704 && !req->req.zero);
705}
706
707/* fill out dma descriptor to match a given request */
708static void
709fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
710{
711 struct net2280_dma *td = req->td;
712 u32 dmacount = req->req.length;
713
714 /* don't let DMA continue after a short OUT packet,
715 * so overruns can't affect the next transfer.
716 * in case of overruns on max-size packets, we can't
717 * stop the fifo from filling but we can flush it.
718 */
719 if (ep->is_in)
720 dmacount |= (1 << DMA_DIRECTION);
721 else if ((dmacount % ep->ep.maxpacket) != 0)
722 dmacount |= (1 << END_OF_CHAIN);
723
724 req->valid = valid;
725 if (valid)
726 dmacount |= (1 << VALID_BIT);
727 if (likely(!req->req.no_interrupt || !use_dma_chaining))
728 dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);
729
730 /* td->dmadesc = previously set by caller */
731 td->dmaaddr = cpu_to_le32 (req->req.dma);
732
733 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
734 wmb ();
735 td->dmacount = cpu_to_le32p (&dmacount);
736}
737
738static const u32 dmactl_default =
739 (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
740 | (1 << DMA_CLEAR_COUNT_ENABLE)
741 /* erratum 0116 workaround part 1 (use POLLING) */
742 | (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
743 | (1 << DMA_VALID_BIT_POLLING_ENABLE)
744 | (1 << DMA_VALID_BIT_ENABLE)
745 | (1 << DMA_SCATTER_GATHER_ENABLE)
746 /* erratum 0116 workaround part 2 (no AUTOSTART) */
747 | (1 << DMA_ENABLE);
748
749static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
750{
751 handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
752}
753
754static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
755{
756 writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
757 spin_stop_dma (dma);
758}
759
760static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
761{
762 struct net2280_dma_regs __iomem *dma = ep->dma;
763
764 writel ((1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION),
765 &dma->dmacount);
766 writel (readl (&dma->dmastat), &dma->dmastat);
767
768 writel (td_dma, &dma->dmadesc);
769 writel (dmactl, &dma->dmactl);
770
771 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
772 (void) readl (&ep->dev->pci->pcimstctl);
773
774 writel ((1 << DMA_START), &dma->dmastat);
775
776 if (!ep->is_in)
777 stop_out_naking (ep);
778}
779
780static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
781{
782 u32 tmp;
783 struct net2280_dma_regs __iomem *dma = ep->dma;
784
785 /* FIXME can't use DMA for ZLPs */
786
787 /* on this path we "know" there's no dma active (yet) */
788 WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
789 writel (0, &ep->dma->dmactl);
790
791 /* previous OUT packet might have been short */
792 if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
793 & (1 << NAK_OUT_PACKETS)) != 0) {
794 writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
795 &ep->regs->ep_stat);
796
797 tmp = readl (&ep->regs->ep_avail);
798 if (tmp) {
799 writel (readl (&dma->dmastat), &dma->dmastat);
800
801 /* transfer all/some fifo data */
802 writel (req->req.dma, &dma->dmaaddr);
803 tmp = min (tmp, req->req.length);
804
805 /* dma irq, faking scatterlist status */
806 req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
807 writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
808 | tmp, &dma->dmacount);
809 req->td->dmadesc = 0;
810 req->valid = 1;
811
812 writel ((1 << DMA_ENABLE), &dma->dmactl);
813 writel ((1 << DMA_START), &dma->dmastat);
814 return;
815 }
816 }
817
818 tmp = dmactl_default;
819
820 /* force packet boundaries between dma requests, but prevent the
821 * controller from automagically writing a last "short" packet
822 * (zero length) unless the driver explicitly said to do that.
823 */
824 if (ep->is_in) {
825 if (likely ((req->req.length % ep->ep.maxpacket) != 0
826 || req->req.zero)) {
827 tmp |= (1 << DMA_FIFO_VALIDATE);
828 ep->in_fifo_validate = 1;
829 } else
830 ep->in_fifo_validate = 0;
831 }
832
833 /* init req->td, pointing to the current dummy */
834 req->td->dmadesc = cpu_to_le32 (ep->td_dma);
835 fill_dma_desc (ep, req, 1);
836
837 if (!use_dma_chaining)
838 req->td->dmacount |= __constant_cpu_to_le32 (1 << END_OF_CHAIN);
839
840 start_queue (ep, tmp, req->td_dma);
841}
842
843static inline void
844queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
845{
846 struct net2280_dma *end;
847 dma_addr_t tmp;
848
849 /* swap new dummy for old, link; fill and maybe activate */
850 end = ep->dummy;
851 ep->dummy = req->td;
852 req->td = end;
853
854 tmp = ep->td_dma;
855 ep->td_dma = req->td_dma;
856 req->td_dma = tmp;
857
858 end->dmadesc = cpu_to_le32 (ep->td_dma);
859
860 fill_dma_desc (ep, req, valid);
861}
862
863static void
864done (struct net2280_ep *ep, struct net2280_request *req, int status)
865{
866 struct net2280 *dev;
867 unsigned stopped = ep->stopped;
868
869 list_del_init (&req->queue);
870
871 if (req->req.status == -EINPROGRESS)
872 req->req.status = status;
873 else
874 status = req->req.status;
875
876 dev = ep->dev;
877 if (req->mapped) {
878 pci_unmap_single (dev->pdev, req->req.dma, req->req.length,
879 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
880 req->req.dma = DMA_ADDR_INVALID;
881 req->mapped = 0;
882 }
883
884 if (status && status != -ESHUTDOWN)
885 VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
886 ep->ep.name, &req->req, status,
887 req->req.actual, req->req.length);
888
889 /* don't modify queue heads during completion callback */
890 ep->stopped = 1;
891 spin_unlock (&dev->lock);
892 req->req.complete (&ep->ep, &req->req);
893 spin_lock (&dev->lock);
894 ep->stopped = stopped;
895}
896
897/*-------------------------------------------------------------------------*/
898
899static int
900net2280_queue (struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
901{
902 struct net2280_request *req;
903 struct net2280_ep *ep;
904 struct net2280 *dev;
905 unsigned long flags;
906
907 /* we always require a cpu-view buffer, so that we can
908 * always use pio (as fallback or whatever).
909 */
910 req = container_of (_req, struct net2280_request, req);
911 if (!_req || !_req->complete || !_req->buf
912 || !list_empty (&req->queue))
913 return -EINVAL;
914 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
915 return -EDOM;
916 ep = container_of (_ep, struct net2280_ep, ep);
917 if (!_ep || (!ep->desc && ep->num != 0))
918 return -EINVAL;
919 dev = ep->dev;
920 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
921 return -ESHUTDOWN;
922
923 /* FIXME implement PIO fallback for ZLPs with DMA */
924 if (ep->dma && _req->length == 0)
925 return -EOPNOTSUPP;
926
927 /* set up dma mapping in case the caller didn't */
928 if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
929 _req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
930 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
931 req->mapped = 1;
932 }
933
934#if 0
935 VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
936 _ep->name, _req, _req->length, _req->buf);
937#endif
938
939 spin_lock_irqsave (&dev->lock, flags);
940
941 _req->status = -EINPROGRESS;
942 _req->actual = 0;
943
944 /* kickstart this i/o queue? */
945 if (list_empty (&ep->queue) && !ep->stopped) {
946 /* use DMA if the endpoint supports it, else pio */
947 if (ep->dma)
948 start_dma (ep, req);
949 else {
950 /* maybe there's no control data, just status ack */
951 if (ep->num == 0 && _req->length == 0) {
952 allow_status (ep);
953 done (ep, req, 0);
954 VDEBUG (dev, "%s status ack\n", ep->ep.name);
955 goto done;
956 }
957
958 /* PIO ... stuff the fifo, or unblock it. */
959 if (ep->is_in)
960 write_fifo (ep, _req);
961 else if (list_empty (&ep->queue)) {
962 u32 s;
963
964 /* OUT FIFO might have packet(s) buffered */
965 s = readl (&ep->regs->ep_stat);
966 if ((s & (1 << FIFO_EMPTY)) == 0) {
967 /* note: _req->short_not_ok is
968 * ignored here since PIO _always_
969 * stops queue advance here, and
970 * _req->status doesn't change for
971 * short reads (only _req->actual)
972 */
973 if (read_fifo (ep, req)) {
974 done (ep, req, 0);
975 if (ep->num == 0)
976 allow_status (ep);
977 /* don't queue it */
978 req = NULL;
979 } else
980 s = readl (&ep->regs->ep_stat);
981 }
982
983 /* don't NAK, let the fifo fill */
984 if (req && (s & (1 << NAK_OUT_PACKETS)))
985 writel ((1 << CLEAR_NAK_OUT_PACKETS),
986 &ep->regs->ep_rsp);
987 }
988 }
989
990 } else if (ep->dma) {
991 int valid = 1;
992
993 if (ep->is_in) {
994 int expect;
995
996 /* preventing magic zlps is per-engine state, not
997 * per-transfer; irq logic must recover hiccups.
998 */
999 expect = likely (req->req.zero
1000 || (req->req.length % ep->ep.maxpacket) != 0);
1001 if (expect != ep->in_fifo_validate)
1002 valid = 0;
1003 }
1004 queue_dma (ep, req, valid);
1005
1006 } /* else the irq handler advances the queue. */
1007
1008 if (req)
1009 list_add_tail (&req->queue, &ep->queue);
1010done:
1011 spin_unlock_irqrestore (&dev->lock, flags);
1012
1013 /* pci writes may still be posted */
1014 return 0;
1015}
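
/* Editor's sketch (guarded out, not part of the driver): how a gadget driver
 * typically hands a request to net2280_queue() above, via the usb_ep_queue()
 * wrapper from the gadget header.  The endpoint, names, and sizes are
 * illustrative only.  usb_ep_alloc_buffer() can hand back a buffer that is
 * already DMA-mapped, in which case the pci_map_single() fallback above is
 * skipped.
 */
#if 0
static void example_complete (struct usb_ep *ep, struct usb_request *req)
{
	/* runs with the device lock dropped; see done() above */
}

static int example_submit (struct usb_ep *ep)	/* e.g. a bulk endpoint */
{
	struct usb_request	*req;

	req = usb_ep_alloc_request (ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->length = 512;
	req->buf = usb_ep_alloc_buffer (ep, req->length, &req->dma, GFP_ATOMIC);
	if (!req->buf) {
		usb_ep_free_request (ep, req);
		return -ENOMEM;
	}
	req->complete = example_complete;
	return usb_ep_queue (ep, req, GFP_ATOMIC);
}
#endif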
1016
1017static inline void
1018dma_done (
1019 struct net2280_ep *ep,
1020 struct net2280_request *req,
1021 u32 dmacount,
1022 int status
1023)
1024{
1025 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
1026 done (ep, req, status);
1027}
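
/* Editor's sketch (guarded out): the residue arithmetic dma_done() uses.
 * The values are invented for illustration: a 1024 byte OUT request where
 * the engine stopped with 512 bytes still outstanding in the byte-count
 * field of dmacount.
 */
#if 0
static unsigned example_residue (void)
{
	u32	dmacount = 512;		/* low bits of the retired dmacount */
	unsigned length = 1024;		/* req->req.length */

	return length - (DMA_BYTE_COUNT_MASK & dmacount);	/* 512 done */
}
#endif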
1028
1029static void restart_dma (struct net2280_ep *ep);
1030
1031static void scan_dma_completions (struct net2280_ep *ep)
1032{
1033 /* only look at descriptors that were "naturally" retired,
1034 * so fifo and list head state won't matter
1035 */
1036 while (!list_empty (&ep->queue)) {
1037 struct net2280_request *req;
1038 u32 tmp;
1039
1040 req = list_entry (ep->queue.next,
1041 struct net2280_request, queue);
1042 if (!req->valid)
1043 break;
1044 rmb ();
1045 tmp = le32_to_cpup (&req->td->dmacount);
1046 if ((tmp & (1 << VALID_BIT)) != 0)
1047 break;
1048
1049 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1050 * cases where DMA must be aborted; this code handles
1051 * all non-abort DMA completions.
1052 */
1053 if (unlikely (req->td->dmadesc == 0)) {
1054 /* paranoia */
1055 tmp = readl (&ep->dma->dmacount);
1056 if (tmp & DMA_BYTE_COUNT_MASK)
1057 break;
1058 /* single transfer mode */
1059 dma_done (ep, req, tmp, 0);
1060 break;
1061 } else if (!ep->is_in
1062 && (req->req.length % ep->ep.maxpacket) != 0) {
1063 tmp = readl (&ep->regs->ep_stat);
1064
1065 /* AVOID TROUBLE HERE by not issuing short reads from
1066			 * your gadget driver. That helps avoid errata 0121,
1067 * 0122, and 0124; not all cases trigger the warning.
1068 */
1069 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
1070 WARN (ep->dev, "%s lost packet sync!\n",
1071 ep->ep.name);
1072 req->req.status = -EOVERFLOW;
1073 } else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
1074 /* fifo gets flushed later */
1075 ep->out_overflow = 1;
1076 DEBUG (ep->dev, "%s dma, discard %d len %d\n",
1077 ep->ep.name, tmp,
1078 req->req.length);
1079 req->req.status = -EOVERFLOW;
1080 }
1081 }
1082 dma_done (ep, req, tmp, 0);
1083 }
1084}
1085
1086static void restart_dma (struct net2280_ep *ep)
1087{
1088 struct net2280_request *req;
1089 u32 dmactl = dmactl_default;
1090
1091 if (ep->stopped)
1092 return;
1093 req = list_entry (ep->queue.next, struct net2280_request, queue);
1094
1095 if (!use_dma_chaining) {
1096 start_dma (ep, req);
1097 return;
1098 }
1099
1100	/* the 2280 will be processing the queue unless the queue hiccups after
1101 * the previous transfer:
1102 * IN: wanted automagic zlp, head doesn't (or vice versa)
1103 * DMA_FIFO_VALIDATE doesn't init from dma descriptors.
1104 * OUT: was "usb-short", we must restart.
1105 */
1106 if (ep->is_in && !req->valid) {
1107 struct net2280_request *entry, *prev = NULL;
1108 int reqmode, done = 0;
1109
1110 DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
1111 ep->in_fifo_validate = likely (req->req.zero
1112 || (req->req.length % ep->ep.maxpacket) != 0);
1113 if (ep->in_fifo_validate)
1114 dmactl |= (1 << DMA_FIFO_VALIDATE);
1115 list_for_each_entry (entry, &ep->queue, queue) {
1116 u32 dmacount;
1117
1118 if (entry == req)
1119 continue;
1120 dmacount = entry->td->dmacount;
1121 if (!done) {
1122 reqmode = likely (entry->req.zero
1123 || (entry->req.length
1124 % ep->ep.maxpacket) != 0);
1125 if (reqmode == ep->in_fifo_validate) {
1126 entry->valid = 1;
1127 dmacount |= valid_bit;
1128 entry->td->dmacount = dmacount;
1129 prev = entry;
1130 continue;
1131 } else {
1132 /* force a hiccup */
1133 prev->td->dmacount |= dma_done_ie;
1134 done = 1;
1135 }
1136 }
1137
1138 /* walk the rest of the queue so unlinks behave */
1139 entry->valid = 0;
1140 dmacount &= ~valid_bit;
1141 entry->td->dmacount = dmacount;
1142 prev = entry;
1143 }
1144 }
1145
1146 writel (0, &ep->dma->dmactl);
1147 start_queue (ep, dmactl, req->td_dma);
1148}
1149
1150static void abort_dma (struct net2280_ep *ep)
1151{
1152 /* abort the current transfer */
1153 if (likely (!list_empty (&ep->queue))) {
1154 /* FIXME work around errata 0121, 0122, 0124 */
1155 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
1156 spin_stop_dma (ep->dma);
1157 } else
1158 stop_dma (ep->dma);
1159 scan_dma_completions (ep);
1160}
1161
1162/* dequeue ALL requests */
1163static void nuke (struct net2280_ep *ep)
1164{
1165 struct net2280_request *req;
1166
1167 /* called with spinlock held */
1168 ep->stopped = 1;
1169 if (ep->dma)
1170 abort_dma (ep);
1171 while (!list_empty (&ep->queue)) {
1172 req = list_entry (ep->queue.next,
1173 struct net2280_request,
1174 queue);
1175 done (ep, req, -ESHUTDOWN);
1176 }
1177}
1178
1179/* dequeue JUST ONE request */
1180static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
1181{
1182 struct net2280_ep *ep;
1183 struct net2280_request *req;
1184 unsigned long flags;
1185 u32 dmactl;
1186 int stopped;
1187
1188 ep = container_of (_ep, struct net2280_ep, ep);
1189 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
1190 return -EINVAL;
1191
1192 spin_lock_irqsave (&ep->dev->lock, flags);
1193 stopped = ep->stopped;
1194
1195 /* quiesce dma while we patch the queue */
1196 dmactl = 0;
1197 ep->stopped = 1;
1198 if (ep->dma) {
1199 dmactl = readl (&ep->dma->dmactl);
1200 /* WARNING erratum 0127 may kick in ... */
1201 stop_dma (ep->dma);
1202 scan_dma_completions (ep);
1203 }
1204
1205 /* make sure it's still queued on this endpoint */
1206 list_for_each_entry (req, &ep->queue, queue) {
1207 if (&req->req == _req)
1208 break;
1209 }
1210 if (&req->req != _req) {
1211 spin_unlock_irqrestore (&ep->dev->lock, flags);
1212 return -EINVAL;
1213 }
1214
1215 /* queue head may be partially complete. */
1216 if (ep->queue.next == &req->queue) {
1217 if (ep->dma) {
1218 DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
1219 _req->status = -ECONNRESET;
1220 abort_dma (ep);
1221 if (likely (ep->queue.next == &req->queue)) {
1222 // NOTE: misreports single-transfer mode
1223 req->td->dmacount = 0; /* invalidate */
1224 dma_done (ep, req,
1225 readl (&ep->dma->dmacount),
1226 -ECONNRESET);
1227 }
1228 } else {
1229 DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
1230 done (ep, req, -ECONNRESET);
1231 }
1232 req = NULL;
1233
1234 /* patch up hardware chaining data */
1235 } else if (ep->dma && use_dma_chaining) {
1236 if (req->queue.prev == ep->queue.next) {
1237 writel (le32_to_cpu (req->td->dmadesc),
1238 &ep->dma->dmadesc);
1239 if (req->td->dmacount & dma_done_ie)
1240 writel (readl (&ep->dma->dmacount)
1241 | dma_done_ie,
1242 &ep->dma->dmacount);
1243 } else {
1244 struct net2280_request *prev;
1245
1246 prev = list_entry (req->queue.prev,
1247 struct net2280_request, queue);
1248 prev->td->dmadesc = req->td->dmadesc;
1249 if (req->td->dmacount & dma_done_ie)
1250 prev->td->dmacount |= dma_done_ie;
1251 }
1252 }
1253
1254 if (req)
1255 done (ep, req, -ECONNRESET);
1256 ep->stopped = stopped;
1257
1258 if (ep->dma) {
1259 /* turn off dma on inactive queues */
1260 if (list_empty (&ep->queue))
1261 stop_dma (ep->dma);
1262 else if (!ep->stopped) {
1263 /* resume current request, or start new one */
1264 if (req)
1265 writel (dmactl, &ep->dma->dmactl);
1266 else
1267 start_dma (ep, list_entry (ep->queue.next,
1268 struct net2280_request, queue));
1269 }
1270 }
1271
1272 spin_unlock_irqrestore (&ep->dev->lock, flags);
1273 return 0;
1274}
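
/* Editor's sketch (guarded out): cancelling an outstanding transfer from a
 * gadget driver reaches net2280_dequeue() above through the usb_ep_dequeue()
 * wrapper; the request then completes with -ECONNRESET.  Names are
 * illustrative.
 */
#if 0
static void example_cancel (struct usb_ep *ep, struct usb_request *req)
{
	int	status;

	status = usb_ep_dequeue (ep, req);
	if (status < 0) {
		/* not queued on this endpoint (maybe it already completed) */
	}
}
#endif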
1275
1276/*-------------------------------------------------------------------------*/
1277
1278static int net2280_fifo_status (struct usb_ep *_ep);
1279
1280static int
1281net2280_set_halt (struct usb_ep *_ep, int value)
1282{
1283 struct net2280_ep *ep;
1284 unsigned long flags;
1285 int retval = 0;
1286
1287 ep = container_of (_ep, struct net2280_ep, ep);
1288 if (!_ep || (!ep->desc && ep->num != 0))
1289 return -EINVAL;
1290 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1291 return -ESHUTDOWN;
1292 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1293 == USB_ENDPOINT_XFER_ISOC)
1294 return -EINVAL;
1295
1296 spin_lock_irqsave (&ep->dev->lock, flags);
1297 if (!list_empty (&ep->queue))
1298 retval = -EAGAIN;
1299 else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
1300 retval = -EAGAIN;
1301 else {
1302 VDEBUG (ep->dev, "%s %s halt\n", _ep->name,
1303 value ? "set" : "clear");
1304 /* set/clear, then synch memory views with the device */
1305 if (value) {
1306 if (ep->num == 0)
1307 ep->dev->protocol_stall = 1;
1308 else
1309 set_halt (ep);
1310 } else
1311 clear_halt (ep);
1312 (void) readl (&ep->regs->ep_rsp);
1313 }
1314 spin_unlock_irqrestore (&ep->dev->lock, flags);
1315
1316 return retval;
1317}
1318
1319static int
1320net2280_fifo_status (struct usb_ep *_ep)
1321{
1322 struct net2280_ep *ep;
1323 u32 avail;
1324
1325 ep = container_of (_ep, struct net2280_ep, ep);
1326 if (!_ep || (!ep->desc && ep->num != 0))
1327 return -ENODEV;
1328 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1329 return -ESHUTDOWN;
1330
1331 avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
1332 if (avail > ep->fifo_size)
1333 return -EOVERFLOW;
1334 if (ep->is_in)
1335 avail = ep->fifo_size - avail;
1336 return avail;
1337}
1338
1339static void
1340net2280_fifo_flush (struct usb_ep *_ep)
1341{
1342 struct net2280_ep *ep;
1343
1344 ep = container_of (_ep, struct net2280_ep, ep);
1345 if (!_ep || (!ep->desc && ep->num != 0))
1346 return;
1347 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1348 return;
1349
1350 writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
1351 (void) readl (&ep->regs->ep_rsp);
1352}
1353
1354static struct usb_ep_ops net2280_ep_ops = {
1355 .enable = net2280_enable,
1356 .disable = net2280_disable,
1357
1358 .alloc_request = net2280_alloc_request,
1359 .free_request = net2280_free_request,
1360
1361 .alloc_buffer = net2280_alloc_buffer,
1362 .free_buffer = net2280_free_buffer,
1363
1364 .queue = net2280_queue,
1365 .dequeue = net2280_dequeue,
1366
1367 .set_halt = net2280_set_halt,
1368 .fifo_status = net2280_fifo_status,
1369 .fifo_flush = net2280_fifo_flush,
1370};
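
/* Editor's note (guarded out): gadget drivers never call these methods
 * directly; they use the usb_ep_*() inlines from the gadget header, which
 * dispatch through this ops table.  A few illustrative calls:
 */
#if 0
static void example_ep_ops (struct usb_ep *ep)
{
	int	bytes;

	usb_ep_set_halt (ep);			/* -> net2280_set_halt (ep, 1) */
	usb_ep_clear_halt (ep);			/* -> net2280_set_halt (ep, 0) */
	bytes = usb_ep_fifo_status (ep);	/* -> net2280_fifo_status () */
	if (bytes > 0)
		usb_ep_fifo_flush (ep);		/* -> net2280_fifo_flush () */
}
#endif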
1371
1372/*-------------------------------------------------------------------------*/
1373
1374static int net2280_get_frame (struct usb_gadget *_gadget)
1375{
1376 struct net2280 *dev;
1377 unsigned long flags;
1378 u16 retval;
1379
1380 if (!_gadget)
1381 return -ENODEV;
1382 dev = container_of (_gadget, struct net2280, gadget);
1383 spin_lock_irqsave (&dev->lock, flags);
1384 retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
1385 spin_unlock_irqrestore (&dev->lock, flags);
1386 return retval;
1387}
1388
1389static int net2280_wakeup (struct usb_gadget *_gadget)
1390{
1391 struct net2280 *dev;
1392 u32 tmp;
1393 unsigned long flags;
1394
1395 if (!_gadget)
1396 return 0;
1397 dev = container_of (_gadget, struct net2280, gadget);
1398
1399 spin_lock_irqsave (&dev->lock, flags);
1400 tmp = readl (&dev->usb->usbctl);
1401 if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
1402 writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
1403 spin_unlock_irqrestore (&dev->lock, flags);
1404
1405 /* pci writes may still be posted */
1406 return 0;
1407}
1408
1409static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1410{
1411 struct net2280 *dev;
1412 u32 tmp;
1413 unsigned long flags;
1414
1415 if (!_gadget)
1416 return 0;
1417 dev = container_of (_gadget, struct net2280, gadget);
1418
1419 spin_lock_irqsave (&dev->lock, flags);
1420 tmp = readl (&dev->usb->usbctl);
1421 if (value)
1422 tmp |= (1 << SELF_POWERED_STATUS);
1423 else
1424 tmp &= ~(1 << SELF_POWERED_STATUS);
1425 writel (tmp, &dev->usb->usbctl);
1426 spin_unlock_irqrestore (&dev->lock, flags);
1427
1428 return 0;
1429}
1430
1431static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1432{
1433 struct net2280 *dev;
1434 u32 tmp;
1435 unsigned long flags;
1436
1437 if (!_gadget)
1438 return -ENODEV;
1439 dev = container_of (_gadget, struct net2280, gadget);
1440
1441 spin_lock_irqsave (&dev->lock, flags);
1442 tmp = readl (&dev->usb->usbctl);
1443 dev->softconnect = (is_on != 0);
1444 if (is_on)
1445 tmp |= (1 << USB_DETECT_ENABLE);
1446 else
1447 tmp &= ~(1 << USB_DETECT_ENABLE);
1448 writel (tmp, &dev->usb->usbctl);
1449 spin_unlock_irqrestore (&dev->lock, flags);
1450
1451 return 0;
1452}
1453
1454static const struct usb_gadget_ops net2280_ops = {
1455 .get_frame = net2280_get_frame,
1456 .wakeup = net2280_wakeup,
1457 .set_selfpowered = net2280_set_selfpowered,
1458 .pullup = net2280_pullup,
1459};
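
/* Editor's note (guarded out): as with the endpoint ops, gadget drivers
 * reach these through the usb_gadget_*() inlines; the calls below are
 * illustrative only.
 */
#if 0
static void example_gadget_ops (struct usb_gadget *gadget)
{
	usb_gadget_set_selfpowered (gadget);	/* -> net2280_set_selfpowered */
	usb_gadget_wakeup (gadget);		/* -> net2280_wakeup */
	usb_gadget_disconnect (gadget);		/* -> net2280_pullup (..., 0) */
	usb_gadget_connect (gadget);		/* -> net2280_pullup (..., 1) */
}
#endif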
1460
1461/*-------------------------------------------------------------------------*/
1462
1463#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1464
1465/* FIXME move these into procfs, and use seq_file.
1466 * Sysfs _still_ doesn't behave for arbitrarily sized files,
1467 * and also doesn't help products using this with 2.4 kernels.
1468 */
1469
1470/* "function" sysfs attribute */
1471static ssize_t
1472show_function (struct device *_dev, char *buf)
1473{
1474 struct net2280 *dev = dev_get_drvdata (_dev);
1475
1476 if (!dev->driver
1477 || !dev->driver->function
1478 || strlen (dev->driver->function) > PAGE_SIZE)
1479 return 0;
1480 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1481}
1482static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1483
1484static ssize_t
1485show_registers (struct device *_dev, char *buf)
1486{
1487 struct net2280 *dev;
1488 char *next;
1489 unsigned size, t;
1490 unsigned long flags;
1491 int i;
1492 u32 t1, t2;
1493 char *s;
1494
1495 dev = dev_get_drvdata (_dev);
1496 next = buf;
1497 size = PAGE_SIZE;
1498 spin_lock_irqsave (&dev->lock, flags);
1499
1500 if (dev->driver)
1501 s = dev->driver->driver.name;
1502 else
1503 s = "(none)";
1504
1505 /* Main Control Registers */
1506 t = scnprintf (next, size, "%s version " DRIVER_VERSION
1507 ", chiprev %04x, dma %s\n\n"
1508 "devinit %03x fifoctl %08x gadget '%s'\n"
1509 "pci irqenb0 %02x irqenb1 %08x "
1510 "irqstat0 %04x irqstat1 %08x\n",
1511 driver_name, dev->chiprev,
1512 use_dma
1513 ? (use_dma_chaining ? "chaining" : "enabled")
1514 : "disabled",
1515 readl (&dev->regs->devinit),
1516 readl (&dev->regs->fifoctl),
1517 s,
1518 readl (&dev->regs->pciirqenb0),
1519 readl (&dev->regs->pciirqenb1),
1520 readl (&dev->regs->irqstat0),
1521 readl (&dev->regs->irqstat1));
1522 size -= t;
1523 next += t;
1524
1525 /* USB Control Registers */
1526 t1 = readl (&dev->usb->usbctl);
1527 t2 = readl (&dev->usb->usbstat);
1528 if (t1 & (1 << VBUS_PIN)) {
1529 if (t2 & (1 << HIGH_SPEED))
1530 s = "high speed";
1531 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1532 s = "powered";
1533 else
1534 s = "full speed";
1535 /* full speed bit (6) not working?? */
1536 } else
1537 s = "not attached";
1538 t = scnprintf (next, size,
1539 "stdrsp %08x usbctl %08x usbstat %08x "
1540 "addr 0x%02x (%s)\n",
1541 readl (&dev->usb->stdrsp), t1, t2,
1542 readl (&dev->usb->ouraddr), s);
1543 size -= t;
1544 next += t;
1545
1546 /* PCI Master Control Registers */
1547
1548 /* DMA Control Registers */
1549
1550 /* Configurable EP Control Registers */
1551 for (i = 0; i < 7; i++) {
1552 struct net2280_ep *ep;
1553
1554 ep = &dev->ep [i];
1555 if (i && !ep->desc)
1556 continue;
1557
1558 t1 = readl (&ep->regs->ep_cfg);
1559 t2 = readl (&ep->regs->ep_rsp) & 0xff;
1560 t = scnprintf (next, size,
1561 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1562 "irqenb %02x\n",
1563 ep->ep.name, t1, t2,
1564 (t2 & (1 << CLEAR_NAK_OUT_PACKETS))
1565 ? "NAK " : "",
1566 (t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
1567 ? "hide " : "",
1568 (t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
1569 ? "CRC " : "",
1570 (t2 & (1 << CLEAR_INTERRUPT_MODE))
1571 ? "interrupt " : "",
1572 (t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1573 ? "status " : "",
1574 (t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
1575 ? "NAKmode " : "",
1576 (t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
1577 ? "DATA1 " : "DATA0 ",
1578 (t2 & (1 << CLEAR_ENDPOINT_HALT))
1579 ? "HALT " : "",
1580 readl (&ep->regs->ep_irqenb));
1581 size -= t;
1582 next += t;
1583
1584 t = scnprintf (next, size,
1585 "\tstat %08x avail %04x "
1586 "(ep%d%s-%s)%s\n",
1587 readl (&ep->regs->ep_stat),
1588 readl (&ep->regs->ep_avail),
1589 t1 & 0x0f, DIR_STRING (t1),
1590 type_string (t1 >> 8),
1591 ep->stopped ? "*" : "");
1592 size -= t;
1593 next += t;
1594
1595 if (!ep->dma)
1596 continue;
1597
1598 t = scnprintf (next, size,
1599 " dma\tctl %08x stat %08x count %08x\n"
1600 "\taddr %08x desc %08x\n",
1601 readl (&ep->dma->dmactl),
1602 readl (&ep->dma->dmastat),
1603 readl (&ep->dma->dmacount),
1604 readl (&ep->dma->dmaaddr),
1605 readl (&ep->dma->dmadesc));
1606 size -= t;
1607 next += t;
1608
1609 }
1610
1611 /* Indexed Registers */
1612 // none yet
1613
1614 /* Statistics */
1615 t = scnprintf (next, size, "\nirqs: ");
1616 size -= t;
1617 next += t;
1618 for (i = 0; i < 7; i++) {
1619 struct net2280_ep *ep;
1620
1621 ep = &dev->ep [i];
1622 if (i && !ep->irqs)
1623 continue;
1624 t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
1625 size -= t;
1626 next += t;
1627
1628 }
1629 t = scnprintf (next, size, "\n");
1630 size -= t;
1631 next += t;
1632
1633 spin_unlock_irqrestore (&dev->lock, flags);
1634
1635 return PAGE_SIZE - size;
1636}
1637static DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL);
1638
1639static ssize_t
1640show_queues (struct device *_dev, char *buf)
1641{
1642 struct net2280 *dev;
1643 char *next;
1644 unsigned size;
1645 unsigned long flags;
1646 int i;
1647
1648 dev = dev_get_drvdata (_dev);
1649 next = buf;
1650 size = PAGE_SIZE;
1651 spin_lock_irqsave (&dev->lock, flags);
1652
1653 for (i = 0; i < 7; i++) {
1654 struct net2280_ep *ep = &dev->ep [i];
1655 struct net2280_request *req;
1656 int t;
1657
1658 if (i != 0) {
1659 const struct usb_endpoint_descriptor *d;
1660
1661 d = ep->desc;
1662 if (!d)
1663 continue;
1664 t = d->bEndpointAddress;
1665 t = scnprintf (next, size,
1666 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1667 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1668 (t & USB_DIR_IN) ? "in" : "out",
1669 ({ char *val;
1670 switch (d->bmAttributes & 0x03) {
1671 case USB_ENDPOINT_XFER_BULK:
1672 val = "bulk"; break;
1673 case USB_ENDPOINT_XFER_INT:
1674 val = "intr"; break;
1675 default:
1676 val = "iso"; break;
1677 }; val; }),
1678 le16_to_cpu (d->wMaxPacketSize) & 0x1fff,
1679 ep->dma ? "dma" : "pio", ep->fifo_size
1680 );
1681 } else /* ep0 should only have one transfer queued */
1682 t = scnprintf (next, size, "ep0 max 64 pio %s\n",
1683 ep->is_in ? "in" : "out");
1684 if (t <= 0 || t > size)
1685 goto done;
1686 size -= t;
1687 next += t;
1688
1689 if (list_empty (&ep->queue)) {
1690 t = scnprintf (next, size, "\t(nothing queued)\n");
1691 if (t <= 0 || t > size)
1692 goto done;
1693 size -= t;
1694 next += t;
1695 continue;
1696 }
1697 list_for_each_entry (req, &ep->queue, queue) {
1698 if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
1699 t = scnprintf (next, size,
1700 "\treq %p len %d/%d "
1701 "buf %p (dmacount %08x)\n",
1702 &req->req, req->req.actual,
1703 req->req.length, req->req.buf,
1704 readl (&ep->dma->dmacount));
1705 else
1706 t = scnprintf (next, size,
1707 "\treq %p len %d/%d buf %p\n",
1708 &req->req, req->req.actual,
1709 req->req.length, req->req.buf);
1710 if (t <= 0 || t > size)
1711 goto done;
1712 size -= t;
1713 next += t;
1714
1715 if (ep->dma) {
1716 struct net2280_dma *td;
1717
1718 td = req->td;
1719 t = scnprintf (next, size, "\t td %08x "
1720 " count %08x buf %08x desc %08x\n",
1721 (u32) req->td_dma,
1722 le32_to_cpu (td->dmacount),
1723 le32_to_cpu (td->dmaaddr),
1724 le32_to_cpu (td->dmadesc));
1725 if (t <= 0 || t > size)
1726 goto done;
1727 size -= t;
1728 next += t;
1729 }
1730 }
1731 }
1732
1733done:
1734 spin_unlock_irqrestore (&dev->lock, flags);
1735 return PAGE_SIZE - size;
1736}
1737static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
1738
1739
1740#else
1741
1742#define device_create_file(a,b) do {} while (0)
1743#define device_remove_file device_create_file
1744
1745#endif
1746
1747/*-------------------------------------------------------------------------*/
1748
1749/* another driver-specific mode might be a request type doing dma
1750 * to/from another device fifo instead of to/from memory.
1751 */
1752
1753static void set_fifo_mode (struct net2280 *dev, int mode)
1754{
1755 /* keeping high bits preserves BAR2 */
1756 writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1757
1758 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1759 INIT_LIST_HEAD (&dev->gadget.ep_list);
1760 list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
1761 list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
1762 switch (mode) {
1763 case 0:
1764 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1765 list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
1766 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
1767 break;
1768 case 1:
1769 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
1770 break;
1771 case 2:
1772 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1773 dev->ep [1].fifo_size = 2048;
1774 dev->ep [2].fifo_size = 1024;
1775 break;
1776 }
1777 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1778 list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
1779 list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
1780}
1781
1782/**
1783 * net2280_set_fifo_mode - change allocation of fifo buffers
1784 * @gadget: access to the net2280 device that will be updated
1785 * @mode: 0 for default, four 1kB buffers (ep-a through ep-d);
1786 * 1 for two 2kB buffers (ep-a and ep-b only);
1787 * 2 for one 2kB buffer (ep-a) and two 1kB ones (ep-b, ep-c).
1788 *
1789 * returns zero on success, else negative errno. when this succeeds,
1790 * the contents of gadget->ep_list may have changed.
1791 *
1792 * you may only call this function when endpoints a-d are all disabled.
1793 * use it whenever extra hardware buffering can help performance, such
1794 * as before enabling "high bandwidth" interrupt endpoints that use
1795 * maxpacket bigger than 512 (when double buffering would otherwise
1796 * be unavailable).
1797 */
1798int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
1799{
1800 int i;
1801 struct net2280 *dev;
1802 int status = 0;
1803 unsigned long flags;
1804
1805 if (!gadget)
1806 return -ENODEV;
1807 dev = container_of (gadget, struct net2280, gadget);
1808
1809 spin_lock_irqsave (&dev->lock, flags);
1810
1811 for (i = 1; i <= 4; i++)
1812 if (dev->ep [i].desc) {
1813 status = -EINVAL;
1814 break;
1815 }
1816 if (mode < 0 || mode > 2)
1817 status = -EINVAL;
1818 if (status == 0)
1819 set_fifo_mode (dev, mode);
1820 spin_unlock_irqrestore (&dev->lock, flags);
1821
1822 if (status == 0) {
1823 if (mode == 1)
1824 DEBUG (dev, "fifo: ep-a 2K, ep-b 2K\n");
1825 else if (mode == 2)
1826 DEBUG (dev, "fifo: ep-a 2K, ep-b 1K, ep-c 1K\n");
1827 /* else all are 1K */
1828 }
1829 return status;
1830}
1831EXPORT_SYMBOL (net2280_set_fifo_mode);
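
/* Editor's sketch (guarded out): a gadget driver's bind() could trade the
 * default four 1K fifos for two double-buffer friendly 2K ones before
 * enabling its bulk endpoints.  This assumes the exported prototype is
 * visible to that driver (declared locally here just for the sketch) and
 * uses the gadget_is_net2280() test from gadget_chips.h.
 */
#if 0
extern int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode);

static int example_bind (struct usb_gadget *gadget)
{
	if (gadget_is_net2280 (gadget))		/* see gadget_chips.h */
		net2280_set_fifo_mode (gadget, 1);	/* ep-a 2K, ep-b 2K */
	return 0;
}
#endif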
1832
1833/*-------------------------------------------------------------------------*/
1834
1835/* keeping it simple:
1836 * - one bus driver, initted first;
1837 * - one function driver, initted second
1838 *
1839 * most of the work to support multiple net2280 controllers would
1840 * be to associate this gadget driver (yes?) with all of them, or
1841 * perhaps to bind specific drivers to specific devices.
1842 */
1843
1844static struct net2280 *the_controller;
1845
1846static void usb_reset (struct net2280 *dev)
1847{
1848 u32 tmp;
1849
1850 dev->gadget.speed = USB_SPEED_UNKNOWN;
1851 (void) readl (&dev->usb->usbctl);
1852
1853 net2280_led_init (dev);
1854
1855 /* disable automatic responses, and irqs */
1856 writel (0, &dev->usb->stdrsp);
1857 writel (0, &dev->regs->pciirqenb0);
1858 writel (0, &dev->regs->pciirqenb1);
1859
1860 /* clear old dma and irq state */
1861 for (tmp = 0; tmp < 4; tmp++) {
1862 struct net2280_ep *ep = &dev->ep [tmp + 1];
1863
1864 if (ep->dma)
1865 abort_dma (ep);
1866 }
1867	writel (~0, &dev->regs->irqstat0);
1868	writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
1869
1870 /* reset, and enable pci */
1871 tmp = readl (&dev->regs->devinit)
1872 | (1 << PCI_ENABLE)
1873 | (1 << FIFO_SOFT_RESET)
1874 | (1 << USB_SOFT_RESET)
1875 | (1 << M8051_RESET);
1876 writel (tmp, &dev->regs->devinit);
1877
1878 /* standard fifo and endpoint allocations */
1879 set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
1880}
1881
1882static void usb_reinit (struct net2280 *dev)
1883{
1884 u32 tmp;
1885 int init_dma;
1886
1887 /* use_dma changes are ignored till next device re-init */
1888 init_dma = use_dma;
1889
1890 /* basic endpoint init */
1891 for (tmp = 0; tmp < 7; tmp++) {
1892 struct net2280_ep *ep = &dev->ep [tmp];
1893
1894 ep->ep.name = ep_name [tmp];
1895 ep->dev = dev;
1896 ep->num = tmp;
1897
1898 if (tmp > 0 && tmp <= 4) {
1899 ep->fifo_size = 1024;
1900 if (init_dma)
1901 ep->dma = &dev->dma [tmp - 1];
1902 } else
1903 ep->fifo_size = 64;
1904 ep->regs = &dev->epregs [tmp];
1905 ep_reset (dev->regs, ep);
1906 }
1907 dev->ep [0].ep.maxpacket = 64;
1908 dev->ep [5].ep.maxpacket = 64;
1909 dev->ep [6].ep.maxpacket = 64;
1910
1911 dev->gadget.ep0 = &dev->ep [0].ep;
1912 dev->ep [0].stopped = 0;
1913 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1914
1915 /* we want to prevent lowlevel/insecure access from the USB host,
1916 * but erratum 0119 means this enable bit is ignored
1917 */
1918 for (tmp = 0; tmp < 5; tmp++)
1919 writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
1920}
1921
1922static void ep0_start (struct net2280 *dev)
1923{
1924 writel ( (1 << CLEAR_EP_HIDE_STATUS_PHASE)
1925 | (1 << CLEAR_NAK_OUT_PACKETS)
1926 | (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
1927 , &dev->epregs [0].ep_rsp);
1928
1929 /*
1930 * hardware optionally handles a bunch of standard requests
1931 * that the API hides from drivers anyway. have it do so.
1932 * endpoint status/features are handled in software, to
1933 * help pass tests for some dubious behavior.
1934 */
1935 writel ( (1 << SET_TEST_MODE)
1936 | (1 << SET_ADDRESS)
1937 | (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
1938 | (1 << GET_DEVICE_STATUS)
1939 | (1 << GET_INTERFACE_STATUS)
1940 , &dev->usb->stdrsp);
1941 writel ( (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
1942 | (1 << SELF_POWERED_USB_DEVICE)
1943 | (1 << REMOTE_WAKEUP_SUPPORT)
1944 | (dev->softconnect << USB_DETECT_ENABLE)
1945 | (1 << SELF_POWERED_STATUS)
1946 , &dev->usb->usbctl);
1947
1948 /* enable irqs so we can see ep0 and general operation */
1949 writel ( (1 << SETUP_PACKET_INTERRUPT_ENABLE)
1950 | (1 << ENDPOINT_0_INTERRUPT_ENABLE)
1951 , &dev->regs->pciirqenb0);
1952 writel ( (1 << PCI_INTERRUPT_ENABLE)
1953 | (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
1954 | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
1955 | (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
1956 | (1 << VBUS_INTERRUPT_ENABLE)
1957 | (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
1958 | (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
1959 , &dev->regs->pciirqenb1);
1960
1961 /* don't leave any writes posted */
1962 (void) readl (&dev->usb->usbctl);
1963}
1964
1965/* when a driver is successfully registered, it will receive
1966 * control requests including set_configuration(), which enables
1967 * non-control requests. then usb traffic follows until a
1968 * disconnect is reported. then a host may connect again, or
1969 * the driver might get unbound.
1970 */
1971int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1972{
1973 struct net2280 *dev = the_controller;
1974 int retval;
1975 unsigned i;
1976
1977 /* insist on high speed support from the driver, since
1978 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
1979 * "must not be used in normal operation"
1980 */
1981 if (!driver
1982 || driver->speed != USB_SPEED_HIGH
1983 || !driver->bind
1984 || !driver->unbind
1985 || !driver->setup)
1986 return -EINVAL;
1987 if (!dev)
1988 return -ENODEV;
1989 if (dev->driver)
1990 return -EBUSY;
1991
1992 for (i = 0; i < 7; i++)
1993 dev->ep [i].irqs = 0;
1994
1995 /* hook up the driver ... */
1996 dev->softconnect = 1;
1997 driver->driver.bus = NULL;
1998 dev->driver = driver;
1999 dev->gadget.dev.driver = &driver->driver;
2000 retval = driver->bind (&dev->gadget);
2001 if (retval) {
2002 DEBUG (dev, "bind to driver %s --> %d\n",
2003 driver->driver.name, retval);
2004 dev->driver = NULL;
2005 dev->gadget.dev.driver = NULL;
2006 return retval;
2007 }
2008
2009 device_create_file (&dev->pdev->dev, &dev_attr_function);
2010 device_create_file (&dev->pdev->dev, &dev_attr_queues);
2011
2012 /* ... then enable host detection and ep0; and we're ready
2013 * for set_configuration as well as eventual disconnect.
2014 */
2015 net2280_led_active (dev, 1);
2016 ep0_start (dev);
2017
2018 DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
2019 driver->driver.name,
2020 readl (&dev->usb->usbctl),
2021 readl (&dev->usb->stdrsp));
2022
2023 /* pci writes may still be posted */
2024 return 0;
2025}
2026EXPORT_SYMBOL (usb_gadget_register_driver);
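
/* Editor's sketch (guarded out): the minimal shape of a gadget driver that
 * binds through the call above.  All names and the (empty) function bodies
 * are illustrative; real function drivers such as ether.c or zero.c in this
 * directory fill these in.  Note the checks above: bind, unbind, setup, and
 * USB_SPEED_HIGH are all required.
 */
#if 0
static int example_bind (struct usb_gadget *gadget) { return 0; }
static void example_unbind (struct usb_gadget *gadget) { }
static int example_setup (struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl) { return -EOPNOTSUPP; }
static void example_disconnect (struct usb_gadget *gadget) { }

static struct usb_gadget_driver example_driver = {
	.function	= "example",
	.speed		= USB_SPEED_HIGH,	/* this udc insists on it */
	.bind		= example_bind,
	.unbind		= example_unbind,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
	.driver		= { .name = "example" },
};

static int __init example_init (void)
{
	return usb_gadget_register_driver (&example_driver);
}
#endif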
2027
2028static void
2029stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
2030{
2031 int i;
2032
2033 /* don't disconnect if it's not connected */
2034 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2035 driver = NULL;
2036
2037 /* stop hardware; prevent new request submissions;
2038 * and kill any outstanding requests.
2039 */
2040 usb_reset (dev);
2041 for (i = 0; i < 7; i++)
2042 nuke (&dev->ep [i]);
2043
2044 /* report disconnect; the driver is already quiesced */
2045 if (driver) {
2046 spin_unlock (&dev->lock);
2047 driver->disconnect (&dev->gadget);
2048 spin_lock (&dev->lock);
2049 }
2050
2051 usb_reinit (dev);
2052}
2053
2054int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
2055{
2056 struct net2280 *dev = the_controller;
2057 unsigned long flags;
2058
2059 if (!dev)
2060 return -ENODEV;
2061 if (!driver || driver != dev->driver)
2062 return -EINVAL;
2063
2064 spin_lock_irqsave (&dev->lock, flags);
2065 stop_activity (dev, driver);
2066 spin_unlock_irqrestore (&dev->lock, flags);
2067
2068 net2280_pullup (&dev->gadget, 0);
2069
2070 driver->unbind (&dev->gadget);
2071 dev->gadget.dev.driver = NULL;
2072 dev->driver = NULL;
2073
2074 net2280_led_active (dev, 0);
2075 device_remove_file (&dev->pdev->dev, &dev_attr_function);
2076 device_remove_file (&dev->pdev->dev, &dev_attr_queues);
2077
2078 DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
2079 return 0;
2080}
2081EXPORT_SYMBOL (usb_gadget_unregister_driver);
2082
2083
2084/*-------------------------------------------------------------------------*/
2085
2086/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2087 * also works for dma-capable endpoints, in pio mode or just
2088 * to manually advance the queue after short OUT transfers.
2089 */
2090static void handle_ep_small (struct net2280_ep *ep)
2091{
2092 struct net2280_request *req;
2093 u32 t;
2094 /* 0 error, 1 mid-data, 2 done */
2095 int mode = 1;
2096
2097 if (!list_empty (&ep->queue))
2098 req = list_entry (ep->queue.next,
2099 struct net2280_request, queue);
2100 else
2101 req = NULL;
2102
2103 /* ack all, and handle what we care about */
2104 t = readl (&ep->regs->ep_stat);
2105 ep->irqs++;
2106#if 0
2107 VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
2108 ep->ep.name, t, req ? &req->req : 0);
2109#endif
2110 writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
2111
2112 /* for ep0, monitor token irqs to catch data stage length errors
2113 * and to synchronize on status.
2114 *
2115	 * also, to defer reporting of protocol stalls ... here's where
2116	 * data or status first appears, so handling stalls here should
2117	 * never cause trouble on the host side.
2118 *
2119 * control requests could be slightly faster without token synch for
2120 * status, but status can jam up that way.
2121 */
2122 if (unlikely (ep->num == 0)) {
2123 if (ep->is_in) {
2124 /* status; stop NAKing */
2125 if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
2126 if (ep->dev->protocol_stall) {
2127 ep->stopped = 1;
2128 set_halt (ep);
2129 }
2130 if (!req)
2131 allow_status (ep);
2132 mode = 2;
2133 /* reply to extra IN data tokens with a zlp */
2134 } else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
2135 if (ep->dev->protocol_stall) {
2136 ep->stopped = 1;
2137 set_halt (ep);
2138 mode = 2;
2139 } else if (!req && ep->stopped)
2140 write_fifo (ep, NULL);
2141 }
2142 } else {
2143 /* status; stop NAKing */
2144 if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
2145 if (ep->dev->protocol_stall) {
2146 ep->stopped = 1;
2147 set_halt (ep);
2148 }
2149 mode = 2;
2150 /* an extra OUT token is an error */
2151 } else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
2152 && req
2153 && req->req.actual == req->req.length)
2154 || !req) {
2155 ep->dev->protocol_stall = 1;
2156 set_halt (ep);
2157 ep->stopped = 1;
2158 if (req)
2159 done (ep, req, -EOVERFLOW);
2160 req = NULL;
2161 }
2162 }
2163 }
2164
2165 if (unlikely (!req))
2166 return;
2167
2168 /* manual DMA queue advance after short OUT */
2169 if (likely (ep->dma != 0)) {
2170 if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
2171 u32 count;
2172 int stopped = ep->stopped;
2173
2174 /* TRANSFERRED works around OUT_DONE erratum 0112.
2175 * we expect (N <= maxpacket) bytes; host wrote M.
2176 * iff (M < N) we won't ever see a DMA interrupt.
2177 */
2178 ep->stopped = 1;
2179 for (count = 0; ; t = readl (&ep->regs->ep_stat)) {
2180
2181 /* any preceding dma transfers must finish.
2182 * dma handles (M >= N), may empty the queue
2183 */
2184 scan_dma_completions (ep);
2185 if (unlikely (list_empty (&ep->queue)
2186 || ep->out_overflow)) {
2187 req = NULL;
2188 break;
2189 }
2190 req = list_entry (ep->queue.next,
2191 struct net2280_request, queue);
2192
2193 /* here either (M < N), a "real" short rx;
2194 * or (M == N) and the queue didn't empty
2195 */
2196 if (likely (t & (1 << FIFO_EMPTY))) {
2197 count = readl (&ep->dma->dmacount);
2198 count &= DMA_BYTE_COUNT_MASK;
2199 if (readl (&ep->dma->dmadesc)
2200 != req->td_dma)
2201 req = NULL;
2202 break;
2203 }
2204 udelay(1);
2205 }
2206
2207 /* stop DMA, leave ep NAKing */
2208 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
2209 spin_stop_dma (ep->dma);
2210
2211 if (likely (req)) {
2212 req->td->dmacount = 0;
2213 t = readl (&ep->regs->ep_avail);
2214 dma_done (ep, req, count, t);
2215 }
2216
2217 /* also flush to prevent erratum 0106 trouble */
2218 if (unlikely (ep->out_overflow
2219 || (ep->dev->chiprev == 0x0100
2220 && ep->dev->gadget.speed
2221 == USB_SPEED_FULL))) {
2222 out_flush (ep);
2223 ep->out_overflow = 0;
2224 }
2225
2226 /* (re)start dma if needed, stop NAKing */
2227 ep->stopped = stopped;
2228 if (!list_empty (&ep->queue))
2229 restart_dma (ep);
2230 } else
2231 DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
2232 ep->ep.name, t);
2233 return;
2234
2235 /* data packet(s) received (in the fifo, OUT) */
2236 } else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
2237 if (read_fifo (ep, req) && ep->num != 0)
2238 mode = 2;
2239
2240 /* data packet(s) transmitted (IN) */
2241 } else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
2242 unsigned len;
2243
2244 len = req->req.length - req->req.actual;
2245 if (len > ep->ep.maxpacket)
2246 len = ep->ep.maxpacket;
2247 req->req.actual += len;
2248
2249 /* if we wrote it all, we're usually done */
2250 if (req->req.actual == req->req.length) {
2251 if (ep->num == 0) {
2252 /* wait for control status */
2253 if (mode != 2)
2254 req = NULL;
2255 } else if (!req->req.zero || len != ep->ep.maxpacket)
2256 mode = 2;
2257 }
2258
2259 /* there was nothing to do ... */
2260 } else if (mode == 1)
2261 return;
2262
2263 /* done */
2264 if (mode == 2) {
2265 /* stream endpoints often resubmit/unlink in completion */
2266 done (ep, req, 0);
2267
2268 /* maybe advance queue to next request */
2269 if (ep->num == 0) {
2270 /* NOTE: net2280 could let gadget driver start the
2271 * status stage later. since not all controllers let
2272 * them control that, the api doesn't (yet) allow it.
2273 */
2274 if (!ep->stopped)
2275 allow_status (ep);
2276 req = NULL;
2277 } else {
2278 if (!list_empty (&ep->queue) && !ep->stopped)
2279 req = list_entry (ep->queue.next,
2280 struct net2280_request, queue);
2281 else
2282 req = NULL;
2283 if (req && !ep->is_in)
2284 stop_out_naking (ep);
2285 }
2286 }
2287
2288 /* is there a buffer for the next packet?
2289 * for best streaming performance, make sure there is one.
2290 */
2291 if (req && !ep->stopped) {
2292
2293 /* load IN fifo with next packet (may be zlp) */
2294 if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
2295 write_fifo (ep, &req->req);
2296 }
2297}
2298
2299static struct net2280_ep *
2300get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2301{
2302 struct net2280_ep *ep;
2303
2304 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2305 return &dev->ep [0];
2306 list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
2307 u8 bEndpointAddress;
2308
2309 if (!ep->desc)
2310 continue;
2311 bEndpointAddress = ep->desc->bEndpointAddress;
2312 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2313 continue;
2314 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2315 return ep;
2316 }
2317 return NULL;
2318}
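
/* Editor's sketch (guarded out): why the XOR test above works.  wIndex of a
 * control request carries an endpoint address: direction in bit 7
 * (USB_DIR_IN) and the endpoint number in the low bits.  The values below
 * are invented for illustration.
 */
#if 0
static int example_match (void)
{
	u16	wIndex = USB_DIR_IN | 1;		/* host asks about ep1in */
	u8	bEndpointAddress = USB_DIR_IN | 1;	/* from ep->desc */

	if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
		return 0;			/* directions differ: no match */
	return (wIndex & 0x0f) == (bEndpointAddress & 0x0f);
}
#endif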
2319
2320static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
2321{
2322 struct net2280_ep *ep;
2323 u32 num, scratch;
2324
2325 /* most of these don't need individual acks */
2326 stat &= ~(1 << INTA_ASSERTED);
2327 if (!stat)
2328 return;
2329 // DEBUG (dev, "irqstat0 %04x\n", stat);
2330
2331 /* starting a control request? */
2332 if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
2333 union {
2334 u32 raw [2];
2335 struct usb_ctrlrequest r;
2336 } u;
2337 int tmp = 0;
2338 struct net2280_request *req;
2339
2340 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
2341 if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
2342 dev->gadget.speed = USB_SPEED_HIGH;
2343 else
2344 dev->gadget.speed = USB_SPEED_FULL;
2345 net2280_led_speed (dev, dev->gadget.speed);
2346 DEBUG (dev, "%s speed\n",
2347 (dev->gadget.speed == USB_SPEED_HIGH)
2348 ? "high" : "full");
2349 }
2350
2351 ep = &dev->ep [0];
2352 ep->irqs++;
2353
2354 /* make sure any leftover request state is cleared */
2355 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
2356 while (!list_empty (&ep->queue)) {
2357 req = list_entry (ep->queue.next,
2358 struct net2280_request, queue);
2359 done (ep, req, (req->req.actual == req->req.length)
2360 ? 0 : -EPROTO);
2361 }
2362 ep->stopped = 0;
2363 dev->protocol_stall = 0;
2364 writel ( (1 << TIMEOUT)
2365 | (1 << USB_STALL_SENT)
2366 | (1 << USB_IN_NAK_SENT)
2367 | (1 << USB_IN_ACK_RCVD)
2368 | (1 << USB_OUT_PING_NAK_SENT)
2369 | (1 << USB_OUT_ACK_SENT)
2370 | (1 << FIFO_OVERFLOW)
2371 | (1 << FIFO_UNDERFLOW)
2372 | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
2373 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
2374 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
2375 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
2376 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2377 | (1 << DATA_IN_TOKEN_INTERRUPT)
2378 , &ep->regs->ep_stat);
2379 u.raw [0] = readl (&dev->usb->setup0123);
2380 u.raw [1] = readl (&dev->usb->setup4567);
2381
2382 cpu_to_le32s (&u.raw [0]);
2383 cpu_to_le32s (&u.raw [1]);
2384
2385 le16_to_cpus (&u.r.wValue);
2386 le16_to_cpus (&u.r.wIndex);
2387 le16_to_cpus (&u.r.wLength);
2388
2389 /* ack the irq */
2390 writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
2391 stat ^= (1 << SETUP_PACKET_INTERRUPT);
2392
2393 /* watch control traffic at the token level, and force
2394 * synchronization before letting the status stage happen.
2395 * FIXME ignore tokens we'll NAK, until driver responds.
2396 * that'll mean a lot less irqs for some drivers.
2397 */
2398 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
2399 if (ep->is_in) {
2400 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
2401 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2402 | (1 << DATA_IN_TOKEN_INTERRUPT);
2403 stop_out_naking (ep);
2404 } else
2405 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
2406 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2407 | (1 << DATA_IN_TOKEN_INTERRUPT);
2408 writel (scratch, &dev->epregs [0].ep_irqenb);
2409
2410 /* we made the hardware handle most lowlevel requests;
2411 * everything else goes uplevel to the gadget code.
2412 */
2413 switch (u.r.bRequest) {
2414 case USB_REQ_GET_STATUS: {
2415 struct net2280_ep *e;
2416 u16 status;
2417
2418 /* hw handles device and interface status */
2419 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
2420 goto delegate;
2421 if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0
2422 || u.r.wLength > 2)
2423 goto do_stall;
2424
2425 if (readl (&e->regs->ep_rsp)
2426 & (1 << SET_ENDPOINT_HALT))
2427 status = __constant_cpu_to_le16 (1);
2428 else
2429 status = __constant_cpu_to_le16 (0);
2430
2431 /* don't bother with a request object! */
2432 writel (0, &dev->epregs [0].ep_irqenb);
2433 set_fifo_bytecount (ep, u.r.wLength);
2434 writel (status, &dev->epregs [0].ep_data);
2435 allow_status (ep);
2436 VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
2437 goto next_endpoints;
2438 }
2439 break;
2440 case USB_REQ_CLEAR_FEATURE: {
2441 struct net2280_ep *e;
2442
2443 /* hw handles device features */
2444 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2445 goto delegate;
2446 if (u.r.wValue != USB_ENDPOINT_HALT
2447 || u.r.wLength != 0)
2448 goto do_stall;
2449 if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
2450 goto do_stall;
2451 clear_halt (e);
2452 allow_status (ep);
2453 VDEBUG (dev, "%s clear halt\n", ep->ep.name);
2454 goto next_endpoints;
2455 }
2456 break;
2457 case USB_REQ_SET_FEATURE: {
2458 struct net2280_ep *e;
2459
2460 /* hw handles device features */
2461 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2462 goto delegate;
2463 if (u.r.wValue != USB_ENDPOINT_HALT
2464 || u.r.wLength != 0)
2465 goto do_stall;
2466 if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
2467 goto do_stall;
2468 set_halt (e);
2469 allow_status (ep);
2470 VDEBUG (dev, "%s set halt\n", ep->ep.name);
2471 goto next_endpoints;
2472 }
2473 break;
2474 default:
2475delegate:
2476 VDEBUG (dev, "setup %02x.%02x v%04x i%04x "
2477 "ep_cfg %08x\n",
2478 u.r.bRequestType, u.r.bRequest,
2479 u.r.wValue, u.r.wIndex,
2480 readl (&ep->regs->ep_cfg));
2481 spin_unlock (&dev->lock);
2482 tmp = dev->driver->setup (&dev->gadget, &u.r);
2483 spin_lock (&dev->lock);
2484 }
2485
2486 /* stall ep0 on error */
2487 if (tmp < 0) {
2488do_stall:
2489 VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
2490 u.r.bRequestType, u.r.bRequest, tmp);
2491 dev->protocol_stall = 1;
2492 }
2493
2494 /* some in/out token irq should follow; maybe stall then.
2495 * driver must queue a request (even zlp) or halt ep0
2496 * before the host times out.
2497 */
2498 }
2499
2500next_endpoints:
2501 /* endpoint data irq ? */
2502 scratch = stat & 0x7f;
2503 stat &= ~0x7f;
2504 for (num = 0; scratch; num++) {
2505 u32 t;
2506
2507 /* do this endpoint's FIFO and queue need tending? */
2508 t = 1 << num;
2509 if ((scratch & t) == 0)
2510 continue;
2511 scratch ^= t;
2512
2513 ep = &dev->ep [num];
2514 handle_ep_small (ep);
2515 }
2516
2517 if (stat)
2518 DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
2519}
2520
2521#define DMA_INTERRUPTS ( \
2522 (1 << DMA_D_INTERRUPT) \
2523 | (1 << DMA_C_INTERRUPT) \
2524 | (1 << DMA_B_INTERRUPT) \
2525 | (1 << DMA_A_INTERRUPT))
2526#define PCI_ERROR_INTERRUPTS ( \
2527 (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
2528 | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
2529 | (1 << PCI_RETRY_ABORT_INTERRUPT))
2530
2531static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
2532{
2533 struct net2280_ep *ep;
2534 u32 tmp, num, mask, scratch;
2535
2536 /* after disconnect there's nothing else to do! */
2537 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
2538 mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);
2539
2540 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
2541	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
2542 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
2543 * only indicates a change in the reset state).
2544 */
2545 if (stat & tmp) {
2546 writel (tmp, &dev->regs->irqstat1);
2547 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2548 ((readl (&dev->usb->usbstat) & mask) == 0))
2549 || ((readl (&dev->usb->usbctl) & (1 << VBUS_PIN)) == 0)
2550 ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
2551 DEBUG (dev, "disconnect %s\n",
2552 dev->driver->driver.name);
2553 stop_activity (dev, dev->driver);
2554 ep0_start (dev);
2555 return;
2556 }
2557 stat &= ~tmp;
2558
2559	/* VBUS can bounce ... one of many reasons to ignore the
2560 * notion of hotplug events on bus connect/disconnect!
2561 */
2562 if (!stat)
2563 return;
2564 }
2565
2566 /* NOTE: chip stays in PCI D0 state for now, but it could
2567 * enter D1 to save more power
2568 */
2569 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2570 if (stat & tmp) {
2571 writel (tmp, &dev->regs->irqstat1);
2572 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2573 if (dev->driver->suspend)
2574 dev->driver->suspend (&dev->gadget);
2575 if (!enable_suspend)
2576 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2577 } else {
2578 if (dev->driver->resume)
2579 dev->driver->resume (&dev->gadget);
2580 /* at high speed, note erratum 0133 */
2581 }
2582 stat &= ~tmp;
2583 }
2584
2585 /* clear any other status/irqs */
2586 if (stat)
2587 writel (stat, &dev->regs->irqstat1);
2588
2589 /* some status we can just ignore */
2590 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2591 | (1 << SUSPEND_REQUEST_INTERRUPT)
2592 | (1 << RESUME_INTERRUPT)
2593 | (1 << SOF_INTERRUPT));
2594 if (!stat)
2595 return;
2596 // DEBUG (dev, "irqstat1 %08x\n", stat);
2597
2598 /* DMA status, for ep-{a,b,c,d} */
2599 scratch = stat & DMA_INTERRUPTS;
2600 stat &= ~DMA_INTERRUPTS;
2601 scratch >>= 9;
2602 for (num = 0; scratch; num++) {
2603 struct net2280_dma_regs __iomem *dma;
2604
2605 tmp = 1 << num;
2606 if ((tmp & scratch) == 0)
2607 continue;
2608 scratch ^= tmp;
2609
2610 ep = &dev->ep [num + 1];
2611 dma = ep->dma;
2612
2613 if (!dma)
2614 continue;
2615
2616 /* clear ep's dma status */
2617 tmp = readl (&dma->dmastat);
2618 writel (tmp, &dma->dmastat);
2619
2620 /* chaining should stop on abort, short OUT from fifo,
2621 * or (stat0 codepath) short OUT transfer.
2622 */
2623 if (!use_dma_chaining) {
2624 if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
2625 == 0) {
2626 DEBUG (ep->dev, "%s no xact done? %08x\n",
2627 ep->ep.name, tmp);
2628 continue;
2629 }
2630 stop_dma (ep->dma);
2631 }
2632
2633 /* OUT transfers terminate when the data from the
2634 * host is in our memory. Process whatever's done.
2635		 * On this path, we know the transfer's last packet wasn't
2636 * less than req->length. NAK_OUT_PACKETS may be set,
2637 * or the FIFO may already be holding new packets.
2638 *
2639 * IN transfers can linger in the FIFO for a very
2640		 * long time ... we ignore that for now; accounting
2641		 * precisely (like PIO does) needs per-packet irqs.
2642 */
2643 scan_dma_completions (ep);
2644
2645 /* disable dma on inactive queues; else maybe restart */
2646 if (list_empty (&ep->queue)) {
2647 if (use_dma_chaining)
2648 stop_dma (ep->dma);
2649 } else {
2650 tmp = readl (&dma->dmactl);
2651 if (!use_dma_chaining
2652 || (tmp & (1 << DMA_ENABLE)) == 0)
2653 restart_dma (ep);
2654 else if (ep->is_in && use_dma_chaining) {
2655 struct net2280_request *req;
2656 u32 dmacount;
2657
2658 /* the descriptor at the head of the chain
2659 * may still have VALID_BIT clear; that's
2660 * used to trigger changing DMA_FIFO_VALIDATE
2661 * (affects automagic zlp writes).
2662 */
2663 req = list_entry (ep->queue.next,
2664 struct net2280_request, queue);
2665 dmacount = req->td->dmacount;
2666 dmacount &= __constant_cpu_to_le32 (
2667 (1 << VALID_BIT)
2668 | DMA_BYTE_COUNT_MASK);
2669 if (dmacount && (dmacount & valid_bit) == 0)
2670 restart_dma (ep);
2671 }
2672 }
2673 ep->irqs++;
2674 }
2675
2676 /* NOTE: there are other PCI errors we might usefully notice.
2677 * if they appear very often, here's where to try recovering.
2678 */
2679 if (stat & PCI_ERROR_INTERRUPTS) {
2680 ERROR (dev, "pci dma error; stat %08x\n", stat);
2681 stat &= ~PCI_ERROR_INTERRUPTS;
2682 /* these are fatal errors, but "maybe" they won't
2683 * happen again ...
2684 */
2685 stop_activity (dev, dev->driver);
2686 ep0_start (dev);
2687 stat = 0;
2688 }
2689
2690 if (stat)
2691 DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
2692}
2693
2694static irqreturn_t net2280_irq (int irq, void *_dev, struct pt_regs * r)
2695{
2696 struct net2280 *dev = _dev;
2697
2698 spin_lock (&dev->lock);
2699
2700 /* handle disconnect, dma, and more */
2701 handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
2702
2703 /* control requests and PIO */
2704 handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
2705
2706 spin_unlock (&dev->lock);
2707
2708 return IRQ_HANDLED;
2709}
2710
2711/*-------------------------------------------------------------------------*/
2712
2713static void gadget_release (struct device *_dev)
2714{
2715 struct net2280 *dev = dev_get_drvdata (_dev);
2716
2717 kfree (dev);
2718}
2719
2720/* tear down the binding between this driver and the pci device */
2721
2722static void net2280_remove (struct pci_dev *pdev)
2723{
2724 struct net2280 *dev = pci_get_drvdata (pdev);
2725
2726 /* start with the driver above us */
2727 if (dev->driver) {
2728 /* should have been done already by driver model core */
2729 WARN (dev, "pci remove, driver '%s' is still registered\n",
2730 dev->driver->driver.name);
2731 usb_gadget_unregister_driver (dev->driver);
2732 }
2733
2734 /* then clean up the resources we allocated during probe() */
2735 net2280_led_shutdown (dev);
2736 if (dev->requests) {
2737 int i;
2738 for (i = 1; i < 5; i++) {
2739 if (!dev->ep [i].dummy)
2740 continue;
2741 pci_pool_free (dev->requests, dev->ep [i].dummy,
2742 dev->ep [i].td_dma);
2743 }
2744 pci_pool_destroy (dev->requests);
2745 }
2746 if (dev->got_irq)
2747 free_irq (pdev->irq, dev);
2748 if (dev->regs)
2749 iounmap (dev->regs);
2750 if (dev->region)
2751 release_mem_region (pci_resource_start (pdev, 0),
2752 pci_resource_len (pdev, 0));
2753 if (dev->enabled)
2754 pci_disable_device (pdev);
2755 device_unregister (&dev->gadget.dev);
2756 device_remove_file (&pdev->dev, &dev_attr_registers);
2757 pci_set_drvdata (pdev, NULL);
2758
2759 INFO (dev, "unbind\n");
2760
2761 the_controller = NULL;
2762}
2763
2764/* wrap this driver around the specified device, but
2765 * don't respond over USB until a gadget driver binds to us.
2766 */
2767
2768static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
2769{
2770 struct net2280 *dev;
2771 unsigned long resource, len;
2772 void __iomem *base = NULL;
2773 int retval, i;
2774 char buf [8], *bufp;
2775
2776 /* if you want to support more than one controller in a system,
2777	 * usb_gadget_{register,unregister}_driver() must change.
2778 */
2779 if (the_controller) {
2780 dev_warn (&pdev->dev, "ignoring\n");
2781 return -EBUSY;
2782 }
2783
2784 /* alloc, and start init */
2785 dev = kmalloc (sizeof *dev, SLAB_KERNEL);
2786	if (dev == NULL) {
2787 retval = -ENOMEM;
2788 goto done;
2789 }
2790
2791 memset (dev, 0, sizeof *dev);
2792 spin_lock_init (&dev->lock);
2793 dev->pdev = pdev;
2794 dev->gadget.ops = &net2280_ops;
2795 dev->gadget.is_dualspeed = 1;
2796
2797 /* the "gadget" abstracts/virtualizes the controller */
2798 strcpy (dev->gadget.dev.bus_id, "gadget");
2799 dev->gadget.dev.parent = &pdev->dev;
2800 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2801 dev->gadget.dev.release = gadget_release;
2802 dev->gadget.name = driver_name;
2803
2804 /* now all the pci goodies ... */
2805 if (pci_enable_device (pdev) < 0) {
2806 retval = -ENODEV;
2807 goto done;
2808 }
2809 dev->enabled = 1;
2810
2811 /* BAR 0 holds all the registers
2812 * BAR 1 is 8051 memory; unused here (note erratum 0103)
2813 * BAR 2 is fifo memory; unused here
2814 */
2815 resource = pci_resource_start (pdev, 0);
2816 len = pci_resource_len (pdev, 0);
2817 if (!request_mem_region (resource, len, driver_name)) {
2818 DEBUG (dev, "controller already in use\n");
2819 retval = -EBUSY;
2820 goto done;
2821 }
2822 dev->region = 1;
2823
2824 base = ioremap_nocache (resource, len);
2825 if (base == NULL) {
2826 DEBUG (dev, "can't map memory\n");
2827 retval = -EFAULT;
2828 goto done;
2829 }
2830 dev->regs = (struct net2280_regs __iomem *) base;
2831 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
2832 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
2833 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
2834 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
2835 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
2836
2837 /* put into initial config, link up all endpoints */
2838 writel (0, &dev->usb->usbctl);
2839 usb_reset (dev);
2840 usb_reinit (dev);
2841
2842 /* irq setup after old hardware is cleaned up */
2843 if (!pdev->irq) {
2844 ERROR (dev, "No IRQ. Check PCI setup!\n");
2845 retval = -ENODEV;
2846 goto done;
2847 }
2848#ifndef __sparc__
2849 scnprintf (buf, sizeof buf, "%d", pdev->irq);
2850 bufp = buf;
2851#else
2852 bufp = __irq_itoa(pdev->irq);
2853#endif
2854 if (request_irq (pdev->irq, net2280_irq, SA_SHIRQ, driver_name, dev)
2855 != 0) {
2856 ERROR (dev, "request interrupt %s failed\n", bufp);
2857 retval = -EBUSY;
2858 goto done;
2859 }
2860 dev->got_irq = 1;
2861
2862 /* DMA setup */
2863 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
2864 dev->requests = pci_pool_create ("requests", pdev,
2865 sizeof (struct net2280_dma),
2866 0 /* no alignment requirements */,
2867 0 /* or page-crossing issues */);
2868 if (!dev->requests) {
2869 DEBUG (dev, "can't get request pool\n");
2870 retval = -ENOMEM;
2871 goto done;
2872 }
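	/* dummy descriptors for the four dma-capable endpoints (ep-a..ep-d) */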
2873 for (i = 1; i < 5; i++) {
2874 struct net2280_dma *td;
2875
2876 td = pci_pool_alloc (dev->requests, GFP_KERNEL,
2877 &dev->ep [i].td_dma);
2878 if (!td) {
2879 DEBUG (dev, "can't get dummy %d\n", i);
2880 retval = -ENOMEM;
2881 goto done;
2882 }
2883 td->dmacount = 0; /* not VALID */
2884 td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
2885 td->dmadesc = td->dmaaddr;
2886 dev->ep [i].dummy = td;
2887 }
2888
2889 /* enable lower-overhead pci memory bursts during DMA */
2890 writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
2891 // 256 write retries may not be enough...
2892 // | (1 << PCI_RETRY_ABORT_ENABLE)
2893 | (1 << DMA_READ_MULTIPLE_ENABLE)
2894 | (1 << DMA_READ_LINE_ENABLE)
2895 , &dev->pci->pcimstctl);
2896 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
2897 pci_set_master (pdev);
2898 pci_set_mwi (pdev);
2899
2900 /* ... also flushes any posted pci writes */
2901 dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
2902
2903 /* done */
2904 pci_set_drvdata (pdev, dev);
2905 INFO (dev, "%s\n", driver_desc);
2906 INFO (dev, "irq %s, pci mem %p, chip rev %04x\n",
2907 bufp, base, dev->chiprev);
2908 INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
2909 use_dma
2910 ? (use_dma_chaining ? "chaining" : "enabled")
2911 : "disabled");
2912 the_controller = dev;
2913
2914 device_register (&dev->gadget.dev);
2915 device_create_file (&pdev->dev, &dev_attr_registers);
2916
2917 return 0;
2918
2919done:
2920 if (dev)
2921 net2280_remove (pdev);
2922 return retval;
2923}
2924
2925
2926/*-------------------------------------------------------------------------*/
2927
2928static struct pci_device_id pci_ids [] = { {
2929 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
2930 .class_mask = ~0,
2931 .vendor = 0x17cc,
2932 .device = 0x2280,
2933 .subvendor = PCI_ANY_ID,
2934 .subdevice = PCI_ANY_ID,
2935
2936}, { /* end: all zeroes */ }
2937};
2938MODULE_DEVICE_TABLE (pci, pci_ids);
2939
2940/* pci driver glue; this is a "new style" PCI driver module */
2941static struct pci_driver net2280_pci_driver = {
2942 .name = (char *) driver_name,
2943 .id_table = pci_ids,
2944
2945 .probe = net2280_probe,
2946 .remove = net2280_remove,
2947
2948 /* FIXME add power management support */
2949};
2950
2951MODULE_DESCRIPTION (DRIVER_DESC);
2952MODULE_AUTHOR ("David Brownell");
2953MODULE_LICENSE ("GPL");
2954
2955static int __init init (void)
2956{
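	/* DMA chaining only makes sense when DMA itself is enabled */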
2957 if (!use_dma)
2958 use_dma_chaining = 0;
2959 return pci_register_driver (&net2280_pci_driver);
2960}
2961module_init (init);
2962
2963static void __exit cleanup (void)
2964{
2965 pci_unregister_driver (&net2280_pci_driver);
2966}
2967module_exit (cleanup);
diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h
new file mode 100644
index 000000000000..fff4509cf340
--- /dev/null
+++ b/drivers/usb/gadget/net2280.h
@@ -0,0 +1,728 @@
1/*
2 * NetChip 2280 high/full speed USB device controller.
3 * Unlike many such controllers, this one talks PCI.
4 */
5
6/*
7 * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
8 * Copyright (C) 2003 David Brownell
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25/*-------------------------------------------------------------------------*/
26
27/* NET2280 MEMORY MAPPED REGISTERS
28 *
29 * The register layout came from the chip documentation, and the bit
30 * number definitions were extracted from chip specification.
31 *
32 * Use the shift operator ('<<') to build bit masks, with readl/writel
33 * to access the registers through PCI.
34 */
35
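/* Illustrative sketch, not part of the original header: the names below
 * are bit positions, so callers build masks with '<<' and go through
 * readl()/writel() for every access.  example_fifo_soft_reset() is a
 * hypothetical helper, kept under "#if 0" so it is never compiled.
 */
#if 0
static inline void example_fifo_soft_reset (struct net2280_regs __iomem *regs)
{
	u32	tmp = readl (&regs->devinit);

	/* set one bit of devinit by shifting its bit number into a mask */
	writel (tmp | (1 << FIFO_SOFT_RESET), &regs->devinit);
}
#endif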
36/* main registers, BAR0 + 0x0000 */
37struct net2280_regs {
38 // offset 0x0000
39 u32 devinit;
40#define LOCAL_CLOCK_FREQUENCY 8
41#define FORCE_PCI_RESET 7
42#define PCI_ID 6
43#define PCI_ENABLE 5
44#define FIFO_SOFT_RESET 4
45#define CFG_SOFT_RESET 3
46#define PCI_SOFT_RESET 2
47#define USB_SOFT_RESET 1
48#define M8051_RESET 0
49 u32 eectl;
50#define EEPROM_ADDRESS_WIDTH 23
51#define EEPROM_CHIP_SELECT_ACTIVE 22
52#define EEPROM_PRESENT 21
53#define EEPROM_VALID 20
54#define EEPROM_BUSY 19
55#define EEPROM_CHIP_SELECT_ENABLE 18
56#define EEPROM_BYTE_READ_START 17
57#define EEPROM_BYTE_WRITE_START 16
58#define EEPROM_READ_DATA 8
59#define EEPROM_WRITE_DATA 0
60 u32 eeclkfreq;
61 u32 _unused0;
62 // offset 0x0010
63
64 u32 pciirqenb0; /* interrupt PCI master ... */
65#define SETUP_PACKET_INTERRUPT_ENABLE 7
66#define ENDPOINT_F_INTERRUPT_ENABLE 6
67#define ENDPOINT_E_INTERRUPT_ENABLE 5
68#define ENDPOINT_D_INTERRUPT_ENABLE 4
69#define ENDPOINT_C_INTERRUPT_ENABLE 3
70#define ENDPOINT_B_INTERRUPT_ENABLE 2
71#define ENDPOINT_A_INTERRUPT_ENABLE 1
72#define ENDPOINT_0_INTERRUPT_ENABLE 0
73 u32 pciirqenb1;
74#define PCI_INTERRUPT_ENABLE 31
75#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
76#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
77#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
78#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
79#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
80#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18
81#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
82#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
83#define GPIO_INTERRUPT_ENABLE 13
84#define DMA_D_INTERRUPT_ENABLE 12
85#define DMA_C_INTERRUPT_ENABLE 11
86#define DMA_B_INTERRUPT_ENABLE 10
87#define DMA_A_INTERRUPT_ENABLE 9
88#define EEPROM_DONE_INTERRUPT_ENABLE 8
89#define VBUS_INTERRUPT_ENABLE 7
90#define CONTROL_STATUS_INTERRUPT_ENABLE 6
91#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
92#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
93#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
94#define RESUME_INTERRUPT_ENABLE 1
95#define SOF_INTERRUPT_ENABLE 0
96 u32 cpu_irqenb0; /* ... or onboard 8051 */
97#define SETUP_PACKET_INTERRUPT_ENABLE 7
98#define ENDPOINT_F_INTERRUPT_ENABLE 6
99#define ENDPOINT_E_INTERRUPT_ENABLE 5
100#define ENDPOINT_D_INTERRUPT_ENABLE 4
101#define ENDPOINT_C_INTERRUPT_ENABLE 3
102#define ENDPOINT_B_INTERRUPT_ENABLE 2
103#define ENDPOINT_A_INTERRUPT_ENABLE 1
104#define ENDPOINT_0_INTERRUPT_ENABLE 0
105 u32 cpu_irqenb1;
106#define CPU_INTERRUPT_ENABLE 31
107#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
108#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
109#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
110#define PCI_INTA_INTERRUPT_ENABLE 24
111#define PCI_PME_INTERRUPT_ENABLE 23
112#define PCI_SERR_INTERRUPT_ENABLE 22
113#define PCI_PERR_INTERRUPT_ENABLE 21
114#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
115#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
116#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
117#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
118#define GPIO_INTERRUPT_ENABLE 13
119#define DMA_D_INTERRUPT_ENABLE 12
120#define DMA_C_INTERRUPT_ENABLE 11
121#define DMA_B_INTERRUPT_ENABLE 10
122#define DMA_A_INTERRUPT_ENABLE 9
123#define EEPROM_DONE_INTERRUPT_ENABLE 8
124#define VBUS_INTERRUPT_ENABLE 7
125#define CONTROL_STATUS_INTERRUPT_ENABLE 6
126#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
127#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
128#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
129#define RESUME_INTERRUPT_ENABLE 1
130#define SOF_INTERRUPT_ENABLE 0
131
132 // offset 0x0020
133 u32 _unused1;
134 u32 usbirqenb1;
135#define USB_INTERRUPT_ENABLE 31
136#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
137#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
138#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
139#define PCI_INTA_INTERRUPT_ENABLE 24
140#define PCI_PME_INTERRUPT_ENABLE 23
141#define PCI_SERR_INTERRUPT_ENABLE 22
142#define PCI_PERR_INTERRUPT_ENABLE 21
143#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
144#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
145#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
146#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
147#define GPIO_INTERRUPT_ENABLE 13
148#define DMA_D_INTERRUPT_ENABLE 12
149#define DMA_C_INTERRUPT_ENABLE 11
150#define DMA_B_INTERRUPT_ENABLE 10
151#define DMA_A_INTERRUPT_ENABLE 9
152#define EEPROM_DONE_INTERRUPT_ENABLE 8
153#define VBUS_INTERRUPT_ENABLE 7
154#define CONTROL_STATUS_INTERRUPT_ENABLE 6
155#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
156#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
157#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
158#define RESUME_INTERRUPT_ENABLE 1
159#define SOF_INTERRUPT_ENABLE 0
160 u32 irqstat0;
161#define INTA_ASSERTED 12
162#define SETUP_PACKET_INTERRUPT 7
163#define ENDPOINT_F_INTERRUPT 6
164#define ENDPOINT_E_INTERRUPT 5
165#define ENDPOINT_D_INTERRUPT 4
166#define ENDPOINT_C_INTERRUPT 3
167#define ENDPOINT_B_INTERRUPT 2
168#define ENDPOINT_A_INTERRUPT 1
169#define ENDPOINT_0_INTERRUPT 0
170 u32 irqstat1;
171#define POWER_STATE_CHANGE_INTERRUPT 27
172#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
173#define PCI_PARITY_ERROR_INTERRUPT 25
174#define PCI_INTA_INTERRUPT 24
175#define PCI_PME_INTERRUPT 23
176#define PCI_SERR_INTERRUPT 22
177#define PCI_PERR_INTERRUPT 21
178#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20
179#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19
180#define PCI_RETRY_ABORT_INTERRUPT 17
181#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16
182#define GPIO_INTERRUPT 13
183#define DMA_D_INTERRUPT 12
184#define DMA_C_INTERRUPT 11
185#define DMA_B_INTERRUPT 10
186#define DMA_A_INTERRUPT 9
187#define EEPROM_DONE_INTERRUPT 8
188#define VBUS_INTERRUPT 7
189#define CONTROL_STATUS_INTERRUPT 6
190#define ROOT_PORT_RESET_INTERRUPT 4
191#define SUSPEND_REQUEST_INTERRUPT 3
192#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2
193#define RESUME_INTERRUPT 1
194#define SOF_INTERRUPT 0
195 // offset 0x0030
196 u32 idxaddr;
197 u32 idxdata;
198 u32 fifoctl;
199#define PCI_BASE2_RANGE 16
200#define IGNORE_FIFO_AVAILABILITY 3
201#define PCI_BASE2_SELECT 2
202#define FIFO_CONFIGURATION_SELECT 0
203 u32 _unused2;
204 // offset 0x0040
205 u32 memaddr;
206#define START 28
207#define DIRECTION 27
208#define FIFO_DIAGNOSTIC_SELECT 24
209#define MEMORY_ADDRESS 0
210 u32 memdata0;
211 u32 memdata1;
212 u32 _unused3;
213 // offset 0x0050
214 u32 gpioctl;
215#define GPIO3_LED_SELECT 12
216#define GPIO3_INTERRUPT_ENABLE 11
217#define GPIO2_INTERRUPT_ENABLE 10
218#define GPIO1_INTERRUPT_ENABLE 9
219#define GPIO0_INTERRUPT_ENABLE 8
220#define GPIO3_OUTPUT_ENABLE 7
221#define GPIO2_OUTPUT_ENABLE 6
222#define GPIO1_OUTPUT_ENABLE 5
223#define GPIO0_OUTPUT_ENABLE 4
224#define GPIO3_DATA 3
225#define GPIO2_DATA 2
226#define GPIO1_DATA 1
227#define GPIO0_DATA 0
228 u32 gpiostat;
229#define GPIO3_INTERRUPT 3
230#define GPIO2_INTERRUPT 2
231#define GPIO1_INTERRUPT 1
232#define GPIO0_INTERRUPT 0
233} __attribute__ ((packed));
234
235/* usb control, BAR0 + 0x0080 */
236struct net2280_usb_regs {
237 // offset 0x0080
238 u32 stdrsp;
239#define STALL_UNSUPPORTED_REQUESTS 31
240#define SET_TEST_MODE 16
241#define GET_OTHER_SPEED_CONFIGURATION 15
242#define GET_DEVICE_QUALIFIER 14
243#define SET_ADDRESS 13
244#define ENDPOINT_SET_CLEAR_HALT 12
245#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11
246#define GET_STRING_DESCRIPTOR_2 10
247#define GET_STRING_DESCRIPTOR_1 9
248#define GET_STRING_DESCRIPTOR_0 8
249#define GET_SET_INTERFACE 6
250#define GET_SET_CONFIGURATION 5
251#define GET_CONFIGURATION_DESCRIPTOR 4
252#define GET_DEVICE_DESCRIPTOR 3
253#define GET_ENDPOINT_STATUS 2
254#define GET_INTERFACE_STATUS 1
255#define GET_DEVICE_STATUS 0
256 u32 prodvendid;
257#define PRODUCT_ID 16
258#define VENDOR_ID 0
259 u32 relnum;
260 u32 usbctl;
261#define SERIAL_NUMBER_INDEX 16
262#define PRODUCT_ID_STRING_ENABLE 13
263#define VENDOR_ID_STRING_ENABLE 12
264#define USB_ROOT_PORT_WAKEUP_ENABLE 11
265#define VBUS_PIN 10
266#define TIMED_DISCONNECT 9
267#define SUSPEND_IMMEDIATELY 7
268#define SELF_POWERED_USB_DEVICE 6
269#define REMOTE_WAKEUP_SUPPORT 5
270#define PME_POLARITY 4
271#define USB_DETECT_ENABLE 3
272#define PME_WAKEUP_ENABLE 2
273#define DEVICE_REMOTE_WAKEUP_ENABLE 1
274#define SELF_POWERED_STATUS 0
275 // offset 0x0090
276 u32 usbstat;
277#define HIGH_SPEED 7
278#define FULL_SPEED 6
279#define GENERATE_RESUME 5
280#define GENERATE_DEVICE_REMOTE_WAKEUP 4
281 u32 xcvrdiag;
282#define FORCE_HIGH_SPEED_MODE 31
283#define FORCE_FULL_SPEED_MODE 30
284#define USB_TEST_MODE 24
285#define LINE_STATE 16
286#define TRANSCEIVER_OPERATION_MODE 2
287#define TRANSCEIVER_SELECT 1
288#define TERMINATION_SELECT 0
289 u32 setup0123;
290 u32 setup4567;
291	// offset 0x00a0
292 u32 _unused0;
293 u32 ouraddr;
294#define FORCE_IMMEDIATE 7
295#define OUR_USB_ADDRESS 0
296 u32 ourconfig;
297} __attribute__ ((packed));
298
299/* pci control, BAR0 + 0x0100 */
300struct net2280_pci_regs {
301 // offset 0x0100
302 u32 pcimstctl;
303#define PCI_ARBITER_PARK_SELECT 13
304#define PCI_MULTI_LEVEL_ARBITER				12
305#define PCI_RETRY_ABORT_ENABLE 11
306#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10
307#define DMA_READ_MULTIPLE_ENABLE 9
308#define DMA_READ_LINE_ENABLE 8
309#define PCI_MASTER_COMMAND_SELECT 6
310#define MEM_READ_OR_WRITE 0
311#define IO_READ_OR_WRITE 1
312#define CFG_READ_OR_WRITE 2
313#define PCI_MASTER_START 5
314#define PCI_MASTER_READ_WRITE 4
315#define PCI_MASTER_WRITE 0
316#define PCI_MASTER_READ 1
317#define PCI_MASTER_BYTE_WRITE_ENABLES 0
318 u32 pcimstaddr;
319 u32 pcimstdata;
320 u32 pcimststat;
321#define PCI_ARBITER_CLEAR 2
322#define PCI_EXTERNAL_ARBITER 1
323#define PCI_HOST_MODE 0
324} __attribute__ ((packed));
325
326/* dma control, BAR0 + 0x0180 ... array of four structs like this,
327 * for channels 0..3. see also struct net2280_dma: descriptor
328 * that can be loaded into some of these registers.
329 */
330struct net2280_dma_regs { /* [11.7] */
331 // offset 0x0180, 0x01a0, 0x01c0, 0x01e0,
332 u32 dmactl;
333#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25
334#define DMA_CLEAR_COUNT_ENABLE 21
335#define DESCRIPTOR_POLLING_RATE 19
336#define POLL_CONTINUOUS 0
337#define POLL_1_USEC 1
338#define POLL_100_USEC 2
339#define POLL_1_MSEC 3
340#define DMA_VALID_BIT_POLLING_ENABLE 18
341#define DMA_VALID_BIT_ENABLE 17
342#define DMA_SCATTER_GATHER_ENABLE 16
343#define DMA_OUT_AUTO_START_ENABLE 4
344#define DMA_PREEMPT_ENABLE 3
345#define DMA_FIFO_VALIDATE 2
346#define DMA_ENABLE 1
347#define DMA_ADDRESS_HOLD 0
348 u32 dmastat;
349#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25
350#define DMA_TRANSACTION_DONE_INTERRUPT 24
351#define DMA_ABORT 1
352#define DMA_START 0
353 u32 _unused0 [2];
354 // offset 0x0190, 0x01b0, 0x01d0, 0x01f0,
355 u32 dmacount;
356#define VALID_BIT 31
357#define DMA_DIRECTION 30
358#define DMA_DONE_INTERRUPT_ENABLE 29
359#define END_OF_CHAIN 28
360#define DMA_BYTE_COUNT_MASK ((1<<24)-1)
361#define DMA_BYTE_COUNT 0
362 u32 dmaaddr;
363 u32 dmadesc;
364 u32 _unused1;
365} __attribute__ ((packed));
366
367/* dedicated endpoint registers, BAR0 + 0x0200 */
368
369struct net2280_dep_regs { /* [11.8] */
370 // offset 0x0200, 0x0210, 0x220, 0x230, 0x240
371 u32 dep_cfg;
372 // offset 0x0204, 0x0214, 0x224, 0x234, 0x244
373 u32 dep_rsp;
374 u32 _unused [2];
375} __attribute__ ((packed));
376
377/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
378 * like this, for ep0 then the configurable endpoints A..F.
379 * ep0 is reserved for control; E and F have only 64 bytes of fifo.
380 */
381struct net2280_ep_regs { /* [11.9] */
382 // offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0
383 u32 ep_cfg;
384#define ENDPOINT_BYTE_COUNT 16
385#define ENDPOINT_ENABLE 10
386#define ENDPOINT_TYPE 8
387#define ENDPOINT_DIRECTION 7
388#define ENDPOINT_NUMBER 0
389 u32 ep_rsp;
390#define SET_NAK_OUT_PACKETS 15
391#define SET_EP_HIDE_STATUS_PHASE 14
392#define SET_EP_FORCE_CRC_ERROR 13
393#define SET_INTERRUPT_MODE 12
394#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11
395#define SET_NAK_OUT_PACKETS_MODE 10
396#define SET_ENDPOINT_TOGGLE 9
397#define SET_ENDPOINT_HALT 8
398#define CLEAR_NAK_OUT_PACKETS 7
399#define CLEAR_EP_HIDE_STATUS_PHASE 6
400#define CLEAR_EP_FORCE_CRC_ERROR 5
401#define CLEAR_INTERRUPT_MODE 4
402#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3
403#define CLEAR_NAK_OUT_PACKETS_MODE 2
404#define CLEAR_ENDPOINT_TOGGLE 1
405#define CLEAR_ENDPOINT_HALT 0
406 u32 ep_irqenb;
407#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6
408#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5
409#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
410#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
411#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1
412#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
413 u32 ep_stat;
414#define FIFO_VALID_COUNT 24
415#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22
416#define TIMEOUT 21
417#define USB_STALL_SENT 20
418#define USB_IN_NAK_SENT 19
419#define USB_IN_ACK_RCVD 18
420#define USB_OUT_PING_NAK_SENT 17
421#define USB_OUT_ACK_SENT 16
422#define FIFO_OVERFLOW 13
423#define FIFO_UNDERFLOW 12
424#define FIFO_FULL 11
425#define FIFO_EMPTY 10
426#define FIFO_FLUSH 9
427#define SHORT_PACKET_OUT_DONE_INTERRUPT 6
428#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5
429#define NAK_OUT_PACKETS 4
430#define DATA_PACKET_RECEIVED_INTERRUPT 3
431#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
432#define DATA_OUT_PING_TOKEN_INTERRUPT 1
433#define DATA_IN_TOKEN_INTERRUPT 0
434 // offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0
435 u32 ep_avail;
436 u32 ep_data;
437 u32 _unused0 [2];
438} __attribute__ ((packed));
439
440/*-------------------------------------------------------------------------*/
441
442#ifdef __KERNEL__
443
444/* indexed registers [11.10] are accessed indirectly;
445 * the caller must own the device lock.
446 */
447
448static inline u32
449get_idx_reg (struct net2280_regs __iomem *regs, u32 index)
450{
451 writel (index, &regs->idxaddr);
452 /* NOTE: synchs device/cpu memory views */
453 return readl (&regs->idxdata);
454}
455
456static inline void
457set_idx_reg (struct net2280_regs __iomem *regs, u32 index, u32 value)
458{
459 writel (index, &regs->idxaddr);
460 writel (value, &regs->idxdata);
461 /* posted, may not be visible yet */
462}
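/* Usage sketch, not from this driver: reading the chip revision through
 * the indexed-register window, as the probe code in net2280.c does.
 * example_read_chiprev() is hypothetical; REG_CHIPREV and struct net2280
 * are defined further down in this file, and the caller is assumed to
 * hold dev->lock (or to be in single-threaded probe code).
 */
#if 0
static u16 example_read_chiprev (struct net2280 *dev)
{
	return get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
}
#endif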
463
464#endif /* __KERNEL__ */
465
466
467#define REG_DIAG 0x0
468#define RETRY_COUNTER 16
469#define FORCE_PCI_SERR 11
470#define FORCE_PCI_INTERRUPT 10
471#define FORCE_USB_INTERRUPT 9
472#define FORCE_CPU_INTERRUPT 8
473#define ILLEGAL_BYTE_ENABLES 5
474#define FAST_TIMES 4
475#define FORCE_RECEIVE_ERROR 2
476#define FORCE_TRANSMIT_CRC_ERROR 0
477#define REG_FRAME 0x02 /* from last sof */
478#define REG_CHIPREV 0x03 /* in bcd */
479#define REG_HS_NAK_RATE 0x0a /* NAK per N uframes */
480
481#define CHIPREV_1 0x0100
482#define CHIPREV_1A 0x0110
483
484#ifdef __KERNEL__
485
486/* ep a-f highspeed and fullspeed maxpacket, addresses
487 * computed from ep->num
488 */
489#define REG_EP_MAXPKT(dev,num) (((num) + 1) * 0x10 + \
490 (((dev)->gadget.speed == USB_SPEED_HIGH) ? 0 : 1))
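/* Illustrative sketch, not from this driver: the per-endpoint maxpacket
 * registers live behind the indexed window, so REG_EP_MAXPKT() would
 * normally be paired with set_idx_reg() above while holding dev->lock.
 * example_write_maxpacket() is a hypothetical helper.
 */
#if 0
static void example_write_maxpacket (struct net2280_ep *ep, u16 maxpacket)
{
	set_idx_reg (ep->dev->regs,
			REG_EP_MAXPKT (ep->dev, ep->num), maxpacket);
}
#endif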
491
492/*-------------------------------------------------------------------------*/
493
494/* [8.3] for scatter/gather i/o
495 * use struct net2280_dma_regs bitfields
496 */
497struct net2280_dma {
498 __le32 dmacount;
499 __le32 dmaaddr; /* the buffer */
500 __le32 dmadesc; /* next dma descriptor */
501 __le32 _reserved;
502} __attribute__ ((aligned (16)));
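/* Illustrative sketch, not from this driver: filling one scatter/gather
 * descriptor.  The dmacount word reuses the net2280_dma_regs bit names
 * (VALID_BIT, DMA_DONE_INTERRUPT_ENABLE, the 24-bit byte count), and
 * dmadesc holds the bus address of the next descriptor in the chain.
 * example_fill_desc() is hypothetical; len must fit in 24 bits.
 */
#if 0
static void example_fill_desc (struct net2280_dma *td, dma_addr_t buf,
		u32 len, dma_addr_t next)
{
	td->dmaaddr = cpu_to_le32 (buf);
	td->dmadesc = cpu_to_le32 (next);
	td->dmacount = cpu_to_le32 (len		/* DMA_BYTE_COUNT field */
			| (1 << DMA_DONE_INTERRUPT_ENABLE)
			| (1 << VALID_BIT));
}
#endif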
503
504/*-------------------------------------------------------------------------*/
505
506/* DRIVER DATA STRUCTURES and UTILITIES */
507
508struct net2280_ep {
509 struct usb_ep ep;
510 struct net2280_ep_regs __iomem *regs;
511 struct net2280_dma_regs __iomem *dma;
512 struct net2280_dma *dummy;
513 dma_addr_t td_dma; /* of dummy */
514 struct net2280 *dev;
515 unsigned long irqs;
516
517 /* analogous to a host-side qh */
518 struct list_head queue;
519 const struct usb_endpoint_descriptor *desc;
520 unsigned num : 8,
521 fifo_size : 12,
522 in_fifo_validate : 1,
523 out_overflow : 1,
524 stopped : 1,
525 is_in : 1,
526 is_iso : 1;
527};
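/* Illustrative sketch, not from this driver: the irq and queue paths in
 * net2280.c work on the request at the head of ep->queue, roughly like
 * this hypothetical helper (caller holds dev->lock).
 */
#if 0
static struct net2280_request *example_head_request (struct net2280_ep *ep)
{
	if (list_empty (&ep->queue))
		return NULL;
	return list_entry (ep->queue.next, struct net2280_request, queue);
}
#endif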
528
529static inline void allow_status (struct net2280_ep *ep)
530{
531 /* ep0 only */
532 writel ( (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
533 | (1 << CLEAR_NAK_OUT_PACKETS)
534 | (1 << CLEAR_NAK_OUT_PACKETS_MODE)
535 , &ep->regs->ep_rsp);
536 ep->stopped = 1;
537}
538
539/* count (<= 4) bytes in the next fifo write will be valid */
540static inline void set_fifo_bytecount (struct net2280_ep *ep, unsigned count)
541{
542 writeb (count, 2 + (u8 __iomem *) &ep->regs->ep_cfg);
543}
544
545struct net2280_request {
546 struct usb_request req;
547 struct net2280_dma *td;
548 dma_addr_t td_dma;
549 struct list_head queue;
550 unsigned mapped : 1,
551 valid : 1;
552};
553
554struct net2280 {
555 /* each pci device provides one gadget, several endpoints */
556 struct usb_gadget gadget;
557 spinlock_t lock;
558 struct net2280_ep ep [7];
559 struct usb_gadget_driver *driver;
560 unsigned enabled : 1,
561 protocol_stall : 1,
562 softconnect : 1,
563 got_irq : 1,
564 region : 1;
565 u16 chiprev;
566
567 /* pci state used to access those endpoints */
568 struct pci_dev *pdev;
569 struct net2280_regs __iomem *regs;
570 struct net2280_usb_regs __iomem *usb;
571 struct net2280_pci_regs __iomem *pci;
572 struct net2280_dma_regs __iomem *dma;
573 struct net2280_dep_regs __iomem *dep;
574 struct net2280_ep_regs __iomem *epregs;
575
576 struct pci_pool *requests;
577 // statistics...
578};
579
580static inline void set_halt (struct net2280_ep *ep)
581{
582 /* ep0 and bulk/intr endpoints */
583 writel ( (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
584 /* set NAK_OUT for erratum 0114 */
585 | ((ep->dev->chiprev == CHIPREV_1) << SET_NAK_OUT_PACKETS)
586 | (1 << SET_ENDPOINT_HALT)
587 , &ep->regs->ep_rsp);
588}
589
590static inline void clear_halt (struct net2280_ep *ep)
591{
592 /* ep0 and bulk/intr endpoints */
593 writel ( (1 << CLEAR_ENDPOINT_HALT)
594 | (1 << CLEAR_ENDPOINT_TOGGLE)
595 /* unless the gadget driver left a short packet in the
596 * fifo, this reverses the erratum 0114 workaround.
597 */
598 | ((ep->dev->chiprev == CHIPREV_1) << CLEAR_NAK_OUT_PACKETS)
599 , &ep->regs->ep_rsp);
600}
601
602#ifdef USE_RDK_LEDS
603
604static inline void net2280_led_init (struct net2280 *dev)
605{
606 /* LED3 (green) is on during USB activity. note erratum 0113. */
607 writel ((1 << GPIO3_LED_SELECT)
608 | (1 << GPIO3_OUTPUT_ENABLE)
609 | (1 << GPIO2_OUTPUT_ENABLE)
610 | (1 << GPIO1_OUTPUT_ENABLE)
611 | (1 << GPIO0_OUTPUT_ENABLE)
612 , &dev->regs->gpioctl);
613}
614
615/* indicate speed with bi-color LED 0/1 */
616static inline
617void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed)
618{
619 u32 val = readl (&dev->regs->gpioctl);
620 switch (speed) {
621 case USB_SPEED_HIGH: /* green */
622 val &= ~(1 << GPIO0_DATA);
623 val |= (1 << GPIO1_DATA);
624 break;
625 case USB_SPEED_FULL: /* red */
626 val &= ~(1 << GPIO1_DATA);
627 val |= (1 << GPIO0_DATA);
628 break;
629 default: /* (off/black) */
630 val &= ~((1 << GPIO1_DATA) | (1 << GPIO0_DATA));
631 break;
632 }
633 writel (val, &dev->regs->gpioctl);
634}
635
636/* indicate power with LED 2 */
637static inline void net2280_led_active (struct net2280 *dev, int is_active)
638{
639 u32 val = readl (&dev->regs->gpioctl);
640
641	// shift GPIO2_DATA (a bit number) into a mask, like the other helpers
642	if (is_active)
643		val |= (1 << GPIO2_DATA);
644	else
645		val &= ~(1 << GPIO2_DATA);
646 writel (val, &dev->regs->gpioctl);
647}
648static inline void net2280_led_shutdown (struct net2280 *dev)
649{
650 /* turn off all four GPIO*_DATA bits */
651 writel (readl (&dev->regs->gpioctl) & ~0x0f,
652 &dev->regs->gpioctl);
653}
654
655#else
656
657#define net2280_led_init(dev) do { } while (0)
658#define net2280_led_speed(dev, speed)	do { } while (0)
#define net2280_led_active(dev, is_active)	do { } while (0)
659#define net2280_led_shutdown(dev)	do { } while (0)
660
661#endif
662
663/*-------------------------------------------------------------------------*/
664
665#define xprintk(dev,level,fmt,args...) \
666 printk(level "%s %s: " fmt , driver_name , \
667 pci_name(dev->pdev) , ## args)
668
669#ifdef DEBUG
670#undef DEBUG
671#define DEBUG(dev,fmt,args...) \
672 xprintk(dev , KERN_DEBUG , fmt , ## args)
673#else
674#define DEBUG(dev,fmt,args...) \
675 do { } while (0)
676#endif /* DEBUG */
677
678#ifdef VERBOSE
679#define VDEBUG DEBUG
680#else
681#define VDEBUG(dev,fmt,args...) \
682 do { } while (0)
683#endif /* VERBOSE */
684
685#define ERROR(dev,fmt,args...) \
686 xprintk(dev , KERN_ERR , fmt , ## args)
687#define WARN(dev,fmt,args...) \
688 xprintk(dev , KERN_WARNING , fmt , ## args)
689#define INFO(dev,fmt,args...) \
690 xprintk(dev , KERN_INFO , fmt , ## args)
691
692/*-------------------------------------------------------------------------*/
693
694static inline void start_out_naking (struct net2280_ep *ep)
695{
696 /* NOTE: hardware races lurk here, and PING protocol issues */
697 writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
698 /* synch with device */
699 readl (&ep->regs->ep_rsp);
700}
701
702#ifdef DEBUG
703static inline void assert_out_naking (struct net2280_ep *ep, const char *where)
704{
705 u32 tmp = readl (&ep->regs->ep_stat);
706
707 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
708 DEBUG (ep->dev, "%s %s %08x !NAK\n",
709 ep->ep.name, where, tmp);
710 writel ((1 << SET_NAK_OUT_PACKETS),
711 &ep->regs->ep_rsp);
712 }
713}
714#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep,__FUNCTION__)
715#else
716#define ASSERT_OUT_NAKING(ep) do {} while (0)
717#endif
718
719static inline void stop_out_naking (struct net2280_ep *ep)
720{
721 u32 tmp;
722
723 tmp = readl (&ep->regs->ep_stat);
724 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
725 writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
726}
727
728#endif /* __KERNEL__ */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
new file mode 100644
index 000000000000..b66ea5a6ed79
--- /dev/null
+++ b/drivers/usb/gadget/omap_udc.c
@@ -0,0 +1,2872 @@
1/*
2 * omap_udc.c -- for OMAP full speed udc; most chips support OTG.
3 *
4 * Copyright (C) 2004 Texas Instruments, Inc.
5 * Copyright (C) 2004-2005 David Brownell
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#undef DEBUG
23#undef VERBOSE
24
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/ioport.h>
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/delay.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/init.h>
35#include <linux/timer.h>
36#include <linux/list.h>
37#include <linux/interrupt.h>
38#include <linux/proc_fs.h>
39#include <linux/mm.h>
40#include <linux/moduleparam.h>
41#include <linux/device.h>
42#include <linux/usb_ch9.h>
43#include <linux/usb_gadget.h>
44#include <linux/usb_otg.h>
45#include <linux/dma-mapping.h>
46
47#include <asm/byteorder.h>
48#include <asm/io.h>
49#include <asm/irq.h>
50#include <asm/system.h>
51#include <asm/unaligned.h>
52#include <asm/mach-types.h>
53
54#include <asm/arch/dma.h>
55#include <asm/arch/mux.h>
56#include <asm/arch/usb.h>
57
58#include "omap_udc.h"
59
60#undef USB_TRACE
61
62/* bulk DMA seems to be behaving for both IN and OUT */
63#define USE_DMA
64
65/* ISO too */
66#define USE_ISO
67
68#define DRIVER_DESC "OMAP UDC driver"
69#define DRIVER_VERSION "4 October 2004"
70
71#define DMA_ADDR_INVALID (~(dma_addr_t)0)
72
73
74/*
75 * The OMAP UDC needs _very_ early endpoint setup: before enabling the
76 * D+ pullup to allow enumeration. That's too early for the gadget
77 * framework to use from usb_endpoint_enable(), which happens after
78 * enumeration as part of activating an interface. (But if we add an
79 * optional new "UDC not yet running" state to the gadget driver model,
80 * even just during driver binding, the endpoint autoconfig logic is the
81 * natural spot to manufacture new endpoints.)
82 *
83 * So instead of using endpoint enable calls to control the hardware setup,
84 * this driver defines a "fifo mode" parameter. It's used during driver
85 * initialization to choose among a set of pre-defined endpoint configs.
86 * See omap_udc_setup() for available modes, or to add others. That code
87 * lives in an init section, so use this driver as a module if you need
88 * to change the fifo mode after the kernel boots.
89 *
90 * Gadget drivers normally ignore endpoints they don't care about, and
91 * won't include them in configuration descriptors. That means only
92 * misbehaving hosts would even notice they exist.
93 */
94#ifdef USE_ISO
95static unsigned fifo_mode = 3;
96#else
97static unsigned fifo_mode = 0;
98#endif
99
100/* "modprobe omap_udc fifo_mode=42", or else as a kernel
101 * boot parameter "omap_udc:fifo_mode=42"
102 */
103module_param (fifo_mode, uint, 0);
104MODULE_PARM_DESC (fifo_mode, "endpoint setup (0 == default)");
105
106#ifdef USE_DMA
107static unsigned use_dma = 1;
108
109/* "modprobe omap_udc use_dma=y", or else as a kernel
110 * boot parameter "omap_udc:use_dma=y"
111 */
112module_param (use_dma, bool, 0);
113MODULE_PARM_DESC (use_dma, "enable/disable DMA");
114#else /* !USE_DMA */
115
116/* save a bit of code */
117#define use_dma 0
118#endif /* !USE_DMA */
119
120
121static const char driver_name [] = "omap_udc";
122static const char driver_desc [] = DRIVER_DESC;
123
124/*-------------------------------------------------------------------------*/
125
126/* there's a notion of "current endpoint" for modifying endpoint
127 * state, and PIO access to its FIFO.
128 */
129
130static void use_ep(struct omap_ep *ep, u16 select)
131{
132 u16 num = ep->bEndpointAddress & 0x0f;
133
134 if (ep->bEndpointAddress & USB_DIR_IN)
135 num |= UDC_EP_DIR;
136 UDC_EP_NUM_REG = num | select;
137 /* when select, MUST deselect later !! */
138}
139
140static inline void deselect_ep(void)
141{
142 UDC_EP_NUM_REG &= ~UDC_EP_SEL;
143 /* 6 wait states before TX will happen */
144}
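/* Illustrative sketch, not from this driver: the select/operate/deselect
 * pattern used throughout this file, here clearing one endpoint's FIFO.
 * example_clear_ep_fifo() is hypothetical; the caller holds udc->lock.
 */
#if 0
static void example_clear_ep_fifo(struct omap_ep *ep)
{
	use_ep(ep, UDC_EP_SEL);		/* select the endpoint */
	UDC_CTRL_REG = UDC_CLR_EP;	/* act on the "current" endpoint */
	deselect_ep();			/* MUST deselect when done */
}
#endif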
145
146static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
147
148/*-------------------------------------------------------------------------*/
149
150static int omap_ep_enable(struct usb_ep *_ep,
151 const struct usb_endpoint_descriptor *desc)
152{
153 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
154 struct omap_udc *udc;
155 unsigned long flags;
156 u16 maxp;
157
158 /* catch various bogus parameters */
159 if (!_ep || !desc || ep->desc
160 || desc->bDescriptorType != USB_DT_ENDPOINT
161 || ep->bEndpointAddress != desc->bEndpointAddress
162 || ep->maxpacket < le16_to_cpu
163 (desc->wMaxPacketSize)) {
164 DBG("%s, bad ep or descriptor\n", __FUNCTION__);
165 return -EINVAL;
166 }
167 maxp = le16_to_cpu (desc->wMaxPacketSize);
168 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
169 && maxp != ep->maxpacket)
170 || desc->wMaxPacketSize > ep->maxpacket
171 || !desc->wMaxPacketSize) {
172 DBG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
173 return -ERANGE;
174 }
175
176#ifdef USE_ISO
177 if ((desc->bmAttributes == USB_ENDPOINT_XFER_ISOC
178 && desc->bInterval != 1)) {
179 /* hardware wants period = 1; USB allows 2^(Interval-1) */
180 DBG("%s, unsupported ISO period %dms\n", _ep->name,
181 1 << (desc->bInterval - 1));
182 return -EDOM;
183 }
184#else
185 if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
186 DBG("%s, ISO nyet\n", _ep->name);
187 return -EDOM;
188 }
189#endif
190
191 /* xfer types must match, except that interrupt ~= bulk */
192 if (ep->bmAttributes != desc->bmAttributes
193 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
194 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
195 DBG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
196 return -EINVAL;
197 }
198
199 udc = ep->udc;
200 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
201 DBG("%s, bogus device state\n", __FUNCTION__);
202 return -ESHUTDOWN;
203 }
204
205 spin_lock_irqsave(&udc->lock, flags);
206
207 ep->desc = desc;
208 ep->irqs = 0;
209 ep->stopped = 0;
210 ep->ep.maxpacket = maxp;
211
212 /* set endpoint to initial state */
213 ep->dma_channel = 0;
214 ep->has_dma = 0;
215 ep->lch = -1;
216 use_ep(ep, UDC_EP_SEL);
217 UDC_CTRL_REG = UDC_RESET_EP;
218 ep->ackwait = 0;
219 deselect_ep();
220
221 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
222 list_add(&ep->iso, &udc->iso);
223
224 /* maybe assign a DMA channel to this endpoint */
225 if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK)
226 /* FIXME ISO can dma, but prefers first channel */
227 dma_channel_claim(ep, 0);
228
229 /* PIO OUT may RX packets */
230 if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
231 && !ep->has_dma
232 && !(ep->bEndpointAddress & USB_DIR_IN)) {
233 UDC_CTRL_REG = UDC_SET_FIFO_EN;
234 ep->ackwait = 1 + ep->double_buf;
235 }
236
237 spin_unlock_irqrestore(&udc->lock, flags);
238 VDBG("%s enabled\n", _ep->name);
239 return 0;
240}
241
242static void nuke(struct omap_ep *, int status);
243
244static int omap_ep_disable(struct usb_ep *_ep)
245{
246 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
247 unsigned long flags;
248
249 if (!_ep || !ep->desc) {
250 DBG("%s, %s not enabled\n", __FUNCTION__,
251 _ep ? ep->ep.name : NULL);
252 return -EINVAL;
253 }
254
255 spin_lock_irqsave(&ep->udc->lock, flags);
256 ep->desc = 0;
257 nuke (ep, -ESHUTDOWN);
258 ep->ep.maxpacket = ep->maxpacket;
259 ep->has_dma = 0;
260 UDC_CTRL_REG = UDC_SET_HALT;
261 list_del_init(&ep->iso);
262 del_timer(&ep->timer);
263
264 spin_unlock_irqrestore(&ep->udc->lock, flags);
265
266 VDBG("%s disabled\n", _ep->name);
267 return 0;
268}
269
270/*-------------------------------------------------------------------------*/
271
272static struct usb_request *
273omap_alloc_request(struct usb_ep *ep, int gfp_flags)
274{
275 struct omap_req *req;
276
277 req = kmalloc(sizeof *req, gfp_flags);
278	if (!req)
279		return NULL;
280	memset (req, 0, sizeof *req);
281	req->req.dma = DMA_ADDR_INVALID;
282	INIT_LIST_HEAD (&req->queue);
283	return &req->req;
284}
285
286static void
287omap_free_request(struct usb_ep *ep, struct usb_request *_req)
288{
289 struct omap_req *req = container_of(_req, struct omap_req, req);
290
291 if (_req)
292 kfree (req);
293}
294
295/*-------------------------------------------------------------------------*/
296
297static void *
298omap_alloc_buffer(
299 struct usb_ep *_ep,
300 unsigned bytes,
301 dma_addr_t *dma,
302 int gfp_flags
303)
304{
305 void *retval;
306 struct omap_ep *ep;
307
308 ep = container_of(_ep, struct omap_ep, ep);
309 if (use_dma && ep->has_dma) {
310 static int warned;
311 if (!warned && bytes < PAGE_SIZE) {
312 dev_warn(ep->udc->gadget.dev.parent,
313 "using dma_alloc_coherent for "
314 "small allocations wastes memory\n");
315 warned++;
316 }
317 return dma_alloc_coherent(ep->udc->gadget.dev.parent,
318 bytes, dma, gfp_flags);
319 }
320
321 retval = kmalloc(bytes, gfp_flags);
322 if (retval)
323 *dma = virt_to_phys(retval);
324 return retval;
325}
326
327static void omap_free_buffer(
328 struct usb_ep *_ep,
329 void *buf,
330 dma_addr_t dma,
331 unsigned bytes
332)
333{
334 struct omap_ep *ep;
335
336 ep = container_of(_ep, struct omap_ep, ep);
337 if (use_dma && _ep && ep->has_dma)
338 dma_free_coherent(ep->udc->gadget.dev.parent, bytes, buf, dma);
339 else
340 kfree (buf);
341}
342
343/*-------------------------------------------------------------------------*/
344
345static void
346done(struct omap_ep *ep, struct omap_req *req, int status)
347{
348 unsigned stopped = ep->stopped;
349
350 list_del_init(&req->queue);
351
352 if (req->req.status == -EINPROGRESS)
353 req->req.status = status;
354 else
355 status = req->req.status;
356
357 if (use_dma && ep->has_dma) {
358 if (req->mapped) {
359 dma_unmap_single(ep->udc->gadget.dev.parent,
360 req->req.dma, req->req.length,
361 (ep->bEndpointAddress & USB_DIR_IN)
362 ? DMA_TO_DEVICE
363 : DMA_FROM_DEVICE);
364 req->req.dma = DMA_ADDR_INVALID;
365 req->mapped = 0;
366 } else
367 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
368 req->req.dma, req->req.length,
369 (ep->bEndpointAddress & USB_DIR_IN)
370 ? DMA_TO_DEVICE
371 : DMA_FROM_DEVICE);
372 }
373
374#ifndef USB_TRACE
375 if (status && status != -ESHUTDOWN)
376#endif
377 VDBG("complete %s req %p stat %d len %u/%u\n",
378 ep->ep.name, &req->req, status,
379 req->req.actual, req->req.length);
380
381 /* don't modify queue heads during completion callback */
382 ep->stopped = 1;
383 spin_unlock(&ep->udc->lock);
384 req->req.complete(&ep->ep, &req->req);
385 spin_lock(&ep->udc->lock);
386 ep->stopped = stopped;
387}
388
389/*-------------------------------------------------------------------------*/
390
391#define FIFO_FULL (UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL)
392#define FIFO_UNWRITABLE (UDC_EP_HALTED | FIFO_FULL)
393
394#define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
395#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
396
397static inline int
398write_packet(u8 *buf, struct omap_req *req, unsigned max)
399{
400 unsigned len;
401 u16 *wp;
402
403 len = min(req->req.length - req->req.actual, max);
404 req->req.actual += len;
405
406 max = len;
407 if (likely((((int)buf) & 1) == 0)) {
408 wp = (u16 *)buf;
409 while (max >= 2) {
410 UDC_DATA_REG = *wp++;
411 max -= 2;
412 }
413 buf = (u8 *)wp;
414 }
415 while (max--)
416 *(volatile u8 *)&UDC_DATA_REG = *buf++;
417 return len;
418}
419
420// FIXME change r/w fifo calling convention
421
422
423// return: 0 = still running, 1 = completed, negative = errno
424static int write_fifo(struct omap_ep *ep, struct omap_req *req)
425{
426 u8 *buf;
427 unsigned count;
428 int is_last;
429 u16 ep_stat;
430
431 buf = req->req.buf + req->req.actual;
432 prefetch(buf);
433
434 /* PIO-IN isn't double buffered except for iso */
435 ep_stat = UDC_STAT_FLG_REG;
436 if (ep_stat & FIFO_UNWRITABLE)
437 return 0;
438
439 count = ep->ep.maxpacket;
440 count = write_packet(buf, req, count);
441 UDC_CTRL_REG = UDC_SET_FIFO_EN;
442 ep->ackwait = 1;
443
444 /* last packet is often short (sometimes a zlp) */
445 if (count != ep->ep.maxpacket)
446 is_last = 1;
447 else if (req->req.length == req->req.actual
448 && !req->req.zero)
449 is_last = 1;
450 else
451 is_last = 0;
452
453 /* NOTE: requests complete when all IN data is in a
454 * FIFO (or sometimes later, if a zlp was needed).
455 * Use usb_ep_fifo_status() where needed.
456 */
457 if (is_last)
458 done(ep, req, 0);
459 return is_last;
460}
461
462static inline int
463read_packet(u8 *buf, struct omap_req *req, unsigned avail)
464{
465 unsigned len;
466 u16 *wp;
467
468 len = min(req->req.length - req->req.actual, avail);
469 req->req.actual += len;
470 avail = len;
471
472 if (likely((((int)buf) & 1) == 0)) {
473 wp = (u16 *)buf;
474 while (avail >= 2) {
475 *wp++ = UDC_DATA_REG;
476 avail -= 2;
477 }
478 buf = (u8 *)wp;
479 }
480 while (avail--)
481 *buf++ = *(volatile u8 *)&UDC_DATA_REG;
482 return len;
483}
484
485// return: 0 = still running, 1 = queue empty, negative = errno
486static int read_fifo(struct omap_ep *ep, struct omap_req *req)
487{
488 u8 *buf;
489 unsigned count, avail;
490 int is_last;
491
492 buf = req->req.buf + req->req.actual;
493 prefetchw(buf);
494
495 for (;;) {
496 u16 ep_stat = UDC_STAT_FLG_REG;
497
498 is_last = 0;
499 if (ep_stat & FIFO_EMPTY) {
500 if (!ep->double_buf)
501 break;
502 ep->fnf = 1;
503 }
504 if (ep_stat & UDC_EP_HALTED)
505 break;
506
507 if (ep_stat & FIFO_FULL)
508 avail = ep->ep.maxpacket;
509 else {
510 avail = UDC_RXFSTAT_REG;
511 ep->fnf = ep->double_buf;
512 }
513 count = read_packet(buf, req, avail);
514
515 /* partial packet reads may not be errors */
516 if (count < ep->ep.maxpacket) {
517 is_last = 1;
518 /* overflowed this request? flush extra data */
519 if (count != avail) {
520 req->req.status = -EOVERFLOW;
521 avail -= count;
522 while (avail--)
523 (void) *(volatile u8 *)&UDC_DATA_REG;
524 }
525 } else if (req->req.length == req->req.actual)
526 is_last = 1;
527 else
528 is_last = 0;
529
530 if (!ep->bEndpointAddress)
531 break;
532 if (is_last)
533 done(ep, req, 0);
534 break;
535 }
536 return is_last;
537}
538
539/*-------------------------------------------------------------------------*/
540
541static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
542{
543 dma_addr_t end;
544
545	/* IN-DMA needs this on fault/cancel paths, but the 15xx misreports
546	 * the last transfer's bytecount by more than a FIFO's worth; just
547	 * report zero there. */
548 if (cpu_is_omap15xx())
549 return 0;
550
551 end = omap_readw(OMAP_DMA_CSAC(ep->lch));
552 if (end == ep->dma_counter)
553 return 0;
554
555 end |= start & (0xffff << 16);
556 if (end < start)
557 end += 0x10000;
558 return end - start;
559}
560
561#define DMA_DEST_LAST(x) (cpu_is_omap15xx() \
562 ? OMAP_DMA_CSAC(x) /* really: CPC */ \
563 : OMAP_DMA_CDAC(x))
564
565static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
566{
567 dma_addr_t end;
568
569 end = omap_readw(DMA_DEST_LAST(ep->lch));
570 if (end == ep->dma_counter)
571 return 0;
572
573 end |= start & (0xffff << 16);
574 if (cpu_is_omap15xx())
575 end++;
576 if (end < start)
577 end += 0x10000;
578 return end - start;
579}
580
581
582/* Each USB transfer request using DMA maps to one or more DMA transfers.
583 * When DMA completion isn't request completion, the UDC continues with
584 * the next DMA transfer for that USB transfer.
585 */
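/* Illustrative sketch, not from this driver: the chunking arithmetic in
 * isolation.  A single usb_request may need several DMA programmings,
 * each capped by the hardware packet counter; dma_irq() below calls
 * finish_*_dma() and then next_*_dma() until the request is done.
 * example_out_chunk_packets() is hypothetical.
 */
#if 0
static unsigned example_out_chunk_packets(struct omap_ep *ep,
		struct omap_req *req)
{
	unsigned packets = (req->req.length - req->req.actual)
				/ ep->ep.maxpacket;

	/* the RX transfer counter holds at most UDC_RXN_TC + 1 packets */
	return min(packets, (unsigned) UDC_RXN_TC + 1);
}
#endif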
586
587static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
588{
589 u16 txdma_ctrl;
590 unsigned length = req->req.length - req->req.actual;
591 const int sync_mode = cpu_is_omap15xx()
592 ? OMAP_DMA_SYNC_FRAME
593 : OMAP_DMA_SYNC_ELEMENT;
594
595 /* measure length in either bytes or packets */
596 if ((cpu_is_omap16xx() && length <= (UDC_TXN_TSC + 1))
597 || (cpu_is_omap15xx() && length < ep->maxpacket)) {
598 txdma_ctrl = UDC_TXN_EOT | length;
599 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
600 length, 1, sync_mode);
601 } else {
602 length = min(length / ep->maxpacket,
603 (unsigned) UDC_TXN_TSC + 1);
604 txdma_ctrl = length;
605 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
606 ep->ep.maxpacket, length, sync_mode);
607 length *= ep->maxpacket;
608 }
609 omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
610 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
611
612 omap_start_dma(ep->lch);
613 ep->dma_counter = omap_readw(OMAP_DMA_CSAC(ep->lch));
614 UDC_DMA_IRQ_EN_REG |= UDC_TX_DONE_IE(ep->dma_channel);
615 UDC_TXDMA_REG(ep->dma_channel) = UDC_TXN_START | txdma_ctrl;
616 req->dma_bytes = length;
617}
618
619static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
620{
621 if (status == 0) {
622 req->req.actual += req->dma_bytes;
623
624 /* return if this request needs to send data or zlp */
625 if (req->req.actual < req->req.length)
626 return;
627 if (req->req.zero
628 && req->dma_bytes != 0
629 && (req->req.actual % ep->maxpacket) == 0)
630 return;
631 } else
632 req->req.actual += dma_src_len(ep, req->req.dma
633 + req->req.actual);
634
635 /* tx completion */
636 omap_stop_dma(ep->lch);
637 UDC_DMA_IRQ_EN_REG &= ~UDC_TX_DONE_IE(ep->dma_channel);
638 done(ep, req, status);
639}
640
641static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
642{
643 unsigned packets;
644
645 /* NOTE: we filtered out "short reads" before, so we know
646 * the buffer has only whole numbers of packets.
647 */
648
649 /* set up this DMA transfer, enable the fifo, start */
650 packets = (req->req.length - req->req.actual) / ep->ep.maxpacket;
651 packets = min(packets, (unsigned)UDC_RXN_TC + 1);
652 req->dma_bytes = packets * ep->ep.maxpacket;
653 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
654 ep->ep.maxpacket, packets,
655 OMAP_DMA_SYNC_ELEMENT);
656 omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
657 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
658 ep->dma_counter = omap_readw(DMA_DEST_LAST(ep->lch));
659
660 UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1);
661 UDC_DMA_IRQ_EN_REG |= UDC_RX_EOT_IE(ep->dma_channel);
662 UDC_EP_NUM_REG = (ep->bEndpointAddress & 0xf);
663 UDC_CTRL_REG = UDC_SET_FIFO_EN;
664
665 omap_start_dma(ep->lch);
666}
667
668static void
669finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status)
670{
671 u16 count;
672
673 if (status == 0)
674 ep->dma_counter = (u16) (req->req.dma + req->req.actual);
675 count = dma_dest_len(ep, req->req.dma + req->req.actual);
676 count += req->req.actual;
677 if (count <= req->req.length)
678 req->req.actual = count;
679
680 if (count != req->dma_bytes || status)
681 omap_stop_dma(ep->lch);
682
683 /* if this wasn't short, request may need another transfer */
684 else if (req->req.actual < req->req.length)
685 return;
686
687 /* rx completion */
688 UDC_DMA_IRQ_EN_REG &= ~UDC_RX_EOT_IE(ep->dma_channel);
689 done(ep, req, status);
690}
691
692static void dma_irq(struct omap_udc *udc, u16 irq_src)
693{
694 u16 dman_stat = UDC_DMAN_STAT_REG;
695 struct omap_ep *ep;
696 struct omap_req *req;
697
698 /* IN dma: tx to host */
699 if (irq_src & UDC_TXN_DONE) {
700 ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
701 ep->irqs++;
702 /* can see TXN_DONE after dma abort */
703 if (!list_empty(&ep->queue)) {
704 req = container_of(ep->queue.next,
705 struct omap_req, queue);
706 finish_in_dma(ep, req, 0);
707 }
708 UDC_IRQ_SRC_REG = UDC_TXN_DONE;
709
710 if (!list_empty (&ep->queue)) {
711 req = container_of(ep->queue.next,
712 struct omap_req, queue);
713 next_in_dma(ep, req);
714 }
715 }
716
717 /* OUT dma: rx from host */
718 if (irq_src & UDC_RXN_EOT) {
719 ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
720 ep->irqs++;
721 /* can see RXN_EOT after dma abort */
722 if (!list_empty(&ep->queue)) {
723 req = container_of(ep->queue.next,
724 struct omap_req, queue);
725 finish_out_dma(ep, req, 0);
726 }
727 UDC_IRQ_SRC_REG = UDC_RXN_EOT;
728
729 if (!list_empty (&ep->queue)) {
730 req = container_of(ep->queue.next,
731 struct omap_req, queue);
732 next_out_dma(ep, req);
733 }
734 }
735
736 if (irq_src & UDC_RXN_CNT) {
737 ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
738 ep->irqs++;
739 /* omap15xx does this unasked... */
740 VDBG("%s, RX_CNT irq?\n", ep->ep.name);
741 UDC_IRQ_SRC_REG = UDC_RXN_CNT;
742 }
743}
744
745static void dma_error(int lch, u16 ch_status, void *data)
746{
747 struct omap_ep *ep = data;
748
749 /* if ch_status & OMAP_DMA_DROP_IRQ ... */
750 /* if ch_status & OMAP_DMA_TOUT_IRQ ... */
751 ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);
752
753 /* complete current transfer ... */
754}
755
756static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
757{
758 u16 reg;
759 int status, restart, is_in;
760
761 is_in = ep->bEndpointAddress & USB_DIR_IN;
762 if (is_in)
763 reg = UDC_TXDMA_CFG_REG;
764 else
765 reg = UDC_RXDMA_CFG_REG;
766 reg |= 1 << 12; /* "pulse" activated */
767
768 ep->dma_channel = 0;
769 ep->lch = -1;
770 if (channel == 0 || channel > 3) {
771 if ((reg & 0x0f00) == 0)
772 channel = 3;
773 else if ((reg & 0x00f0) == 0)
774 channel = 2;
775 else if ((reg & 0x000f) == 0) /* preferred for ISO */
776 channel = 1;
777 else {
778 status = -EMLINK;
779 goto just_restart;
780 }
781 }
782 reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
783 ep->dma_channel = channel;
784
785 if (is_in) {
786 status = omap_request_dma(OMAP_DMA_USB_W2FC_TX0 - 1 + channel,
787 ep->ep.name, dma_error, ep, &ep->lch);
788 if (status == 0) {
789 UDC_TXDMA_CFG_REG = reg;
790 omap_set_dma_dest_params(ep->lch,
791 OMAP_DMA_PORT_TIPB,
792 OMAP_DMA_AMODE_CONSTANT,
793 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
794 }
795 } else {
796 status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel,
797 ep->ep.name, dma_error, ep, &ep->lch);
798 if (status == 0) {
799 UDC_RXDMA_CFG_REG = reg;
800 omap_set_dma_src_params(ep->lch,
801 OMAP_DMA_PORT_TIPB,
802 OMAP_DMA_AMODE_CONSTANT,
803 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
804 }
805 }
806 if (status)
807 ep->dma_channel = 0;
808 else {
809 ep->has_dma = 1;
810 omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);
811
812 /* channel type P: hw synch (fifo) */
813 if (!cpu_is_omap15xx())
814 omap_writew(2, OMAP_DMA_LCH_CTRL(ep->lch));
815 }
816
817just_restart:
818 /* restart any queue, even if the claim failed */
819 restart = !ep->stopped && !list_empty(&ep->queue);
820
821 if (status)
822 DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
823 restart ? " (restart)" : "");
824 else
825 DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
826 is_in ? 't' : 'r',
827 ep->dma_channel - 1, ep->lch,
828 restart ? " (restart)" : "");
829
830 if (restart) {
831 struct omap_req *req;
832 req = container_of(ep->queue.next, struct omap_req, queue);
833 if (ep->has_dma)
834 (is_in ? next_in_dma : next_out_dma)(ep, req);
835 else {
836 use_ep(ep, UDC_EP_SEL);
837 (is_in ? write_fifo : read_fifo)(ep, req);
838 deselect_ep();
839 if (!is_in) {
840 UDC_CTRL_REG = UDC_SET_FIFO_EN;
841 ep->ackwait = 1 + ep->double_buf;
842 }
843 /* IN: 6 wait states before it'll tx */
844 }
845 }
846}
847
848static void dma_channel_release(struct omap_ep *ep)
849{
850 int shift = 4 * (ep->dma_channel - 1);
851 u16 mask = 0x0f << shift;
852 struct omap_req *req;
853 int active;
854
855 /* abort any active usb transfer request */
856 if (!list_empty(&ep->queue))
857 req = container_of(ep->queue.next, struct omap_req, queue);
858 else
859 req = 0;
860
861 active = ((1 << 7) & omap_readl(OMAP_DMA_CCR(ep->lch))) != 0;
862
863 DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
864 active ? "active" : "idle",
865 (ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
866 ep->dma_channel - 1, req);
867
868 /* wait till current packet DMA finishes, and fifo empties */
869 if (ep->bEndpointAddress & USB_DIR_IN) {
870 UDC_TXDMA_CFG_REG &= ~mask;
871
872 if (req) {
873 finish_in_dma(ep, req, -ECONNRESET);
874
875 /* clear FIFO; hosts probably won't empty it */
876 use_ep(ep, UDC_EP_SEL);
877 UDC_CTRL_REG = UDC_CLR_EP;
878 deselect_ep();
879 }
880 while (UDC_TXDMA_CFG_REG & mask)
881 udelay(10);
882 } else {
883 UDC_RXDMA_CFG_REG &= ~mask;
884
885 /* dma empties the fifo */
886 while (UDC_RXDMA_CFG_REG & mask)
887 udelay(10);
888 if (req)
889 finish_out_dma(ep, req, -ECONNRESET);
890 }
891 omap_free_dma(ep->lch);
892 ep->dma_channel = 0;
893 ep->lch = -1;
894 /* has_dma still set, till endpoint is fully quiesced */
895}
896
897
898/*-------------------------------------------------------------------------*/
899
900static int
901omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
902{
903 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
904 struct omap_req *req = container_of(_req, struct omap_req, req);
905 struct omap_udc *udc;
906 unsigned long flags;
907 int is_iso = 0;
908
909 /* catch various bogus parameters */
910 if (!_req || !req->req.complete || !req->req.buf
911 || !list_empty(&req->queue)) {
912 DBG("%s, bad params\n", __FUNCTION__);
913 return -EINVAL;
914 }
915 if (!_ep || (!ep->desc && ep->bEndpointAddress)) {
916 DBG("%s, bad ep\n", __FUNCTION__);
917 return -EINVAL;
918 }
919 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
920 if (req->req.length > ep->ep.maxpacket)
921 return -EMSGSIZE;
922 is_iso = 1;
923 }
924
925 /* this isn't bogus, but OMAP DMA isn't the only hardware to
926 * have a hard time with partial packet reads... reject it.
927 */
928 if (use_dma
929 && ep->has_dma
930 && ep->bEndpointAddress != 0
931 && (ep->bEndpointAddress & USB_DIR_IN) == 0
932 && (req->req.length % ep->ep.maxpacket) != 0) {
933 DBG("%s, no partial packet OUT reads\n", __FUNCTION__);
934 return -EMSGSIZE;
935 }
936
937 udc = ep->udc;
938 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
939 return -ESHUTDOWN;
940
941 if (use_dma && ep->has_dma) {
942 if (req->req.dma == DMA_ADDR_INVALID) {
943 req->req.dma = dma_map_single(
944 ep->udc->gadget.dev.parent,
945 req->req.buf,
946 req->req.length,
947 (ep->bEndpointAddress & USB_DIR_IN)
948 ? DMA_TO_DEVICE
949 : DMA_FROM_DEVICE);
950 req->mapped = 1;
951 } else {
952 dma_sync_single_for_device(
953 ep->udc->gadget.dev.parent,
954 req->req.dma, req->req.length,
955 (ep->bEndpointAddress & USB_DIR_IN)
956 ? DMA_TO_DEVICE
957 : DMA_FROM_DEVICE);
958 req->mapped = 0;
959 }
960 }
961
962 VDBG("%s queue req %p, len %d buf %p\n",
963 ep->ep.name, _req, _req->length, _req->buf);
964
965 spin_lock_irqsave(&udc->lock, flags);
966
967 req->req.status = -EINPROGRESS;
968 req->req.actual = 0;
969
970 /* maybe kickstart non-iso i/o queues */
971 if (is_iso)
972 UDC_IRQ_EN_REG |= UDC_SOF_IE;
973 else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
974 int is_in;
975
976 if (ep->bEndpointAddress == 0) {
977 if (!udc->ep0_pending || !list_empty (&ep->queue)) {
978 spin_unlock_irqrestore(&udc->lock, flags);
979 return -EL2HLT;
980 }
981
982 /* empty DATA stage? */
983 is_in = udc->ep0_in;
984 if (!req->req.length) {
985
986 /* chip became CONFIGURED or ADDRESSED
987 * earlier; drivers may already have queued
988 * requests to non-control endpoints
989 */
990 if (udc->ep0_set_config) {
991 u16 irq_en = UDC_IRQ_EN_REG;
992
993 irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
994 if (!udc->ep0_reset_config)
995 irq_en |= UDC_EPN_RX_IE
996 | UDC_EPN_TX_IE;
997 UDC_IRQ_EN_REG = irq_en;
998 }
999
1000 /* STATUS is reverse direction */
1001 UDC_EP_NUM_REG = is_in
1002 ? UDC_EP_SEL
1003 : (UDC_EP_SEL|UDC_EP_DIR);
1004 UDC_CTRL_REG = UDC_CLR_EP;
1005 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1006 UDC_EP_NUM_REG = udc->ep0_in ? 0 : UDC_EP_DIR;
1007
1008 /* cleanup */
1009 udc->ep0_pending = 0;
1010 done(ep, req, 0);
1011 req = 0;
1012
1013 /* non-empty DATA stage */
1014 } else if (is_in) {
1015 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
1016 } else {
1017 if (udc->ep0_setup)
1018 goto irq_wait;
1019 UDC_EP_NUM_REG = UDC_EP_SEL;
1020 }
1021 } else {
1022 is_in = ep->bEndpointAddress & USB_DIR_IN;
1023 if (!ep->has_dma)
1024 use_ep(ep, UDC_EP_SEL);
1025 /* if ISO: SOF IRQs must be enabled/disabled! */
1026 }
1027
1028 if (ep->has_dma)
1029 (is_in ? next_in_dma : next_out_dma)(ep, req);
1030 else if (req) {
1031 if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
1032 req = 0;
1033 deselect_ep();
1034 if (!is_in) {
1035 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1036 ep->ackwait = 1 + ep->double_buf;
1037 }
1038 /* IN: 6 wait states before it'll tx */
1039 }
1040 }
1041
1042irq_wait:
1043 /* irq handler advances the queue */
1044 if (req != 0)
1045 list_add_tail(&req->queue, &ep->queue);
1046 spin_unlock_irqrestore(&udc->lock, flags);
1047
1048 return 0;
1049}
1050
1051static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1052{
1053 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
1054 struct omap_req *req;
1055 unsigned long flags;
1056
1057 if (!_ep || !_req)
1058 return -EINVAL;
1059
1060 spin_lock_irqsave(&ep->udc->lock, flags);
1061
1062 /* make sure it's actually queued on this endpoint */
1063 list_for_each_entry (req, &ep->queue, queue) {
1064 if (&req->req == _req)
1065 break;
1066 }
1067 if (&req->req != _req) {
1068 spin_unlock_irqrestore(&ep->udc->lock, flags);
1069 return -EINVAL;
1070 }
1071
1072 if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
1073 int channel = ep->dma_channel;
1074
1075 /* releasing the channel cancels the request,
1076 * reclaiming the channel restarts the queue
1077 */
1078 dma_channel_release(ep);
1079 dma_channel_claim(ep, channel);
1080 } else
1081 done(ep, req, -ECONNRESET);
1082 spin_unlock_irqrestore(&ep->udc->lock, flags);
1083 return 0;
1084}
1085
1086/*-------------------------------------------------------------------------*/
1087
1088static int omap_ep_set_halt(struct usb_ep *_ep, int value)
1089{
1090 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
1091 unsigned long flags;
1092 int status = -EOPNOTSUPP;
1093
1094 spin_lock_irqsave(&ep->udc->lock, flags);
1095
1096 /* just use protocol stalls for ep0; real halts are annoying */
1097 if (ep->bEndpointAddress == 0) {
1098 if (!ep->udc->ep0_pending)
1099 status = -EINVAL;
1100 else if (value) {
1101 if (ep->udc->ep0_set_config) {
1102 WARN("error changing config?\n");
1103 UDC_SYSCON2_REG = UDC_CLR_CFG;
1104 }
1105 UDC_SYSCON2_REG = UDC_STALL_CMD;
1106 ep->udc->ep0_pending = 0;
1107 status = 0;
1108 } else /* NOP */
1109 status = 0;
1110
1111 /* otherwise, all active non-ISO endpoints can halt */
1112 } else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->desc) {
1113
1114 /* IN endpoints must already be idle */
1115 if ((ep->bEndpointAddress & USB_DIR_IN)
1116 && !list_empty(&ep->queue)) {
1117 status = -EAGAIN;
1118 goto done;
1119 }
1120
1121 if (value) {
1122 int channel;
1123
1124 if (use_dma && ep->dma_channel
1125 && !list_empty(&ep->queue)) {
1126 channel = ep->dma_channel;
1127 dma_channel_release(ep);
1128 } else
1129 channel = 0;
1130
1131 use_ep(ep, UDC_EP_SEL);
1132 if (UDC_STAT_FLG_REG & UDC_NON_ISO_FIFO_EMPTY) {
1133 UDC_CTRL_REG = UDC_SET_HALT;
1134 status = 0;
1135 } else
1136 status = -EAGAIN;
1137 deselect_ep();
1138
1139 if (channel)
1140 dma_channel_claim(ep, channel);
1141 } else {
1142 use_ep(ep, 0);
1143 UDC_CTRL_REG = UDC_RESET_EP;
1144 ep->ackwait = 0;
1145 if (!(ep->bEndpointAddress & USB_DIR_IN)) {
1146 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1147 ep->ackwait = 1 + ep->double_buf;
1148 }
1149 }
1150 }
1151done:
1152 VDBG("%s %s halt stat %d\n", ep->ep.name,
1153 value ? "set" : "clear", status);
1154
1155 spin_unlock_irqrestore(&ep->udc->lock, flags);
1156 return status;
1157}
1158
1159static struct usb_ep_ops omap_ep_ops = {
1160 .enable = omap_ep_enable,
1161 .disable = omap_ep_disable,
1162
1163 .alloc_request = omap_alloc_request,
1164 .free_request = omap_free_request,
1165
1166 .alloc_buffer = omap_alloc_buffer,
1167 .free_buffer = omap_free_buffer,
1168
1169 .queue = omap_ep_queue,
1170 .dequeue = omap_ep_dequeue,
1171
1172 .set_halt = omap_ep_set_halt,
1173 // fifo_status ... report bytes in fifo
1174 // fifo_flush ... flush fifo
1175};
1176
1177/*-------------------------------------------------------------------------*/
1178
1179static int omap_get_frame(struct usb_gadget *gadget)
1180{
1181 u16 sof = UDC_SOF_REG;
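	/* low 10 bits hold the frame number; only valid while TS_OK is set */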
1182 return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC;
1183}
1184
1185static int omap_wakeup(struct usb_gadget *gadget)
1186{
1187 struct omap_udc *udc;
1188 unsigned long flags;
1189 int retval = -EHOSTUNREACH;
1190
1191 udc = container_of(gadget, struct omap_udc, gadget);
1192
1193 spin_lock_irqsave(&udc->lock, flags);
1194 if (udc->devstat & UDC_SUS) {
1195 /* NOTE: OTG spec erratum says that OTG devices may
1196 * issue wakeups without host enable.
1197 */
1198 if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
1199 DBG("remote wakeup...\n");
1200 UDC_SYSCON2_REG = UDC_RMT_WKP;
1201 retval = 0;
1202 }
1203
1204 /* NOTE: non-OTG systems may use SRP TOO... */
1205 } else if (!(udc->devstat & UDC_ATT)) {
1206 if (udc->transceiver)
1207 retval = otg_start_srp(udc->transceiver);
1208 }
1209 spin_unlock_irqrestore(&udc->lock, flags);
1210
1211 return retval;
1212}
1213
1214static int
1215omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
1216{
1217 struct omap_udc *udc;
1218 unsigned long flags;
1219 u16 syscon1;
1220
1221 udc = container_of(gadget, struct omap_udc, gadget);
1222 spin_lock_irqsave(&udc->lock, flags);
1223 syscon1 = UDC_SYSCON1_REG;
1224 if (is_selfpowered)
1225 syscon1 |= UDC_SELF_PWR;
1226 else
1227 syscon1 &= ~UDC_SELF_PWR;
1228 UDC_SYSCON1_REG = syscon1;
1229 spin_unlock_irqrestore(&udc->lock, flags);
1230
1231 return 0;
1232}
1233
1234static int can_pullup(struct omap_udc *udc)
1235{
1236 return udc->driver && udc->softconnect && udc->vbus_active;
1237}
1238
1239static void pullup_enable(struct omap_udc *udc)
1240{
1241 UDC_SYSCON1_REG |= UDC_PULLUP_EN;
1242#ifndef CONFIG_USB_OTG
1243 if (!cpu_is_omap15xx())
1244 OTG_CTRL_REG |= OTG_BSESSVLD;
1245#endif
1246 UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
1247}
1248
1249static void pullup_disable(struct omap_udc *udc)
1250{
1251#ifndef CONFIG_USB_OTG
1252 if (!cpu_is_omap15xx())
1253 OTG_CTRL_REG &= ~OTG_BSESSVLD;
1254#endif
1255 UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
1256 UDC_SYSCON1_REG &= ~UDC_PULLUP_EN;
1257}
1258
1259/*
1260 * Called by whatever detects VBUS sessions: external transceiver
1261 * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
1262 */
1263static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
1264{
1265 struct omap_udc *udc;
1266 unsigned long flags;
1267
1268 udc = container_of(gadget, struct omap_udc, gadget);
1269 spin_lock_irqsave(&udc->lock, flags);
1270 VDBG("VBUS %s\n", is_active ? "on" : "off");
1271 udc->vbus_active = (is_active != 0);
1272 if (cpu_is_omap15xx()) {
1273 /* "software" detect, ignored if !VBUS_MODE_1510 */
1274 if (is_active)
1275 FUNC_MUX_CTRL_0_REG |= VBUS_CTRL_1510;
1276 else
1277 FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510;
1278 }
1279 if (can_pullup(udc))
1280 pullup_enable(udc);
1281 else
1282 pullup_disable(udc);
1283 spin_unlock_irqrestore(&udc->lock, flags);
1284 return 0;
1285}
1286
1287static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1288{
1289 struct omap_udc *udc;
1290
1291 udc = container_of(gadget, struct omap_udc, gadget);
1292 if (udc->transceiver)
1293 return otg_set_power(udc->transceiver, mA);
1294 return -EOPNOTSUPP;
1295}
1296
1297static int omap_pullup(struct usb_gadget *gadget, int is_on)
1298{
1299 struct omap_udc *udc;
1300 unsigned long flags;
1301
1302 udc = container_of(gadget, struct omap_udc, gadget);
1303 spin_lock_irqsave(&udc->lock, flags);
1304 udc->softconnect = (is_on != 0);
1305 if (can_pullup(udc))
1306 pullup_enable(udc);
1307 else
1308 pullup_disable(udc);
1309 spin_unlock_irqrestore(&udc->lock, flags);
1310 return 0;
1311}
1312
1313static struct usb_gadget_ops omap_gadget_ops = {
1314 .get_frame = omap_get_frame,
1315 .wakeup = omap_wakeup,
1316 .set_selfpowered = omap_set_selfpowered,
1317 .vbus_session = omap_vbus_session,
1318 .vbus_draw = omap_vbus_draw,
1319 .pullup = omap_pullup,
1320};
1321
1322/*-------------------------------------------------------------------------*/
1323
1324/* dequeue ALL requests; caller holds udc->lock */
1325static void nuke(struct omap_ep *ep, int status)
1326{
1327 struct omap_req *req;
1328
1329 ep->stopped = 1;
1330
1331 if (use_dma && ep->dma_channel)
1332 dma_channel_release(ep);
1333
1334 use_ep(ep, 0);
1335 UDC_CTRL_REG = UDC_CLR_EP;
1336 if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
1337 UDC_CTRL_REG = UDC_SET_HALT;
1338
1339 while (!list_empty(&ep->queue)) {
1340 req = list_entry(ep->queue.next, struct omap_req, queue);
1341 done(ep, req, status);
1342 }
1343}
1344
1345/* caller holds udc->lock */
1346static void udc_quiesce(struct omap_udc *udc)
1347{
1348 struct omap_ep *ep;
1349
1350 udc->gadget.speed = USB_SPEED_UNKNOWN;
1351 nuke(&udc->ep[0], -ESHUTDOWN);
1352 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list)
1353 nuke(ep, -ESHUTDOWN);
1354}
1355
1356/*-------------------------------------------------------------------------*/
1357
1358static void update_otg(struct omap_udc *udc)
1359{
1360 u16 devstat;
1361
1362 if (!udc->gadget.is_otg)
1363 return;
1364
1365 if (OTG_CTRL_REG & OTG_ID)
1366 devstat = UDC_DEVSTAT_REG;
1367 else
1368 devstat = 0;
1369
1370 udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
1371 udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
1372 udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);
1373
1374 /* Enable HNP early, avoiding races on suspend irq path.
1375 * ASSUMES OTG state machine B_BUS_REQ input is true.
1376 */
1377 if (udc->gadget.b_hnp_enable)
1378 OTG_CTRL_REG = (OTG_CTRL_REG | OTG_B_HNPEN | OTG_B_BUSREQ)
1379 & ~OTG_PULLUP;
1380}
1381
1382static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1383{
1384 struct omap_ep *ep0 = &udc->ep[0];
1385 struct omap_req *req = 0;
1386
1387 ep0->irqs++;
1388
1389 /* Clear any pending requests and then scrub any rx/tx state
1390 * before starting to handle the SETUP request.
1391 */
1392 if (irq_src & UDC_SETUP) {
1393 u16 ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);
1394
1395 nuke(ep0, 0);
1396 if (ack) {
1397 UDC_IRQ_SRC_REG = ack;
1398 irq_src = UDC_SETUP;
1399 }
1400 }
1401
1402 /* IN/OUT packets mean we're in the DATA or STATUS stage.
1403	 * This driver only uses protocol stalls (ep0 never halts),
1404 * and if we got this far the gadget driver already had a
1405	 * chance to stall.  We try to be forgiving of host oddities.
1406 *
1407 * NOTE: the last chance gadget drivers have to stall control
1408 * requests is during their request completion callback.
1409 */
1410 if (!list_empty(&ep0->queue))
1411 req = container_of(ep0->queue.next, struct omap_req, queue);
1412
1413 /* IN == TX to host */
1414 if (irq_src & UDC_EP0_TX) {
1415 int stat;
1416
1417 UDC_IRQ_SRC_REG = UDC_EP0_TX;
1418 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
1419 stat = UDC_STAT_FLG_REG;
1420 if (stat & UDC_ACK) {
1421 if (udc->ep0_in) {
1422 /* write next IN packet from response,
1423 * or set up the status stage.
1424 */
1425 if (req)
1426 stat = write_fifo(ep0, req);
1427 UDC_EP_NUM_REG = UDC_EP_DIR;
1428 if (!req && udc->ep0_pending) {
1429 UDC_EP_NUM_REG = UDC_EP_SEL;
1430 UDC_CTRL_REG = UDC_CLR_EP;
1431 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1432 UDC_EP_NUM_REG = 0;
1433 udc->ep0_pending = 0;
1434 } /* else: 6 wait states before it'll tx */
1435 } else {
1436 /* ack status stage of OUT transfer */
1437 UDC_EP_NUM_REG = UDC_EP_DIR;
1438 if (req)
1439 done(ep0, req, 0);
1440 }
1441 req = 0;
1442 } else if (stat & UDC_STALL) {
1443 UDC_CTRL_REG = UDC_CLR_HALT;
1444 UDC_EP_NUM_REG = UDC_EP_DIR;
1445 } else {
1446 UDC_EP_NUM_REG = UDC_EP_DIR;
1447 }
1448 }
1449
1450 /* OUT == RX from host */
1451 if (irq_src & UDC_EP0_RX) {
1452 int stat;
1453
1454 UDC_IRQ_SRC_REG = UDC_EP0_RX;
1455 UDC_EP_NUM_REG = UDC_EP_SEL;
1456 stat = UDC_STAT_FLG_REG;
1457 if (stat & UDC_ACK) {
1458 if (!udc->ep0_in) {
1459 stat = 0;
1460 /* read next OUT packet of request, maybe
1461	 * reactivating the fifo; stall on errors.
1462 */
1463 if (!req || (stat = read_fifo(ep0, req)) < 0) {
1464 UDC_SYSCON2_REG = UDC_STALL_CMD;
1465 udc->ep0_pending = 0;
1466 stat = 0;
1467 } else if (stat == 0)
1468 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1469 UDC_EP_NUM_REG = 0;
1470
1471 /* activate status stage */
1472 if (stat == 1) {
1473 done(ep0, req, 0);
1474 /* that may have STALLed ep0... */
1475 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
1476 UDC_CTRL_REG = UDC_CLR_EP;
1477 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1478 UDC_EP_NUM_REG = UDC_EP_DIR;
1479 udc->ep0_pending = 0;
1480 }
1481 } else {
1482 /* ack status stage of IN transfer */
1483 UDC_EP_NUM_REG = 0;
1484 if (req)
1485 done(ep0, req, 0);
1486 }
1487 } else if (stat & UDC_STALL) {
1488 UDC_CTRL_REG = UDC_CLR_HALT;
1489 UDC_EP_NUM_REG = 0;
1490 } else {
1491 UDC_EP_NUM_REG = 0;
1492 }
1493 }
1494
1495 /* SETUP starts all control transfers */
1496 if (irq_src & UDC_SETUP) {
1497 union u {
1498 u16 word[4];
1499 struct usb_ctrlrequest r;
1500 } u;
1501 int status = -EINVAL;
1502 struct omap_ep *ep;
1503
1504 /* read the (latest) SETUP message */
1505 do {
1506 UDC_EP_NUM_REG = UDC_SETUP_SEL;
1507 /* two bytes at a time */
1508 u.word[0] = UDC_DATA_REG;
1509 u.word[1] = UDC_DATA_REG;
1510 u.word[2] = UDC_DATA_REG;
1511 u.word[3] = UDC_DATA_REG;
1512 UDC_EP_NUM_REG = 0;
1513 } while (UDC_IRQ_SRC_REG & UDC_SETUP);
1514 le16_to_cpus (&u.r.wValue);
1515 le16_to_cpus (&u.r.wIndex);
1516 le16_to_cpus (&u.r.wLength);
1517
1518 /* Delegate almost all control requests to the gadget driver,
1519 * except for a handful of ch9 status/feature requests that
1520 * hardware doesn't autodecode _and_ the gadget API hides.
1521 */
1522 udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1523 udc->ep0_set_config = 0;
1524 udc->ep0_pending = 1;
1525 ep0->stopped = 0;
1526 ep0->ackwait = 0;
1527 switch (u.r.bRequest) {
1528 case USB_REQ_SET_CONFIGURATION:
1529 /* udc needs to know when ep != 0 is valid */
1530 if (u.r.bRequestType != USB_RECIP_DEVICE)
1531 goto delegate;
1532 if (u.r.wLength != 0)
1533 goto do_stall;
1534 udc->ep0_set_config = 1;
1535 udc->ep0_reset_config = (u.r.wValue == 0);
1536 VDBG("set config %d\n", u.r.wValue);
1537
1538 /* update udc NOW since gadget driver may start
1539 * queueing requests immediately; clear config
1540 * later if it fails the request.
1541 */
1542 if (udc->ep0_reset_config)
1543 UDC_SYSCON2_REG = UDC_CLR_CFG;
1544 else
1545 UDC_SYSCON2_REG = UDC_DEV_CFG;
1546 update_otg(udc);
1547 goto delegate;
1548 case USB_REQ_CLEAR_FEATURE:
1549 /* clear endpoint halt */
1550 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1551 goto delegate;
1552 if (u.r.wValue != USB_ENDPOINT_HALT
1553 || u.r.wLength != 0)
1554 goto do_stall;
1555 ep = &udc->ep[u.r.wIndex & 0xf];
1556 if (ep != ep0) {
1557 if (u.r.wIndex & USB_DIR_IN)
1558 ep += 16;
1559 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
1560 || !ep->desc)
1561 goto do_stall;
1562 use_ep(ep, 0);
1563 UDC_CTRL_REG = UDC_RESET_EP;
1564 ep->ackwait = 0;
1565 if (!(ep->bEndpointAddress & USB_DIR_IN)) {
1566 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1567 ep->ackwait = 1 + ep->double_buf;
1568 }
1569 }
1570 VDBG("%s halt cleared by host\n", ep->name);
1571 goto ep0out_status_stage;
1572 case USB_REQ_SET_FEATURE:
1573 /* set endpoint halt */
1574 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1575 goto delegate;
1576 if (u.r.wValue != USB_ENDPOINT_HALT
1577 || u.r.wLength != 0)
1578 goto do_stall;
1579 ep = &udc->ep[u.r.wIndex & 0xf];
1580 if (u.r.wIndex & USB_DIR_IN)
1581 ep += 16;
1582 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
1583 || ep == ep0 || !ep->desc)
1584 goto do_stall;
1585 if (use_dma && ep->has_dma) {
1586 /* this has rude side-effects (aborts) and
1587 * can't really work if DMA-IN is active
1588 */
1589	 DBG("%s host set_halt, NYET\n", ep->name);
1590 goto do_stall;
1591 }
1592 use_ep(ep, 0);
1593 /* can't halt if fifo isn't empty... */
1594 UDC_CTRL_REG = UDC_CLR_EP;
1595 UDC_CTRL_REG = UDC_SET_HALT;
1596 VDBG("%s halted by host\n", ep->name);
1597ep0out_status_stage:
1598 status = 0;
1599 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
1600 UDC_CTRL_REG = UDC_CLR_EP;
1601 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1602 UDC_EP_NUM_REG = UDC_EP_DIR;
1603 udc->ep0_pending = 0;
1604 break;
1605 case USB_REQ_GET_STATUS:
1606 /* return interface status. if we were pedantic,
1607 * we'd detect non-existent interfaces, and stall.
1608 */
1609 if (u.r.bRequestType
1610 != (USB_DIR_IN|USB_RECIP_INTERFACE))
1611 goto delegate;
1612 /* return two zero bytes */
1613 UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
1614 UDC_DATA_REG = 0;
1615 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1616 UDC_EP_NUM_REG = UDC_EP_DIR;
1617 status = 0;
1618 VDBG("GET_STATUS, interface %d\n", u.r.wIndex);
1619 /* next, status stage */
1620 break;
1621 default:
1622delegate:
1623 /* activate the ep0out fifo right away */
1624 if (!udc->ep0_in && u.r.wLength) {
1625 UDC_EP_NUM_REG = 0;
1626 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1627 }
1628
1629 /* gadget drivers see class/vendor specific requests,
1630 * {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
1631 * and more
1632 */
1633 VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
1634 u.r.bRequestType, u.r.bRequest,
1635 u.r.wValue, u.r.wIndex, u.r.wLength);
1636
1637 /* The gadget driver may return an error here,
1638 * causing an immediate protocol stall.
1639 *
1640 * Else it must issue a response, either queueing a
1641 * response buffer for the DATA stage, or halting ep0
1642 * (causing a protocol stall, not a real halt). A
1643 * zero length buffer means no DATA stage.
1644 *
1645 * It's fine to issue that response after the setup()
1646	 * call returns and this IRQ has been handled.
1647 */
1648 udc->ep0_setup = 1;
1649 spin_unlock(&udc->lock);
1650 status = udc->driver->setup (&udc->gadget, &u.r);
1651 spin_lock(&udc->lock);
1652 udc->ep0_setup = 0;
1653 }
1654
1655 if (status < 0) {
1656do_stall:
1657 VDBG("req %02x.%02x protocol STALL; stat %d\n",
1658 u.r.bRequestType, u.r.bRequest, status);
1659 if (udc->ep0_set_config) {
1660 if (udc->ep0_reset_config)
1661 WARN("error resetting config?\n");
1662 else
1663 UDC_SYSCON2_REG = UDC_CLR_CFG;
1664 }
1665 UDC_SYSCON2_REG = UDC_STALL_CMD;
1666 udc->ep0_pending = 0;
1667 }
1668 }
1669}
1670
1671/*-------------------------------------------------------------------------*/
1672
1673#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)
1674
1675static void devstate_irq(struct omap_udc *udc, u16 irq_src)
1676{
1677 u16 devstat, change;
1678
1679 devstat = UDC_DEVSTAT_REG;
1680 change = devstat ^ udc->devstat;
1681 udc->devstat = devstat;
1682
1683 if (change & (UDC_USB_RESET|UDC_ATT)) {
1684 udc_quiesce(udc);
1685
1686 if (change & UDC_ATT) {
1687 /* driver for any external transceiver will
1688 * have called omap_vbus_session() already
1689 */
1690 if (devstat & UDC_ATT) {
1691 udc->gadget.speed = USB_SPEED_FULL;
1692 VDBG("connect\n");
1693 if (!udc->transceiver)
1694 pullup_enable(udc);
1695 // if (driver->connect) call it
1696 } else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1697 udc->gadget.speed = USB_SPEED_UNKNOWN;
1698 if (!udc->transceiver)
1699 pullup_disable(udc);
1700 DBG("disconnect, gadget %s\n",
1701 udc->driver->driver.name);
1702 if (udc->driver->disconnect) {
1703 spin_unlock(&udc->lock);
1704 udc->driver->disconnect(&udc->gadget);
1705 spin_lock(&udc->lock);
1706 }
1707 }
1708 change &= ~UDC_ATT;
1709 }
1710
1711 if (change & UDC_USB_RESET) {
1712 if (devstat & UDC_USB_RESET) {
1713 VDBG("RESET=1\n");
1714 } else {
1715 udc->gadget.speed = USB_SPEED_FULL;
1716 INFO("USB reset done, gadget %s\n",
1717 udc->driver->driver.name);
1718 /* ep0 traffic is legal from now on */
1719 UDC_IRQ_EN_REG = UDC_DS_CHG_IE | UDC_EP0_IE;
1720 }
1721 change &= ~UDC_USB_RESET;
1722 }
1723 }
1724 if (change & UDC_SUS) {
1725 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1726 // FIXME tell isp1301 to suspend/resume (?)
1727 if (devstat & UDC_SUS) {
1728 VDBG("suspend\n");
1729 update_otg(udc);
1730 /* HNP could be under way already */
1731 if (udc->gadget.speed == USB_SPEED_FULL
1732 && udc->driver->suspend) {
1733 spin_unlock(&udc->lock);
1734 udc->driver->suspend(&udc->gadget);
1735 spin_lock(&udc->lock);
1736 }
1737 } else {
1738 VDBG("resume\n");
1739 if (udc->gadget.speed == USB_SPEED_FULL
1740 && udc->driver->resume) {
1741 spin_unlock(&udc->lock);
1742 udc->driver->resume(&udc->gadget);
1743 spin_lock(&udc->lock);
1744 }
1745 }
1746 }
1747 change &= ~UDC_SUS;
1748 }
1749 if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
1750 update_otg(udc);
1751 change &= ~OTG_FLAGS;
1752 }
1753
1754 change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
1755 if (change)
1756 VDBG("devstat %03x, ignore change %03x\n",
1757 devstat, change);
1758
1759 UDC_IRQ_SRC_REG = UDC_DS_CHG;
1760}
1761
1762static irqreturn_t
1763omap_udc_irq(int irq, void *_udc, struct pt_regs *r)
1764{
1765 struct omap_udc *udc = _udc;
1766 u16 irq_src;
1767 irqreturn_t status = IRQ_NONE;
1768 unsigned long flags;
1769
1770 spin_lock_irqsave(&udc->lock, flags);
1771 irq_src = UDC_IRQ_SRC_REG;
1772
1773 /* Device state change (usb ch9 stuff) */
1774 if (irq_src & UDC_DS_CHG) {
1775 devstate_irq(_udc, irq_src);
1776 status = IRQ_HANDLED;
1777 irq_src &= ~UDC_DS_CHG;
1778 }
1779
1780 /* EP0 control transfers */
1781 if (irq_src & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
1782 ep0_irq(_udc, irq_src);
1783 status = IRQ_HANDLED;
1784 irq_src &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
1785 }
1786
1787 /* DMA transfer completion */
1788 if (use_dma && (irq_src & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
1789 dma_irq(_udc, irq_src);
1790 status = IRQ_HANDLED;
1791 irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
1792 }
1793
1794 irq_src &= ~(UDC_SOF|UDC_EPN_TX|UDC_EPN_RX);
1795 if (irq_src)
1796 DBG("udc_irq, unhandled %03x\n", irq_src);
1797 spin_unlock_irqrestore(&udc->lock, flags);
1798
1799 return status;
1800}
1801
1802/* workaround for seemingly-lost IRQs for RX ACKs... */
1803#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
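/* neither fifo flag set: a double-buffered endpoint holds just one packet */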
1804#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
1805
1806static void pio_out_timer(unsigned long _ep)
1807{
1808 struct omap_ep *ep = (void *) _ep;
1809 unsigned long flags;
1810 u16 stat_flg;
1811
1812 spin_lock_irqsave(&ep->udc->lock, flags);
1813 if (!list_empty(&ep->queue) && ep->ackwait) {
1814 use_ep(ep, 0);
1815 stat_flg = UDC_STAT_FLG_REG;
1816
1817 if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
1818 || (ep->double_buf && HALF_FULL(stat_flg)))) {
1819 struct omap_req *req;
1820
1821 VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
1822 req = container_of(ep->queue.next,
1823 struct omap_req, queue);
1824 UDC_EP_NUM_REG = ep->bEndpointAddress | UDC_EP_SEL;
1825 (void) read_fifo(ep, req);
1826 UDC_EP_NUM_REG = ep->bEndpointAddress;
1827 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1828 ep->ackwait = 1 + ep->double_buf;
1829 }
1830 }
1831 mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
1832 spin_unlock_irqrestore(&ep->udc->lock, flags);
1833}
1834
1835static irqreturn_t
1836omap_udc_pio_irq(int irq, void *_dev, struct pt_regs *r)
1837{
1838 u16 epn_stat, irq_src;
1839 irqreturn_t status = IRQ_NONE;
1840 struct omap_ep *ep;
1841 int epnum;
1842 struct omap_udc *udc = _dev;
1843 struct omap_req *req;
1844 unsigned long flags;
1845
1846 spin_lock_irqsave(&udc->lock, flags);
1847 epn_stat = UDC_EPN_STAT_REG;
1848 irq_src = UDC_IRQ_SRC_REG;
1849
1850 /* handle OUT first, to avoid some wasteful NAKs */
1851 if (irq_src & UDC_EPN_RX) {
1852 epnum = (epn_stat >> 8) & 0x0f;
1853 UDC_IRQ_SRC_REG = UDC_EPN_RX;
1854 status = IRQ_HANDLED;
1855 ep = &udc->ep[epnum];
1856 ep->irqs++;
1857
1858 UDC_EP_NUM_REG = epnum | UDC_EP_SEL;
1859 ep->fnf = 0;
1860 if ((UDC_STAT_FLG_REG & UDC_ACK)) {
1861 ep->ackwait--;
1862 if (!list_empty(&ep->queue)) {
1863 int stat;
1864 req = container_of(ep->queue.next,
1865 struct omap_req, queue);
1866 stat = read_fifo(ep, req);
1867 if (!ep->double_buf)
1868 ep->fnf = 1;
1869 }
1870 }
1871 /* min 6 clock delay before clearing EP_SEL ... */
1872 epn_stat = UDC_EPN_STAT_REG;
1873 epn_stat = UDC_EPN_STAT_REG;
1874 UDC_EP_NUM_REG = epnum;
1875
1876 /* enabling fifo _after_ clearing ACK, contrary to docs,
1877 * reduces lossage; timer still needed though (sigh).
1878 */
1879 if (ep->fnf) {
1880 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1881 ep->ackwait = 1 + ep->double_buf;
1882 }
1883 mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
1884 }
1885
1886 /* then IN transfers */
1887 else if (irq_src & UDC_EPN_TX) {
1888 epnum = epn_stat & 0x0f;
1889 UDC_IRQ_SRC_REG = UDC_EPN_TX;
1890 status = IRQ_HANDLED;
1891 ep = &udc->ep[16 + epnum];
1892 ep->irqs++;
1893
1894 UDC_EP_NUM_REG = epnum | UDC_EP_DIR | UDC_EP_SEL;
1895 if ((UDC_STAT_FLG_REG & UDC_ACK)) {
1896 ep->ackwait = 0;
1897 if (!list_empty(&ep->queue)) {
1898 req = container_of(ep->queue.next,
1899 struct omap_req, queue);
1900 (void) write_fifo(ep, req);
1901 }
1902 }
1903 /* min 6 clock delay before clearing EP_SEL ... */
1904 epn_stat = UDC_EPN_STAT_REG;
1905 epn_stat = UDC_EPN_STAT_REG;
1906 UDC_EP_NUM_REG = epnum | UDC_EP_DIR;
1907 /* then 6 clocks before it'd tx */
1908 }
1909
1910 spin_unlock_irqrestore(&udc->lock, flags);
1911 return status;
1912}
1913
1914#ifdef USE_ISO
1915static irqreturn_t
1916omap_udc_iso_irq(int irq, void *_dev, struct pt_regs *r)
1917{
1918 struct omap_udc *udc = _dev;
1919 struct omap_ep *ep;
1920 int pending = 0;
1921 unsigned long flags;
1922
1923 spin_lock_irqsave(&udc->lock, flags);
1924
1925 /* handle all non-DMA ISO transfers */
1926 list_for_each_entry (ep, &udc->iso, iso) {
1927 u16 stat;
1928 struct omap_req *req;
1929
1930 if (ep->has_dma || list_empty(&ep->queue))
1931 continue;
1932 req = list_entry(ep->queue.next, struct omap_req, queue);
1933
1934 use_ep(ep, UDC_EP_SEL);
1935 stat = UDC_STAT_FLG_REG;
1936
1937 /* NOTE: like the other controller drivers, this isn't
1938 * currently reporting lost or damaged frames.
1939 */
1940 if (ep->bEndpointAddress & USB_DIR_IN) {
1941 if (stat & UDC_MISS_IN)
1942 /* done(ep, req, -EPROTO) */;
1943 else
1944 write_fifo(ep, req);
1945 } else {
1946 int status = 0;
1947
1948 if (stat & UDC_NO_RXPACKET)
1949 status = -EREMOTEIO;
1950 else if (stat & UDC_ISO_ERR)
1951 status = -EILSEQ;
1952 else if (stat & UDC_DATA_FLUSH)
1953 status = -ENOSR;
1954
1955 if (status)
1956 /* done(ep, req, status) */;
1957 else
1958 read_fifo(ep, req);
1959 }
1960 deselect_ep();
1961 /* 6 wait states before next EP */
1962
1963 ep->irqs++;
1964 if (!list_empty(&ep->queue))
1965 pending = 1;
1966 }
1967 if (!pending)
1968 UDC_IRQ_EN_REG &= ~UDC_SOF_IE;
1969 UDC_IRQ_SRC_REG = UDC_SOF;
1970
1971 spin_unlock_irqrestore(&udc->lock, flags);
1972 return IRQ_HANDLED;
1973}
1974#endif
1975
1976/*-------------------------------------------------------------------------*/
1977
1978static struct omap_udc *udc;
1979
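/* A gadget driver hooks up through something like the sketch below
 * (all names here are hypothetical); bind, unbind and setup are
 * required, and the declared speed must be at least USB_SPEED_FULL,
 * else registration fails with -EINVAL:
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.speed	= USB_SPEED_FULL,
 *		.bind	= my_bind,
 *		.unbind	= my_unbind,
 *		.setup	= my_setup,
 *		.driver	= { .name = "my_gadget" },
 *	};
 *
 *	status = usb_gadget_register_driver(&my_driver);
 */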
1980int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1981{
1982 int status = -ENODEV;
1983 struct omap_ep *ep;
1984 unsigned long flags;
1985
1986 /* basic sanity tests */
1987 if (!udc)
1988 return -ENODEV;
1989 if (!driver
1990 // FIXME if otg, check: driver->is_otg
1991 || driver->speed < USB_SPEED_FULL
1992 || !driver->bind
1993 || !driver->unbind
1994 || !driver->setup)
1995 return -EINVAL;
1996
1997 spin_lock_irqsave(&udc->lock, flags);
1998 if (udc->driver) {
1999 spin_unlock_irqrestore(&udc->lock, flags);
2000 return -EBUSY;
2001 }
2002
2003 /* reset state */
2004 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
2005 ep->irqs = 0;
2006 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
2007 continue;
2008 use_ep(ep, 0);
2009 UDC_CTRL_REG = UDC_SET_HALT;
2010 }
2011 udc->ep0_pending = 0;
2012 udc->ep[0].irqs = 0;
2013 udc->softconnect = 1;
2014
2015 /* hook up the driver */
2016 driver->driver.bus = 0;
2017 udc->driver = driver;
2018 udc->gadget.dev.driver = &driver->driver;
2019 spin_unlock_irqrestore(&udc->lock, flags);
2020
2021 status = driver->bind (&udc->gadget);
2022 if (status) {
2023 DBG("bind to %s --> %d\n", driver->driver.name, status);
2024 udc->gadget.dev.driver = 0;
2025 udc->driver = 0;
2026 goto done;
2027 }
2028 DBG("bound to driver %s\n", driver->driver.name);
2029
2030 UDC_IRQ_SRC_REG = UDC_IRQ_SRC_MASK;
2031
2032 /* connect to bus through transceiver */
2033 if (udc->transceiver) {
2034 status = otg_set_peripheral(udc->transceiver, &udc->gadget);
2035 if (status < 0) {
2036 ERR("can't bind to transceiver\n");
2037 driver->unbind (&udc->gadget);
2038 udc->gadget.dev.driver = 0;
2039 udc->driver = 0;
2040 goto done;
2041 }
2042 } else {
2043 if (can_pullup(udc))
2044 pullup_enable (udc);
2045 else
2046 pullup_disable (udc);
2047 }
2048
2049	 /* boards that don't have VBUS sensing can't autogate the 48 MHz clock,
2050	 * so they can't enter deep sleep while a gadget driver is active.
2051 */
2052 if (machine_is_omap_innovator() || machine_is_omap_osk())
2053 omap_vbus_session(&udc->gadget, 1);
2054
2055done:
2056 return status;
2057}
2058EXPORT_SYMBOL(usb_gadget_register_driver);
2059
2060int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
2061{
2062 unsigned long flags;
2063 int status = -ENODEV;
2064
2065 if (!udc)
2066 return -ENODEV;
2067 if (!driver || driver != udc->driver)
2068 return -EINVAL;
2069
2070 if (machine_is_omap_innovator() || machine_is_omap_osk())
2071 omap_vbus_session(&udc->gadget, 0);
2072
2073 if (udc->transceiver)
2074 (void) otg_set_peripheral(udc->transceiver, 0);
2075 else
2076 pullup_disable(udc);
2077
2078 spin_lock_irqsave(&udc->lock, flags);
2079 udc_quiesce(udc);
2080 spin_unlock_irqrestore(&udc->lock, flags);
2081
2082 driver->unbind(&udc->gadget);
2083 udc->gadget.dev.driver = 0;
2084 udc->driver = 0;
2085
2086	status = 0;
2087 DBG("unregistered driver '%s'\n", driver->driver.name);
2088 return status;
2089}
2090EXPORT_SYMBOL(usb_gadget_unregister_driver);
2091
2092
2093/*-------------------------------------------------------------------------*/
2094
2095#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2096
2097#include <linux/seq_file.h>
2098
2099static const char proc_filename[] = "driver/udc";
2100
2101#define FOURBITS "%s%s%s%s"
2102#define EIGHTBITS FOURBITS FOURBITS
2103
2104static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
2105{
2106 u16 stat_flg;
2107 struct omap_req *req;
2108 char buf[20];
2109
2110 use_ep(ep, 0);
2111
2112 if (use_dma && ep->has_dma)
2113 snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ",
2114 (ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
2115 ep->dma_channel - 1, ep->lch);
2116 else
2117 buf[0] = 0;
2118
2119 stat_flg = UDC_STAT_FLG_REG;
2120 seq_printf(s,
2121 "\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
2122 ep->name, buf,
2123 ep->double_buf ? "dbuf " : "",
2124 ({char *s; switch(ep->ackwait){
2125 case 0: s = ""; break;
2126 case 1: s = "(ackw) "; break;
2127 case 2: s = "(ackw2) "; break;
2128 default: s = "(?) "; break;
2129 } s;}),
2130 ep->irqs, stat_flg,
2131 (stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "",
2132 (stat_flg & UDC_MISS_IN) ? "miss_in " : "",
2133 (stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "",
2134 (stat_flg & UDC_ISO_ERR) ? "iso_err " : "",
2135 (stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "",
2136 (stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "",
2137 (stat_flg & UDC_EP_HALTED) ? "HALT " : "",
2138 (stat_flg & UDC_STALL) ? "STALL " : "",
2139 (stat_flg & UDC_NAK) ? "NAK " : "",
2140 (stat_flg & UDC_ACK) ? "ACK " : "",
2141 (stat_flg & UDC_FIFO_EN) ? "fifo_en " : "",
2142 (stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "",
2143 (stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : "");
2144
2145 if (list_empty (&ep->queue))
2146 seq_printf(s, "\t(queue empty)\n");
2147 else
2148 list_for_each_entry (req, &ep->queue, queue) {
2149 unsigned length = req->req.actual;
2150
2151 if (use_dma && buf[0]) {
2152 length += ((ep->bEndpointAddress & USB_DIR_IN)
2153 ? dma_src_len : dma_dest_len)
2154 (ep, req->req.dma + length);
2155 buf[0] = 0;
2156 }
2157 seq_printf(s, "\treq %p len %d/%d buf %p\n",
2158 &req->req, length,
2159 req->req.length, req->req.buf);
2160 }
2161}
2162
2163static char *trx_mode(unsigned m, int enabled)
2164{
2165 switch (m) {
2166 case 0: return enabled ? "*6wire" : "unused";
2167 case 1: return "4wire";
2168 case 2: return "3wire";
2169 case 3: return "6wire";
2170 default: return "unknown";
2171 }
2172}
2173
2174static int proc_otg_show(struct seq_file *s)
2175{
2176 u32 tmp;
2177 u32 trans;
2178
2179 tmp = OTG_REV_REG;
2180 trans = USB_TRANSCEIVER_CTRL_REG;
2181 seq_printf(s, "OTG rev %d.%d, transceiver_ctrl %03x\n",
2182 tmp >> 4, tmp & 0xf, trans);
2183 tmp = OTG_SYSCON_1_REG;
2184 seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
2185 FOURBITS "\n", tmp,
2186 trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
2187 trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R),
2188 (USB0_TRX_MODE(tmp) == 0)
2189 ? "internal"
2190 : trx_mode(USB0_TRX_MODE(tmp), 1),
2191 (tmp & OTG_IDLE_EN) ? " !otg" : "",
2192 (tmp & HST_IDLE_EN) ? " !host" : "",
2193 (tmp & DEV_IDLE_EN) ? " !dev" : "",
2194 (tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
2195 tmp = OTG_SYSCON_2_REG;
2196 seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
2197 " b_ase_brst=%d hmc=%d\n", tmp,
2198 (tmp & OTG_EN) ? " otg_en" : "",
2199 (tmp & USBX_SYNCHRO) ? " synchro" : "",
2200 // much more SRP stuff
2201 (tmp & SRP_DATA) ? " srp_data" : "",
2202 (tmp & SRP_VBUS) ? " srp_vbus" : "",
2203 (tmp & OTG_PADEN) ? " otg_paden" : "",
2204 (tmp & HMC_PADEN) ? " hmc_paden" : "",
2205 (tmp & UHOST_EN) ? " uhost_en" : "",
2206 (tmp & HMC_TLLSPEED) ? " tllspeed" : "",
2207 (tmp & HMC_TLLATTACH) ? " tllattach" : "",
2208 B_ASE_BRST(tmp),
2209 OTG_HMC(tmp));
2210 tmp = OTG_CTRL_REG;
2211 seq_printf(s, "otg_ctrl %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
2212 (tmp & OTG_ASESSVLD) ? " asess" : "",
2213 (tmp & OTG_BSESSEND) ? " bsess_end" : "",
2214 (tmp & OTG_BSESSVLD) ? " bsess" : "",
2215 (tmp & OTG_VBUSVLD) ? " vbus" : "",
2216 (tmp & OTG_ID) ? " id" : "",
2217 (tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST",
2218 (tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "",
2219 (tmp & OTG_A_BUSREQ) ? " a_bus" : "",
2220 (tmp & OTG_B_HNPEN) ? " b_hnpen" : "",
2221 (tmp & OTG_B_BUSREQ) ? " b_bus" : "",
2222 (tmp & OTG_BUSDROP) ? " busdrop" : "",
2223 (tmp & OTG_PULLDOWN) ? " down" : "",
2224 (tmp & OTG_PULLUP) ? " up" : "",
2225 (tmp & OTG_DRV_VBUS) ? " drv" : "",
2226 (tmp & OTG_PD_VBUS) ? " pd_vb" : "",
2227 (tmp & OTG_PU_VBUS) ? " pu_vb" : "",
2228 (tmp & OTG_PU_ID) ? " pu_id" : ""
2229 );
2230 tmp = OTG_IRQ_EN_REG;
2231 seq_printf(s, "otg_irq_en %04x" "\n", tmp);
2232 tmp = OTG_IRQ_SRC_REG;
2233 seq_printf(s, "otg_irq_src %04x" "\n", tmp);
2234 tmp = OTG_OUTCTRL_REG;
2235 seq_printf(s, "otg_outctrl %04x" "\n", tmp);
2236 tmp = OTG_TEST_REG;
2237 seq_printf(s, "otg_test %04x" "\n", tmp);
	return 0;
2238}
2239
2240static int proc_udc_show(struct seq_file *s, void *_)
2241{
2242 u32 tmp;
2243 struct omap_ep *ep;
2244 unsigned long flags;
2245
2246 spin_lock_irqsave(&udc->lock, flags);
2247
2248 seq_printf(s, "%s, version: " DRIVER_VERSION
2249#ifdef USE_ISO
2250 " (iso)"
2251#endif
2252 "%s\n",
2253 driver_desc,
2254 use_dma ? " (dma)" : "");
2255
2256 tmp = UDC_REV_REG & 0xff;
2257 seq_printf(s,
2258 "UDC rev %d.%d, fifo mode %d, gadget %s\n"
2259 "hmc %d, transceiver %s\n",
2260 tmp >> 4, tmp & 0xf,
2261 fifo_mode,
2262 udc->driver ? udc->driver->driver.name : "(none)",
2263 HMC,
2264 udc->transceiver ? udc->transceiver->label : "(none)");
2265 seq_printf(s, "ULPD control %04x req %04x status %04x\n",
2266 __REG16(ULPD_CLOCK_CTRL),
2267 __REG16(ULPD_SOFT_REQ),
2268 __REG16(ULPD_STATUS_REQ));
2269
2270 /* OTG controller registers */
2271 if (!cpu_is_omap15xx())
2272 proc_otg_show(s);
2273
2274 tmp = UDC_SYSCON1_REG;
2275 seq_printf(s, "\nsyscon1 %04x" EIGHTBITS "\n", tmp,
2276 (tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
2277 (tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
2278 (tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "",
2279 (tmp & UDC_NAK_EN) ? " nak" : "",
2280 (tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "",
2281 (tmp & UDC_SELF_PWR) ? " self_pwr" : "",
2282 (tmp & UDC_SOFF_DIS) ? " soff_dis" : "",
2283 (tmp & UDC_PULLUP_EN) ? " PULLUP" : "");
2284 // syscon2 is write-only
2285
2286 /* UDC controller registers */
2287 if (!(tmp & UDC_PULLUP_EN)) {
2288 seq_printf(s, "(suspended)\n");
2289 spin_unlock_irqrestore(&udc->lock, flags);
2290 return 0;
2291 }
2292
2293 tmp = UDC_DEVSTAT_REG;
2294 seq_printf(s, "devstat %04x" EIGHTBITS "%s%s\n", tmp,
2295 (tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
2296 (tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
2297 (tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "",
2298 (tmp & UDC_R_WK_OK) ? " r_wk_ok" : "",
2299 (tmp & UDC_USB_RESET) ? " usb_reset" : "",
2300 (tmp & UDC_SUS) ? " SUS" : "",
2301 (tmp & UDC_CFG) ? " CFG" : "",
2302 (tmp & UDC_ADD) ? " ADD" : "",
2303 (tmp & UDC_DEF) ? " DEF" : "",
2304 (tmp & UDC_ATT) ? " ATT" : "");
2305 seq_printf(s, "sof %04x\n", UDC_SOF_REG);
2306 tmp = UDC_IRQ_EN_REG;
2307 seq_printf(s, "irq_en %04x" FOURBITS "%s\n", tmp,
2308 (tmp & UDC_SOF_IE) ? " sof" : "",
2309 (tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
2310 (tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
2311 (tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
2312 (tmp & UDC_EP0_IE) ? " ep0" : "");
2313 tmp = UDC_IRQ_SRC_REG;
2314 seq_printf(s, "irq_src %04x" EIGHTBITS "%s%s\n", tmp,
2315 (tmp & UDC_TXN_DONE) ? " txn_done" : "",
2316 (tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
2317 (tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
2318 (tmp & UDC_SOF) ? " sof" : "",
2319 (tmp & UDC_EPN_RX) ? " epn_rx" : "",
2320 (tmp & UDC_EPN_TX) ? " epn_tx" : "",
2321 (tmp & UDC_DS_CHG) ? " ds_chg" : "",
2322 (tmp & UDC_SETUP) ? " setup" : "",
2323 (tmp & UDC_EP0_RX) ? " ep0out" : "",
2324 (tmp & UDC_EP0_TX) ? " ep0in" : "");
2325 if (use_dma) {
2326 unsigned i;
2327
2328 tmp = UDC_DMA_IRQ_EN_REG;
2329 seq_printf(s, "dma_irq_en %04x%s" EIGHTBITS "\n", tmp,
2330 (tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
2331 (tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
2332 (tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "",
2333
2334 (tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "",
2335 (tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "",
2336 (tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "",
2337
2338 (tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "",
2339 (tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
2340 (tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");
2341
2342 tmp = UDC_RXDMA_CFG_REG;
2343 seq_printf(s, "rxdma_cfg %04x\n", tmp);
2344 if (tmp) {
2345 for (i = 0; i < 3; i++) {
2346 if ((tmp & (0x0f << (i * 4))) == 0)
2347 continue;
2348 seq_printf(s, "rxdma[%d] %04x\n", i,
2349 UDC_RXDMA_REG(i + 1));
2350 }
2351 }
2352 tmp = UDC_TXDMA_CFG_REG;
2353 seq_printf(s, "txdma_cfg %04x\n", tmp);
2354 if (tmp) {
2355 for (i = 0; i < 3; i++) {
2356 if (!(tmp & (0x0f << (i * 4))))
2357 continue;
2358 seq_printf(s, "txdma[%d] %04x\n", i,
2359 UDC_TXDMA_REG(i + 1));
2360 }
2361 }
2362 }
2363
2364 tmp = UDC_DEVSTAT_REG;
2365 if (tmp & UDC_ATT) {
2366 proc_ep_show(s, &udc->ep[0]);
2367 if (tmp & UDC_ADD) {
2368 list_for_each_entry (ep, &udc->gadget.ep_list,
2369 ep.ep_list) {
2370 if (ep->desc)
2371 proc_ep_show(s, ep);
2372 }
2373 }
2374 }
2375 spin_unlock_irqrestore(&udc->lock, flags);
2376 return 0;
2377}
2378
2379static int proc_udc_open(struct inode *inode, struct file *file)
2380{
2381 return single_open(file, proc_udc_show, 0);
2382}
2383
2384static struct file_operations proc_ops = {
2385 .open = proc_udc_open,
2386 .read = seq_read,
2387 .llseek = seq_lseek,
2388 .release = single_release,
2389};
2390
2391static void create_proc_file(void)
2392{
2393 struct proc_dir_entry *pde;
2394
2395 pde = create_proc_entry (proc_filename, 0, NULL);
2396 if (pde)
2397 pde->proc_fops = &proc_ops;
2398}
2399
2400static void remove_proc_file(void)
2401{
2402 remove_proc_entry(proc_filename, 0);
2403}
2404
2405#else
2406
2407static inline void create_proc_file(void) {}
2408static inline void remove_proc_file(void) {}
2409
2410#endif
2411
2412/*-------------------------------------------------------------------------*/
2413
2414/* Before this controller can enumerate, we need to pick an endpoint
2415 * configuration, or "fifo_mode".  That involves allocating 2KB of packet
2416 * buffer space among the endpoints we'll be operating.
2417 */
2418static unsigned __init
2419omap_ep_setup(char *name, u8 addr, u8 type,
2420 unsigned buf, unsigned maxp, int dbuf)
2421{
2422 struct omap_ep *ep;
2423 u16 epn_rxtx = 0;
2424
2425 /* OUT endpoints first, then IN */
2426 ep = &udc->ep[addr & 0xf];
2427 if (addr & USB_DIR_IN)
2428 ep += 16;
2429
2430 /* in case of ep init table bugs */
2431 BUG_ON(ep->name[0]);
2432
2433 /* chip setup ... bit values are same for IN, OUT */
2434 if (type == USB_ENDPOINT_XFER_ISOC) {
2435 switch (maxp) {
2436 case 8: epn_rxtx = 0 << 12; break;
2437 case 16: epn_rxtx = 1 << 12; break;
2438 case 32: epn_rxtx = 2 << 12; break;
2439 case 64: epn_rxtx = 3 << 12; break;
2440 case 128: epn_rxtx = 4 << 12; break;
2441 case 256: epn_rxtx = 5 << 12; break;
2442 case 512: epn_rxtx = 6 << 12; break;
2443 default: BUG();
2444 }
2445 epn_rxtx |= UDC_EPN_RX_ISO;
2446 dbuf = 1;
2447 } else {
2448 /* double-buffering "not supported" on 15xx,
2449 * and ignored for PIO-IN on 16xx
2450 */
2451 if (!use_dma || cpu_is_omap15xx())
2452 dbuf = 0;
2453
2454 switch (maxp) {
2455 case 8: epn_rxtx = 0 << 12; break;
2456 case 16: epn_rxtx = 1 << 12; break;
2457 case 32: epn_rxtx = 2 << 12; break;
2458 case 64: epn_rxtx = 3 << 12; break;
2459 default: BUG();
2460 }
2461 if (dbuf && addr)
2462 epn_rxtx |= UDC_EPN_RX_DB;
2463 init_timer(&ep->timer);
2464 ep->timer.function = pio_out_timer;
2465 ep->timer.data = (unsigned long) ep;
2466 }
2467 if (addr)
2468 epn_rxtx |= UDC_EPN_RX_VALID;
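	/* low bits hold the fifo buffer's start address, in 8-byte units */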
2469 BUG_ON(buf & 0x07);
2470 epn_rxtx |= buf >> 3;
2471
2472 DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n",
2473 name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);
2474
2475 if (addr & USB_DIR_IN)
2476 UDC_EP_TX_REG(addr & 0xf) = epn_rxtx;
2477 else
2478 UDC_EP_RX_REG(addr) = epn_rxtx;
2479
2480 /* next endpoint's buffer starts after this one's */
2481 buf += maxp;
2482 if (dbuf)
2483 buf += maxp;
2484 BUG_ON(buf > 2048);
2485
2486 /* set up driver data structures */
2487 BUG_ON(strlen(name) >= sizeof ep->name);
2488 strlcpy(ep->name, name, sizeof ep->name);
2489 INIT_LIST_HEAD(&ep->queue);
2490 INIT_LIST_HEAD(&ep->iso);
2491 ep->bEndpointAddress = addr;
2492 ep->bmAttributes = type;
2493 ep->double_buf = dbuf;
2494 ep->udc = udc;
2495
2496 ep->ep.name = ep->name;
2497 ep->ep.ops = &omap_ep_ops;
2498 ep->ep.maxpacket = ep->maxpacket = maxp;
2499 list_add_tail (&ep->ep.ep_list, &udc->gadget.ep_list);
2500
2501 return buf;
2502}
2503
2504static void omap_udc_release(struct device *dev)
2505{
2506 complete(udc->done);
2507 kfree (udc);
2508 udc = 0;
2509}
2510
2511static int __init
2512omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
2513{
2514 unsigned tmp, buf;
2515
2516 /* abolish any previous hardware state */
2517 UDC_SYSCON1_REG = 0;
2518 UDC_IRQ_EN_REG = 0;
2519 UDC_IRQ_SRC_REG = UDC_IRQ_SRC_MASK;
2520 UDC_DMA_IRQ_EN_REG = 0;
2521 UDC_RXDMA_CFG_REG = 0;
2522 UDC_TXDMA_CFG_REG = 0;
2523
2524 /* UDC_PULLUP_EN gates the chip clock */
2525 // OTG_SYSCON_1_REG |= DEV_IDLE_EN;
2526
2527 udc = kmalloc (sizeof *udc, SLAB_KERNEL);
2528 if (!udc)
2529 return -ENOMEM;
2530
2531 memset(udc, 0, sizeof *udc);
2532 spin_lock_init (&udc->lock);
2533
2534 udc->gadget.ops = &omap_gadget_ops;
2535 udc->gadget.ep0 = &udc->ep[0].ep;
2536 INIT_LIST_HEAD(&udc->gadget.ep_list);
2537 INIT_LIST_HEAD(&udc->iso);
2538 udc->gadget.speed = USB_SPEED_UNKNOWN;
2539 udc->gadget.name = driver_name;
2540
2541 device_initialize(&udc->gadget.dev);
2542 strcpy (udc->gadget.dev.bus_id, "gadget");
2543 udc->gadget.dev.release = omap_udc_release;
2544 udc->gadget.dev.parent = &odev->dev;
2545 if (use_dma)
2546 udc->gadget.dev.dma_mask = odev->dev.dma_mask;
2547
2548 udc->transceiver = xceiv;
2549
2550 /* ep0 is special; put it right after the SETUP buffer */
2551 buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL,
2552 8 /* after SETUP */, 64 /* maxpacket */, 0);
2553 list_del_init(&udc->ep[0].ep.ep_list);
2554
2555 /* initially disable all non-ep0 endpoints */
2556 for (tmp = 1; tmp < 15; tmp++) {
2557 UDC_EP_RX_REG(tmp) = 0;
2558 UDC_EP_TX_REG(tmp) = 0;
2559 }
2560
2561#define OMAP_BULK_EP(name,addr) \
2562 buf = omap_ep_setup(name "-bulk", addr, \
2563 USB_ENDPOINT_XFER_BULK, buf, 64, 1);
2564#define OMAP_INT_EP(name,addr, maxp) \
2565 buf = omap_ep_setup(name "-int", addr, \
2566 USB_ENDPOINT_XFER_INT, buf, maxp, 0);
2567#define OMAP_ISO_EP(name,addr, maxp) \
2568 buf = omap_ep_setup(name "-iso", addr, \
2569 USB_ENDPOINT_XFER_ISOC, buf, maxp, 1);
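/* each helper threads "buf" along, packing the packet buffers end to end */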
2570
2571 switch (fifo_mode) {
2572 case 0:
2573 OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
2574 OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
2575 OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);
2576 break;
2577 case 1:
2578 OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
2579 OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
2580 OMAP_BULK_EP("ep3in", USB_DIR_IN | 3);
2581 OMAP_BULK_EP("ep4out", USB_DIR_OUT | 4);
2582
2583 OMAP_BULK_EP("ep5in", USB_DIR_IN | 5);
2584 OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
2585 OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
2586 OMAP_BULK_EP("ep6out", USB_DIR_OUT | 6);
2587
2588 OMAP_BULK_EP("ep7in", USB_DIR_IN | 7);
2589 OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
2590 OMAP_BULK_EP("ep8in", USB_DIR_IN | 8);
2591 OMAP_BULK_EP("ep8out", USB_DIR_OUT | 8);
2592
2593 OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);
2594	 OMAP_INT_EP("ep10out", USB_DIR_OUT | 10, 16);
2595	 OMAP_INT_EP("ep11in", USB_DIR_IN | 11, 16);
2596	 OMAP_INT_EP("ep12out", USB_DIR_OUT | 12, 16);
2597 break;
2598
2599#ifdef USE_ISO
2600 case 2: /* mixed iso/bulk */
2601 OMAP_ISO_EP("ep1in", USB_DIR_IN | 1, 256);
2602 OMAP_ISO_EP("ep2out", USB_DIR_OUT | 2, 256);
2603 OMAP_ISO_EP("ep3in", USB_DIR_IN | 3, 128);
2604 OMAP_ISO_EP("ep4out", USB_DIR_OUT | 4, 128);
2605
2606 OMAP_INT_EP("ep5in", USB_DIR_IN | 5, 16);
2607
2608 OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
2609 OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
2610 OMAP_INT_EP("ep8in", USB_DIR_IN | 8, 16);
2611 break;
2612 case 3: /* mixed bulk/iso */
2613 OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
2614 OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
2615 OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);
2616
2617 OMAP_BULK_EP("ep4in", USB_DIR_IN | 4);
2618 OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
2619 OMAP_INT_EP("ep6in", USB_DIR_IN | 6, 16);
2620
2621 OMAP_ISO_EP("ep7in", USB_DIR_IN | 7, 256);
2622 OMAP_ISO_EP("ep8out", USB_DIR_OUT | 8, 256);
2623 OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);
2624 break;
2625#endif
2626
2627 /* add more modes as needed */
2628
2629 default:
2630 ERR("unsupported fifo_mode #%d\n", fifo_mode);
2631 return -ENODEV;
2632 }
2633 UDC_SYSCON1_REG = UDC_CFG_LOCK|UDC_SELF_PWR;
2634 INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
2635 return 0;
2636}
2637
2638static int __init omap_udc_probe(struct device *dev)
2639{
2640 struct platform_device *odev = to_platform_device(dev);
2641 int status = -ENODEV;
2642 int hmc;
2643 struct otg_transceiver *xceiv = 0;
2644 const char *type = 0;
2645 struct omap_usb_config *config = dev->platform_data;
2646
2647 /* NOTE: "knows" the order of the resources! */
2648 if (!request_mem_region(odev->resource[0].start,
2649 odev->resource[0].end - odev->resource[0].start + 1,
2650 driver_name)) {
2651 DBG("request_mem_region failed\n");
2652 return -EBUSY;
2653 }
2654
2655 INFO("OMAP UDC rev %d.%d%s\n",
2656 UDC_REV_REG >> 4, UDC_REV_REG & 0xf,
2657 config->otg ? ", Mini-AB" : "");
2658
2659 /* use the mode given to us by board init code */
2660 if (cpu_is_omap15xx()) {
2661 hmc = HMC_1510;
2662 type = "(unknown)";
2663
2664 if (machine_is_omap_innovator()) {
2665 /* just set up software VBUS detect, and then
2666 * later rig it so we always report VBUS.
2667 * FIXME without really sensing VBUS, we can't
2668 * know when to turn PULLUP_EN on/off; and that
2669 * means we always "need" the 48MHz clock.
2670 */
2671 u32 tmp = FUNC_MUX_CTRL_0_REG;
2672
2673 FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510;
2674 tmp |= VBUS_MODE_1510;
2675 tmp &= ~VBUS_CTRL_1510;
2676 FUNC_MUX_CTRL_0_REG = tmp;
2677 }
2678 } else {
2679 hmc = HMC_1610;
2680 switch (hmc) {
2681 case 3:
2682 case 11:
2683 case 16:
2684 case 19:
2685 case 25:
2686 xceiv = otg_get_transceiver();
2687 if (!xceiv) {
2688 DBG("external transceiver not registered!\n");
2689 if (config->otg)
2690 goto cleanup0;
2691 type = "(unknown external)";
2692 } else
2693 type = xceiv->label;
2694 break;
2695 case 0: /* POWERUP DEFAULT == 0 */
2696 case 4:
2697 case 12:
2698 case 20:
2699 type = "INTEGRATED";
2700 break;
2701 case 21: /* internal loopback */
2702 type = "(loopback)";
2703 break;
2704 case 14: /* transceiverless */
2705 type = "(none)";
2706 break;
2707
2708 default:
2709 ERR("unrecognized UDC HMC mode %d\n", hmc);
2710	 goto cleanup0;
2711 }
2712 }
2713 INFO("hmc mode %d, transceiver %s\n", hmc, type);
2714
2715 /* a "gadget" abstracts/virtualizes the controller */
2716 status = omap_udc_setup(odev, xceiv);
2717 if (status) {
2718 goto cleanup0;
2719 }
2720 xceiv = 0;
2721 // "udc" is now valid
2722 pullup_disable(udc);
2723#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
2724 udc->gadget.is_otg = (config->otg != 0);
2725#endif
2726
2727 /* USB general purpose IRQ: ep0, state changes, dma, etc */
2728 status = request_irq(odev->resource[1].start, omap_udc_irq,
2729 SA_SAMPLE_RANDOM, driver_name, udc);
2730 if (status != 0) {
2731 ERR( "can't get irq %ld, err %d\n",
2732 odev->resource[1].start, status);
2733 goto cleanup1;
2734 }
2735
2736 /* USB "non-iso" IRQ (PIO for all but ep0) */
2737 status = request_irq(odev->resource[2].start, omap_udc_pio_irq,
2738 SA_SAMPLE_RANDOM, "omap_udc pio", udc);
2739 if (status != 0) {
2740 ERR( "can't get irq %ld, err %d\n",
2741 odev->resource[2].start, status);
2742 goto cleanup2;
2743 }
2744#ifdef USE_ISO
2745 status = request_irq(odev->resource[3].start, omap_udc_iso_irq,
2746 SA_INTERRUPT, "omap_udc iso", udc);
2747 if (status != 0) {
2748 ERR("can't get irq %ld, err %d\n",
2749 odev->resource[3].start, status);
2750 goto cleanup3;
2751 }
2752#endif
2753
2754 create_proc_file();
2755 device_add(&udc->gadget.dev);
2756 return 0;
2757
2758#ifdef USE_ISO
2759cleanup3:
2760 free_irq(odev->resource[2].start, udc);
2761#endif
2762
2763cleanup2:
2764 free_irq(odev->resource[1].start, udc);
2765
2766cleanup1:
2767 kfree (udc);
2768 udc = 0;
2769
2770cleanup0:
2771 if (xceiv)
2772 put_device(xceiv->dev);
2773 release_mem_region(odev->resource[0].start,
2774 odev->resource[0].end - odev->resource[0].start + 1);
2775 return status;
2776}
2777
2778static int __exit omap_udc_remove(struct device *dev)
2779{
2780 struct platform_device *odev = to_platform_device(dev);
2781 DECLARE_COMPLETION(done);
2782
2783 if (!udc)
2784 return -ENODEV;
2785
2786 udc->done = &done;
2787
2788 pullup_disable(udc);
2789 if (udc->transceiver) {
2790 put_device(udc->transceiver->dev);
2791 udc->transceiver = 0;
2792 }
2793 UDC_SYSCON1_REG = 0;
2794
2795 remove_proc_file();
2796
2797#ifdef USE_ISO
2798 free_irq(odev->resource[3].start, udc);
2799#endif
2800 free_irq(odev->resource[2].start, udc);
2801 free_irq(odev->resource[1].start, udc);
2802
2803 release_mem_region(odev->resource[0].start,
2804 odev->resource[0].end - odev->resource[0].start + 1);
2805
2806 device_unregister(&udc->gadget.dev);
2807 wait_for_completion(&done);
2808
2809 return 0;
2810}
2811
2812/* suspend/resume/wakeup from sysfs (echo > power/state) */
2813
2814static int omap_udc_suspend(struct device *dev, u32 state, u32 level)
2815{
2816 if (level != 0)
2817 return 0;
2818
2819 DBG("suspend, state %d\n", state);
2820 omap_pullup(&udc->gadget, 0);
2821 udc->gadget.dev.power.power_state = 3;
2822 udc->gadget.dev.parent->power.power_state = 3;
2823 return 0;
2824}
2825
2826static int omap_udc_resume(struct device *dev, u32 level)
2827{
2828 if (level != 0)
2829 return 0;
2830
2831 DBG("resume + wakeup/SRP\n");
2832 udc->gadget.dev.parent->power.power_state = 0;
2833 udc->gadget.dev.power.power_state = 0;
2834 omap_pullup(&udc->gadget, 1);
2835
2836 /* maybe the host would enumerate us if we nudged it */
2837 msleep(100);
2838 return omap_wakeup(&udc->gadget);
2839}
2840
2841/*-------------------------------------------------------------------------*/
2842
2843static struct device_driver udc_driver = {
2844 .name = (char *) driver_name,
2845 .bus = &platform_bus_type,
2846 .probe = omap_udc_probe,
2847 .remove = __exit_p(omap_udc_remove),
2848 .suspend = omap_udc_suspend,
2849 .resume = omap_udc_resume,
2850};
2851
2852static int __init udc_init(void)
2853{
2854 INFO("%s, version: " DRIVER_VERSION
2855#ifdef USE_ISO
2856 " (iso)"
2857#endif
2858 "%s\n", driver_desc,
2859 use_dma ? " (dma)" : "");
2860 return driver_register(&udc_driver);
2861}
2862module_init(udc_init);
2863
2864static void __exit udc_exit(void)
2865{
2866 driver_unregister(&udc_driver);
2867}
2868module_exit(udc_exit);
2869
2870MODULE_DESCRIPTION(DRIVER_DESC);
2871MODULE_LICENSE("GPL");
2872
diff --git a/drivers/usb/gadget/omap_udc.h b/drivers/usb/gadget/omap_udc.h
new file mode 100644
index 000000000000..c9e68541622c
--- /dev/null
+++ b/drivers/usb/gadget/omap_udc.h
@@ -0,0 +1,208 @@
1/*
2 * omap_udc.h -- for omap 3.2 udc, with OTG support
3 *
4 * 2004 (C) Texas Instruments, Inc.
5 * 2004 (C) David Brownell
6 */
7
8/*
9 * USB device/endpoint management registers
10 */
11#define UDC_REG(offset) __REG16(UDC_BASE + (offset))
12
13#define UDC_REV_REG UDC_REG(0x0) /* Revision */
14#define UDC_EP_NUM_REG UDC_REG(0x4) /* Which endpoint */
15# define UDC_SETUP_SEL (1 << 6)
16# define UDC_EP_SEL (1 << 5)
17# define UDC_EP_DIR (1 << 4)
18 /* low 4 bits for endpoint number */
19#define UDC_DATA_REG UDC_REG(0x08) /* Endpoint FIFO */
20#define UDC_CTRL_REG UDC_REG(0x0C) /* Endpoint control */
21# define UDC_CLR_HALT (1 << 7)
22# define UDC_SET_HALT (1 << 6)
23# define UDC_SET_FIFO_EN (1 << 2)
24# define UDC_CLR_EP (1 << 1)
25# define UDC_RESET_EP (1 << 0)
26#define UDC_STAT_FLG_REG UDC_REG(0x10) /* Endpoint status */
27# define UDC_NO_RXPACKET (1 << 15)
28# define UDC_MISS_IN (1 << 14)
29# define UDC_DATA_FLUSH (1 << 13)
30# define UDC_ISO_ERR (1 << 12)
31# define UDC_ISO_FIFO_EMPTY (1 << 9)
32# define UDC_ISO_FIFO_FULL (1 << 8)
33# define UDC_EP_HALTED (1 << 6)
34# define UDC_STALL (1 << 5)
35# define UDC_NAK (1 << 4)
36# define UDC_ACK (1 << 3)
37# define UDC_FIFO_EN (1 << 2)
38# define UDC_NON_ISO_FIFO_EMPTY (1 << 1)
39# define UDC_NON_ISO_FIFO_FULL (1 << 0)
40#define UDC_RXFSTAT_REG UDC_REG(0x14) /* OUT bytecount */
41#define UDC_SYSCON1_REG UDC_REG(0x18) /* System config 1 */
42# define UDC_CFG_LOCK (1 << 8)
43# define UDC_DATA_ENDIAN (1 << 7)
44# define UDC_DMA_ENDIAN (1 << 6)
45# define UDC_NAK_EN (1 << 4)
46# define UDC_AUTODECODE_DIS (1 << 3)
47# define UDC_SELF_PWR (1 << 2)
48# define UDC_SOFF_DIS (1 << 1)
49# define UDC_PULLUP_EN (1 << 0)
50#define UDC_SYSCON2_REG UDC_REG(0x1C) /* System config 2 */
51# define UDC_RMT_WKP (1 << 6)
52# define UDC_STALL_CMD (1 << 5)
53# define UDC_DEV_CFG (1 << 3)
54# define UDC_CLR_CFG (1 << 2)
55#define UDC_DEVSTAT_REG UDC_REG(0x20) /* Device status */
56# define UDC_B_HNP_ENABLE (1 << 9)
57# define UDC_A_HNP_SUPPORT (1 << 8)
58# define UDC_A_ALT_HNP_SUPPORT (1 << 7)
59# define UDC_R_WK_OK (1 << 6)
60# define UDC_USB_RESET (1 << 5)
61# define UDC_SUS (1 << 4)
62# define UDC_CFG (1 << 3)
63# define UDC_ADD (1 << 2)
64# define UDC_DEF (1 << 1)
65# define UDC_ATT (1 << 0)
66#define UDC_SOF_REG UDC_REG(0x24) /* Start of frame */
67# define UDC_FT_LOCK (1 << 12)
68# define UDC_TS_OK (1 << 11)
69# define UDC_TS 0x03ff
70#define UDC_IRQ_EN_REG UDC_REG(0x28) /* Interrupt enable */
71# define UDC_SOF_IE (1 << 7)
72# define UDC_EPN_RX_IE (1 << 5)
73# define UDC_EPN_TX_IE (1 << 4)
74# define UDC_DS_CHG_IE (1 << 3)
75# define UDC_EP0_IE (1 << 0)
76#define UDC_DMA_IRQ_EN_REG UDC_REG(0x2C) /* DMA irq enable */
77 /* rx/tx dma channels numbered 1-3 not 0-2 */
78# define UDC_TX_DONE_IE(n) (1 << (4 * (n) - 2))
79# define UDC_RX_CNT_IE(n) (1 << (4 * (n) - 3))
80# define UDC_RX_EOT_IE(n) (1 << (4 * (n) - 4))
81#define UDC_IRQ_SRC_REG UDC_REG(0x30) /* Interrupt source */
82# define UDC_TXN_DONE (1 << 10)
83# define UDC_RXN_CNT (1 << 9)
84# define UDC_RXN_EOT (1 << 8)
85# define UDC_SOF (1 << 7)
86# define UDC_EPN_RX (1 << 5)
87# define UDC_EPN_TX (1 << 4)
88# define UDC_DS_CHG (1 << 3)
89# define UDC_SETUP (1 << 2)
90# define UDC_EP0_RX (1 << 1)
91# define UDC_EP0_TX (1 << 0)
92# define UDC_IRQ_SRC_MASK 0x7bf
93#define UDC_EPN_STAT_REG UDC_REG(0x34) /* EP irq status */
94#define UDC_DMAN_STAT_REG UDC_REG(0x38) /* DMA irq status */
95# define UDC_DMA_RX_SB (1 << 12)
96# define UDC_DMA_RX_SRC(x) (((x)>>8) & 0xf)
97# define UDC_DMA_TX_SRC(x) (((x)>>0) & 0xf)
98
99
100/* DMA configuration registers: up to three channels in each direction. */
101#define UDC_RXDMA_CFG_REG UDC_REG(0x40) /* 3 eps for RX DMA */
102#define UDC_TXDMA_CFG_REG UDC_REG(0x44) /* 3 eps for TX DMA */
103#define UDC_DATA_DMA_REG UDC_REG(0x48) /* rx/tx fifo addr */
104
105/* rx/tx dma control, numbering channels 1-3 not 0-2 */
106#define UDC_TXDMA_REG(chan) UDC_REG(0x50 - 4 + 4 * (chan))
107# define UDC_TXN_EOT (1 << 15) /* bytes vs packets */
108# define UDC_TXN_START (1 << 14) /* start transfer */
109# define UDC_TXN_TSC 0x03ff /* units in xfer */
110#define UDC_RXDMA_REG(chan) UDC_REG(0x60 - 4 + 4 * (chan))
111# define UDC_RXN_STOP (1 << 15) /* enable EOT irq */
112# define UDC_RXN_TC 0x00ff /* packets in xfer */
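/*
 * Worked example, derived purely from the two macros above (channels are
 * numbered 1-3): UDC_TXDMA_REG(1..3) resolve to offsets 0x50, 0x54, 0x58,
 * and UDC_RXDMA_REG(1..3) to offsets 0x60, 0x64, 0x68.  A TX transfer of
 * "units" units might then be kicked off roughly as
 *
 *	UDC_TXDMA_REG(chan) = UDC_TXN_START | (units & UDC_TXN_TSC);
 *
 * (illustrative sketch only; omap_udc.c is the authority on real usage).
 */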
113
114
115/*
116 * Endpoint configuration registers (used before CFG_LOCK is set)
117 * UDC_EP_TX_REG(0) is unused
118 */
119#define UDC_EP_RX_REG(endpoint) UDC_REG(0x80 + (endpoint)*4)
120# define UDC_EPN_RX_VALID (1 << 15)
121# define UDC_EPN_RX_DB (1 << 14)
122 /* buffer size in bits 13, 12 */
123# define UDC_EPN_RX_ISO (1 << 11)
124 /* buffer pointer in low 11 bits */
125#define UDC_EP_TX_REG(endpoint) UDC_REG(0xc0 + (endpoint)*4)
126 /* same bitfields as in RX_REG */
127
128/*-------------------------------------------------------------------------*/
129
130struct omap_req {
131 struct usb_request req;
132 struct list_head queue;
133 unsigned dma_bytes;
134 unsigned mapped:1;
135};
136
137struct omap_ep {
138 struct usb_ep ep;
139 struct list_head queue;
140 unsigned long irqs;
141 struct list_head iso;
142 const struct usb_endpoint_descriptor *desc;
143 char name[14];
144 u16 maxpacket;
145 u8 bEndpointAddress;
146 u8 bmAttributes;
147 unsigned double_buf:1;
148 unsigned stopped:1;
149 unsigned fnf:1;
150 unsigned has_dma:1;
151 u8 ackwait;
152 u8 dma_channel;
153 u16 dma_counter;
154 int lch;
155 struct omap_udc *udc;
156 struct timer_list timer;
157};
158
159struct omap_udc {
160 struct usb_gadget gadget;
161 struct usb_gadget_driver *driver;
162 spinlock_t lock;
163 struct omap_ep ep[32];
164 u16 devstat;
165 struct otg_transceiver *transceiver;
166 struct list_head iso;
167 unsigned softconnect:1;
168 unsigned vbus_active:1;
169 unsigned ep0_pending:1;
170 unsigned ep0_in:1;
171 unsigned ep0_set_config:1;
172 unsigned ep0_reset_config:1;
173 unsigned ep0_setup:1;
174
175 struct completion *done;
176};
177
178/*-------------------------------------------------------------------------*/
179
180#ifdef DEBUG
181#define DBG(stuff...) printk(KERN_DEBUG "udc: " stuff)
182#else
183#define DBG(stuff...) do{}while(0)
184#endif
185
186#ifdef VERBOSE
187# define VDBG DBG
188#else
189# define VDBG(stuff...) do{}while(0)
190#endif
191
192#define ERR(stuff...) printk(KERN_ERR "udc: " stuff)
193#define WARN(stuff...) printk(KERN_WARNING "udc: " stuff)
194#define INFO(stuff...) printk(KERN_INFO "udc: " stuff)
195
196/*-------------------------------------------------------------------------*/
197
198#define MOD_CONF_CTRL_0_REG __REG32(MOD_CONF_CTRL_0)
199#define VBUS_W2FC_1510 (1 << 17) /* 0 gpio0, 1 dvdd2 pin */
200
201#define FUNC_MUX_CTRL_0_REG __REG32(FUNC_MUX_CTRL_0)
202#define VBUS_CTRL_1510 (1 << 19) /* 1 connected (software) */
203#define VBUS_MODE_1510 (1 << 18) /* 0 hardware, 1 software */
204
205#define HMC_1510 ((MOD_CONF_CTRL_0_REG >> 1) & 0x3f)
206#define HMC_1610 (OTG_SYSCON_2_REG & 0x3f)
207#define HMC (cpu_is_omap15xx() ? HMC_1510 : HMC_1610)
208
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
new file mode 100644
index 000000000000..6390c5726d81
--- /dev/null
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -0,0 +1,2648 @@
1/*
2 * linux/drivers/usb/gadget/pxa2xx_udc.c
3 * Intel PXA2xx and IXP4xx on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
6 * Copyright (C) 2003 Robert Schwebel, Pengutronix
7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003 Joshua Wise
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 */
26
27#undef DEBUG
28// #define VERBOSE DBG_VERBOSE
29
30#include <linux/config.h>
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/ioport.h>
34#include <linux/types.h>
35#include <linux/version.h>
36#include <linux/errno.h>
37#include <linux/delay.h>
38#include <linux/sched.h>
39#include <linux/slab.h>
40#include <linux/init.h>
41#include <linux/timer.h>
42#include <linux/list.h>
43#include <linux/interrupt.h>
44#include <linux/proc_fs.h>
45#include <linux/mm.h>
46#include <linux/device.h>
47#include <linux/dma-mapping.h>
48
49#include <asm/byteorder.h>
50#include <asm/dma.h>
51#include <asm/io.h>
52#include <asm/irq.h>
53#include <asm/system.h>
54#include <asm/mach-types.h>
55#include <asm/unaligned.h>
56#include <asm/hardware.h>
57#include <asm/arch/pxa-regs.h>
58
59#include <linux/usb_ch9.h>
60#include <linux/usb_gadget.h>
61
62#include <asm/arch/udc.h>
63
64
65/*
66 * This driver handles the USB Device Controller (UDC) in Intel's PXA 2xx
67 * series processors. The UDC for the IXP 4xx series is very similar.
68 * There are fifteen endpoints, in addition to ep0.
69 *
70 * Such controller drivers work with a gadget driver. The gadget driver
71 * returns descriptors, implements configuration and data protocols used
72 * by the host to interact with this device, and allocates endpoints to
73 * the different protocol interfaces. The controller driver virtualizes
74 * usb hardware so that the gadget drivers will be more portable.
75 *
76 * This UDC hardware wants to implement a bit too much USB protocol, so
77 * it constrains the sorts of USB configuration change events that work.
78 * The errata for these chips are misleading; some "fixed" bugs from
79 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
80 */
81
82#define DRIVER_VERSION "14-Dec-2003"
83#define DRIVER_DESC "PXA 2xx USB Device Controller driver"
84
85
86static const char driver_name [] = "pxa2xx_udc";
87
88static const char ep0name [] = "ep0";
89
90
91// #define USE_DMA
92// #define USE_OUT_DMA
93// #define DISABLE_TEST_MODE
94
95#ifdef CONFIG_ARCH_IXP4XX
96#undef USE_DMA
97
98/* cpu-specific register addresses are compiled in to this code */
99#ifdef CONFIG_ARCH_PXA
100#error "Can't configure both IXP and PXA"
101#endif
102
103#endif
104
105#include "pxa2xx_udc.h"
106
107
108#ifdef USE_DMA
109static int use_dma = 1;
110module_param(use_dma, bool, 0);
111MODULE_PARM_DESC (use_dma, "true to use dma");
112
113static void dma_nodesc_handler (int dmach, void *_ep, struct pt_regs *r);
114static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
115
116#ifdef USE_OUT_DMA
117#define DMASTR " (dma support)"
118#else
119#define DMASTR " (dma in)"
120#endif
121
122#else /* !USE_DMA */
123#define DMASTR " (pio only)"
124#undef USE_OUT_DMA
125#endif
126
127#ifdef CONFIG_USB_PXA2XX_SMALL
128#define SIZE_STR " (small)"
129#else
130#define SIZE_STR ""
131#endif
132
133#ifdef DISABLE_TEST_MODE
134/* (mode == 0) == no undocumented chip tweaks
135 * (mode & 1) == double buffer bulk IN
136 * (mode & 2) == double buffer bulk OUT
137 * ... so mode = 3 (or 7, 15, etc) does it for both
138 */
139static ushort fifo_mode = 0;
140module_param(fifo_mode, ushort, 0);
141MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
142#endif
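/* Example (only meaningful when DISABLE_TEST_MODE is defined, so that the
 * parameter above exists): loading with "fifo_mode=3" asks for double
 * buffering on both bulk IN and bulk OUT endpoints, per the bit meanings
 * listed above:
 *
 *	modprobe pxa2xx_udc fifo_mode=3
 */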
143
144/* ---------------------------------------------------------------------------
145 * endpoint related parts of the api to the usb controller hardware,
146 * used by gadget driver; and the inner talker-to-hardware core.
147 * ---------------------------------------------------------------------------
148 */
149
150static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
151static void nuke (struct pxa2xx_ep *, int status);
152
153static void pio_irq_enable(int bEndpointAddress)
154{
155 bEndpointAddress &= 0xf;
156 if (bEndpointAddress < 8)
157 UICR0 &= ~(1 << bEndpointAddress);
158 else {
159 bEndpointAddress -= 8;
160 UICR1 &= ~(1 << bEndpointAddress);
161 }
162}
163
164static void pio_irq_disable(int bEndpointAddress)
165{
166 bEndpointAddress &= 0xf;
167 if (bEndpointAddress < 8)
168 UICR0 |= 1 << bEndpointAddress;
169 else {
170 bEndpointAddress -= 8;
171 UICR1 |= 1 << bEndpointAddress;
172 }
173}
174
175/* The UDCCR reg contains mask and interrupt status bits,
176 * so using '|=' isn't safe as it may ack an interrupt.
177 */
178#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
179
180static inline void udc_set_mask_UDCCR(int mask)
181{
182 UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
183}
184
185static inline void udc_clear_mask_UDCCR(int mask)
186{
187 UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
188}
189
190static inline void udc_ack_int_UDCCR(int mask)
191{
192	/* udccr contains the bits we don't want to change */
193 __u32 udccr = UDCCR & UDCCR_MASK_BITS;
194
195 UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
196}
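/*
 * Usage sketch for the three helpers above (illustrative, not called here):
 * a plain read-modify-write such as "UDCCR |= UDCCR_UDE" would write back
 * any pending RSTIR/SUSIR/RESIR status bits and so ack an interrupt by
 * accident.
 *
 *	udc_set_mask_UDCCR(UDCCR_UDE);			status bits written as zero
 *	udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);	unmask suspend/resume, reset irqs
 *	udc_ack_int_UDCCR(UDCCR_SUSIR);			ack just the suspend status bit
 */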
197
198/*
199 * endpoint enable/disable
200 *
201 * we need to verify the descriptors used to enable endpoints. since pxa2xx
202 * endpoint configurations are fixed, and are pretty much always enabled,
203 * there's not a lot to manage here.
204 *
205 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
206 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
207 * for a single interface (with only the default altsetting) and for gadget
208 * drivers that don't halt endpoints (not reset by set_interface). that also
209 * means that if you use ISO, you must violate the USB spec rule that all
210 * iso endpoints must be in non-default altsettings.
211 */
212static int pxa2xx_ep_enable (struct usb_ep *_ep,
213 const struct usb_endpoint_descriptor *desc)
214{
215 struct pxa2xx_ep *ep;
216 struct pxa2xx_udc *dev;
217
218 ep = container_of (_ep, struct pxa2xx_ep, ep);
219 if (!_ep || !desc || ep->desc || _ep->name == ep0name
220 || desc->bDescriptorType != USB_DT_ENDPOINT
221 || ep->bEndpointAddress != desc->bEndpointAddress
222 || ep->fifo_size < le16_to_cpu
223 (desc->wMaxPacketSize)) {
224 DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
225 return -EINVAL;
226 }
227
228 /* xfer types must match, except that interrupt ~= bulk */
229 if (ep->bmAttributes != desc->bmAttributes
230 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
231 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
232 DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
233 return -EINVAL;
234 }
235
236 /* hardware _could_ do smaller, but driver doesn't */
237 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
238 && le16_to_cpu (desc->wMaxPacketSize)
239 != BULK_FIFO_SIZE)
240 || !desc->wMaxPacketSize) {
241 DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
242 return -ERANGE;
243 }
244
245 dev = ep->dev;
246 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
247 DMSG("%s, bogus device state\n", __FUNCTION__);
248 return -ESHUTDOWN;
249 }
250
251 ep->desc = desc;
252 ep->dma = -1;
253 ep->stopped = 0;
254 ep->pio_irqs = ep->dma_irqs = 0;
255 ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
256
257 /* flush fifo (mostly for OUT buffers) */
258 pxa2xx_ep_fifo_flush (_ep);
259
260 /* ... reset halt state too, if we could ... */
261
262#ifdef USE_DMA
263 /* for (some) bulk and ISO endpoints, try to get a DMA channel and
264 * bind it to the endpoint. otherwise use PIO.
265 */
266 switch (ep->bmAttributes) {
267 case USB_ENDPOINT_XFER_ISOC:
268 if (le16_to_cpu(desc->wMaxPacketSize) % 32)
269 break;
270 // fall through
271 case USB_ENDPOINT_XFER_BULK:
272 if (!use_dma || !ep->reg_drcmr)
273 break;
274 ep->dma = pxa_request_dma ((char *)_ep->name,
275 (le16_to_cpu (desc->wMaxPacketSize) > 64)
276 ? DMA_PRIO_MEDIUM /* some iso */
277 : DMA_PRIO_LOW,
278 dma_nodesc_handler, ep);
279 if (ep->dma >= 0) {
280 *ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
281 DMSG("%s using dma%d\n", _ep->name, ep->dma);
282 }
283 }
284#endif
285
286 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
287 return 0;
288}
289
290static int pxa2xx_ep_disable (struct usb_ep *_ep)
291{
292 struct pxa2xx_ep *ep;
293
294 ep = container_of (_ep, struct pxa2xx_ep, ep);
295 if (!_ep || !ep->desc) {
296 DMSG("%s, %s not enabled\n", __FUNCTION__,
297 _ep ? ep->ep.name : NULL);
298 return -EINVAL;
299 }
300 nuke (ep, -ESHUTDOWN);
301
302#ifdef USE_DMA
303 if (ep->dma >= 0) {
304 *ep->reg_drcmr = 0;
305 pxa_free_dma (ep->dma);
306 ep->dma = -1;
307 }
308#endif
309
310 /* flush fifo (mostly for IN buffers) */
311 pxa2xx_ep_fifo_flush (_ep);
312
313 ep->desc = NULL;
314 ep->stopped = 1;
315
316 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
317 return 0;
318}
319
320/*-------------------------------------------------------------------------*/
321
322/* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers
323 * must still pass correctly initialized endpoints, since other controller
324 * drivers may care about how it's currently set up (dma issues etc).
325 */
326
327/*
328 * pxa2xx_ep_alloc_request - allocate a request data structure
329 */
330static struct usb_request *
331pxa2xx_ep_alloc_request (struct usb_ep *_ep, int gfp_flags)
332{
333 struct pxa2xx_request *req;
334
335 req = kmalloc (sizeof *req, gfp_flags);
336 if (!req)
337 return NULL;
338
339 memset (req, 0, sizeof *req);
340 INIT_LIST_HEAD (&req->queue);
341 return &req->req;
342}
343
344
345/*
346 * pxa2xx_ep_free_request - deallocate a request data structure
347 */
348static void
349pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
350{
351 struct pxa2xx_request *req;
352
353 req = container_of (_req, struct pxa2xx_request, req);
354 WARN_ON (!list_empty (&req->queue));
355 kfree(req);
356}
357
358
359/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
360 * no device-affinity and the heap works perfectly well for i/o buffers.
361 * It wastes much less memory than dma_alloc_coherent() would, and even
362 * prevents cacheline (32 bytes wide) sharing problems.
363 */
364static void *
365pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
366 dma_addr_t *dma, int gfp_flags)
367{
368 char *retval;
369
370 retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
371 if (retval)
372#ifdef USE_DMA
373 *dma = virt_to_bus (retval);
374#else
375 *dma = (dma_addr_t)~0;
376#endif
377 return retval;
378}
379
380static void
381pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
382 unsigned bytes)
383{
384 kfree (buf);
385}
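/* Sketch of how a gadget driver reaches the two helpers above, through the
 * usb_ep_alloc_buffer()/usb_ep_free_buffer() wrappers declared in
 * <linux/usb_gadget.h> (an assumption about that header's API; error
 * handling trimmed):
 *
 *	dma_addr_t	dma;
 *	void		*buf;
 *
 *	buf = usb_ep_alloc_buffer(ep, BULK_FIFO_SIZE, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... queue i/o using buf ...
 *	usb_ep_free_buffer(ep, buf, dma, BULK_FIFO_SIZE);
 */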
386
387/*-------------------------------------------------------------------------*/
388
389/*
390 * done - retire a request; caller blocked irqs
391 */
392static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
393{
394 unsigned stopped = ep->stopped;
395
396 list_del_init(&req->queue);
397
398 if (likely (req->req.status == -EINPROGRESS))
399 req->req.status = status;
400 else
401 status = req->req.status;
402
403 if (status && status != -ESHUTDOWN)
404 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
405 ep->ep.name, &req->req, status,
406 req->req.actual, req->req.length);
407
408 /* don't modify queue heads during completion callback */
409 ep->stopped = 1;
410 req->req.complete(&ep->ep, &req->req);
411 ep->stopped = stopped;
412}
413
414
415static inline void ep0_idle (struct pxa2xx_udc *dev)
416{
417 dev->ep0state = EP0_IDLE;
418}
419
420static int
421write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
422{
423 u8 *buf;
424 unsigned length, count;
425
426 buf = req->req.buf + req->req.actual;
427 prefetch(buf);
428
429 /* how big will this packet be? */
430 length = min(req->req.length - req->req.actual, max);
431 req->req.actual += length;
432
433 count = length;
434 while (likely(count--))
435 *uddr = *buf++;
436
437 return length;
438}
439
440/*
441 * write to an IN endpoint fifo, as many packets as possible.
442 * irqs will use this to write the rest later.
443 * caller guarantees at least one packet buffer is ready (or a zlp).
444 */
445static int
446write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
447{
448 unsigned max;
449
450 max = le16_to_cpu(ep->desc->wMaxPacketSize);
451 do {
452 unsigned count;
453 int is_last, is_short;
454
455 count = write_packet(ep->reg_uddr, req, max);
456
457 /* last packet is usually short (or a zlp) */
458 if (unlikely (count != max))
459 is_last = is_short = 1;
460 else {
461 if (likely(req->req.length != req->req.actual)
462 || req->req.zero)
463 is_last = 0;
464 else
465 is_last = 1;
466 /* interrupt/iso maxpacket may not fill the fifo */
467 is_short = unlikely (max < ep->fifo_size);
468 }
469
470 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
471 ep->ep.name, count,
472 is_last ? "/L" : "", is_short ? "/S" : "",
473 req->req.length - req->req.actual, req);
474
475 /* let loose that packet. maybe try writing another one,
476 * double buffering might work. TSP, TPC, and TFS
477 * bit values are the same for all normal IN endpoints.
478 */
479 *ep->reg_udccs = UDCCS_BI_TPC;
480 if (is_short)
481 *ep->reg_udccs = UDCCS_BI_TSP;
482
483 /* requests complete when all IN data is in the FIFO */
484 if (is_last) {
485 done (ep, req, 0);
486 if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
487 pio_irq_disable (ep->bEndpointAddress);
488#ifdef USE_DMA
489 /* unaligned data and zlps couldn't use dma */
490 if (unlikely(!list_empty(&ep->queue))) {
491 req = list_entry(ep->queue.next,
492 struct pxa2xx_request, queue);
493 kick_dma(ep,req);
494 return 0;
495 }
496#endif
497 }
498 return 1;
499 }
500
501 // TODO experiment: how robust can fifo mode tweaking be?
502 // double buffering is off in the default fifo mode, which
503 // prevents TFS from being set here.
504
505 } while (*ep->reg_udccs & UDCCS_BI_TFS);
506 return 0;
507}
508
509/* caller asserts req->pending (ep0 irq status nyet cleared); starts
510 * ep0 data stage. these chips want very simple state transitions.
511 */
512static inline
513void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
514{
515 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
516 USIR0 = USIR0_IR0;
517 dev->req_pending = 0;
518 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
519 __FUNCTION__, tag, UDCCS0, flags);
520}
521
522static int
523write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
524{
525 unsigned count;
526 int is_short;
527
528 count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
529 ep->dev->stats.write.bytes += count;
530
531 /* last packet "must be" short (or a zlp) */
532 is_short = (count != EP0_FIFO_SIZE);
533
534 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
535 req->req.length - req->req.actual, req);
536
537 if (unlikely (is_short)) {
538 if (ep->dev->req_pending)
539 ep0start(ep->dev, UDCCS0_IPR, "short IN");
540 else
541 UDCCS0 = UDCCS0_IPR;
542
543 count = req->req.length;
544 done (ep, req, 0);
545 ep0_idle(ep->dev);
546#if 1
547 /* This seems to get rid of lost status irqs in some cases:
548 * host responds quickly, or next request involves config
549 * change automagic, or should have been hidden, or ...
550 *
551 * FIXME get rid of all udelays possible...
552 */
553 if (count >= EP0_FIFO_SIZE) {
554 count = 100;
555 do {
556 if ((UDCCS0 & UDCCS0_OPR) != 0) {
557 /* clear OPR, generate ack */
558 UDCCS0 = UDCCS0_OPR;
559 break;
560 }
561 count--;
562 udelay(1);
563 } while (count);
564 }
565#endif
566 } else if (ep->dev->req_pending)
567 ep0start(ep->dev, 0, "IN");
568 return is_short;
569}
570
571
572/*
573 * read_fifo - unload packet(s) from the fifo we use for usb OUT
574 * transfers and put them into the request. caller should have made
575 * sure there's at least one packet ready.
576 *
577 * returns true if the request completed because of short packet or the
578 * request buffer having filled (and maybe overran till end-of-packet).
579 */
580static int
581read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
582{
583 for (;;) {
584 u32 udccs;
585 u8 *buf;
586 unsigned bufferspace, count, is_short;
587
588 /* make sure there's a packet in the FIFO.
589 * UDCCS_{BO,IO}_RPC are all the same bit value.
590 * UDCCS_{BO,IO}_RNE are all the same bit value.
591 */
592 udccs = *ep->reg_udccs;
593 if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
594 break;
595 buf = req->req.buf + req->req.actual;
596 prefetchw(buf);
597 bufferspace = req->req.length - req->req.actual;
598
599 /* read all bytes from this packet */
600 if (likely (udccs & UDCCS_BO_RNE)) {
601 count = 1 + (0x0ff & *ep->reg_ubcr);
602 req->req.actual += min (count, bufferspace);
603 } else /* zlp */
604 count = 0;
605 is_short = (count < ep->ep.maxpacket);
606 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
607 ep->ep.name, udccs, count,
608 is_short ? "/S" : "",
609 req, req->req.actual, req->req.length);
610 while (likely (count-- != 0)) {
611 u8 byte = (u8) *ep->reg_uddr;
612
613 if (unlikely (bufferspace == 0)) {
614 /* this happens when the driver's buffer
615 * is smaller than what the host sent.
616 * discard the extra data.
617 */
618 if (req->req.status != -EOVERFLOW)
619 DMSG("%s overflow %d\n",
620 ep->ep.name, count);
621 req->req.status = -EOVERFLOW;
622 } else {
623 *buf++ = byte;
624 bufferspace--;
625 }
626 }
627 *ep->reg_udccs = UDCCS_BO_RPC;
628 /* RPC/RSP/RNE could now reflect the other packet buffer */
629
630 /* iso is one request per packet */
631 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
632 if (udccs & UDCCS_IO_ROF)
633 req->req.status = -EHOSTUNREACH;
634 /* more like "is_done" */
635 is_short = 1;
636 }
637
638 /* completion */
639 if (is_short || req->req.actual == req->req.length) {
640 done (ep, req, 0);
641 if (list_empty(&ep->queue))
642 pio_irq_disable (ep->bEndpointAddress);
643 return 1;
644 }
645
646 /* finished that packet. the next one may be waiting... */
647 }
648 return 0;
649}
650
651/*
652 * special ep0 version of the above. no UBCR0 or double buffering; status
653 * handshaking is magic. most device protocols don't need control-OUT.
654 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
655 * protocols do use them.
656 */
657static int
658read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
659{
660 u8 *buf, byte;
661 unsigned bufferspace;
662
663 buf = req->req.buf + req->req.actual;
664 bufferspace = req->req.length - req->req.actual;
665
666 while (UDCCS0 & UDCCS0_RNE) {
667 byte = (u8) UDDR0;
668
669 if (unlikely (bufferspace == 0)) {
670 /* this happens when the driver's buffer
671 * is smaller than what the host sent.
672 * discard the extra data.
673 */
674 if (req->req.status != -EOVERFLOW)
675 DMSG("%s overflow\n", ep->ep.name);
676 req->req.status = -EOVERFLOW;
677 } else {
678 *buf++ = byte;
679 req->req.actual++;
680 bufferspace--;
681 }
682 }
683
684 UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
685
686 /* completion */
687 if (req->req.actual >= req->req.length)
688 return 1;
689
690 /* finished that packet. the next one may be waiting... */
691 return 0;
692}
693
694#ifdef USE_DMA
695
696#define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
697
698static void
699start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
700{
701 u32 dcmd = req->req.length;
702 u32 buf = req->req.dma;
703 u32 fifo = io_v2p ((u32)ep->reg_uddr);
704
705 /* caller guarantees there's a packet or more remaining
706 * - IN may end with a short packet (TSP set separately),
707 * - OUT is always full length
708 */
709 buf += req->req.actual;
710 dcmd -= req->req.actual;
711 ep->dma_fixup = 0;
712
713 /* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
714 DCSR(ep->dma) = DCSR_NODESC;
715 if (is_in) {
716 DSADR(ep->dma) = buf;
717 DTADR(ep->dma) = fifo;
718 if (dcmd > MAX_IN_DMA)
719 dcmd = MAX_IN_DMA;
720 else
721 ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
722 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
723 | DCMD_FLOWTRG | DCMD_INCSRCADDR;
724 } else {
725#ifdef USE_OUT_DMA
726 DSADR(ep->dma) = fifo;
727 DTADR(ep->dma) = buf;
728 if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
729 dcmd = ep->ep.maxpacket;
730 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
731 | DCMD_FLOWSRC | DCMD_INCTRGADDR;
732#endif
733 }
734 DCMD(ep->dma) = dcmd;
735 DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
736 | (unlikely(is_in)
737 ? DCSR_STOPIRQEN /* use dma_nodesc_handler() */
738 : 0); /* use handle_ep() */
739}
740
741static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
742{
743 int is_in = ep->bEndpointAddress & USB_DIR_IN;
744
745 if (is_in) {
746 /* unaligned tx buffers and zlps only work with PIO */
747 if ((req->req.dma & 0x0f) != 0
748 || unlikely((req->req.length - req->req.actual)
749 == 0)) {
750 pio_irq_enable(ep->bEndpointAddress);
751 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
752 (void) write_fifo(ep, req);
753 } else {
754 start_dma_nodesc(ep, req, USB_DIR_IN);
755 }
756 } else {
757 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
758 DMSG("%s short dma read...\n", ep->ep.name);
759 /* we're always set up for pio out */
760 read_fifo (ep, req);
761 } else {
762 *ep->reg_udccs = UDCCS_BO_DME
763 | (*ep->reg_udccs & UDCCS_BO_FST);
764 start_dma_nodesc(ep, req, USB_DIR_OUT);
765 }
766 }
767}
768
769static void cancel_dma(struct pxa2xx_ep *ep)
770{
771 struct pxa2xx_request *req;
772 u32 tmp;
773
774 if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
775 return;
776
777 DCSR(ep->dma) = 0;
778 while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
779 cpu_relax();
780
781 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
782 tmp = DCMD(ep->dma) & DCMD_LENGTH;
783 req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
784
785 /* the last tx packet may be incomplete, so flush the fifo.
786 * FIXME correct req.actual if we can
787 */
788 if (ep->bEndpointAddress & USB_DIR_IN)
789 *ep->reg_udccs = UDCCS_BI_FTF;
790}
791
792/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
793static void dma_nodesc_handler(int dmach, void *_ep, struct pt_regs *r)
794{
795 struct pxa2xx_ep *ep = _ep;
796 struct pxa2xx_request *req;
797 u32 tmp, completed;
798
799 local_irq_disable();
800
801 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
802
803 ep->dma_irqs++;
804 ep->dev->stats.irqs++;
805 HEX_DISPLAY(ep->dev->stats.irqs);
806
807 /* ack/clear */
808 tmp = DCSR(ep->dma);
809 DCSR(ep->dma) = tmp;
810 if ((tmp & DCSR_STOPSTATE) == 0
811 || (DDADR(ep->dma) & DDADR_STOP) != 0) {
812 DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
813 ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
814 goto done;
815 }
816 DCSR(ep->dma) = 0; /* clear DCSR_STOPSTATE */
817
818 /* update transfer status */
819 completed = tmp & DCSR_BUSERR;
820 if (ep->bEndpointAddress & USB_DIR_IN)
821 tmp = DSADR(ep->dma);
822 else
823 tmp = DTADR(ep->dma);
824 req->req.actual = tmp - req->req.dma;
825
826 /* FIXME seems we sometimes see partial transfers... */
827
828 if (unlikely(completed != 0))
829 req->req.status = -EIO;
830 else if (req->req.actual) {
831 /* these registers have zeroes in low bits; they miscount
832 * some (end-of-transfer) short packets: tx 14 as tx 12
833 */
834 if (ep->dma_fixup)
835 req->req.actual = min(req->req.actual + 3,
836 req->req.length);
837
838 tmp = (req->req.length - req->req.actual);
839 completed = (tmp == 0);
840 if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {
841
842 /* maybe validate final short packet ... */
843 if ((req->req.actual % ep->ep.maxpacket) != 0)
844 *ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;
845
846 /* ... or zlp, using pio fallback */
847 else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
848 && req->req.zero) {
849 DMSG("%s zlp terminate ...\n", ep->ep.name);
850 completed = 0;
851 }
852 }
853 }
854
855 if (likely(completed)) {
856 done(ep, req, 0);
857
858 /* maybe re-activate after completion */
859 if (ep->stopped || list_empty(&ep->queue))
860 goto done;
861 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
862 }
863 kick_dma(ep, req);
864done:
865 local_irq_enable();
866}
867
868#endif
869
870/*-------------------------------------------------------------------------*/
871
872static int
873pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
874{
875 struct pxa2xx_request *req;
876 struct pxa2xx_ep *ep;
877 struct pxa2xx_udc *dev;
878 unsigned long flags;
879
880 req = container_of(_req, struct pxa2xx_request, req);
881 if (unlikely (!_req || !_req->complete || !_req->buf
882 || !list_empty(&req->queue))) {
883 DMSG("%s, bad params\n", __FUNCTION__);
884 return -EINVAL;
885 }
886
887 ep = container_of(_ep, struct pxa2xx_ep, ep);
888 if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
889 DMSG("%s, bad ep\n", __FUNCTION__);
890 return -EINVAL;
891 }
892
893 dev = ep->dev;
894 if (unlikely (!dev->driver
895 || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
896 DMSG("%s, bogus device state\n", __FUNCTION__);
897 return -ESHUTDOWN;
898 }
899
900 /* iso is always one packet per request, that's the only way
901 * we can report per-packet status. that also helps with dma.
902 */
903 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
904 && req->req.length > le16_to_cpu
905 (ep->desc->wMaxPacketSize)))
906 return -EMSGSIZE;
907
908#ifdef USE_DMA
909 // FIXME caller may already have done the dma mapping
910 if (ep->dma >= 0) {
911 _req->dma = dma_map_single(dev->dev,
912 _req->buf, _req->length,
913 ((ep->bEndpointAddress & USB_DIR_IN) != 0)
914 ? DMA_TO_DEVICE
915 : DMA_FROM_DEVICE);
916 }
917#endif
918
919 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
920 _ep->name, _req, _req->length, _req->buf);
921
922 local_irq_save(flags);
923
924 _req->status = -EINPROGRESS;
925 _req->actual = 0;
926
927 /* kickstart this i/o queue? */
928 if (list_empty(&ep->queue) && !ep->stopped) {
929 if (ep->desc == 0 /* ep0 */) {
930 unsigned length = _req->length;
931
932 switch (dev->ep0state) {
933 case EP0_IN_DATA_PHASE:
934 dev->stats.write.ops++;
935 if (write_ep0_fifo(ep, req))
936 req = NULL;
937 break;
938
939 case EP0_OUT_DATA_PHASE:
940 dev->stats.read.ops++;
941 /* messy ... */
942 if (dev->req_config) {
943 DBG(DBG_VERBOSE, "ep0 config ack%s\n",
944 dev->has_cfr ? "" : " raced");
945 if (dev->has_cfr)
946 UDCCFR = UDCCFR_AREN|UDCCFR_ACM
947 |UDCCFR_MB1;
948 done(ep, req, 0);
949 dev->ep0state = EP0_END_XFER;
950 local_irq_restore (flags);
951 return 0;
952 }
953 if (dev->req_pending)
954 ep0start(dev, UDCCS0_IPR, "OUT");
955 if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
956 && read_ep0_fifo(ep, req))) {
957 ep0_idle(dev);
958 done(ep, req, 0);
959 req = NULL;
960 }
961 break;
962
963 default:
964 DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
965 local_irq_restore (flags);
966 return -EL2HLT;
967 }
968#ifdef USE_DMA
969 /* either start dma or prime pio pump */
970 } else if (ep->dma >= 0) {
971 kick_dma(ep, req);
972#endif
973		/* can the FIFO satisfy the request immediately? */
974 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
975 && (*ep->reg_udccs & UDCCS_BI_TFS) != 0
976 && write_fifo(ep, req)) {
977 req = NULL;
978 } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
979 && read_fifo(ep, req)) {
980 req = NULL;
981 }
982
983 if (likely (req && ep->desc) && ep->dma < 0)
984 pio_irq_enable(ep->bEndpointAddress);
985 }
986
987 /* pio or dma irq handler advances the queue. */
988 if (likely (req != 0))
989 list_add_tail(&req->queue, &ep->queue);
990 local_irq_restore(flags);
991
992 return 0;
993}
994
995
996/*
997 * nuke - dequeue ALL requests
998 */
999static void nuke(struct pxa2xx_ep *ep, int status)
1000{
1001 struct pxa2xx_request *req;
1002
1003 /* called with irqs blocked */
1004#ifdef USE_DMA
1005 if (ep->dma >= 0 && !ep->stopped)
1006 cancel_dma(ep);
1007#endif
1008 while (!list_empty(&ep->queue)) {
1009 req = list_entry(ep->queue.next,
1010 struct pxa2xx_request,
1011 queue);
1012 done(ep, req, status);
1013 }
1014 if (ep->desc)
1015 pio_irq_disable (ep->bEndpointAddress);
1016}
1017
1018
1019/* dequeue JUST ONE request */
1020static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1021{
1022 struct pxa2xx_ep *ep;
1023 struct pxa2xx_request *req;
1024 unsigned long flags;
1025
1026 ep = container_of(_ep, struct pxa2xx_ep, ep);
1027 if (!_ep || ep->ep.name == ep0name)
1028 return -EINVAL;
1029
1030 local_irq_save(flags);
1031
1032 /* make sure it's actually queued on this endpoint */
1033 list_for_each_entry (req, &ep->queue, queue) {
1034 if (&req->req == _req)
1035 break;
1036 }
1037 if (&req->req != _req) {
1038 local_irq_restore(flags);
1039 return -EINVAL;
1040 }
1041
1042#ifdef USE_DMA
1043 if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
1044 cancel_dma(ep);
1045 done(ep, req, -ECONNRESET);
1046 /* restart i/o */
1047 if (!list_empty(&ep->queue)) {
1048 req = list_entry(ep->queue.next,
1049 struct pxa2xx_request, queue);
1050 kick_dma(ep, req);
1051 }
1052 } else
1053#endif
1054 done(ep, req, -ECONNRESET);
1055
1056 local_irq_restore(flags);
1057 return 0;
1058}
1059
1060/*-------------------------------------------------------------------------*/
1061
1062static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
1063{
1064 struct pxa2xx_ep *ep;
1065 unsigned long flags;
1066
1067 ep = container_of(_ep, struct pxa2xx_ep, ep);
1068 if (unlikely (!_ep
1069 || (!ep->desc && ep->ep.name != ep0name))
1070 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
1071 DMSG("%s, bad ep\n", __FUNCTION__);
1072 return -EINVAL;
1073 }
1074 if (value == 0) {
1075 /* this path (reset toggle+halt) is needed to implement
1076 * SET_INTERFACE on normal hardware. but it can't be
1077 * done from software on the PXA UDC, and the hardware
1078 * forgets to do it as part of SET_INTERFACE automagic.
1079 */
1080 DMSG("only host can clear %s halt\n", _ep->name);
1081 return -EROFS;
1082 }
1083
1084 local_irq_save(flags);
1085
1086 if ((ep->bEndpointAddress & USB_DIR_IN) != 0
1087 && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
1088 || !list_empty(&ep->queue))) {
1089 local_irq_restore(flags);
1090 return -EAGAIN;
1091 }
1092
1093 /* FST bit is the same for control, bulk in, bulk out, interrupt in */
1094 *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
1095
1096 /* ep0 needs special care */
1097 if (!ep->desc) {
1098 start_watchdog(ep->dev);
1099 ep->dev->req_pending = 0;
1100 ep->dev->ep0state = EP0_STALL;
1101
1102 /* and bulk/intr endpoints like dropping stalls too */
1103 } else {
1104 unsigned i;
1105 for (i = 0; i < 1000; i += 20) {
1106 if (*ep->reg_udccs & UDCCS_BI_SST)
1107 break;
1108 udelay(20);
1109 }
1110 }
1111 local_irq_restore(flags);
1112
1113 DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
1114 return 0;
1115}
1116
1117static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
1118{
1119 struct pxa2xx_ep *ep;
1120
1121 ep = container_of(_ep, struct pxa2xx_ep, ep);
1122 if (!_ep) {
1123 DMSG("%s, bad ep\n", __FUNCTION__);
1124 return -ENODEV;
1125 }
1126 /* pxa can't report unclaimed bytes from IN fifos */
1127 if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
1128 return -EOPNOTSUPP;
1129 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
1130 || (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
1131 return 0;
1132 else
1133 return (*ep->reg_ubcr & 0xfff) + 1;
1134}
1135
1136static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
1137{
1138 struct pxa2xx_ep *ep;
1139
1140 ep = container_of(_ep, struct pxa2xx_ep, ep);
1141 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
1142 DMSG("%s, bad ep\n", __FUNCTION__);
1143 return;
1144 }
1145
1146 /* toggle and halt bits stay unchanged */
1147
1148 /* for OUT, just read and discard the FIFO contents. */
1149 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
1150 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
1151 (void) *ep->reg_uddr;
1152 return;
1153 }
1154
1155 /* most IN status is the same, but ISO can't stall */
1156	*ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
1157		| (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
1158			? 0 : UDCCS_BI_SST);
1159}
1160
1161
1162static struct usb_ep_ops pxa2xx_ep_ops = {
1163 .enable = pxa2xx_ep_enable,
1164 .disable = pxa2xx_ep_disable,
1165
1166 .alloc_request = pxa2xx_ep_alloc_request,
1167 .free_request = pxa2xx_ep_free_request,
1168
1169 .alloc_buffer = pxa2xx_ep_alloc_buffer,
1170 .free_buffer = pxa2xx_ep_free_buffer,
1171
1172 .queue = pxa2xx_ep_queue,
1173 .dequeue = pxa2xx_ep_dequeue,
1174
1175 .set_halt = pxa2xx_ep_set_halt,
1176 .fifo_status = pxa2xx_ep_fifo_status,
1177 .fifo_flush = pxa2xx_ep_fifo_flush,
1178};
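/*
 * Typical call sequence from a gadget driver into the ops above, via the
 * usb_ep_*() wrappers in <linux/usb_gadget.h> (a hedged sketch; "my_complete"
 * and the buffer variables are hypothetical, and error checks are omitted):
 *
 *	usb_ep_enable(ep, desc);
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */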
1179
1180
1181/* ---------------------------------------------------------------------------
1182 * device-scoped parts of the api to the usb controller hardware
1183 * ---------------------------------------------------------------------------
1184 */
1185
1186static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
1187{
1188 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
1189}
1190
1191static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
1192{
1193 /* host may not have enabled remote wakeup */
1194 if ((UDCCS0 & UDCCS0_DRWF) == 0)
1195 return -EHOSTUNREACH;
1196 udc_set_mask_UDCCR(UDCCR_RSM);
1197 return 0;
1198}
1199
1200static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
1201static void udc_enable (struct pxa2xx_udc *);
1202static void udc_disable(struct pxa2xx_udc *);
1203
1204/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1205 * in active use.
1206 */
1207static int pullup(struct pxa2xx_udc *udc, int is_active)
1208{
1209 is_active = is_active && udc->vbus && udc->pullup;
1210 DMSG("%s\n", is_active ? "active" : "inactive");
1211 if (is_active)
1212 udc_enable(udc);
1213 else {
1214 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1215 DMSG("disconnect %s\n", udc->driver
1216 ? udc->driver->driver.name
1217 : "(no driver)");
1218 stop_activity(udc, udc->driver);
1219 }
1220 udc_disable(udc);
1221 }
1222 return 0;
1223}
1224
1225/* VBUS reporting logically comes from a transceiver */
1226static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1227{
1228 struct pxa2xx_udc *udc;
1229
1230 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1231 udc->vbus = is_active = (is_active != 0);
1232 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
1233 pullup(udc, is_active);
1234 return 0;
1235}
1236
1237/* drivers may have software control over D+ pullup */
1238static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
1239{
1240 struct pxa2xx_udc *udc;
1241
1242 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1243
1244 /* not all boards support pullup control */
1245 if (!udc->mach->udc_command)
1246 return -EOPNOTSUPP;
1247
1248 is_active = (is_active != 0);
1249 udc->pullup = is_active;
1250 pullup(udc, is_active);
1251 return 0;
1252}
1253
1254static const struct usb_gadget_ops pxa2xx_udc_ops = {
1255 .get_frame = pxa2xx_udc_get_frame,
1256 .wakeup = pxa2xx_udc_wakeup,
1257 .vbus_session = pxa2xx_udc_vbus_session,
1258 .pullup = pxa2xx_udc_pullup,
1259
1260 // .vbus_draw ... boards may consume current from VBUS, up to
1261 // 100-500mA based on config. the 500uA suspend ceiling means
1262 // that exclusively vbus-powered PXA designs violate USB specs.
1263};
1264
1265/*-------------------------------------------------------------------------*/
1266
1267#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1268
1269static const char proc_node_name [] = "driver/udc";
1270
1271static int
1272udc_proc_read(char *page, char **start, off_t off, int count,
1273 int *eof, void *_dev)
1274{
1275 char *buf = page;
1276 struct pxa2xx_udc *dev = _dev;
1277 char *next = buf;
1278 unsigned size = count;
1279 unsigned long flags;
1280 int i, t;
1281 u32 tmp;
1282
1283 if (off != 0)
1284 return 0;
1285
1286 local_irq_save(flags);
1287
1288 /* basic device status */
1289 t = scnprintf(next, size, DRIVER_DESC "\n"
1290 "%s version: %s\nGadget driver: %s\nHost %s\n\n",
1291 driver_name, DRIVER_VERSION SIZE_STR DMASTR,
1292 dev->driver ? dev->driver->driver.name : "(none)",
1293 is_usb_connected() ? "full speed" : "disconnected");
1294 size -= t;
1295 next += t;
1296
1297 /* registers for device and ep0 */
1298 t = scnprintf(next, size,
1299 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
1300 UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
1301 size -= t;
1302 next += t;
1303
1304 tmp = UDCCR;
1305 t = scnprintf(next, size,
1306 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
1307 (tmp & UDCCR_REM) ? " rem" : "",
1308 (tmp & UDCCR_RSTIR) ? " rstir" : "",
1309 (tmp & UDCCR_SRM) ? " srm" : "",
1310 (tmp & UDCCR_SUSIR) ? " susir" : "",
1311 (tmp & UDCCR_RESIR) ? " resir" : "",
1312 (tmp & UDCCR_RSM) ? " rsm" : "",
1313 (tmp & UDCCR_UDA) ? " uda" : "",
1314 (tmp & UDCCR_UDE) ? " ude" : "");
1315 size -= t;
1316 next += t;
1317
1318 tmp = UDCCS0;
1319 t = scnprintf(next, size,
1320 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
1321 (tmp & UDCCS0_SA) ? " sa" : "",
1322 (tmp & UDCCS0_RNE) ? " rne" : "",
1323 (tmp & UDCCS0_FST) ? " fst" : "",
1324 (tmp & UDCCS0_SST) ? " sst" : "",
1325		(tmp & UDCCS0_DRWF) ? " drwf" : "",
1326 (tmp & UDCCS0_FTF) ? " ftf" : "",
1327 (tmp & UDCCS0_IPR) ? " ipr" : "",
1328 (tmp & UDCCS0_OPR) ? " opr" : "");
1329 size -= t;
1330 next += t;
1331
1332 if (dev->has_cfr) {
1333 tmp = UDCCFR;
1334 t = scnprintf(next, size,
1335 "udccfr %02X =%s%s\n", tmp,
1336 (tmp & UDCCFR_AREN) ? " aren" : "",
1337 (tmp & UDCCFR_ACM) ? " acm" : "");
1338 size -= t;
1339 next += t;
1340 }
1341
1342 if (!is_usb_connected() || !dev->driver)
1343 goto done;
1344
1345 t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
1346 dev->stats.write.bytes, dev->stats.write.ops,
1347 dev->stats.read.bytes, dev->stats.read.ops,
1348 dev->stats.irqs);
1349 size -= t;
1350 next += t;
1351
1352 /* dump endpoint queues */
1353 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1354 struct pxa2xx_ep *ep = &dev->ep [i];
1355 struct pxa2xx_request *req;
1356 int t;
1357
1358 if (i != 0) {
1359 const struct usb_endpoint_descriptor *d;
1360
1361 d = ep->desc;
1362 if (!d)
1363 continue;
1364 tmp = *dev->ep [i].reg_udccs;
1365 t = scnprintf(next, size,
1366 "%s max %d %s udccs %02x irqs %lu/%lu\n",
1367 ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
1368 (ep->dma >= 0) ? "dma" : "pio", tmp,
1369 ep->pio_irqs, ep->dma_irqs);
1370 /* TODO translate all five groups of udccs bits! */
1371
1372 } else /* ep0 should only have one transfer queued */
1373 t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
1374 ep->pio_irqs);
1375 if (t <= 0 || t > size)
1376 goto done;
1377 size -= t;
1378 next += t;
1379
1380 if (list_empty(&ep->queue)) {
1381 t = scnprintf(next, size, "\t(nothing queued)\n");
1382 if (t <= 0 || t > size)
1383 goto done;
1384 size -= t;
1385 next += t;
1386 continue;
1387 }
1388 list_for_each_entry(req, &ep->queue, queue) {
1389#ifdef USE_DMA
1390 if (ep->dma >= 0 && req->queue.prev == &ep->queue)
1391 t = scnprintf(next, size,
1392 "\treq %p len %d/%d "
1393 "buf %p (dma%d dcmd %08x)\n",
1394 &req->req, req->req.actual,
1395 req->req.length, req->req.buf,
1396 ep->dma, DCMD(ep->dma)
1397 // low 13 bits == bytes-to-go
1398 );
1399 else
1400#endif
1401 t = scnprintf(next, size,
1402 "\treq %p len %d/%d buf %p\n",
1403 &req->req, req->req.actual,
1404 req->req.length, req->req.buf);
1405 if (t <= 0 || t > size)
1406 goto done;
1407 size -= t;
1408 next += t;
1409 }
1410 }
1411
1412done:
1413 local_irq_restore(flags);
1414 *eof = 1;
1415 return count - size;
1416}
1417
1418#define create_proc_files() \
1419 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
1420#define remove_proc_files() \
1421 remove_proc_entry(proc_node_name, NULL)
1422
1423#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
1424
1425#define create_proc_files() do {} while (0)
1426#define remove_proc_files() do {} while (0)
1427
1428#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
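/* With CONFIG_USB_GADGET_DEBUG_FILES enabled, the dump produced above is
 * readable at runtime via procfs:
 *
 *	cat /proc/driver/udc
 */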
1429
1430/* "function" sysfs attribute */
1431static ssize_t
1432show_function (struct device *_dev, char *buf)
1433{
1434 struct pxa2xx_udc *dev = dev_get_drvdata (_dev);
1435
1436 if (!dev->driver
1437 || !dev->driver->function
1438 || strlen (dev->driver->function) > PAGE_SIZE)
1439 return 0;
1440 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1441}
1442static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1443
1444/*-------------------------------------------------------------------------*/
1445
1446/*
1447 * udc_disable - disable USB device controller
1448 */
1449static void udc_disable(struct pxa2xx_udc *dev)
1450{
1451 /* block all irqs */
1452 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
1453 UICR0 = UICR1 = 0xff;
1454 UFNRH = UFNRH_SIM;
1455
1456 /* if hardware supports it, disconnect from usb */
1457 make_usb_disappear();
1458
1459 udc_clear_mask_UDCCR(UDCCR_UDE);
1460
1461#ifdef CONFIG_ARCH_PXA
1462 /* Disable clock for USB device */
1463 pxa_set_cken(CKEN11_USB, 0);
1464#endif
1465
1466 ep0_idle (dev);
1467 dev->gadget.speed = USB_SPEED_UNKNOWN;
1468 LED_CONNECTED_OFF;
1469}
1470
1471
1472/*
1473 * udc_reinit - initialize software state
1474 */
1475static void udc_reinit(struct pxa2xx_udc *dev)
1476{
1477 u32 i;
1478
1479 /* device/ep0 records init */
1480 INIT_LIST_HEAD (&dev->gadget.ep_list);
1481 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1482 dev->ep0state = EP0_IDLE;
1483
1484 /* basic endpoint records init */
1485 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1486 struct pxa2xx_ep *ep = &dev->ep[i];
1487
1488 if (i != 0)
1489 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1490
1491 ep->desc = NULL;
1492 ep->stopped = 0;
1493 INIT_LIST_HEAD (&ep->queue);
1494 ep->pio_irqs = ep->dma_irqs = 0;
1495 }
1496
1497 /* the rest was statically initialized, and is read-only */
1498}
1499
1500/* until it's enabled, this UDC should be completely invisible
1501 * to any USB host.
1502 */
1503static void udc_enable (struct pxa2xx_udc *dev)
1504{
1505 udc_clear_mask_UDCCR(UDCCR_UDE);
1506
1507#ifdef CONFIG_ARCH_PXA
1508 /* Enable clock for USB device */
1509 pxa_set_cken(CKEN11_USB, 1);
1510 udelay(5);
1511#endif
1512
1513 /* try to clear these bits before we enable the udc */
1514 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1515
1516 ep0_idle(dev);
1517 dev->gadget.speed = USB_SPEED_UNKNOWN;
1518 dev->stats.irqs = 0;
1519
1520 /*
1521 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
1522 * - enable UDC
1523 * - if RESET is already in progress, ack interrupt
1524 * - unmask reset interrupt
1525 */
1526 udc_set_mask_UDCCR(UDCCR_UDE);
1527 if (!(UDCCR & UDCCR_UDA))
1528 udc_ack_int_UDCCR(UDCCR_RSTIR);
1529
1530 if (dev->has_cfr /* UDC_RES2 is defined */) {
1531 /* pxa255 (a0+) can avoid a set_config race that could
1532 * prevent gadget drivers from configuring correctly
1533 */
1534 UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
1535 } else {
1536 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
1537 * which could result in missing packets and interrupts.
1538 * supposedly one bit per endpoint, controlling whether it
1539 * double buffers or not; ACM/AREN bits fit into the holes.
1540 * zero bits (like USIR0_IRx) disable double buffering.
1541 */
1542 UDC_RES1 = 0x00;
1543 UDC_RES2 = 0x00;
1544 }
1545
1546#ifdef DISABLE_TEST_MODE
1547 /* "test mode" seems to have become the default in later chip
1548 * revs, preventing double buffering (and invalidating docs).
1549 * this EXPERIMENT enables it for bulk endpoints by tweaking
1550 * undefined/reserved register bits (that other drivers clear).
1551 * Belcarra code comments noted this usage.
1552 */
1553 if (fifo_mode & 1) { /* IN endpoints */
1554 UDC_RES1 |= USIR0_IR1|USIR0_IR6;
1555 UDC_RES2 |= USIR1_IR11;
1556 }
1557 if (fifo_mode & 2) { /* OUT endpoints */
1558 UDC_RES1 |= USIR0_IR2|USIR0_IR7;
1559 UDC_RES2 |= USIR1_IR12;
1560 }
1561#endif
1562
1563 /* enable suspend/resume and reset irqs */
1564 udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
1565
1566 /* enable ep0 irqs */
1567 UICR0 &= ~UICR0_IM0;
1568
1569 /* if hardware supports it, pullup D+ and wait for reset */
1570 let_usb_appear();
1571}
1572
1573
1574/* when a driver is successfully registered, it will receive
1575 * control requests including set_configuration(), which enables
1576 * non-control requests. then usb traffic follows until a
1577 * disconnect is reported. then a host may connect again, or
1578 * the driver might get unbound.
1579 */
1580int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1581{
1582 struct pxa2xx_udc *dev = the_controller;
1583 int retval;
1584
1585 if (!driver
1586 || driver->speed != USB_SPEED_FULL
1587 || !driver->bind
1588 || !driver->unbind
1589 || !driver->disconnect
1590 || !driver->setup)
1591 return -EINVAL;
1592 if (!dev)
1593 return -ENODEV;
1594 if (dev->driver)
1595 return -EBUSY;
1596
1597 /* first hook up the driver ... */
1598 dev->driver = driver;
1599 dev->gadget.dev.driver = &driver->driver;
1600 dev->pullup = 1;
1601
1602 device_add (&dev->gadget.dev);
1603 retval = driver->bind(&dev->gadget);
1604 if (retval) {
1605 DMSG("bind to driver %s --> error %d\n",
1606 driver->driver.name, retval);
1607 device_del (&dev->gadget.dev);
1608
1609 dev->driver = NULL;
1610 dev->gadget.dev.driver = NULL;
1611 return retval;
1612 }
1613 device_create_file(dev->dev, &dev_attr_function);
1614
1615 /* ... then enable host detection and ep0; and we're ready
1616 * for set_configuration as well as eventual disconnect.
1617 */
1618 DMSG("registered gadget driver '%s'\n", driver->driver.name);
1619 pullup(dev, 1);
1620 dump_state(dev);
1621 return 0;
1622}
1623EXPORT_SYMBOL(usb_gadget_register_driver);
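/*
 * Minimal registration sketch (a hypothetical full-speed gadget driver;
 * the fields and callbacks shown are the ones the checks above insist on,
 * everything else is illustrative):
 *
 *	static struct usb_gadget_driver my_gadget_driver = {
 *		.function	= "example function",
 *		.speed		= USB_SPEED_FULL,
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		.driver		= { .name = "my_gadget" },
 *	};
 *
 *	status = usb_gadget_register_driver(&my_gadget_driver);
 */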
1624
1625static void
1626stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
1627{
1628 int i;
1629
1630 /* don't disconnect drivers more than once */
1631 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1632 driver = NULL;
1633 dev->gadget.speed = USB_SPEED_UNKNOWN;
1634
1635 /* prevent new request submissions, kill any outstanding requests */
1636 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1637 struct pxa2xx_ep *ep = &dev->ep[i];
1638
1639 ep->stopped = 1;
1640 nuke(ep, -ESHUTDOWN);
1641 }
1642 del_timer_sync(&dev->timer);
1643
1644 /* report disconnect; the driver is already quiesced */
1645 LED_CONNECTED_OFF;
1646 if (driver)
1647 driver->disconnect(&dev->gadget);
1648
1649 /* re-init driver-visible data structures */
1650 udc_reinit(dev);
1651}
1652
1653int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1654{
1655 struct pxa2xx_udc *dev = the_controller;
1656
1657 if (!dev)
1658 return -ENODEV;
1659 if (!driver || driver != dev->driver)
1660 return -EINVAL;
1661
1662 local_irq_disable();
1663 pullup(dev, 0);
1664 stop_activity(dev, driver);
1665 local_irq_enable();
1666
1667 driver->unbind(&dev->gadget);
1668 dev->driver = NULL;
1669
1670 device_del (&dev->gadget.dev);
1671 device_remove_file(dev->dev, &dev_attr_function);
1672
1673 DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
1674 dump_state(dev);
1675 return 0;
1676}
1677EXPORT_SYMBOL(usb_gadget_unregister_driver);
1678
1679
1680/*-------------------------------------------------------------------------*/
1681
1682#ifdef CONFIG_ARCH_LUBBOCK
1683
1684/* Lubbock has separate connect and disconnect irqs. More typical designs
1685 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
1686 */
1687
1688static irqreturn_t
1689lubbock_vbus_irq(int irq, void *_dev, struct pt_regs *r)
1690{
1691 struct pxa2xx_udc *dev = _dev;
1692 int vbus;
1693
1694 dev->stats.irqs++;
1695 HEX_DISPLAY(dev->stats.irqs);
1696 switch (irq) {
1697 case LUBBOCK_USB_IRQ:
1698 LED_CONNECTED_ON;
1699 vbus = 1;
1700 disable_irq(LUBBOCK_USB_IRQ);
1701 enable_irq(LUBBOCK_USB_DISC_IRQ);
1702 break;
1703 case LUBBOCK_USB_DISC_IRQ:
1704 LED_CONNECTED_OFF;
1705 vbus = 0;
1706 disable_irq(LUBBOCK_USB_DISC_IRQ);
1707 enable_irq(LUBBOCK_USB_IRQ);
1708 break;
1709 default:
1710 return IRQ_NONE;
1711 }
1712
1713 pxa2xx_udc_vbus_session(&dev->gadget, vbus);
1714 return IRQ_HANDLED;
1715}
1716
1717#endif
1718
1719
1720/*-------------------------------------------------------------------------*/
1721
1722static inline void clear_ep_state (struct pxa2xx_udc *dev)
1723{
1724 unsigned i;
1725
1726 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
1727 * fifos, and pending transactions mustn't be continued in any case.
1728 */
1729 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
1730 nuke(&dev->ep[i], -ECONNABORTED);
1731}
1732
1733static void udc_watchdog(unsigned long _dev)
1734{
1735 struct pxa2xx_udc *dev = (void *)_dev;
1736
1737 local_irq_disable();
1738 if (dev->ep0state == EP0_STALL
1739 && (UDCCS0 & UDCCS0_FST) == 0
1740 && (UDCCS0 & UDCCS0_SST) == 0) {
1741 UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
1742 DBG(DBG_VERBOSE, "ep0 re-stall\n");
1743 start_watchdog(dev);
1744 }
1745 local_irq_enable();
1746}
1747
1748static void handle_ep0 (struct pxa2xx_udc *dev)
1749{
1750 u32 udccs0 = UDCCS0;
1751 struct pxa2xx_ep *ep = &dev->ep [0];
1752 struct pxa2xx_request *req;
1753 union {
1754 struct usb_ctrlrequest r;
1755 u8 raw [8];
1756 u32 word [2];
1757 } u;
1758
1759 if (list_empty(&ep->queue))
1760 req = NULL;
1761 else
1762 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
1763
1764 /* clear stall status */
1765 if (udccs0 & UDCCS0_SST) {
1766 nuke(ep, -EPIPE);
1767 UDCCS0 = UDCCS0_SST;
1768 del_timer(&dev->timer);
1769 ep0_idle(dev);
1770 }
1771
1772 /* previous request unfinished? non-error iff back-to-back ... */
1773 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
1774 nuke(ep, 0);
1775 del_timer(&dev->timer);
1776 ep0_idle(dev);
1777 }
1778
1779 switch (dev->ep0state) {
1780 case EP0_IDLE:
1781 /* late-breaking status? */
1782 udccs0 = UDCCS0;
1783
1784 /* start control request? */
1785 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
1786 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
1787 int i;
1788
1789 nuke (ep, -EPROTO);
1790
1791 /* read SETUP packet */
1792 for (i = 0; i < 8; i++) {
1793 if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
1794bad_setup:
1795 DMSG("SETUP %d!\n", i);
1796 goto stall;
1797 }
1798 u.raw [i] = (u8) UDDR0;
1799 }
1800 if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
1801 goto bad_setup;
1802
1803got_setup:
1804 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1805 u.r.bRequestType, u.r.bRequest,
1806 le16_to_cpu(u.r.wValue),
1807 le16_to_cpu(u.r.wIndex),
1808 le16_to_cpu(u.r.wLength));
1809
1810 /* cope with automagic for some standard requests. */
1811 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
1812 == USB_TYPE_STANDARD;
1813 dev->req_config = 0;
1814 dev->req_pending = 1;
1815 switch (u.r.bRequest) {
1816 /* hardware restricts gadget drivers here! */
1817 case USB_REQ_SET_CONFIGURATION:
1818 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1819 /* reflect hardware's automagic
1820 * up to the gadget driver.
1821 */
1822config_change:
1823 dev->req_config = 1;
1824 clear_ep_state(dev);
1825 /* if !has_cfr, there's no synch
1826 * else use AREN (later) not SA|OPR
1827 * USIR0_IR0 acts edge sensitive
1828 */
1829 }
1830 break;
1831 /* ... and here, even more ... */
1832 case USB_REQ_SET_INTERFACE:
1833 if (u.r.bRequestType == USB_RECIP_INTERFACE) {
1834 /* udc hardware is broken by design:
1835 * - altsetting may only be zero;
1836 * - hw resets all interfaces' eps;
1837 * - ep reset doesn't include halt(?).
1838 */
1839 DMSG("broken set_interface (%d/%d)\n",
1840 le16_to_cpu(u.r.wIndex),
1841 le16_to_cpu(u.r.wValue));
1842 goto config_change;
1843 }
1844 break;
1845 /* hardware was supposed to hide this */
1846 case USB_REQ_SET_ADDRESS:
1847 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1848 ep0start(dev, 0, "address");
1849 return;
1850 }
1851 break;
1852 }
1853
1854 if (u.r.bRequestType & USB_DIR_IN)
1855 dev->ep0state = EP0_IN_DATA_PHASE;
1856 else
1857 dev->ep0state = EP0_OUT_DATA_PHASE;
1858
1859 i = dev->driver->setup(&dev->gadget, &u.r);
1860 if (i < 0) {
1861 /* hardware automagic preventing STALL... */
1862 if (dev->req_config) {
 1863 				/* hardware sometimes neglects to tell
 1864 				 * us about config change events, so
 1865 				 * later ones may fail...
1866 */
1867 WARN("config change %02x fail %d?\n",
1868 u.r.bRequest, i);
1869 return;
1870 /* TODO experiment: if has_cfr,
1871 * hardware didn't ACK; maybe we
1872 * could actually STALL!
1873 */
1874 }
1875 DBG(DBG_VERBOSE, "protocol STALL, "
1876 "%02x err %d\n", UDCCS0, i);
1877stall:
1878 /* the watchdog timer helps deal with cases
1879 * where udc seems to clear FST wrongly, and
1880 * then NAKs instead of STALLing.
1881 */
1882 ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
1883 start_watchdog(dev);
1884 dev->ep0state = EP0_STALL;
1885
1886 /* deferred i/o == no response yet */
1887 } else if (dev->req_pending) {
1888 if (likely(dev->ep0state == EP0_IN_DATA_PHASE
1889 || dev->req_std || u.r.wLength))
1890 ep0start(dev, 0, "defer");
1891 else
1892 ep0start(dev, UDCCS0_IPR, "defer/IPR");
1893 }
1894
1895 /* expect at least one data or status stage irq */
1896 return;
1897
1898 } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
1899 == (UDCCS0_OPR|UDCCS0_SA))) {
1900 unsigned i;
1901
1902 /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
1903 * still observed on a pxa255 a0.
1904 */
1905 DBG(DBG_VERBOSE, "e131\n");
1906 nuke(ep, -EPROTO);
1907
1908 /* read SETUP data, but don't trust it too much */
1909 for (i = 0; i < 8; i++)
1910 u.raw [i] = (u8) UDDR0;
1911 if ((u.r.bRequestType & USB_RECIP_MASK)
1912 > USB_RECIP_OTHER)
1913 goto stall;
1914 if (u.word [0] == 0 && u.word [1] == 0)
1915 goto stall;
1916 goto got_setup;
1917 } else {
1918 /* some random early IRQ:
1919 * - we acked FST
1920 * - IPR cleared
1921 * - OPR got set, without SA (likely status stage)
1922 */
1923 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
1924 }
1925 break;
1926 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1927 if (udccs0 & UDCCS0_OPR) {
1928 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
1929 DBG(DBG_VERBOSE, "ep0in premature status\n");
1930 if (req)
1931 done(ep, req, 0);
1932 ep0_idle(dev);
1933 } else /* irq was IPR clearing */ {
1934 if (req) {
1935 /* this IN packet might finish the request */
1936 (void) write_ep0_fifo(ep, req);
1937 } /* else IN token before response was written */
1938 }
1939 break;
1940 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
1941 if (udccs0 & UDCCS0_OPR) {
1942 if (req) {
1943 /* this OUT packet might finish the request */
1944 if (read_ep0_fifo(ep, req))
1945 done(ep, req, 0);
1946 /* else more OUT packets expected */
1947 } /* else OUT token before read was issued */
1948 } else /* irq was IPR clearing */ {
1949 DBG(DBG_VERBOSE, "ep0out premature status\n");
1950 if (req)
1951 done(ep, req, 0);
1952 ep0_idle(dev);
1953 }
1954 break;
1955 case EP0_END_XFER:
1956 if (req)
1957 done(ep, req, 0);
1958 /* ack control-IN status (maybe in-zlp was skipped)
1959 * also appears after some config change events.
1960 */
1961 if (udccs0 & UDCCS0_OPR)
1962 UDCCS0 = UDCCS0_OPR;
1963 ep0_idle(dev);
1964 break;
1965 case EP0_STALL:
1966 UDCCS0 = UDCCS0_FST;
1967 break;
1968 }
1969 USIR0 = USIR0_IR0;
1970}
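/* Illustrative sketch, not part of this driver: the contract handle_ep0()
 * relies on.  A gadget driver's setup() either queues a request on ep0
 * and returns >= 0, or returns a negative errno to ask for the protocol
 * STALL handled above.  "example_ep0_req" is a hypothetical request the
 * gadget driver would have allocated at bind() time.
 */
#if 0
static struct usb_request *example_ep0_req;	/* hypothetical, from bind() */

static int
example_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequest) {
	case 0x01:	/* some vendor-specific IN request */
		example_ep0_req->length = min_t(u16, 2,
				le16_to_cpu(ctrl->wLength));
		memcpy(example_ep0_req->buf, "ok", example_ep0_req->length);
		return usb_ep_queue(gadget->ep0, example_ep0_req, GFP_ATOMIC);
	default:
		return -EOPNOTSUPP;	/* handle_ep0() will STALL ep0 */
	}
}
#endif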
1971
1972static void handle_ep(struct pxa2xx_ep *ep)
1973{
1974 struct pxa2xx_request *req;
1975 int is_in = ep->bEndpointAddress & USB_DIR_IN;
1976 int completed;
1977 u32 udccs, tmp;
1978
1979 do {
1980 completed = 0;
1981 if (likely (!list_empty(&ep->queue)))
1982 req = list_entry(ep->queue.next,
1983 struct pxa2xx_request, queue);
1984 else
1985 req = NULL;
1986
1987 // TODO check FST handling
1988
1989 udccs = *ep->reg_udccs;
1990 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
1991 tmp = UDCCS_BI_TUR;
1992 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
1993 tmp |= UDCCS_BI_SST;
1994 tmp &= udccs;
1995 if (likely (tmp))
1996 *ep->reg_udccs = tmp;
1997 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
1998 completed = write_fifo(ep, req);
1999
2000 } else { /* irq from RPC (or for ISO, ROF) */
2001 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2002 tmp = UDCCS_BO_SST | UDCCS_BO_DME;
2003 else
2004 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
2005 tmp &= udccs;
2006 if (likely(tmp))
2007 *ep->reg_udccs = tmp;
2008
2009 /* fifos can hold packets, ready for reading... */
2010 if (likely(req)) {
2011#ifdef USE_OUT_DMA
2012// TODO: OUT dma hasn't been debugged yet.  This approach assumes
2013// the worst about short packets and RPC; a smarter one may do better.
2014
2015 if (likely(ep->dma >= 0)) {
2016 if (!(udccs & UDCCS_BO_RSP)) {
2017 *ep->reg_udccs = UDCCS_BO_RPC;
2018 ep->dma_irqs++;
2019 return;
2020 }
2021 }
2022#endif
2023 completed = read_fifo(ep, req);
2024 } else
2025 pio_irq_disable (ep->bEndpointAddress);
2026 }
2027 ep->pio_irqs++;
2028 } while (completed);
2029}
2030
2031/*
2032 * pxa2xx_udc_irq - interrupt handler
2033 *
2034 * avoid delays in ep0 processing. the control handshaking isn't always
2035 * under software control (pxa250c0 and the pxa255 are better), and delays
2036 * could cause usb protocol errors.
2037 */
2038static irqreturn_t
2039pxa2xx_udc_irq(int irq, void *_dev, struct pt_regs *r)
2040{
2041 struct pxa2xx_udc *dev = _dev;
2042 int handled;
2043
2044 dev->stats.irqs++;
2045 HEX_DISPLAY(dev->stats.irqs);
2046 do {
2047 u32 udccr = UDCCR;
2048
2049 handled = 0;
2050
2051 /* SUSpend Interrupt Request */
2052 if (unlikely(udccr & UDCCR_SUSIR)) {
2053 udc_ack_int_UDCCR(UDCCR_SUSIR);
2054 handled = 1;
2055 DBG(DBG_VERBOSE, "USB suspend%s\n", is_usb_connected()
2056 ? "" : "+disconnect");
2057
2058 if (!is_usb_connected())
2059 stop_activity(dev, dev->driver);
2060 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2061 && dev->driver
2062 && dev->driver->suspend)
2063 dev->driver->suspend(&dev->gadget);
2064 ep0_idle (dev);
2065 }
2066
2067 /* RESume Interrupt Request */
2068 if (unlikely(udccr & UDCCR_RESIR)) {
2069 udc_ack_int_UDCCR(UDCCR_RESIR);
2070 handled = 1;
2071 DBG(DBG_VERBOSE, "USB resume\n");
2072
2073 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2074 && dev->driver
2075 && dev->driver->resume
2076 && is_usb_connected())
2077 dev->driver->resume(&dev->gadget);
2078 }
2079
2080 /* ReSeT Interrupt Request - USB reset */
2081 if (unlikely(udccr & UDCCR_RSTIR)) {
2082 udc_ack_int_UDCCR(UDCCR_RSTIR);
2083 handled = 1;
2084
2085 if ((UDCCR & UDCCR_UDA) == 0) {
2086 DBG(DBG_VERBOSE, "USB reset start\n");
2087
2088 /* reset driver and endpoints,
2089 * in case that's not yet done
2090 */
2091 stop_activity (dev, dev->driver);
2092
2093 } else {
2094 DBG(DBG_VERBOSE, "USB reset end\n");
2095 dev->gadget.speed = USB_SPEED_FULL;
2096 LED_CONNECTED_ON;
2097 memset(&dev->stats, 0, sizeof dev->stats);
2098 /* driver and endpoints are still reset */
2099 }
2100
2101 } else {
2102 u32 usir0 = USIR0 & ~UICR0;
2103 u32 usir1 = USIR1 & ~UICR1;
2104 int i;
2105
2106 if (unlikely (!usir0 && !usir1))
2107 continue;
2108
2109 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2110
2111 /* control traffic */
2112 if (usir0 & USIR0_IR0) {
2113 dev->ep[0].pio_irqs++;
2114 handle_ep0(dev);
2115 handled = 1;
2116 }
2117
2118 /* endpoint data transfers */
2119 for (i = 0; i < 8; i++) {
2120 u32 tmp = 1 << i;
2121
2122 if (i && (usir0 & tmp)) {
2123 handle_ep(&dev->ep[i]);
2124 USIR0 |= tmp;
2125 handled = 1;
2126 }
2127 if (usir1 & tmp) {
2128 handle_ep(&dev->ep[i+8]);
2129 USIR1 |= tmp;
2130 handled = 1;
2131 }
2132 }
2133 }
2134
2135 /* we could also ask for 1 msec SOF (SIR) interrupts */
2136
2137 } while (handled);
2138 return IRQ_HANDLED;
2139}
2140
2141/*-------------------------------------------------------------------------*/
2142
2143static void nop_release (struct device *dev)
2144{
2145 DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
2146}
2147
2148/* this uses load-time allocation and initialization (instead of
2149 * doing it at run-time) to save code, eliminate fault paths, and
2150 * be more obviously correct.
2151 */
2152static struct pxa2xx_udc memory = {
2153 .gadget = {
2154 .ops = &pxa2xx_udc_ops,
2155 .ep0 = &memory.ep[0].ep,
2156 .name = driver_name,
2157 .dev = {
2158 .bus_id = "gadget",
2159 .release = nop_release,
2160 },
2161 },
2162
2163 /* control endpoint */
2164 .ep[0] = {
2165 .ep = {
2166 .name = ep0name,
2167 .ops = &pxa2xx_ep_ops,
2168 .maxpacket = EP0_FIFO_SIZE,
2169 },
2170 .dev = &memory,
2171 .reg_udccs = &UDCCS0,
2172 .reg_uddr = &UDDR0,
2173 },
2174
2175 /* first group of endpoints */
2176 .ep[1] = {
2177 .ep = {
2178 .name = "ep1in-bulk",
2179 .ops = &pxa2xx_ep_ops,
2180 .maxpacket = BULK_FIFO_SIZE,
2181 },
2182 .dev = &memory,
2183 .fifo_size = BULK_FIFO_SIZE,
2184 .bEndpointAddress = USB_DIR_IN | 1,
2185 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2186 .reg_udccs = &UDCCS1,
2187 .reg_uddr = &UDDR1,
2188 drcmr (25)
2189 },
2190 .ep[2] = {
2191 .ep = {
2192 .name = "ep2out-bulk",
2193 .ops = &pxa2xx_ep_ops,
2194 .maxpacket = BULK_FIFO_SIZE,
2195 },
2196 .dev = &memory,
2197 .fifo_size = BULK_FIFO_SIZE,
2198 .bEndpointAddress = 2,
2199 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2200 .reg_udccs = &UDCCS2,
2201 .reg_ubcr = &UBCR2,
2202 .reg_uddr = &UDDR2,
2203 drcmr (26)
2204 },
2205#ifndef CONFIG_USB_PXA2XX_SMALL
2206 .ep[3] = {
2207 .ep = {
2208 .name = "ep3in-iso",
2209 .ops = &pxa2xx_ep_ops,
2210 .maxpacket = ISO_FIFO_SIZE,
2211 },
2212 .dev = &memory,
2213 .fifo_size = ISO_FIFO_SIZE,
2214 .bEndpointAddress = USB_DIR_IN | 3,
2215 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2216 .reg_udccs = &UDCCS3,
2217 .reg_uddr = &UDDR3,
2218 drcmr (27)
2219 },
2220 .ep[4] = {
2221 .ep = {
2222 .name = "ep4out-iso",
2223 .ops = &pxa2xx_ep_ops,
2224 .maxpacket = ISO_FIFO_SIZE,
2225 },
2226 .dev = &memory,
2227 .fifo_size = ISO_FIFO_SIZE,
2228 .bEndpointAddress = 4,
2229 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2230 .reg_udccs = &UDCCS4,
2231 .reg_ubcr = &UBCR4,
2232 .reg_uddr = &UDDR4,
2233 drcmr (28)
2234 },
2235 .ep[5] = {
2236 .ep = {
2237 .name = "ep5in-int",
2238 .ops = &pxa2xx_ep_ops,
2239 .maxpacket = INT_FIFO_SIZE,
2240 },
2241 .dev = &memory,
2242 .fifo_size = INT_FIFO_SIZE,
2243 .bEndpointAddress = USB_DIR_IN | 5,
2244 .bmAttributes = USB_ENDPOINT_XFER_INT,
2245 .reg_udccs = &UDCCS5,
2246 .reg_uddr = &UDDR5,
2247 },
2248
2249 /* second group of endpoints */
2250 .ep[6] = {
2251 .ep = {
2252 .name = "ep6in-bulk",
2253 .ops = &pxa2xx_ep_ops,
2254 .maxpacket = BULK_FIFO_SIZE,
2255 },
2256 .dev = &memory,
2257 .fifo_size = BULK_FIFO_SIZE,
2258 .bEndpointAddress = USB_DIR_IN | 6,
2259 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2260 .reg_udccs = &UDCCS6,
2261 .reg_uddr = &UDDR6,
2262 drcmr (30)
2263 },
2264 .ep[7] = {
2265 .ep = {
2266 .name = "ep7out-bulk",
2267 .ops = &pxa2xx_ep_ops,
2268 .maxpacket = BULK_FIFO_SIZE,
2269 },
2270 .dev = &memory,
2271 .fifo_size = BULK_FIFO_SIZE,
2272 .bEndpointAddress = 7,
2273 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2274 .reg_udccs = &UDCCS7,
2275 .reg_ubcr = &UBCR7,
2276 .reg_uddr = &UDDR7,
2277 drcmr (31)
2278 },
2279 .ep[8] = {
2280 .ep = {
2281 .name = "ep8in-iso",
2282 .ops = &pxa2xx_ep_ops,
2283 .maxpacket = ISO_FIFO_SIZE,
2284 },
2285 .dev = &memory,
2286 .fifo_size = ISO_FIFO_SIZE,
2287 .bEndpointAddress = USB_DIR_IN | 8,
2288 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2289 .reg_udccs = &UDCCS8,
2290 .reg_uddr = &UDDR8,
2291 drcmr (32)
2292 },
2293 .ep[9] = {
2294 .ep = {
2295 .name = "ep9out-iso",
2296 .ops = &pxa2xx_ep_ops,
2297 .maxpacket = ISO_FIFO_SIZE,
2298 },
2299 .dev = &memory,
2300 .fifo_size = ISO_FIFO_SIZE,
2301 .bEndpointAddress = 9,
2302 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2303 .reg_udccs = &UDCCS9,
2304 .reg_ubcr = &UBCR9,
2305 .reg_uddr = &UDDR9,
2306 drcmr (33)
2307 },
2308 .ep[10] = {
2309 .ep = {
2310 .name = "ep10in-int",
2311 .ops = &pxa2xx_ep_ops,
2312 .maxpacket = INT_FIFO_SIZE,
2313 },
2314 .dev = &memory,
2315 .fifo_size = INT_FIFO_SIZE,
2316 .bEndpointAddress = USB_DIR_IN | 10,
2317 .bmAttributes = USB_ENDPOINT_XFER_INT,
2318 .reg_udccs = &UDCCS10,
2319 .reg_uddr = &UDDR10,
2320 },
2321
2322 /* third group of endpoints */
2323 .ep[11] = {
2324 .ep = {
2325 .name = "ep11in-bulk",
2326 .ops = &pxa2xx_ep_ops,
2327 .maxpacket = BULK_FIFO_SIZE,
2328 },
2329 .dev = &memory,
2330 .fifo_size = BULK_FIFO_SIZE,
2331 .bEndpointAddress = USB_DIR_IN | 11,
2332 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2333 .reg_udccs = &UDCCS11,
2334 .reg_uddr = &UDDR11,
2335 drcmr (35)
2336 },
2337 .ep[12] = {
2338 .ep = {
2339 .name = "ep12out-bulk",
2340 .ops = &pxa2xx_ep_ops,
2341 .maxpacket = BULK_FIFO_SIZE,
2342 },
2343 .dev = &memory,
2344 .fifo_size = BULK_FIFO_SIZE,
2345 .bEndpointAddress = 12,
2346 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2347 .reg_udccs = &UDCCS12,
2348 .reg_ubcr = &UBCR12,
2349 .reg_uddr = &UDDR12,
2350 drcmr (36)
2351 },
2352 .ep[13] = {
2353 .ep = {
2354 .name = "ep13in-iso",
2355 .ops = &pxa2xx_ep_ops,
2356 .maxpacket = ISO_FIFO_SIZE,
2357 },
2358 .dev = &memory,
2359 .fifo_size = ISO_FIFO_SIZE,
2360 .bEndpointAddress = USB_DIR_IN | 13,
2361 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2362 .reg_udccs = &UDCCS13,
2363 .reg_uddr = &UDDR13,
2364 drcmr (37)
2365 },
2366 .ep[14] = {
2367 .ep = {
2368 .name = "ep14out-iso",
2369 .ops = &pxa2xx_ep_ops,
2370 .maxpacket = ISO_FIFO_SIZE,
2371 },
2372 .dev = &memory,
2373 .fifo_size = ISO_FIFO_SIZE,
2374 .bEndpointAddress = 14,
2375 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2376 .reg_udccs = &UDCCS14,
2377 .reg_ubcr = &UBCR14,
2378 .reg_uddr = &UDDR14,
2379 drcmr (38)
2380 },
2381 .ep[15] = {
2382 .ep = {
2383 .name = "ep15in-int",
2384 .ops = &pxa2xx_ep_ops,
2385 .maxpacket = INT_FIFO_SIZE,
2386 },
2387 .dev = &memory,
2388 .fifo_size = INT_FIFO_SIZE,
2389 .bEndpointAddress = USB_DIR_IN | 15,
2390 .bmAttributes = USB_ENDPOINT_XFER_INT,
2391 .reg_udccs = &UDCCS15,
2392 .reg_uddr = &UDDR15,
2393 },
2394#endif /* !CONFIG_USB_PXA2XX_SMALL */
2395};
2396
2397#define CP15R0_VENDOR_MASK 0xffffe000
2398
2399#if defined(CONFIG_ARCH_PXA)
2400#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
2401
2402#elif defined(CONFIG_ARCH_IXP4XX)
2403#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
2404
2405#endif
2406
2407#define CP15R0_PROD_MASK 0x000003f0
2408#define PXA25x 0x00000100 /* and PXA26x */
2409#define PXA210 0x00000120
2410
2411#define CP15R0_REV_MASK 0x0000000f
2412
2413#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
2414
2415#define PXA255_A0 0x00000106 /* or PXA260_B1 */
2416#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
2417#define PXA250_B2 0x00000104
2418#define PXA250_B1 0x00000103 /* or PXA260_A0 */
2419#define PXA250_B0 0x00000102
2420#define PXA250_A1 0x00000101
2421#define PXA250_A0 0x00000100
2422
2423#define PXA210_C0 0x00000125
2424#define PXA210_B2 0x00000124
2425#define PXA210_B1 0x00000123
2426#define PXA210_B0 0x00000122
2427#define IXP425_A0 0x000001c1
2428
2429/*
2430 * probe - binds to the platform device
2431 */
2432static int __init pxa2xx_udc_probe(struct device *_dev)
2433{
2434 struct pxa2xx_udc *dev = &memory;
2435 int retval, out_dma = 1;
2436 u32 chiprev;
2437
2438 /* insist on Intel/ARM/XScale */
2439 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
2440 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2441 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2442 return -ENODEV;
2443 }
2444
2445 /* trigger chiprev-specific logic */
2446 switch (chiprev & CP15R0_PRODREV_MASK) {
2447#if defined(CONFIG_ARCH_PXA)
2448 case PXA255_A0:
2449 dev->has_cfr = 1;
2450 break;
2451 case PXA250_A0:
2452 case PXA250_A1:
2453 /* A0/A1 "not released"; ep 13, 15 unusable */
2454 /* fall through */
2455 case PXA250_B2: case PXA210_B2:
2456 case PXA250_B1: case PXA210_B1:
2457 case PXA250_B0: case PXA210_B0:
2458 out_dma = 0;
2459 /* fall through */
2460 case PXA250_C0: case PXA210_C0:
2461 break;
2462#elif defined(CONFIG_ARCH_IXP4XX)
2463 case IXP425_A0:
2464 out_dma = 0;
2465 break;
2466#endif
2467 default:
2468 out_dma = 0;
2469 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2470 driver_name, chiprev);
2471 /* iop3xx, ixp4xx, ... */
2472 return -ENODEV;
2473 }
2474
2475 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
2476 dev->has_cfr ? "" : " (!cfr)",
2477 out_dma ? "" : " (broken dma-out)",
2478 SIZE_STR DMASTR
2479 );
2480
2481#ifdef USE_DMA
2482#ifndef USE_OUT_DMA
2483 out_dma = 0;
2484#endif
2485 /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
2486 if (!out_dma) {
2487 DMSG("disabled OUT dma\n");
2488 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2489 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2490 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2491 }
2492#endif
2493
2494 /* other non-static parts of init */
2495 dev->dev = _dev;
2496 dev->mach = _dev->platform_data;
2497
2498 init_timer(&dev->timer);
2499 dev->timer.function = udc_watchdog;
2500 dev->timer.data = (unsigned long) dev;
2501
2502 device_initialize(&dev->gadget.dev);
2503 dev->gadget.dev.parent = _dev;
2504 dev->gadget.dev.dma_mask = _dev->dma_mask;
2505
2506 the_controller = dev;
2507 dev_set_drvdata(_dev, dev);
2508
2509 udc_disable(dev);
2510 udc_reinit(dev);
2511
2512 dev->vbus = is_usb_connected();
2513
2514 /* irq setup after old hardware state is cleaned up */
2515 retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
2516 SA_INTERRUPT, driver_name, dev);
2517 if (retval != 0) {
2518 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2519 driver_name, IRQ_USB, retval);
2520 return -EBUSY;
2521 }
2522 dev->got_irq = 1;
2523
2524#ifdef CONFIG_ARCH_LUBBOCK
2525 if (machine_is_lubbock()) {
2526 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2527 lubbock_vbus_irq,
2528 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2529 driver_name, dev);
2530 if (retval != 0) {
2531 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2532 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2533lubbock_fail0:
2534 free_irq(IRQ_USB, dev);
2535 return -EBUSY;
2536 }
2537 retval = request_irq(LUBBOCK_USB_IRQ,
2538 lubbock_vbus_irq,
2539 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2540 driver_name, dev);
2541 if (retval != 0) {
2542 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2543 driver_name, LUBBOCK_USB_IRQ, retval);
2544 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2545 goto lubbock_fail0;
2546 }
2547#ifdef DEBUG
2548 /* with U-Boot (but not BLOB), hex is off by default */
2549 HEX_DISPLAY(dev->stats.irqs);
2550 LUB_DISC_BLNK_LED &= 0xff;
2551#endif
2552 }
2553#endif
2554 create_proc_files();
2555
2556 return 0;
2557}
2558static int __exit pxa2xx_udc_remove(struct device *_dev)
2559{
2560 struct pxa2xx_udc *dev = dev_get_drvdata(_dev);
2561
2562 udc_disable(dev);
2563 remove_proc_files();
2564 usb_gadget_unregister_driver(dev->driver);
2565
2566 if (dev->got_irq) {
2567 free_irq(IRQ_USB, dev);
2568 dev->got_irq = 0;
2569 }
2570 if (machine_is_lubbock()) {
2571 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2572 free_irq(LUBBOCK_USB_IRQ, dev);
2573 }
2574 dev_set_drvdata(_dev, NULL);
2575 the_controller = NULL;
2576 return 0;
2577}
2578
2579/*-------------------------------------------------------------------------*/
2580
2581#ifdef CONFIG_PM
2582
2583/* USB suspend (controlled by the host) and system suspend (controlled
2584 * by the PXA) don't necessarily work well together. If USB is active,
2585 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
2586 * mode, or any deeper PM saving state.
2587 *
2588 * For now, we punt and forcibly disconnect from the USB host when PXA
2589 * enters any suspend state. While we're disconnected, we always disable
2590 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2591 * Boards without software pullup control shouldn't use those states.
2592 * VBUS IRQs should probably be ignored so that the PXA device just acts
2593 * "dead" to USB hosts until system resume.
2594 */
2595static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level)
2596{
2597 struct pxa2xx_udc *udc = dev_get_drvdata(dev);
2598
2599 if (level == SUSPEND_POWER_DOWN) {
2600 if (!udc->mach->udc_command)
2601 WARN("USB host won't detect disconnect!\n");
2602 pullup(udc, 0);
2603 }
2604 return 0;
2605}
2606
2607static int pxa2xx_udc_resume(struct device *dev, u32 level)
2608{
2609 struct pxa2xx_udc *udc = dev_get_drvdata(dev);
2610
2611 if (level == RESUME_POWER_ON)
2612 pullup(udc, 1);
2613 return 0;
2614}
2615
2616#else
2617#define pxa2xx_udc_suspend NULL
2618#define pxa2xx_udc_resume NULL
2619#endif
2620
2621/*-------------------------------------------------------------------------*/
2622
2623static struct device_driver udc_driver = {
2624 .name = "pxa2xx-udc",
2625 .bus = &platform_bus_type,
2626 .probe = pxa2xx_udc_probe,
2627 .remove = __exit_p(pxa2xx_udc_remove),
2628 .suspend = pxa2xx_udc_suspend,
2629 .resume = pxa2xx_udc_resume,
2630};
2631
2632static int __init udc_init(void)
2633{
2634 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2635 return driver_register(&udc_driver);
2636}
2637module_init(udc_init);
2638
2639static void __exit udc_exit(void)
2640{
2641 driver_unregister(&udc_driver);
2642}
2643module_exit(udc_exit);
2644
2645MODULE_DESCRIPTION(DRIVER_DESC);
2646MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2647MODULE_LICENSE("GPL");
2648
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h
new file mode 100644
index 000000000000..1f3a7d999da7
--- /dev/null
+++ b/drivers/usb/gadget/pxa2xx_udc.h
@@ -0,0 +1,320 @@
1/*
2 * linux/drivers/usb/gadget/pxa2xx_udc.h
3 * Intel PXA2xx on-chip full speed USB device controller
4 *
5 * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
6 * Copyright (C) 2003 David Brownell
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef __LINUX_USB_GADGET_PXA2XX_H
25#define __LINUX_USB_GADGET_PXA2XX_H
26
27#include <linux/types.h>
28
29/*-------------------------------------------------------------------------*/
30
31/* pxa2xx has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
32#define UFNRH_SIR (1 << 7) /* SOF interrupt request */
33#define UFNRH_SIM (1 << 6) /* SOF interrupt mask */
34#define UFNRH_IPE14 (1 << 5) /* ISO packet error, ep14 */
35#define UFNRH_IPE9 (1 << 4) /* ISO packet error, ep9 */
36#define UFNRH_IPE4 (1 << 3) /* ISO packet error, ep4 */
37
38/* pxa255 has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
39#define UDCCFR UDC_RES2 /* UDC Control Function Register */
40#define UDCCFR_AREN (1 << 7) /* ACK response enable (now) */
41#define UDCCFR_ACM (1 << 2) /* ACK control mode (wait for AREN) */
42
43/* latest pxa255 errata define new "must be one" bits in UDCCFR */
44#define UDCCFR_MB1 (0xff & ~(UDCCFR_AREN|UDCCFR_ACM))
45
46/*-------------------------------------------------------------------------*/
47
48struct pxa2xx_udc;
49
50struct pxa2xx_ep {
51 struct usb_ep ep;
52 struct pxa2xx_udc *dev;
53
54 const struct usb_endpoint_descriptor *desc;
55 struct list_head queue;
56 unsigned long pio_irqs;
57 unsigned long dma_irqs;
58 short dma;
59
60 unsigned short fifo_size;
61 u8 bEndpointAddress;
62 u8 bmAttributes;
63
64 unsigned stopped : 1;
65 unsigned dma_fixup : 1;
66
67 /* UDCCS = UDC Control/Status for this EP
68 * UBCR = UDC Byte Count Remaining (contents of OUT fifo)
69 * UDDR = UDC Endpoint Data Register (the fifo)
70 * DRCM = DMA Request Channel Map
71 */
72 volatile u32 *reg_udccs;
73 volatile u32 *reg_ubcr;
74 volatile u32 *reg_uddr;
75#ifdef USE_DMA
76 volatile u32 *reg_drcmr;
77#define drcmr(n) .reg_drcmr = & DRCMR ## n ,
78#else
79#define drcmr(n)
80#endif
81};
82
83struct pxa2xx_request {
84 struct usb_request req;
85 struct list_head queue;
86};
87
88enum ep0_state {
89 EP0_IDLE,
90 EP0_IN_DATA_PHASE,
91 EP0_OUT_DATA_PHASE,
92 EP0_END_XFER,
93 EP0_STALL,
94};
95
96#define EP0_FIFO_SIZE ((unsigned)16)
97#define BULK_FIFO_SIZE ((unsigned)64)
98#define ISO_FIFO_SIZE ((unsigned)256)
99#define INT_FIFO_SIZE ((unsigned)8)
100
101struct udc_stats {
102 struct ep0stats {
103 unsigned long ops;
104 unsigned long bytes;
105 } read, write;
106 unsigned long irqs;
107};
108
109#ifdef CONFIG_USB_PXA2XX_SMALL
110/* when memory's tight, SMALL config saves code+data. */
111#undef USE_DMA
112#define PXA_UDC_NUM_ENDPOINTS 3
113#endif
114
115#ifndef PXA_UDC_NUM_ENDPOINTS
116#define PXA_UDC_NUM_ENDPOINTS 16
117#endif
118
119struct pxa2xx_udc {
120 struct usb_gadget gadget;
121 struct usb_gadget_driver *driver;
122
123 enum ep0_state ep0state;
124 struct udc_stats stats;
125 unsigned got_irq : 1,
126 vbus : 1,
127 pullup : 1,
128 has_cfr : 1,
129 req_pending : 1,
130 req_std : 1,
131 req_config : 1;
132
133#define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200))
134 struct timer_list timer;
135
136 struct device *dev;
137 struct pxa2xx_udc_mach_info *mach;
138 u64 dma_mask;
139 struct pxa2xx_ep ep [PXA_UDC_NUM_ENDPOINTS];
140};
141
142/*-------------------------------------------------------------------------*/
143
144#ifdef CONFIG_ARCH_LUBBOCK
145#include <asm/arch/lubbock.h>
146/* lubbock can also report usb connect/disconnect irqs */
147
148#ifdef DEBUG
149#define HEX_DISPLAY(n) if (machine_is_lubbock()) { LUB_HEXLED = (n); }
150#endif
151
152#endif
153
154/*-------------------------------------------------------------------------*/
155
156/* LEDs are only for debug */
157#ifndef HEX_DISPLAY
158#define HEX_DISPLAY(n) do {} while(0)
159#endif
160
161#ifdef DEBUG
162#include <asm/leds.h>
163
164#define LED_CONNECTED_ON leds_event(led_green_on)
165#define LED_CONNECTED_OFF do { \
166 leds_event(led_green_off); \
167 HEX_DISPLAY(0); \
168 } while(0)
169#endif
170
171#ifndef LED_CONNECTED_ON
172#define LED_CONNECTED_ON do {} while(0)
173#define LED_CONNECTED_OFF do {} while(0)
174#endif
175
176/*-------------------------------------------------------------------------*/
177
178static struct pxa2xx_udc *the_controller;
179
180/* one GPIO should be used to detect host disconnect */
181static inline int is_usb_connected(void)
182{
183 if (!the_controller->mach->udc_is_connected)
184 return 1;
185 return the_controller->mach->udc_is_connected();
186}
187
188/* one GPIO should force the host to see this device (or not) */
189static inline void make_usb_disappear(void)
190{
191 if (!the_controller->mach->udc_command)
192 return;
193 the_controller->mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
194}
195
196static inline void let_usb_appear(void)
197{
198 if (!the_controller->mach->udc_command)
199 return;
200 the_controller->mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
201}
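/* Illustrative sketch, board-support code rather than part of this header:
 * a platform with a VBUS-sense GPIO and a D+ pullup GPIO would supply the
 * two hooks used above through platform_data.  The board_*() helpers are
 * hypothetical.
 */
#if 0
extern int board_read_vbus_gpio(void);		/* hypothetical */
extern void board_set_pullup_gpio(int on);	/* hypothetical */

static int example_udc_is_connected(void)
{
	return board_read_vbus_gpio();		/* nonzero while VBUS present */
}

static void example_udc_command(int cmd)
{
	switch (cmd) {
	case PXA2XX_UDC_CMD_CONNECT:
		board_set_pullup_gpio(1);	/* host now sees the device */
		break;
	case PXA2XX_UDC_CMD_DISCONNECT:
		board_set_pullup_gpio(0);	/* device "disappears" */
		break;
	}
}

static struct pxa2xx_udc_mach_info example_udc_info = {
	.udc_is_connected	= example_udc_is_connected,
	.udc_command		= example_udc_command,
};
#endif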
202
203/*-------------------------------------------------------------------------*/
204
205/*
206 * Debugging support vanishes in non-debug builds. DBG_NORMAL should be
207 * mostly silent during normal use/testing, with no timing side-effects.
208 */
209#define DBG_NORMAL 1 /* error paths, device state transitions */
210#define DBG_VERBOSE 2 /* add some success path trace info */
211#define DBG_NOISY 3 /* ... even more: request level */
212#define DBG_VERY_NOISY 4 /* ... even more: packet level */
213
214#ifdef DEBUG
215
216static const char *state_name[] = {
217 "EP0_IDLE",
218 "EP0_IN_DATA_PHASE", "EP0_OUT_DATA_PHASE",
219 "EP0_END_XFER", "EP0_STALL"
220};
221
222#define DMSG(stuff...) printk(KERN_DEBUG "udc: " stuff)
223
224#ifdef VERBOSE
225# define UDC_DEBUG DBG_VERBOSE
226#else
227# define UDC_DEBUG DBG_NORMAL
228#endif
229
230static void __attribute__ ((__unused__))
231dump_udccr(const char *label)
232{
233 u32 udccr = UDCCR;
234 DMSG("%s %02X =%s%s%s%s%s%s%s%s\n",
235 label, udccr,
236 (udccr & UDCCR_REM) ? " rem" : "",
237 (udccr & UDCCR_RSTIR) ? " rstir" : "",
238 (udccr & UDCCR_SRM) ? " srm" : "",
239 (udccr & UDCCR_SUSIR) ? " susir" : "",
240 (udccr & UDCCR_RESIR) ? " resir" : "",
241 (udccr & UDCCR_RSM) ? " rsm" : "",
242 (udccr & UDCCR_UDA) ? " uda" : "",
243 (udccr & UDCCR_UDE) ? " ude" : "");
244}
245
246static void __attribute__ ((__unused__))
247dump_udccs0(const char *label)
248{
249 u32 udccs0 = UDCCS0;
250
251 DMSG("%s %s %02X =%s%s%s%s%s%s%s%s\n",
252 label, state_name[the_controller->ep0state], udccs0,
253 (udccs0 & UDCCS0_SA) ? " sa" : "",
254 (udccs0 & UDCCS0_RNE) ? " rne" : "",
255 (udccs0 & UDCCS0_FST) ? " fst" : "",
256 (udccs0 & UDCCS0_SST) ? " sst" : "",
 257 		(udccs0 & UDCCS0_DRWF) ? " drwf" : "",
258 (udccs0 & UDCCS0_FTF) ? " ftf" : "",
259 (udccs0 & UDCCS0_IPR) ? " ipr" : "",
260 (udccs0 & UDCCS0_OPR) ? " opr" : "");
261}
262
263static void __attribute__ ((__unused__))
264dump_state(struct pxa2xx_udc *dev)
265{
266 u32 tmp;
267 unsigned i;
268
269 DMSG("%s %s, uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
270 is_usb_connected() ? "host " : "disconnected",
271 state_name[dev->ep0state],
272 UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
273 dump_udccr("udccr");
274 if (dev->has_cfr) {
275 tmp = UDCCFR;
276 DMSG("udccfr %02X =%s%s\n", tmp,
277 (tmp & UDCCFR_AREN) ? " aren" : "",
278 (tmp & UDCCFR_ACM) ? " acm" : "");
279 }
280
281 if (!dev->driver) {
282 DMSG("no gadget driver bound\n");
283 return;
284 } else
285 DMSG("ep0 driver '%s'\n", dev->driver->driver.name);
286
287 if (!is_usb_connected())
288 return;
289
290 dump_udccs0 ("udccs0");
291 DMSG("ep0 IN %lu/%lu, OUT %lu/%lu\n",
292 dev->stats.write.bytes, dev->stats.write.ops,
293 dev->stats.read.bytes, dev->stats.read.ops);
294
295 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) {
296 if (dev->ep [i].desc == 0)
297 continue;
 298 		DMSG ("udccs%d = %02x\n", i, *dev->ep[i].reg_udccs);
299 }
300}
301
302#else
303
304#define DMSG(stuff...) do{}while(0)
305
306#define dump_udccr(x) do{}while(0)
307#define dump_udccs0(x) do{}while(0)
308#define dump_state(x) do{}while(0)
309
310#define UDC_DEBUG ((unsigned)0)
311
312#endif
313
314#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
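/* e.g. DBG(DBG_NOISY, "queued req %p to %s\n", req, ep->ep.name);
 * prints only when UDC_DEBUG (chosen by DEBUG/VERBOSE above) is at
 * least DBG_NOISY, and costs nothing in non-debug builds.
 */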
315
316#define WARN(stuff...) printk(KERN_WARNING "udc: " stuff)
317#define INFO(stuff...) printk(KERN_INFO "udc: " stuff)
318
319
320#endif /* __LINUX_USB_GADGET_PXA2XX_H */
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
new file mode 100644
index 000000000000..6c5197850edc
--- /dev/null
+++ b/drivers/usb/gadget/rndis.c
@@ -0,0 +1,1428 @@
1/*
2 * RNDIS MSG parser
3 *
4 * Version: $Id: rndis.c,v 1.19 2004/03/25 21:33:46 robert Exp $
5 *
6 * Authors: Benedikt Spranger, Pengutronix
7 * Robert Schwebel, Pengutronix
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This software was originally developed in conformance with
14 * Microsoft's Remote NDIS Specification License Agreement.
15 *
16 * 03/12/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
17 * Fixed message length bug in init_response
18 *
19 * 03/25/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
20 * Fixed rndis_rm_hdr length bug.
21 *
22 * Copyright (C) 2004 by David Brownell
23 * updates to merge with Linux 2.6, better match RNDIS spec
24 */
25
26#include <linux/config.h>
27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/kernel.h>
30#include <linux/errno.h>
31#include <linux/version.h>
32#include <linux/init.h>
33#include <linux/list.h>
34#include <linux/proc_fs.h>
35#include <linux/netdevice.h>
36
37#include <asm/io.h>
38#include <asm/byteorder.h>
39#include <asm/system.h>
40
41
42#undef RNDIS_PM
43#undef VERBOSE
44
45#include "rndis.h"
46
47
48/* The driver for your USB chip needs to support ep0 OUT to work with
49 * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
50 *
51 * Windows hosts need an INF file like Documentation/usb/linux.inf
52 * and will be happier if you provide the host_addr module parameter.
53 */
54
55#if 0
56#define DEBUG(str,args...) do { \
57 if (rndis_debug) \
58 printk(KERN_DEBUG str , ## args ); \
59 } while (0)
60static int rndis_debug = 0;
61
62module_param (rndis_debug, bool, 0);
63MODULE_PARM_DESC (rndis_debug, "enable debugging");
64
65#else
66
67#define rndis_debug 0
68#define DEBUG(str,args...) do{}while(0)
69#endif
70
71#define RNDIS_MAX_CONFIGS 1
72
73
74static rndis_params rndis_per_dev_params [RNDIS_MAX_CONFIGS];
75
76/* Driver Version */
77static const __le32 rndis_driver_version = __constant_cpu_to_le32 (1);
78
79/* Function Prototypes */
80static int rndis_init_response (int configNr, rndis_init_msg_type *buf);
81static int rndis_query_response (int configNr, rndis_query_msg_type *buf);
82static int rndis_set_response (int configNr, rndis_set_msg_type *buf);
83static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf);
84static int rndis_keepalive_response (int configNr,
85 rndis_keepalive_msg_type *buf);
86
87static rndis_resp_t *rndis_add_response (int configNr, u32 length);
88
89
90/* NDIS Functions */
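/* Note on the offsets used below: a query completion starts with six
 * 32-bit LE header fields (MessageType, MessageLength, RequestID, Status,
 * and the InformationBuffer length/offset pair), i.e. a 24 byte header.
 * That is why answers are written at "(__le32 *) resp + 6" or
 * "(u8 *) resp + 24", and why InformationBufferOffset is 16: the host
 * interprets it relative to the RequestID field at byte 8, which lands
 * the data at byte 24.
 */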
91static int gen_ndis_query_resp (int configNr, u32 OID, rndis_resp_t *r)
92{
93 int retval = -ENOTSUPP;
94 u32 length = 0;
95 __le32 *tmp;
96 int i, count;
97 rndis_query_cmplt_type *resp;
98
99 if (!r) return -ENOMEM;
100 resp = (rndis_query_cmplt_type *) r->buf;
101
102 if (!resp) return -ENOMEM;
103
104 switch (OID) {
105
106 /* general oids (table 4-1) */
107
108 /* mandatory */
109 case OID_GEN_SUPPORTED_LIST:
110 DEBUG ("%s: OID_GEN_SUPPORTED_LIST\n", __FUNCTION__);
111 length = sizeof (oid_supported_list);
112 count = length / sizeof (u32);
113 tmp = (__le32 *) ((u8 *)resp + 24);
114 for (i = 0; i < count; i++)
115 tmp[i] = cpu_to_le32 (oid_supported_list[i]);
116 retval = 0;
117 break;
118
119 /* mandatory */
120 case OID_GEN_HARDWARE_STATUS:
121 DEBUG("%s: OID_GEN_HARDWARE_STATUS\n", __FUNCTION__);
122 length = 4;
123 /* Bogus question!
124 * Hardware must be ready to receive high level protocols.
125 * BTW:
 126 	 *	render therefore unto Caesar the things which are Caesar's,
 127 	 *	and unto God the things that are God's!
128 */
129 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
130 retval = 0;
131 break;
132
133 /* mandatory */
134 case OID_GEN_MEDIA_SUPPORTED:
135 DEBUG("%s: OID_GEN_MEDIA_SUPPORTED\n", __FUNCTION__);
136 length = 4;
137 *((__le32 *) resp + 6) = cpu_to_le32 (
138 rndis_per_dev_params [configNr].medium);
139 retval = 0;
140 break;
141
142 /* mandatory */
143 case OID_GEN_MEDIA_IN_USE:
144 DEBUG("%s: OID_GEN_MEDIA_IN_USE\n", __FUNCTION__);
145 length = 4;
 146 		/* one medium, one transport... (this could surely be done better) */
147 *((__le32 *) resp + 6) = cpu_to_le32 (
148 rndis_per_dev_params [configNr].medium);
149 retval = 0;
150 break;
151
152 /* mandatory */
153 case OID_GEN_MAXIMUM_FRAME_SIZE:
154 DEBUG("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __FUNCTION__);
155 if (rndis_per_dev_params [configNr].dev) {
156 length = 4;
157 *((__le32 *) resp + 6) = cpu_to_le32 (
158 rndis_per_dev_params [configNr].dev->mtu);
159 retval = 0;
160 } else {
161 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
162 retval = 0;
163 }
164 break;
165
166 /* mandatory */
167 case OID_GEN_LINK_SPEED:
168 DEBUG("%s: OID_GEN_LINK_SPEED\n", __FUNCTION__);
169 length = 4;
170 if (rndis_per_dev_params [configNr].media_state
171 == NDIS_MEDIA_STATE_DISCONNECTED)
172 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
173 else
174 *((__le32 *) resp + 6) = cpu_to_le32 (
175 rndis_per_dev_params [configNr].speed);
176 retval = 0;
177 break;
178
179 /* mandatory */
180 case OID_GEN_TRANSMIT_BLOCK_SIZE:
181 DEBUG("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __FUNCTION__);
182 if (rndis_per_dev_params [configNr].dev) {
183 length = 4;
184 *((__le32 *) resp + 6) = cpu_to_le32 (
185 rndis_per_dev_params [configNr].dev->mtu);
186 retval = 0;
187 }
188 break;
189
190 /* mandatory */
191 case OID_GEN_RECEIVE_BLOCK_SIZE:
192 DEBUG("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __FUNCTION__);
193 if (rndis_per_dev_params [configNr].dev) {
194 length = 4;
195 *((__le32 *) resp + 6) = cpu_to_le32 (
196 rndis_per_dev_params [configNr].dev->mtu);
197 retval = 0;
198 }
199 break;
200
201 /* mandatory */
202 case OID_GEN_VENDOR_ID:
203 DEBUG("%s: OID_GEN_VENDOR_ID\n", __FUNCTION__);
204 length = 4;
205 *((__le32 *) resp + 6) = cpu_to_le32 (
206 rndis_per_dev_params [configNr].vendorID);
207 retval = 0;
208 break;
209
210 /* mandatory */
211 case OID_GEN_VENDOR_DESCRIPTION:
212 DEBUG("%s: OID_GEN_VENDOR_DESCRIPTION\n", __FUNCTION__);
213 length = strlen (rndis_per_dev_params [configNr].vendorDescr);
214 memcpy ((u8 *) resp + 24,
215 rndis_per_dev_params [configNr].vendorDescr, length);
216 retval = 0;
217 break;
218
219 case OID_GEN_VENDOR_DRIVER_VERSION:
220 DEBUG("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __FUNCTION__);
221 length = 4;
222 /* Created as LE */
223 *((__le32 *) resp + 6) = rndis_driver_version;
224 retval = 0;
225 break;
226
227 /* mandatory */
228 case OID_GEN_CURRENT_PACKET_FILTER:
229 DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __FUNCTION__);
230 length = 4;
231 *((__le32 *) resp + 6) = cpu_to_le32 (
232 rndis_per_dev_params[configNr].filter);
233 retval = 0;
234 break;
235
236 /* mandatory */
237 case OID_GEN_MAXIMUM_TOTAL_SIZE:
238 DEBUG("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __FUNCTION__);
239 length = 4;
240 *((__le32 *) resp + 6) = __constant_cpu_to_le32(
241 RNDIS_MAX_TOTAL_SIZE);
242 retval = 0;
243 break;
244
245 /* mandatory */
246 case OID_GEN_MEDIA_CONNECT_STATUS:
247 DEBUG("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __FUNCTION__);
248 length = 4;
249 *((__le32 *) resp + 6) = cpu_to_le32 (
250 rndis_per_dev_params [configNr]
251 .media_state);
252 retval = 0;
253 break;
254
255 case OID_GEN_PHYSICAL_MEDIUM:
256 DEBUG("%s: OID_GEN_PHYSICAL_MEDIUM\n", __FUNCTION__);
257 length = 4;
258 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
259 retval = 0;
260 break;
261
262 /* The RNDIS specification is incomplete/wrong. Some versions
263 * of MS-Windows expect OIDs that aren't specified there. Other
264 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
265 */
266 case OID_GEN_MAC_OPTIONS: /* from WinME */
267 DEBUG("%s: OID_GEN_MAC_OPTIONS\n", __FUNCTION__);
268 length = 4;
269 *((__le32 *) resp + 6) = __constant_cpu_to_le32(
270 NDIS_MAC_OPTION_RECEIVE_SERIALIZED
271 | NDIS_MAC_OPTION_FULL_DUPLEX);
272 retval = 0;
273 break;
274
275 /* statistics OIDs (table 4-2) */
276
277 /* mandatory */
278 case OID_GEN_XMIT_OK:
279 DEBUG("%s: OID_GEN_XMIT_OK\n", __FUNCTION__);
280 if (rndis_per_dev_params [configNr].stats) {
281 length = 4;
282 *((__le32 *) resp + 6) = cpu_to_le32 (
283 rndis_per_dev_params [configNr].stats->tx_packets -
284 rndis_per_dev_params [configNr].stats->tx_errors -
285 rndis_per_dev_params [configNr].stats->tx_dropped);
286 retval = 0;
287 } else {
288 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
289 retval = 0;
290 }
291 break;
292
293 /* mandatory */
294 case OID_GEN_RCV_OK:
295 DEBUG("%s: OID_GEN_RCV_OK\n", __FUNCTION__);
296 if (rndis_per_dev_params [configNr].stats) {
297 length = 4;
298 *((__le32 *) resp + 6) = cpu_to_le32 (
299 rndis_per_dev_params [configNr].stats->rx_packets -
300 rndis_per_dev_params [configNr].stats->rx_errors -
301 rndis_per_dev_params [configNr].stats->rx_dropped);
302 retval = 0;
303 } else {
304 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
305 retval = 0;
306 }
307 break;
308
309 /* mandatory */
310 case OID_GEN_XMIT_ERROR:
311 DEBUG("%s: OID_GEN_XMIT_ERROR\n", __FUNCTION__);
312 if (rndis_per_dev_params [configNr].stats) {
313 length = 4;
314 *((__le32 *) resp + 6) = cpu_to_le32 (
315 rndis_per_dev_params [configNr]
316 .stats->tx_errors);
317 retval = 0;
318 } else {
319 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
320 retval = 0;
321 }
322 break;
323
324 /* mandatory */
325 case OID_GEN_RCV_ERROR:
326 DEBUG("%s: OID_GEN_RCV_ERROR\n", __FUNCTION__);
 327 		if (rndis_per_dev_params [configNr].stats) {
 			length = 4;
328 *((__le32 *) resp + 6) = cpu_to_le32 (
329 rndis_per_dev_params [configNr]
330 .stats->rx_errors);
331 retval = 0;
332 } else {
333 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
334 retval = 0;
335 }
336 break;
337
338 /* mandatory */
339 case OID_GEN_RCV_NO_BUFFER:
340 DEBUG("%s: OID_GEN_RCV_NO_BUFFER\n", __FUNCTION__);
 341 		if (rndis_per_dev_params [configNr].stats) {
 			length = 4;
342 *((__le32 *) resp + 6) = cpu_to_le32 (
343 rndis_per_dev_params [configNr]
344 .stats->rx_dropped);
345 retval = 0;
346 } else {
347 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
348 retval = 0;
349 }
350 break;
351
352#ifdef RNDIS_OPTIONAL_STATS
353 case OID_GEN_DIRECTED_BYTES_XMIT:
354 DEBUG("%s: OID_GEN_DIRECTED_BYTES_XMIT\n", __FUNCTION__);
355 /*
356 * Aunt Tilly's size of shoes
357 * minus antarctica count of penguins
358 * divided by weight of Alpha Centauri
359 */
360 if (rndis_per_dev_params [configNr].stats) {
361 length = 4;
362 *((__le32 *) resp + 6) = cpu_to_le32 (
363 (rndis_per_dev_params [configNr]
364 .stats->tx_packets -
365 rndis_per_dev_params [configNr]
366 .stats->tx_errors -
367 rndis_per_dev_params [configNr]
368 .stats->tx_dropped)
369 * 123);
370 retval = 0;
371 } else {
372 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
373 retval = 0;
374 }
375 break;
376
377 case OID_GEN_DIRECTED_FRAMES_XMIT:
378 DEBUG("%s: OID_GEN_DIRECTED_FRAMES_XMIT\n", __FUNCTION__);
 379 		/* ditto */
380 if (rndis_per_dev_params [configNr].stats) {
381 length = 4;
382 *((__le32 *) resp + 6) = cpu_to_le32 (
383 (rndis_per_dev_params [configNr]
384 .stats->tx_packets -
385 rndis_per_dev_params [configNr]
386 .stats->tx_errors -
387 rndis_per_dev_params [configNr]
388 .stats->tx_dropped)
389 / 123);
390 retval = 0;
391 } else {
392 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
393 retval = 0;
394 }
395 break;
396
397 case OID_GEN_MULTICAST_BYTES_XMIT:
398 DEBUG("%s: OID_GEN_MULTICAST_BYTES_XMIT\n", __FUNCTION__);
399 if (rndis_per_dev_params [configNr].stats) {
400 *((__le32 *) resp + 6) = cpu_to_le32 (
401 rndis_per_dev_params [configNr]
402 .stats->multicast*1234);
403 retval = 0;
404 } else {
405 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
406 retval = 0;
407 }
408 break;
409
410 case OID_GEN_MULTICAST_FRAMES_XMIT:
411 DEBUG("%s: OID_GEN_MULTICAST_FRAMES_XMIT\n", __FUNCTION__);
412 if (rndis_per_dev_params [configNr].stats) {
413 *((__le32 *) resp + 6) = cpu_to_le32 (
414 rndis_per_dev_params [configNr]
415 .stats->multicast);
416 retval = 0;
417 } else {
418 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
419 retval = 0;
420 }
421 break;
422
423 case OID_GEN_BROADCAST_BYTES_XMIT:
424 DEBUG("%s: OID_GEN_BROADCAST_BYTES_XMIT\n", __FUNCTION__);
425 if (rndis_per_dev_params [configNr].stats) {
426 *((__le32 *) resp + 6) = cpu_to_le32 (
427 rndis_per_dev_params [configNr]
428 .stats->tx_packets/42*255);
429 retval = 0;
430 } else {
431 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
432 retval = 0;
433 }
434 break;
435
436 case OID_GEN_BROADCAST_FRAMES_XMIT:
437 DEBUG("%s: OID_GEN_BROADCAST_FRAMES_XMIT\n", __FUNCTION__);
438 if (rndis_per_dev_params [configNr].stats) {
439 *((__le32 *) resp + 6) = cpu_to_le32 (
440 rndis_per_dev_params [configNr]
441 .stats->tx_packets/42);
442 retval = 0;
443 } else {
444 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
445 retval = 0;
446 }
447 break;
448
449 case OID_GEN_DIRECTED_BYTES_RCV:
450 DEBUG("%s: OID_GEN_DIRECTED_BYTES_RCV\n", __FUNCTION__);
451 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
452 retval = 0;
453 break;
454
455 case OID_GEN_DIRECTED_FRAMES_RCV:
456 DEBUG("%s: OID_GEN_DIRECTED_FRAMES_RCV\n", __FUNCTION__);
457 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
458 retval = 0;
459 break;
460
461 case OID_GEN_MULTICAST_BYTES_RCV:
462 DEBUG("%s: OID_GEN_MULTICAST_BYTES_RCV\n", __FUNCTION__);
463 if (rndis_per_dev_params [configNr].stats) {
464 *((__le32 *) resp + 6) = cpu_to_le32 (
465 rndis_per_dev_params [configNr]
466 .stats->multicast * 1111);
467 retval = 0;
468 } else {
469 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
470 retval = 0;
471 }
472 break;
473
474 case OID_GEN_MULTICAST_FRAMES_RCV:
475 DEBUG("%s: OID_GEN_MULTICAST_FRAMES_RCV\n", __FUNCTION__);
476 if (rndis_per_dev_params [configNr].stats) {
477 *((__le32 *) resp + 6) = cpu_to_le32 (
478 rndis_per_dev_params [configNr]
479 .stats->multicast);
480 retval = 0;
481 } else {
482 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
483 retval = 0;
484 }
485 break;
486
487 case OID_GEN_BROADCAST_BYTES_RCV:
488 DEBUG("%s: OID_GEN_BROADCAST_BYTES_RCV\n", __FUNCTION__);
489 if (rndis_per_dev_params [configNr].stats) {
490 *((__le32 *) resp + 6) = cpu_to_le32 (
491 rndis_per_dev_params [configNr]
492 .stats->rx_packets/42*255);
493 retval = 0;
494 } else {
495 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
496 retval = 0;
497 }
498 break;
499
500 case OID_GEN_BROADCAST_FRAMES_RCV:
501 DEBUG("%s: OID_GEN_BROADCAST_FRAMES_RCV\n", __FUNCTION__);
502 if (rndis_per_dev_params [configNr].stats) {
503 *((__le32 *) resp + 6) = cpu_to_le32 (
504 rndis_per_dev_params [configNr]
505 .stats->rx_packets/42);
506 retval = 0;
507 } else {
508 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
509 retval = 0;
510 }
511 break;
512
513 case OID_GEN_RCV_CRC_ERROR:
514 DEBUG("%s: OID_GEN_RCV_CRC_ERROR\n", __FUNCTION__);
515 if (rndis_per_dev_params [configNr].stats) {
516 *((__le32 *) resp + 6) = cpu_to_le32 (
517 rndis_per_dev_params [configNr]
518 .stats->rx_crc_errors);
519 retval = 0;
520 } else {
521 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
522 retval = 0;
523 }
524 break;
525
526 case OID_GEN_TRANSMIT_QUEUE_LENGTH:
527 DEBUG("%s: OID_GEN_TRANSMIT_QUEUE_LENGTH\n", __FUNCTION__);
528 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
529 retval = 0;
530 break;
531#endif /* RNDIS_OPTIONAL_STATS */
532
533 /* ieee802.3 OIDs (table 4-3) */
534
535 /* mandatory */
536 case OID_802_3_PERMANENT_ADDRESS:
537 DEBUG("%s: OID_802_3_PERMANENT_ADDRESS\n", __FUNCTION__);
538 if (rndis_per_dev_params [configNr].dev) {
539 length = ETH_ALEN;
540 memcpy ((u8 *) resp + 24,
541 rndis_per_dev_params [configNr].host_mac,
542 length);
543 retval = 0;
544 } else {
545 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
546 retval = 0;
547 }
548 break;
549
550 /* mandatory */
551 case OID_802_3_CURRENT_ADDRESS:
552 DEBUG("%s: OID_802_3_CURRENT_ADDRESS\n", __FUNCTION__);
553 if (rndis_per_dev_params [configNr].dev) {
554 length = ETH_ALEN;
555 memcpy ((u8 *) resp + 24,
556 rndis_per_dev_params [configNr].host_mac,
557 length);
558 retval = 0;
559 }
560 break;
561
562 /* mandatory */
563 case OID_802_3_MULTICAST_LIST:
564 DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
565 length = 4;
566 /* Multicast base address only */
567 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0xE0000000);
568 retval = 0;
569 break;
570
571 /* mandatory */
572 case OID_802_3_MAXIMUM_LIST_SIZE:
573 DEBUG("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __FUNCTION__);
574 length = 4;
575 /* Multicast base address only */
576 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (1);
577 retval = 0;
578 break;
579
580 case OID_802_3_MAC_OPTIONS:
581 DEBUG("%s: OID_802_3_MAC_OPTIONS\n", __FUNCTION__);
582 break;
583
584 /* ieee802.3 statistics OIDs (table 4-4) */
585
586 /* mandatory */
587 case OID_802_3_RCV_ERROR_ALIGNMENT:
588 DEBUG("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __FUNCTION__);
589 if (rndis_per_dev_params [configNr].stats)
590 {
591 length = 4;
592 *((__le32 *) resp + 6) = cpu_to_le32 (
593 rndis_per_dev_params [configNr]
594 .stats->rx_frame_errors);
595 retval = 0;
596 }
597 break;
598
599 /* mandatory */
600 case OID_802_3_XMIT_ONE_COLLISION:
601 DEBUG("%s: OID_802_3_XMIT_ONE_COLLISION\n", __FUNCTION__);
602 length = 4;
603 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
604 retval = 0;
605 break;
606
607 /* mandatory */
608 case OID_802_3_XMIT_MORE_COLLISIONS:
609 DEBUG("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __FUNCTION__);
610 length = 4;
611 *((__le32 *) resp + 6) = __constant_cpu_to_le32 (0);
612 retval = 0;
613 break;
614
615#ifdef RNDIS_OPTIONAL_STATS
616 case OID_802_3_XMIT_DEFERRED:
617 DEBUG("%s: OID_802_3_XMIT_DEFERRED\n", __FUNCTION__);
618 /* TODO */
619 break;
620
621 case OID_802_3_XMIT_MAX_COLLISIONS:
622 DEBUG("%s: OID_802_3_XMIT_MAX_COLLISIONS\n", __FUNCTION__);
623 /* TODO */
624 break;
625
626 case OID_802_3_RCV_OVERRUN:
627 DEBUG("%s: OID_802_3_RCV_OVERRUN\n", __FUNCTION__);
628 /* TODO */
629 break;
630
631 case OID_802_3_XMIT_UNDERRUN:
632 DEBUG("%s: OID_802_3_XMIT_UNDERRUN\n", __FUNCTION__);
633 /* TODO */
634 break;
635
636 case OID_802_3_XMIT_HEARTBEAT_FAILURE:
637 DEBUG("%s: OID_802_3_XMIT_HEARTBEAT_FAILURE\n", __FUNCTION__);
638 /* TODO */
639 break;
640
641 case OID_802_3_XMIT_TIMES_CRS_LOST:
642 DEBUG("%s: OID_802_3_XMIT_TIMES_CRS_LOST\n", __FUNCTION__);
643 /* TODO */
644 break;
645
646 case OID_802_3_XMIT_LATE_COLLISIONS:
647 DEBUG("%s: OID_802_3_XMIT_LATE_COLLISIONS\n", __FUNCTION__);
648 /* TODO */
649 break;
650#endif /* RNDIS_OPTIONAL_STATS */
651
652#ifdef RNDIS_PM
653 /* power management OIDs (table 4-5) */
654 case OID_PNP_CAPABILITIES:
655 DEBUG("%s: OID_PNP_CAPABILITIES\n", __FUNCTION__);
656
657 /* just PM, and remote wakeup on link status change
658 * (not magic packet or pattern match)
659 */
660 length = sizeof (struct NDIS_PNP_CAPABILITIES);
661 memset (resp, 0, length);
662 {
663 struct NDIS_PNP_CAPABILITIES *caps = (void *) resp;
664
665 caps->Flags = NDIS_DEVICE_WAKE_UP_ENABLE;
666 caps->WakeUpCapabilities.MinLinkChangeWakeUp
667 = NdisDeviceStateD3;
668
669 /* FIXME then use usb_gadget_wakeup(), and
670 * set USB_CONFIG_ATT_WAKEUP in config desc
671 */
672 }
673 retval = 0;
674 break;
675 case OID_PNP_QUERY_POWER:
676 DEBUG("%s: OID_PNP_QUERY_POWER\n", __FUNCTION__);
677 /* sure, handle any power state that maps to USB suspend */
678 retval = 0;
679 break;
680#endif
681
682 default:
683 printk (KERN_WARNING "%s: query unknown OID 0x%08X\n",
684 __FUNCTION__, OID);
685 }
686
687 resp->InformationBufferOffset = __constant_cpu_to_le32 (16);
688 resp->InformationBufferLength = cpu_to_le32 (length);
689 resp->MessageLength = cpu_to_le32 (24 + length);
690 r->length = 24 + length;
691 return retval;
692}
693
694static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
695 rndis_resp_t *r)
696{
697 rndis_set_cmplt_type *resp;
698 int i, retval = -ENOTSUPP;
699 struct rndis_params *params;
700
701 if (!r)
702 return -ENOMEM;
703 resp = (rndis_set_cmplt_type *) r->buf;
704 if (!resp)
705 return -ENOMEM;
706
707 DEBUG("set OID %08x value, len %d:\n", OID, buf_len);
708 for (i = 0; i < buf_len; i += 16) {
709 DEBUG ("%03d: "
710 " %02x %02x %02x %02x"
711 " %02x %02x %02x %02x"
712 " %02x %02x %02x %02x"
713 " %02x %02x %02x %02x"
714 "\n",
715 i,
716 buf[i], buf [i+1],
717 buf[i+2], buf[i+3],
718 buf[i+4], buf [i+5],
719 buf[i+6], buf[i+7],
720 buf[i+8], buf [i+9],
721 buf[i+10], buf[i+11],
722 buf[i+12], buf [i+13],
723 buf[i+14], buf[i+15]);
724 }
725
726 switch (OID) {
727 case OID_GEN_CURRENT_PACKET_FILTER:
728 params = &rndis_per_dev_params [configNr];
729 retval = 0;
730
731 /* FIXME use these NDIS_PACKET_TYPE_* bitflags to
732 * filter packets in hard_start_xmit()
733 * NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in:
734 * PROMISCUOUS, DIRECTED,
735 * MULTICAST, ALL_MULTICAST, BROADCAST
736 */
737 params->filter = le32_to_cpup((__le32 *)buf);
738 DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
739 __FUNCTION__, params->filter);
740
741 /* this call has a significant side effect: it's
742 * what makes the packet flow start and stop, like
743 * activating the CDC Ethernet altsetting.
744 */
745 if (params->filter) {
746 params->state = RNDIS_DATA_INITIALIZED;
747 netif_carrier_on(params->dev);
748 if (netif_running(params->dev))
749 netif_wake_queue (params->dev);
750 } else {
751 params->state = RNDIS_INITIALIZED;
752 netif_carrier_off (params->dev);
753 netif_stop_queue (params->dev);
754 }
755 break;
756
757 case OID_802_3_MULTICAST_LIST:
758 /* I think we can ignore this */
759 DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
760 retval = 0;
761 break;
762#if 0
763 case OID_GEN_RNDIS_CONFIG_PARAMETER:
764 {
765 struct rndis_config_parameter *param;
766 param = (struct rndis_config_parameter *) buf;
767 DEBUG("%s: OID_GEN_RNDIS_CONFIG_PARAMETER '%*s'\n",
768 __FUNCTION__,
769 min(cpu_to_le32(param->ParameterNameLength),80),
770 buf + param->ParameterNameOffset);
771 retval = 0;
772 }
773 break;
774#endif
775
776#ifdef RNDIS_PM
777 case OID_PNP_SET_POWER:
778 DEBUG ("OID_PNP_SET_POWER\n");
779 /* sure, handle any power state that maps to USB suspend */
780 retval = 0;
781 break;
782
783 case OID_PNP_ENABLE_WAKE_UP:
784 /* always-connected ... */
785 DEBUG ("OID_PNP_ENABLE_WAKE_UP\n");
786 retval = 0;
787 break;
788
789 // no PM resume patterns supported (specified where?)
790 // so OID_PNP_{ADD,REMOVE}_WAKE_UP_PATTERN always fails
791#endif
792
793 default:
794 printk (KERN_WARNING "%s: set unknown OID 0x%08X, size %d\n",
795 __FUNCTION__, OID, buf_len);
796 }
797
798 return retval;
799}
800
801/*
802 * Response Functions
803 */
804
805static int rndis_init_response (int configNr, rndis_init_msg_type *buf)
806{
807 rndis_init_cmplt_type *resp;
808 rndis_resp_t *r;
809
810 if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
811
812 r = rndis_add_response (configNr, sizeof (rndis_init_cmplt_type));
813
814 if (!r) return -ENOMEM;
815
816 resp = (rndis_init_cmplt_type *) r->buf;
817
818 if (!resp) return -ENOMEM;
819
820 resp->MessageType = __constant_cpu_to_le32 (
821 REMOTE_NDIS_INITIALIZE_CMPLT);
822 resp->MessageLength = __constant_cpu_to_le32 (52);
823 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
824 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
825 resp->MajorVersion = __constant_cpu_to_le32 (RNDIS_MAJOR_VERSION);
826 resp->MinorVersion = __constant_cpu_to_le32 (RNDIS_MINOR_VERSION);
827 resp->DeviceFlags = __constant_cpu_to_le32 (RNDIS_DF_CONNECTIONLESS);
828 resp->Medium = __constant_cpu_to_le32 (RNDIS_MEDIUM_802_3);
829 resp->MaxPacketsPerTransfer = __constant_cpu_to_le32 (1);
830 resp->MaxTransferSize = cpu_to_le32 (
831 rndis_per_dev_params [configNr].dev->mtu
832 + sizeof (struct ethhdr)
833 + sizeof (struct rndis_packet_msg_type)
834 + 22);
835 resp->PacketAlignmentFactor = __constant_cpu_to_le32 (0);
836 resp->AFListOffset = __constant_cpu_to_le32 (0);
837 resp->AFListSize = __constant_cpu_to_le32 (0);
838
839 if (rndis_per_dev_params [configNr].ack)
840 rndis_per_dev_params [configNr].ack (
841 rndis_per_dev_params [configNr].dev);
842
843 return 0;
844}
845
846static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
847{
848 rndis_query_cmplt_type *resp;
849 rndis_resp_t *r;
850
851 // DEBUG("%s: OID = %08X\n", __FUNCTION__, cpu_to_le32(buf->OID));
852 if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
853
854 /*
855 * we need more memory:
856 * oid_supported_list is the largest answer
857 */
858 r = rndis_add_response (configNr, sizeof (oid_supported_list));
859
860 if (!r) return -ENOMEM;
861 resp = (rndis_query_cmplt_type *) r->buf;
862
863 if (!resp) return -ENOMEM;
864
865 resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_QUERY_CMPLT);
866 resp->MessageLength = __constant_cpu_to_le32 (24);
867 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
868
869 if (gen_ndis_query_resp (configNr, le32_to_cpu (buf->OID), r)) {
870 /* OID not supported */
871 resp->Status = __constant_cpu_to_le32 (
872 RNDIS_STATUS_NOT_SUPPORTED);
873 resp->InformationBufferLength = __constant_cpu_to_le32 (0);
874 resp->InformationBufferOffset = __constant_cpu_to_le32 (0);
875 } else
876 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
877
878 if (rndis_per_dev_params [configNr].ack)
879 rndis_per_dev_params [configNr].ack (
880 rndis_per_dev_params [configNr].dev);
881 return 0;
882}
883
884static int rndis_set_response (int configNr, rndis_set_msg_type *buf)
885{
 886	u32 BufLength, BufOffset;
#ifdef VERBOSE
	u32 i;			/* only used by the hexdump under VERBOSE */
#endif
887 rndis_set_cmplt_type *resp;
888 rndis_resp_t *r;
889
890 r = rndis_add_response (configNr, sizeof (rndis_set_cmplt_type));
891
892 if (!r) return -ENOMEM;
893 resp = (rndis_set_cmplt_type *) r->buf;
894 if (!resp) return -ENOMEM;
895
896 BufLength = le32_to_cpu (buf->InformationBufferLength);
897 BufOffset = le32_to_cpu (buf->InformationBufferOffset);
898
899#ifdef VERBOSE
900 DEBUG("%s: Length: %d\n", __FUNCTION__, BufLength);
901 DEBUG("%s: Offset: %d\n", __FUNCTION__, BufOffset);
902 DEBUG("%s: InfoBuffer: ", __FUNCTION__);
903
904 for (i = 0; i < BufLength; i++) {
905 DEBUG ("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
906 }
907
908 DEBUG ("\n");
909#endif
910
911 resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_SET_CMPLT);
912 resp->MessageLength = __constant_cpu_to_le32 (16);
913 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
914 if (gen_ndis_set_resp (configNr, le32_to_cpu (buf->OID),
915 ((u8 *) buf) + 8 + BufOffset, BufLength, r))
916 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_NOT_SUPPORTED);
917 else resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
918
919 if (rndis_per_dev_params [configNr].ack)
920 rndis_per_dev_params [configNr].ack (
921 rndis_per_dev_params [configNr].dev);
922
923 return 0;
924}
925
926static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf)
927{
928 rndis_reset_cmplt_type *resp;
929 rndis_resp_t *r;
930
931 r = rndis_add_response (configNr, sizeof (rndis_reset_cmplt_type));
932
933 if (!r) return -ENOMEM;
934 resp = (rndis_reset_cmplt_type *) r->buf;
935 if (!resp) return -ENOMEM;
936
937 resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_RESET_CMPLT);
938 resp->MessageLength = __constant_cpu_to_le32 (16);
939 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
 940	/* request the host to resend addressing (filter/multicast) info */
941 resp->AddressingReset = __constant_cpu_to_le32 (1);
942
943 if (rndis_per_dev_params [configNr].ack)
944 rndis_per_dev_params [configNr].ack (
945 rndis_per_dev_params [configNr].dev);
946
947 return 0;
948}
949
950static int rndis_keepalive_response (int configNr,
951 rndis_keepalive_msg_type *buf)
952{
953 rndis_keepalive_cmplt_type *resp;
954 rndis_resp_t *r;
955
956 /* host "should" check only in RNDIS_DATA_INITIALIZED state */
957
 958	r = rndis_add_response (configNr, sizeof (rndis_keepalive_cmplt_type));
	if (!r) return -ENOMEM;
 959	resp = (rndis_keepalive_cmplt_type *) r->buf;
960 if (!resp) return -ENOMEM;
961
962 resp->MessageType = __constant_cpu_to_le32 (
963 REMOTE_NDIS_KEEPALIVE_CMPLT);
964 resp->MessageLength = __constant_cpu_to_le32 (16);
965 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
966 resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
967
968 if (rndis_per_dev_params [configNr].ack)
969 rndis_per_dev_params [configNr].ack (
970 rndis_per_dev_params [configNr].dev);
971
972 return 0;
973}
974
975
976/*
 977 * Device to Host Communication
978 */
979static int rndis_indicate_status_msg (int configNr, u32 status)
980{
981 rndis_indicate_status_msg_type *resp;
982 rndis_resp_t *r;
983
984 if (rndis_per_dev_params [configNr].state == RNDIS_UNINITIALIZED)
985 return -ENOTSUPP;
986
987 r = rndis_add_response (configNr,
988 sizeof (rndis_indicate_status_msg_type));
989 if (!r) return -ENOMEM;
990
991 resp = (rndis_indicate_status_msg_type *) r->buf;
992 if (!resp) return -ENOMEM;
993
994 resp->MessageType = __constant_cpu_to_le32 (
995 REMOTE_NDIS_INDICATE_STATUS_MSG);
996 resp->MessageLength = __constant_cpu_to_le32 (20);
997 resp->Status = cpu_to_le32 (status);
998 resp->StatusBufferLength = __constant_cpu_to_le32 (0);
999 resp->StatusBufferOffset = __constant_cpu_to_le32 (0);
1000
1001 if (rndis_per_dev_params [configNr].ack)
1002 rndis_per_dev_params [configNr].ack (
1003 rndis_per_dev_params [configNr].dev);
1004 return 0;
1005}
1006
1007int rndis_signal_connect (int configNr)
1008{
1009 rndis_per_dev_params [configNr].media_state
1010 = NDIS_MEDIA_STATE_CONNECTED;
1011 return rndis_indicate_status_msg (configNr,
1012 RNDIS_STATUS_MEDIA_CONNECT);
1013}
1014
1015int rndis_signal_disconnect (int configNr)
1016{
1017 rndis_per_dev_params [configNr].media_state
1018 = NDIS_MEDIA_STATE_DISCONNECTED;
1019 return rndis_indicate_status_msg (configNr,
1020 RNDIS_STATUS_MEDIA_DISCONNECT);
1021}
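
rndis_signal_connect() and rndis_signal_disconnect() just flip media_state and queue a REMOTE_NDIS_INDICATE_STATUS_MSG; a gadget driver is expected to call them when its link comes and goes. A minimal sketch of that glue (my_config_nr, my_link_up and my_link_down are hypothetical names; my_config_nr is assumed to hold the value returned by rndis_register()):

	/* Hypothetical gadget-driver glue: report link changes to the
	 * host by queueing RNDIS status indications.
	 */
	static int my_config_nr;		/* from rndis_register() */

	static void my_link_up(void)
	{
		/* host sees RNDIS_STATUS_MEDIA_CONNECT */
		rndis_signal_connect(my_config_nr);
	}

	static void my_link_down(void)
	{
		/* host sees RNDIS_STATUS_MEDIA_DISCONNECT */
		rndis_signal_disconnect(my_config_nr);
	}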
1022
1023void rndis_set_host_mac (int configNr, const u8 *addr)
1024{
1025 rndis_per_dev_params [configNr].host_mac = addr;
1026}
1027
1028/*
1029 * Message Parser
1030 */
1031int rndis_msg_parser (u8 configNr, u8 *buf)
1032{
1033 u32 MsgType, MsgLength;
1034 __le32 *tmp;
1035 struct rndis_params *params;
1036
1037 if (!buf)
1038 return -ENOMEM;
1039
1040 tmp = (__le32 *) buf;
1041 MsgType = le32_to_cpup(tmp++);
1042 MsgLength = le32_to_cpup(tmp++);
1043
1044 if (configNr >= RNDIS_MAX_CONFIGS)
1045 return -ENOTSUPP;
1046 params = &rndis_per_dev_params [configNr];
1047
1048 /* For USB: responses may take up to 10 seconds */
1049 switch (MsgType)
1050 {
1051 case REMOTE_NDIS_INITIALIZE_MSG:
1052 DEBUG("%s: REMOTE_NDIS_INITIALIZE_MSG\n",
1053 __FUNCTION__ );
1054 params->state = RNDIS_INITIALIZED;
1055 return rndis_init_response (configNr,
1056 (rndis_init_msg_type *) buf);
1057
1058 case REMOTE_NDIS_HALT_MSG:
1059 DEBUG("%s: REMOTE_NDIS_HALT_MSG\n",
1060 __FUNCTION__ );
1061 params->state = RNDIS_UNINITIALIZED;
1062 if (params->dev) {
1063 netif_carrier_off (params->dev);
1064 netif_stop_queue (params->dev);
1065 }
1066 return 0;
1067
1068 case REMOTE_NDIS_QUERY_MSG:
1069 return rndis_query_response (configNr,
1070 (rndis_query_msg_type *) buf);
1071
1072 case REMOTE_NDIS_SET_MSG:
1073 return rndis_set_response (configNr,
1074 (rndis_set_msg_type *) buf);
1075
1076 case REMOTE_NDIS_RESET_MSG:
1077 DEBUG("%s: REMOTE_NDIS_RESET_MSG\n",
1078 __FUNCTION__ );
1079 return rndis_reset_response (configNr,
1080 (rndis_reset_msg_type *) buf);
1081
1082 case REMOTE_NDIS_KEEPALIVE_MSG:
1083 /* For USB: host does this every 5 seconds */
1084#ifdef VERBOSE
1085 DEBUG("%s: REMOTE_NDIS_KEEPALIVE_MSG\n",
1086 __FUNCTION__ );
1087#endif
1088 return rndis_keepalive_response (configNr,
1089 (rndis_keepalive_msg_type *)
1090 buf);
1091
1092 default:
1093 /* At least Windows XP emits some undefined RNDIS messages.
1094 * In one case those messages seemed to relate to the host
1095 * suspending itself.
1096 */
1097 printk (KERN_WARNING
1098 "%s: unknown RNDIS message 0x%08X len %d\n",
1099 __FUNCTION__ , MsgType, MsgLength);
1100 {
1101 unsigned i;
1102 for (i = 0; i < MsgLength; i += 16) {
1103 DEBUG ("%03d: "
1104 " %02x %02x %02x %02x"
1105 " %02x %02x %02x %02x"
1106 " %02x %02x %02x %02x"
1107 " %02x %02x %02x %02x"
1108 "\n",
1109 i,
1110 buf[i], buf [i+1],
1111 buf[i+2], buf[i+3],
1112 buf[i+4], buf [i+5],
1113 buf[i+6], buf[i+7],
1114 buf[i+8], buf [i+9],
1115 buf[i+10], buf[i+11],
1116 buf[i+12], buf [i+13],
1117 buf[i+14], buf[i+15]);
1118 }
1119 }
1120 break;
1121 }
1122
1123 return -ENOTSUPP;
1124}
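
In practice the parser is driven from the gadget's ep0 handling: the host delivers an RNDIS control message in a SEND_ENCAPSULATED_COMMAND transfer, the driver hands the buffer to rndis_msg_parser(), and the queued reply is later fetched with rndis_get_next_response() and released with rndis_free_response() once it has gone back in a GET_ENCAPSULATED_RESPONSE transfer. A compressed sketch of that round trip (error handling and the actual USB plumbing omitted; everything except the rndis_* calls is a hypothetical name):

	/* hypothetical transport hook provided by the gadget driver */
	static void my_send_encapsulated_response(u8 *buf, u32 len);

	/* Compressed control-path sketch: feed a received RNDIS command
	 * to the parser, then hand the queued completion to the host.
	 * In a real driver the free happens after the response transfer
	 * actually completes.
	 */
	static void my_handle_rndis_command(u8 config_nr, u8 *cmd_buf)
	{
		u8 *resp;
		u32 resp_len;

		if (rndis_msg_parser(config_nr, cmd_buf) < 0)
			return;		/* unknown or malformed message */

		resp = rndis_get_next_response(config_nr, &resp_len);
		if (resp) {
			my_send_encapsulated_response(resp, resp_len);
			rndis_free_response(config_nr, resp);
		}
	}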
1125
1126int rndis_register (int (* rndis_control_ack) (struct net_device *))
1127{
1128 u8 i;
1129
1130 for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
1131 if (!rndis_per_dev_params [i].used) {
1132 rndis_per_dev_params [i].used = 1;
1133 rndis_per_dev_params [i].ack = rndis_control_ack;
1134 DEBUG("%s: configNr = %d\n", __FUNCTION__, i);
1135 return i;
1136 }
1137 }
1138 DEBUG("failed\n");
1139
1140 return -1;
1141}
1142
1143void rndis_deregister (int configNr)
1144{
1145 DEBUG("%s: \n", __FUNCTION__ );
1146
1147 if (configNr >= RNDIS_MAX_CONFIGS) return;
1148 rndis_per_dev_params [configNr].used = 0;
1149
1150 return;
1151}
1152
1153int rndis_set_param_dev (u8 configNr, struct net_device *dev,
1154 struct net_device_stats *stats)
1155{
1156 DEBUG("%s:\n", __FUNCTION__ );
1157 if (!dev || !stats) return -1;
1158 if (configNr >= RNDIS_MAX_CONFIGS) return -1;
1159
1160 rndis_per_dev_params [configNr].dev = dev;
1161 rndis_per_dev_params [configNr].stats = stats;
1162
1163 return 0;
1164}
1165
1166int rndis_set_param_vendor (u8 configNr, u32 vendorID, const char *vendorDescr)
1167{
1168 DEBUG("%s:\n", __FUNCTION__ );
1169 if (!vendorDescr) return -1;
1170 if (configNr >= RNDIS_MAX_CONFIGS) return -1;
1171
1172 rndis_per_dev_params [configNr].vendorID = vendorID;
1173 rndis_per_dev_params [configNr].vendorDescr = vendorDescr;
1174
1175 return 0;
1176}
1177
1178int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed)
1179{
1180 DEBUG("%s:\n", __FUNCTION__ );
1181 if (configNr >= RNDIS_MAX_CONFIGS) return -1;
1182
1183 rndis_per_dev_params [configNr].medium = medium;
1184 rndis_per_dev_params [configNr].speed = speed;
1185
1186 return 0;
1187}
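
Taken together, the calls above form the setup sequence a gadget driver runs at bind time: claim a config slot with rndis_register(), then attach the net_device, vendor identity and link parameters to it. A minimal sketch under those assumptions (my_rndis_bind and my_rndis_control_ack are hypothetical; the net_device and its stats are assumed to exist already, and the vendor OUI is a made-up example value):

	static int my_rndis_control_ack(struct net_device *dev);  /* hypothetical */

	/* Hypothetical bind-time sketch: allocate an RNDIS config slot
	 * and describe the Ethernet function behind it.
	 */
	static int my_rndis_bind(struct net_device *net,
				 struct net_device_stats *stats)
	{
		int nr;

		nr = rndis_register(my_rndis_control_ack);
		if (nr < 0)
			return -ENODEV;

		rndis_set_param_dev((u8) nr, net, stats);
		rndis_set_param_vendor((u8) nr, 0x123456, "sample vendor");
		/* speed not known yet; can be set again once the link is up */
		rndis_set_param_medium((u8) nr, RNDIS_MEDIUM_802_3, 0);

		return nr;	/* keep this for the other rndis_* calls */
	}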
1188
1189void rndis_add_hdr (struct sk_buff *skb)
1190{
1191 struct rndis_packet_msg_type *header;
1192
1193 if (!skb)
1194 return;
1195 header = (void *) skb_push (skb, sizeof *header);
1196 memset (header, 0, sizeof *header);
1197 header->MessageType = __constant_cpu_to_le32 (1);
1198 header->MessageLength = cpu_to_le32(skb->len);
1199 header->DataOffset = __constant_cpu_to_le32 (36);
1200	header->DataLength = cpu_to_le32(skb->len - sizeof *header);
1201}
1202
1203void rndis_free_response (int configNr, u8 *buf)
1204{
1205 rndis_resp_t *r;
1206 struct list_head *act, *tmp;
1207
1208 list_for_each_safe (act, tmp,
1209 &(rndis_per_dev_params [configNr].resp_queue))
1210 {
1211 r = list_entry (act, rndis_resp_t, list);
1212 if (r && r->buf == buf) {
1213 list_del (&r->list);
1214 kfree (r);
1215 }
1216 }
1217}
1218
1219u8 *rndis_get_next_response (int configNr, u32 *length)
1220{
1221 rndis_resp_t *r;
1222 struct list_head *act, *tmp;
1223
1224 if (!length) return NULL;
1225
1226 list_for_each_safe (act, tmp,
1227 &(rndis_per_dev_params [configNr].resp_queue))
1228 {
1229 r = list_entry (act, rndis_resp_t, list);
1230 if (!r->send) {
1231 r->send = 1;
1232 *length = r->length;
1233 return r->buf;
1234 }
1235 }
1236
1237 return NULL;
1238}
1239
1240static rndis_resp_t *rndis_add_response (int configNr, u32 length)
1241{
1242 rndis_resp_t *r;
1243
1244 r = kmalloc (sizeof (rndis_resp_t) + length, GFP_ATOMIC);
1245 if (!r) return NULL;
1246
1247 r->buf = (u8 *) (r + 1);
1248 r->length = length;
1249 r->send = 0;
1250
1251 list_add_tail (&r->list,
1252 &(rndis_per_dev_params [configNr].resp_queue));
1253 return r;
1254}
1255
1256int rndis_rm_hdr (u8 *buf, u32 *length)
1257{
1258 u32 i, messageLen, dataOffset;
1259 __le32 *tmp;
1260
1261 tmp = (__le32 *) buf;
1262
1263 if (!buf || !length) return -1;
1264 if (le32_to_cpup(tmp++) != 1) return -1;
1265
1266 messageLen = le32_to_cpup(tmp++);
1267 dataOffset = le32_to_cpup(tmp++) + 8;
1268
1269 if (messageLen < dataOffset || messageLen > *length) return -1;
1270
1271 for (i = dataOffset; i < messageLen; i++)
1272 buf [i - dataOffset] = buf [i];
1273
1274 *length = messageLen - dataOffset;
1275
1276 return 0;
1277}
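
rndis_add_hdr() and rndis_rm_hdr() are mirror operations around the 44-byte RNDIS data header: one is pushed in front of each outgoing Ethernet frame, the other strips it (and anything before DataOffset) from an incoming transfer in place. An illustrative sketch of the layout rndis_rm_hdr() expects, with a hand-built message (my_rm_hdr_demo is a hypothetical name; no USB involved):

	/* Build a minimal RNDIS packet message around a 3-byte payload,
	 * then strip the 44-byte header in place.
	 */
	static void my_rm_hdr_demo(void)
	{
		__le32 raw[12];			/* 44-byte header + payload */
		u8 *msg = (u8 *) raw;
		u32 len = 44 + 3;		/* header plus 3 payload bytes */

		memset(raw, 0, sizeof raw);
		raw[0] = cpu_to_le32(1);	/* MessageType: data packet */
		raw[1] = cpu_to_le32(len);	/* MessageLength */
		raw[2] = cpu_to_le32(36);	/* DataOffset: payload at 8 + 36 */
		raw[3] = cpu_to_le32(3);	/* DataLength */
		msg[44] = 'a';
		msg[45] = 'b';
		msg[46] = 'c';

		if (rndis_rm_hdr(msg, &len) == 0) {
			/* msg[0..2] is now "abc" and len == 3 */
		}
	}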
1278
1279#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1280
1281static int rndis_proc_read (char *page, char **start, off_t off, int count, int *eof,
1282 void *data)
1283{
1284 char *out = page;
1285 int len;
1286 rndis_params *param = (rndis_params *) data;
1287
1288 out += snprintf (out, count,
1289 "Config Nr. %d\n"
1290 "used : %s\n"
1291 "state : %s\n"
1292 "medium : 0x%08X\n"
1293 "speed : %d\n"
1294 "cable : %s\n"
1295 "vendor ID : 0x%08X\n"
1296 "vendor : %s\n",
1297 param->confignr, (param->used) ? "y" : "n",
1298 ({ char *s = "?";
1299 switch (param->state) {
1300 case RNDIS_UNINITIALIZED:
1301 s = "RNDIS_UNINITIALIZED"; break;
1302 case RNDIS_INITIALIZED:
1303 s = "RNDIS_INITIALIZED"; break;
1304 case RNDIS_DATA_INITIALIZED:
1305 s = "RNDIS_DATA_INITIALIZED"; break;
1306 }; s; }),
1307 param->medium,
1308 (param->media_state) ? 0 : param->speed*100,
1309 (param->media_state) ? "disconnected" : "connected",
1310 param->vendorID, param->vendorDescr);
1311
1312 len = out - page;
1313 len -= off;
1314
1315 if (len < count) {
1316 *eof = 1;
1317 if (len <= 0)
1318 return 0;
1319 } else
1320 len = count;
1321
1322 *start = page + off;
1323 return len;
1324}
1325
1326static int rndis_proc_write (struct file *file, const char __user *buffer,
1327 unsigned long count, void *data)
1328{
1329 rndis_params *p = data;
1330 u32 speed = 0;
1331 int i, fl_speed = 0;
1332
1333 for (i = 0; i < count; i++) {
1334 char c;
1335 if (get_user(c, buffer))
1336 return -EFAULT;
1337 switch (c) {
1338 case '0':
1339 case '1':
1340 case '2':
1341 case '3':
1342 case '4':
1343 case '5':
1344 case '6':
1345 case '7':
1346 case '8':
1347 case '9':
1348 fl_speed = 1;
1349 speed = speed*10 + c - '0';
1350 break;
1351 case 'C':
1352 case 'c':
1353 rndis_signal_connect (p->confignr);
1354 break;
1355 case 'D':
1356 case 'd':
1357 rndis_signal_disconnect(p->confignr);
1358 break;
1359 default:
1360 if (fl_speed) p->speed = speed;
1361 else DEBUG ("%c is not valid\n", c);
1362 break;
1363 }
1364
1365 buffer++;
1366 }
1367
1368 return count;
1369}
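
With CONFIG_USB_GADGET_DEBUG_FILES enabled, each configuration gets a proc file (see NAME_TEMPLATE just below): reading it dumps the state printed by rndis_proc_read(), writing digits sets the advertised speed, and 'C'/'D' force a connect or disconnect indication. A tiny user-space sketch of poking that interface (the /proc/driver/rndis-000 path is inferred from NAME_TEMPLATE and is an assumption about config 0):

	/* User-space sketch: toggle the simulated cable state of RNDIS
	 * config 0 through its debug proc file.
	 */
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/driver/rndis-000", O_WRONLY);

		if (fd < 0)
			return 1;
		(void) write(fd, "D", 1);	/* media disconnect */
		sleep(1);
		(void) write(fd, "C", 1);	/* media connect again */
		close(fd);
		return 0;
	}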
1370
1371#define NAME_TEMPLATE "driver/rndis-%03d"
1372
1373static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
1374
1375#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1376
1377
1378int __init rndis_init (void)
1379{
1380 u8 i;
1381
1382 for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
1383#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1384 char name [20];
1385
1386 sprintf (name, NAME_TEMPLATE, i);
1387 if (!(rndis_connect_state [i]
1388 = create_proc_entry (name, 0660, NULL)))
1389 {
1390			DEBUG ("%s: remove entries", __FUNCTION__);
1391 while (i) {
1392 sprintf (name, NAME_TEMPLATE, --i);
1393 remove_proc_entry (name, NULL);
1394 }
1395 DEBUG ("\n");
1396 return -EIO;
1397 }
1398
1399 rndis_connect_state [i]->nlink = 1;
1400 rndis_connect_state [i]->write_proc = rndis_proc_write;
1401 rndis_connect_state [i]->read_proc = rndis_proc_read;
1402 rndis_connect_state [i]->data = (void *)
1403 (rndis_per_dev_params + i);
1404#endif
1405 rndis_per_dev_params [i].confignr = i;
1406 rndis_per_dev_params [i].used = 0;
1407 rndis_per_dev_params [i].state = RNDIS_UNINITIALIZED;
1408 rndis_per_dev_params [i].media_state
1409 = NDIS_MEDIA_STATE_DISCONNECTED;
1410 INIT_LIST_HEAD (&(rndis_per_dev_params [i].resp_queue));
1411 }
1412
1413 return 0;
1414}
1415
1416void rndis_exit (void)
1417{
1418#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1419 u8 i;
1420 char name [20];
1421
1422 for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
1423 sprintf (name, NAME_TEMPLATE, i);
1424 remove_proc_entry (name, NULL);
1425 }
1426#endif
1427}
1428
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
new file mode 100644
index 000000000000..822501852c50
--- /dev/null
+++ b/drivers/usb/gadget/rndis.h
@@ -0,0 +1,348 @@
1/*
2 * RNDIS Definitions for Remote NDIS
3 *
4 * Version: $Id: rndis.h,v 1.15 2004/03/25 21:33:46 robert Exp $
5 *
6 * Authors: Benedikt Spranger, Pengutronix
7 * Robert Schwebel, Pengutronix
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This software was originally developed in conformance with
14 * Microsoft's Remote NDIS Specification License Agreement.
15 */
16
17#ifndef _LINUX_RNDIS_H
18#define _LINUX_RNDIS_H
19
20#include "ndis.h"
21
22#define RNDIS_MAXIMUM_FRAME_SIZE 1518
23#define RNDIS_MAX_TOTAL_SIZE 1558
24
25/* Remote NDIS Versions */
26#define RNDIS_MAJOR_VERSION 1
27#define RNDIS_MINOR_VERSION 0
28
29/* Status Values */
30#define RNDIS_STATUS_SUCCESS 0x00000000U /* Success */
31#define RNDIS_STATUS_FAILURE 0xC0000001U /* Unspecified error */
32#define RNDIS_STATUS_INVALID_DATA 0xC0010015U /* Invalid data */
33#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BBU /* Unsupported request */
34#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000BU /* Device connected */
35#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000CU /* Device disconnected */
36/* For all not specified status messages:
37 * RNDIS_STATUS_Xxx -> NDIS_STATUS_Xxx
38 */
39
40/* Message Set for Connectionless (802.3) Devices */
41#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002U /* Initialize device */
42#define REMOTE_NDIS_HALT_MSG 0x00000003U
43#define REMOTE_NDIS_QUERY_MSG 0x00000004U
44#define REMOTE_NDIS_SET_MSG 0x00000005U
45#define REMOTE_NDIS_RESET_MSG 0x00000006U
46#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007U
47#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008U
48
49/* Message completion */
50#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002U
51#define REMOTE_NDIS_QUERY_CMPLT 0x80000004U
52#define REMOTE_NDIS_SET_CMPLT 0x80000005U
53#define REMOTE_NDIS_RESET_CMPLT 0x80000006U
54#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008U
55
56/* Device Flags */
57#define RNDIS_DF_CONNECTIONLESS 0x00000001U
58#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002U
59
60#define RNDIS_MEDIUM_802_3 0x00000000U
61
62/* from drivers/net/sk98lin/h/skgepnmi.h */
63#define OID_PNP_CAPABILITIES 0xFD010100
64#define OID_PNP_SET_POWER 0xFD010101
65#define OID_PNP_QUERY_POWER 0xFD010102
66#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103
67#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104
68#define OID_PNP_ENABLE_WAKE_UP 0xFD010106
69
70
71/* supported OIDs */
72static const u32 oid_supported_list [] =
73{
74 /* the general stuff */
75 OID_GEN_SUPPORTED_LIST,
76 OID_GEN_HARDWARE_STATUS,
77 OID_GEN_MEDIA_SUPPORTED,
78 OID_GEN_MEDIA_IN_USE,
79 OID_GEN_MAXIMUM_FRAME_SIZE,
80 OID_GEN_LINK_SPEED,
81 OID_GEN_TRANSMIT_BLOCK_SIZE,
82 OID_GEN_RECEIVE_BLOCK_SIZE,
83 OID_GEN_VENDOR_ID,
84 OID_GEN_VENDOR_DESCRIPTION,
85 OID_GEN_VENDOR_DRIVER_VERSION,
86 OID_GEN_CURRENT_PACKET_FILTER,
87 OID_GEN_MAXIMUM_TOTAL_SIZE,
88 OID_GEN_MEDIA_CONNECT_STATUS,
89 OID_GEN_PHYSICAL_MEDIUM,
90#if 0
91 OID_GEN_RNDIS_CONFIG_PARAMETER,
92#endif
93
94 /* the statistical stuff */
95 OID_GEN_XMIT_OK,
96 OID_GEN_RCV_OK,
97 OID_GEN_XMIT_ERROR,
98 OID_GEN_RCV_ERROR,
99 OID_GEN_RCV_NO_BUFFER,
100#ifdef RNDIS_OPTIONAL_STATS
101 OID_GEN_DIRECTED_BYTES_XMIT,
102 OID_GEN_DIRECTED_FRAMES_XMIT,
103 OID_GEN_MULTICAST_BYTES_XMIT,
104 OID_GEN_MULTICAST_FRAMES_XMIT,
105 OID_GEN_BROADCAST_BYTES_XMIT,
106 OID_GEN_BROADCAST_FRAMES_XMIT,
107 OID_GEN_DIRECTED_BYTES_RCV,
108 OID_GEN_DIRECTED_FRAMES_RCV,
109 OID_GEN_MULTICAST_BYTES_RCV,
110 OID_GEN_MULTICAST_FRAMES_RCV,
111 OID_GEN_BROADCAST_BYTES_RCV,
112 OID_GEN_BROADCAST_FRAMES_RCV,
113 OID_GEN_RCV_CRC_ERROR,
114 OID_GEN_TRANSMIT_QUEUE_LENGTH,
115#endif /* RNDIS_OPTIONAL_STATS */
116
117 /* mandatory 802.3 */
118 /* the general stuff */
119 OID_802_3_PERMANENT_ADDRESS,
120 OID_802_3_CURRENT_ADDRESS,
121 OID_802_3_MULTICAST_LIST,
122 OID_802_3_MAC_OPTIONS,
123 OID_802_3_MAXIMUM_LIST_SIZE,
124
125 /* the statistical stuff */
126 OID_802_3_RCV_ERROR_ALIGNMENT,
127 OID_802_3_XMIT_ONE_COLLISION,
128 OID_802_3_XMIT_MORE_COLLISIONS,
129#ifdef RNDIS_OPTIONAL_STATS
130 OID_802_3_XMIT_DEFERRED,
131 OID_802_3_XMIT_MAX_COLLISIONS,
132 OID_802_3_RCV_OVERRUN,
133 OID_802_3_XMIT_UNDERRUN,
134 OID_802_3_XMIT_HEARTBEAT_FAILURE,
135 OID_802_3_XMIT_TIMES_CRS_LOST,
136 OID_802_3_XMIT_LATE_COLLISIONS,
137#endif /* RNDIS_OPTIONAL_STATS */
138
139#ifdef RNDIS_PM
140 /* PM and wakeup are mandatory for USB: */
141
142 /* power management */
143 OID_PNP_CAPABILITIES,
144 OID_PNP_QUERY_POWER,
145 OID_PNP_SET_POWER,
146
147 /* wake up host */
148 OID_PNP_ENABLE_WAKE_UP,
149 OID_PNP_ADD_WAKE_UP_PATTERN,
150 OID_PNP_REMOVE_WAKE_UP_PATTERN,
151#endif
152};
153
154
155typedef struct rndis_init_msg_type
156{
157 __le32 MessageType;
158 __le32 MessageLength;
159 __le32 RequestID;
160 __le32 MajorVersion;
161 __le32 MinorVersion;
162 __le32 MaxTransferSize;
163} rndis_init_msg_type;
164
165typedef struct rndis_init_cmplt_type
166{
167 __le32 MessageType;
168 __le32 MessageLength;
169 __le32 RequestID;
170 __le32 Status;
171 __le32 MajorVersion;
172 __le32 MinorVersion;
173 __le32 DeviceFlags;
174 __le32 Medium;
175 __le32 MaxPacketsPerTransfer;
176 __le32 MaxTransferSize;
177 __le32 PacketAlignmentFactor;
178 __le32 AFListOffset;
179 __le32 AFListSize;
180} rndis_init_cmplt_type;
181
182typedef struct rndis_halt_msg_type
183{
184 __le32 MessageType;
185 __le32 MessageLength;
186 __le32 RequestID;
187} rndis_halt_msg_type;
188
189typedef struct rndis_query_msg_type
190{
191 __le32 MessageType;
192 __le32 MessageLength;
193 __le32 RequestID;
194 __le32 OID;
195 __le32 InformationBufferLength;
196 __le32 InformationBufferOffset;
197 __le32 DeviceVcHandle;
198} rndis_query_msg_type;
199
200typedef struct rndis_query_cmplt_type
201{
202 __le32 MessageType;
203 __le32 MessageLength;
204 __le32 RequestID;
205 __le32 Status;
206 __le32 InformationBufferLength;
207 __le32 InformationBufferOffset;
208} rndis_query_cmplt_type;
209
210typedef struct rndis_set_msg_type
211{
212 __le32 MessageType;
213 __le32 MessageLength;
214 __le32 RequestID;
215 __le32 OID;
216 __le32 InformationBufferLength;
217 __le32 InformationBufferOffset;
218 __le32 DeviceVcHandle;
219} rndis_set_msg_type;
220
221typedef struct rndis_set_cmplt_type
222{
223 __le32 MessageType;
224 __le32 MessageLength;
225 __le32 RequestID;
226 __le32 Status;
227} rndis_set_cmplt_type;
228
229typedef struct rndis_reset_msg_type
230{
231 __le32 MessageType;
232 __le32 MessageLength;
233 __le32 Reserved;
234} rndis_reset_msg_type;
235
236typedef struct rndis_reset_cmplt_type
237{
238 __le32 MessageType;
239 __le32 MessageLength;
240 __le32 Status;
241 __le32 AddressingReset;
242} rndis_reset_cmplt_type;
243
244typedef struct rndis_indicate_status_msg_type
245{
246 __le32 MessageType;
247 __le32 MessageLength;
248 __le32 Status;
249 __le32 StatusBufferLength;
250 __le32 StatusBufferOffset;
251} rndis_indicate_status_msg_type;
252
253typedef struct rndis_keepalive_msg_type
254{
255 __le32 MessageType;
256 __le32 MessageLength;
257 __le32 RequestID;
258} rndis_keepalive_msg_type;
259
260typedef struct rndis_keepalive_cmplt_type
261{
262 __le32 MessageType;
263 __le32 MessageLength;
264 __le32 RequestID;
265 __le32 Status;
266} rndis_keepalive_cmplt_type;
267
268struct rndis_packet_msg_type
269{
270 __le32 MessageType;
271 __le32 MessageLength;
272 __le32 DataOffset;
273 __le32 DataLength;
274 __le32 OOBDataOffset;
275 __le32 OOBDataLength;
276 __le32 NumOOBDataElements;
277 __le32 PerPacketInfoOffset;
278 __le32 PerPacketInfoLength;
279 __le32 VcHandle;
280 __le32 Reserved;
281};
282
283struct rndis_config_parameter
284{
285 __le32 ParameterNameOffset;
286 __le32 ParameterNameLength;
287 __le32 ParameterType;
288 __le32 ParameterValueOffset;
289 __le32 ParameterValueLength;
290};
291
292/* implementation specific */
293enum rndis_state
294{
295 RNDIS_UNINITIALIZED,
296 RNDIS_INITIALIZED,
297 RNDIS_DATA_INITIALIZED,
298};
299
300typedef struct rndis_resp_t
301{
302 struct list_head list;
303 u8 *buf;
304 u32 length;
305 int send;
306} rndis_resp_t;
307
308typedef struct rndis_params
309{
310 u8 confignr;
311 int used;
312 enum rndis_state state;
313 u32 filter;
314 u32 medium;
315 u32 speed;
316 u32 media_state;
317 const u8 *host_mac;
318 struct net_device *dev;
319 struct net_device_stats *stats;
320 u32 vendorID;
321 const char *vendorDescr;
322 int (*ack) (struct net_device *);
323 struct list_head resp_queue;
324} rndis_params;
325
 326/* RNDIS message parser and other helper functions */
327int rndis_msg_parser (u8 configNr, u8 *buf);
328int rndis_register (int (*rndis_control_ack) (struct net_device *));
329void rndis_deregister (int configNr);
330int rndis_set_param_dev (u8 configNr, struct net_device *dev,
331 struct net_device_stats *stats);
332int rndis_set_param_vendor (u8 configNr, u32 vendorID,
333 const char *vendorDescr);
334int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
335void rndis_add_hdr (struct sk_buff *skb);
336int rndis_rm_hdr (u8 *buf, u32 *length);
337u8 *rndis_get_next_response (int configNr, u32 *length);
338void rndis_free_response (int configNr, u8 *buf);
339
340int rndis_signal_connect (int configNr);
341int rndis_signal_disconnect (int configNr);
342int rndis_state (int configNr);
343extern void rndis_set_host_mac (int configNr, const u8 *addr);
344
345int __init rndis_init (void);
346void rndis_exit (void);
347
348#endif /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
new file mode 100644
index 000000000000..2af3f785d5a1
--- /dev/null
+++ b/drivers/usb/gadget/serial.c
@@ -0,0 +1,2436 @@
1/*
2 * g_serial.c -- USB gadget serial driver
3 *
4 * Copyright 2003 (C) Al Borchers (alborchers@steinerpoint.com)
5 *
6 * This code is based in part on the Gadget Zero driver, which
7 * is Copyright (C) 2003 by David Brownell, all rights reserved.
8 *
9 * This code also borrows from usbserial.c, which is
10 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
11 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
12 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
13 *
14 * This software is distributed under the terms of the GNU General
15 * Public License ("GPL") as published by the Free Software Foundation,
16 * either version 2 of that License or (at your option) any later version.
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/ioport.h>
25#include <linux/sched.h>
26#include <linux/slab.h>
27#include <linux/smp_lock.h>
28#include <linux/errno.h>
29#include <linux/init.h>
30#include <linux/timer.h>
31#include <linux/list.h>
32#include <linux/interrupt.h>
33#include <linux/utsname.h>
34#include <linux/wait.h>
35#include <linux/proc_fs.h>
36#include <linux/device.h>
37#include <linux/tty.h>
38#include <linux/tty_flip.h>
39
40#include <asm/byteorder.h>
41#include <asm/io.h>
42#include <asm/irq.h>
43#include <asm/system.h>
44#include <asm/unaligned.h>
45#include <asm/uaccess.h>
46
47#include <linux/usb_ch9.h>
48#include <linux/usb_cdc.h>
49#include <linux/usb_gadget.h>
50
51#include "gadget_chips.h"
52
53
54/* Wait Cond */
55
56#define __wait_cond_interruptible(wq, condition, lock, flags, ret) \
57do { \
58 wait_queue_t __wait; \
59 init_waitqueue_entry(&__wait, current); \
60 \
61 add_wait_queue(&wq, &__wait); \
62 for (;;) { \
63 set_current_state(TASK_INTERRUPTIBLE); \
64 if (condition) \
65 break; \
66 if (!signal_pending(current)) { \
67 spin_unlock_irqrestore(lock, flags); \
68 schedule(); \
69 spin_lock_irqsave(lock, flags); \
70 continue; \
71 } \
72 ret = -ERESTARTSYS; \
73 break; \
74 } \
75 current->state = TASK_RUNNING; \
76 remove_wait_queue(&wq, &__wait); \
77} while (0)
78
79#define wait_cond_interruptible(wq, condition, lock, flags) \
80({ \
81 int __ret = 0; \
82 if (!(condition)) \
83 __wait_cond_interruptible(wq, condition, lock, flags, \
84 __ret); \
85 __ret; \
86})
87
88#define __wait_cond_interruptible_timeout(wq, condition, lock, flags, \
89 timeout, ret) \
90do { \
91 signed long __timeout = timeout; \
92 wait_queue_t __wait; \
93 init_waitqueue_entry(&__wait, current); \
94 \
95 add_wait_queue(&wq, &__wait); \
96 for (;;) { \
97 set_current_state(TASK_INTERRUPTIBLE); \
98 if (__timeout == 0) \
99 break; \
100 if (condition) \
101 break; \
102 if (!signal_pending(current)) { \
103 spin_unlock_irqrestore(lock, flags); \
104 __timeout = schedule_timeout(__timeout); \
105 spin_lock_irqsave(lock, flags); \
106 continue; \
107 } \
108 ret = -ERESTARTSYS; \
109 break; \
110 } \
111 current->state = TASK_RUNNING; \
112 remove_wait_queue(&wq, &__wait); \
113} while (0)
114
115#define wait_cond_interruptible_timeout(wq, condition, lock, flags, \
116 timeout) \
117({ \
118 int __ret = 0; \
119 if (!(condition)) \
120 __wait_cond_interruptible_timeout(wq, condition, lock, \
121 flags, timeout, __ret); \
122 __ret; \
123})
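
These macros are open-coded variants of wait_event_interruptible_timeout() that drop a caller-held irqsave spinlock around the schedule, so the condition can always be re-checked with the lock held; gs_close() below uses the timeout form to let the write buffer drain. A condensed usage sketch (my_drain_example, my_wait, my_lock and pending are hypothetical; the caller is assumed to hold the lock with "flags" from spin_lock_irqsave):

	/* Wait up to 5 s for "pending" to drop to zero, re-acquiring
	 * the lock around every check of the condition.
	 */
	static void my_drain_example(wait_queue_head_t *my_wait,
				     spinlock_t *my_lock,
				     int *pending, unsigned long flags)
	{
		int ret;

		ret = wait_cond_interruptible_timeout(*my_wait, *pending == 0,
						my_lock, flags, 5 * HZ);
		if (ret == -ERESTARTSYS) {
			/* interrupted by a signal before it drained */
		}
	}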
124
125
126/* Defines */
127
128#define GS_VERSION_STR "v2.0"
129#define GS_VERSION_NUM 0x0200
130
131#define GS_LONG_NAME "Gadget Serial"
132#define GS_SHORT_NAME "g_serial"
133
134#define GS_MAJOR 127
135#define GS_MINOR_START 0
136
137#define GS_NUM_PORTS 16
138
139#define GS_NUM_CONFIGS 1
140#define GS_NO_CONFIG_ID 0
141#define GS_BULK_CONFIG_ID 1
142#define GS_ACM_CONFIG_ID 2
143
144#define GS_MAX_NUM_INTERFACES 2
145#define GS_BULK_INTERFACE_ID 0
146#define GS_CONTROL_INTERFACE_ID 0
147#define GS_DATA_INTERFACE_ID 1
148
149#define GS_MAX_DESC_LEN 256
150
151#define GS_DEFAULT_READ_Q_SIZE 32
152#define GS_DEFAULT_WRITE_Q_SIZE 32
153
154#define GS_DEFAULT_WRITE_BUF_SIZE 8192
155#define GS_TMP_BUF_SIZE 8192
156
157#define GS_CLOSE_TIMEOUT 15
158
159#define GS_DEFAULT_USE_ACM 0
160
161#define GS_DEFAULT_DTE_RATE 9600
162#define GS_DEFAULT_DATA_BITS 8
163#define GS_DEFAULT_PARITY USB_CDC_NO_PARITY
164#define GS_DEFAULT_CHAR_FORMAT USB_CDC_1_STOP_BITS
165
166/* select highspeed/fullspeed, hiding highspeed if not configured */
167#ifdef CONFIG_USB_GADGET_DUALSPEED
168#define GS_SPEED_SELECT(is_hs,hs,fs) ((is_hs) ? (hs) : (fs))
169#else
170#define GS_SPEED_SELECT(is_hs,hs,fs) (fs)
171#endif /* CONFIG_USB_GADGET_DUALSPEED */
172
173/* debug settings */
174#ifdef GS_DEBUG
175static int debug = 1;
176
177#define gs_debug(format, arg...) \
178 do { if (debug) printk(KERN_DEBUG format, ## arg); } while(0)
179#define gs_debug_level(level, format, arg...) \
180 do { if (debug>=level) printk(KERN_DEBUG format, ## arg); } while(0)
181
182#else
183
184#define gs_debug(format, arg...) \
185 do { } while(0)
186#define gs_debug_level(level, format, arg...) \
187 do { } while(0)
188
189#endif /* GS_DEBUG */
190
191/* Thanks to NetChip Technologies for donating this product ID.
192 *
193 * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
194 * Instead: allocate your own, using normal USB-IF procedures.
195 */
196#define GS_VENDOR_ID 0x0525 /* NetChip */
197#define GS_PRODUCT_ID 0xa4a6 /* Linux-USB Serial Gadget */
198#define GS_CDC_PRODUCT_ID 0xa4a7 /* ... as CDC-ACM */
199
200#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
201#define GS_NOTIFY_MAXPACKET 8
202
203
204/* Structures */
205
206struct gs_dev;
207
208/* circular buffer */
209struct gs_buf {
210 unsigned int buf_size;
211 char *buf_buf;
212 char *buf_get;
213 char *buf_put;
214};
215
216/* list of requests */
217struct gs_req_entry {
218 struct list_head re_entry;
219 struct usb_request *re_req;
220};
221
222/* the port structure holds info for each port, one for each minor number */
223struct gs_port {
224 struct gs_dev *port_dev; /* pointer to device struct */
225 struct tty_struct *port_tty; /* pointer to tty struct */
226 spinlock_t port_lock;
227 int port_num;
228 int port_open_count;
229 int port_in_use; /* open/close in progress */
230 wait_queue_head_t port_write_wait;/* waiting to write */
231 struct gs_buf *port_write_buf;
232 struct usb_cdc_line_coding port_line_coding;
233};
234
235/* the device structure holds info for the USB device */
236struct gs_dev {
237 struct usb_gadget *dev_gadget; /* gadget device pointer */
238 spinlock_t dev_lock; /* lock for set/reset config */
239 int dev_config; /* configuration number */
240 struct usb_ep *dev_notify_ep; /* address of notify endpoint */
241 struct usb_ep *dev_in_ep; /* address of in endpoint */
242 struct usb_ep *dev_out_ep; /* address of out endpoint */
 243	struct usb_endpoint_descriptor /* descriptor of notify ep */
244 *dev_notify_ep_desc;
245 struct usb_endpoint_descriptor /* descriptor of in endpoint */
246 *dev_in_ep_desc;
247 struct usb_endpoint_descriptor /* descriptor of out endpoint */
248 *dev_out_ep_desc;
249 struct usb_request *dev_ctrl_req; /* control request */
250 struct list_head dev_req_list; /* list of write requests */
251 int dev_sched_port; /* round robin port scheduled */
252 struct gs_port *dev_port[GS_NUM_PORTS]; /* the ports */
253};
254
255
256/* Functions */
257
258/* module */
259static int __init gs_module_init(void);
260static void __exit gs_module_exit(void);
261
262/* tty driver */
263static int gs_open(struct tty_struct *tty, struct file *file);
264static void gs_close(struct tty_struct *tty, struct file *file);
265static int gs_write(struct tty_struct *tty,
266 const unsigned char *buf, int count);
267static void gs_put_char(struct tty_struct *tty, unsigned char ch);
268static void gs_flush_chars(struct tty_struct *tty);
269static int gs_write_room(struct tty_struct *tty);
270static int gs_chars_in_buffer(struct tty_struct *tty);
271static void gs_throttle(struct tty_struct * tty);
272static void gs_unthrottle(struct tty_struct * tty);
273static void gs_break(struct tty_struct *tty, int break_state);
274static int gs_ioctl(struct tty_struct *tty, struct file *file,
275 unsigned int cmd, unsigned long arg);
276static void gs_set_termios(struct tty_struct *tty, struct termios *old);
277
278static int gs_send(struct gs_dev *dev);
279static int gs_send_packet(struct gs_dev *dev, char *packet,
280 unsigned int size);
281static int gs_recv_packet(struct gs_dev *dev, char *packet,
282 unsigned int size);
283static void gs_read_complete(struct usb_ep *ep, struct usb_request *req);
284static void gs_write_complete(struct usb_ep *ep, struct usb_request *req);
285
286/* gadget driver */
287static int gs_bind(struct usb_gadget *gadget);
288static void gs_unbind(struct usb_gadget *gadget);
289static int gs_setup(struct usb_gadget *gadget,
290 const struct usb_ctrlrequest *ctrl);
291static int gs_setup_standard(struct usb_gadget *gadget,
292 const struct usb_ctrlrequest *ctrl);
293static int gs_setup_class(struct usb_gadget *gadget,
294 const struct usb_ctrlrequest *ctrl);
295static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req);
296static void gs_disconnect(struct usb_gadget *gadget);
297static int gs_set_config(struct gs_dev *dev, unsigned config);
298static void gs_reset_config(struct gs_dev *dev);
299static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
300 u8 type, unsigned int index, int is_otg);
301
302static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len,
303 int kmalloc_flags);
304static void gs_free_req(struct usb_ep *ep, struct usb_request *req);
305
306static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len,
307 int kmalloc_flags);
308static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req);
309
310static int gs_alloc_ports(struct gs_dev *dev, int kmalloc_flags);
311static void gs_free_ports(struct gs_dev *dev);
312
313/* circular buffer */
314static struct gs_buf *gs_buf_alloc(unsigned int size, int kmalloc_flags);
315static void gs_buf_free(struct gs_buf *gb);
316static void gs_buf_clear(struct gs_buf *gb);
317static unsigned int gs_buf_data_avail(struct gs_buf *gb);
318static unsigned int gs_buf_space_avail(struct gs_buf *gb);
319static unsigned int gs_buf_put(struct gs_buf *gb, const char *buf,
320 unsigned int count);
321static unsigned int gs_buf_get(struct gs_buf *gb, char *buf,
322 unsigned int count);
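
The gs_buf routines declared above implement a simple circular byte buffer: writers add data with gs_buf_put() as long as gs_buf_space_avail() reports room, readers drain it with gs_buf_get(), and gs_buf_data_avail() says how much is queued. A short producer/consumer sketch (my_gs_buf_demo is a hypothetical name; locking is omitted here, whereas the driver serializes access with port_lock):

	static void my_gs_buf_demo(void)
	{
		struct gs_buf *gb;
		char out[16];
		unsigned int n;

		gb = gs_buf_alloc(64, GFP_KERNEL);	/* 64-byte ring */
		if (gb == NULL)
			return;

		if (gs_buf_space_avail(gb) >= 5)
			gs_buf_put(gb, "hello", 5);	/* enqueue 5 bytes */

		n = gs_buf_get(gb, out, sizeof out);	/* dequeue up to 16 */
		/* n == 5 and gs_buf_data_avail(gb) == 0 at this point */

		gs_buf_free(gb);
	}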
323
324/* external functions */
325extern int net2280_set_fifo_mode(struct usb_gadget *gadget, int mode);
326
327
328/* Globals */
329
330static struct gs_dev *gs_device;
331
332static const char *EP_IN_NAME;
333static const char *EP_OUT_NAME;
334static const char *EP_NOTIFY_NAME;
335
336static struct semaphore gs_open_close_sem[GS_NUM_PORTS];
337
338static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
339static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
340
341static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
342
343static unsigned int use_acm = GS_DEFAULT_USE_ACM;
344
345
346/* tty driver struct */
347static struct tty_operations gs_tty_ops = {
348 .open = gs_open,
349 .close = gs_close,
350 .write = gs_write,
351 .put_char = gs_put_char,
352 .flush_chars = gs_flush_chars,
353 .write_room = gs_write_room,
354 .ioctl = gs_ioctl,
355 .set_termios = gs_set_termios,
356 .throttle = gs_throttle,
357 .unthrottle = gs_unthrottle,
358 .break_ctl = gs_break,
359 .chars_in_buffer = gs_chars_in_buffer,
360};
361static struct tty_driver *gs_tty_driver;
362
363/* gadget driver struct */
364static struct usb_gadget_driver gs_gadget_driver = {
365#ifdef CONFIG_USB_GADGET_DUALSPEED
366 .speed = USB_SPEED_HIGH,
367#else
368 .speed = USB_SPEED_FULL,
369#endif /* CONFIG_USB_GADGET_DUALSPEED */
370 .function = GS_LONG_NAME,
371 .bind = gs_bind,
372 .unbind = gs_unbind,
373 .setup = gs_setup,
374 .disconnect = gs_disconnect,
375 .driver = {
376 .name = GS_SHORT_NAME,
377 /* .shutdown = ... */
378 /* .suspend = ... */
379 /* .resume = ... */
380 },
381};
382
383
384/* USB descriptors */
385
386#define GS_MANUFACTURER_STR_ID 1
387#define GS_PRODUCT_STR_ID 2
388#define GS_SERIAL_STR_ID 3
389#define GS_BULK_CONFIG_STR_ID 4
390#define GS_ACM_CONFIG_STR_ID 5
391#define GS_CONTROL_STR_ID 6
392#define GS_DATA_STR_ID 7
393
394/* static strings, in UTF-8 */
395static char manufacturer[50];
396static struct usb_string gs_strings[] = {
397 { GS_MANUFACTURER_STR_ID, manufacturer },
398 { GS_PRODUCT_STR_ID, GS_LONG_NAME },
399 { GS_SERIAL_STR_ID, "0" },
400 { GS_BULK_CONFIG_STR_ID, "Gadget Serial Bulk" },
401 { GS_ACM_CONFIG_STR_ID, "Gadget Serial CDC ACM" },
402 { GS_CONTROL_STR_ID, "Gadget Serial Control" },
403 { GS_DATA_STR_ID, "Gadget Serial Data" },
404 { } /* end of list */
405};
406
407static struct usb_gadget_strings gs_string_table = {
408 .language = 0x0409, /* en-us */
409 .strings = gs_strings,
410};
411
412static struct usb_device_descriptor gs_device_desc = {
413 .bLength = USB_DT_DEVICE_SIZE,
414 .bDescriptorType = USB_DT_DEVICE,
415 .bcdUSB = __constant_cpu_to_le16(0x0200),
416 .bDeviceSubClass = 0,
417 .bDeviceProtocol = 0,
418 .idVendor = __constant_cpu_to_le16(GS_VENDOR_ID),
419 .idProduct = __constant_cpu_to_le16(GS_PRODUCT_ID),
420 .iManufacturer = GS_MANUFACTURER_STR_ID,
421 .iProduct = GS_PRODUCT_STR_ID,
422 .iSerialNumber = GS_SERIAL_STR_ID,
423 .bNumConfigurations = GS_NUM_CONFIGS,
424};
425
426static struct usb_otg_descriptor gs_otg_descriptor = {
427 .bLength = sizeof(gs_otg_descriptor),
428 .bDescriptorType = USB_DT_OTG,
429 .bmAttributes = USB_OTG_SRP,
430};
431
432static struct usb_config_descriptor gs_bulk_config_desc = {
433 .bLength = USB_DT_CONFIG_SIZE,
434 .bDescriptorType = USB_DT_CONFIG,
435 /* .wTotalLength computed dynamically */
436 .bNumInterfaces = 1,
437 .bConfigurationValue = GS_BULK_CONFIG_ID,
438 .iConfiguration = GS_BULK_CONFIG_STR_ID,
439 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
440 .bMaxPower = 1,
441};
442
443static struct usb_config_descriptor gs_acm_config_desc = {
444 .bLength = USB_DT_CONFIG_SIZE,
445 .bDescriptorType = USB_DT_CONFIG,
446 /* .wTotalLength computed dynamically */
447 .bNumInterfaces = 2,
448 .bConfigurationValue = GS_ACM_CONFIG_ID,
449 .iConfiguration = GS_ACM_CONFIG_STR_ID,
450 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
451 .bMaxPower = 1,
452};
453
454static const struct usb_interface_descriptor gs_bulk_interface_desc = {
455 .bLength = USB_DT_INTERFACE_SIZE,
456 .bDescriptorType = USB_DT_INTERFACE,
457 .bInterfaceNumber = GS_BULK_INTERFACE_ID,
458 .bNumEndpoints = 2,
459 .bInterfaceClass = USB_CLASS_CDC_DATA,
460 .bInterfaceSubClass = 0,
461 .bInterfaceProtocol = 0,
462 .iInterface = GS_DATA_STR_ID,
463};
464
465static const struct usb_interface_descriptor gs_control_interface_desc = {
466 .bLength = USB_DT_INTERFACE_SIZE,
467 .bDescriptorType = USB_DT_INTERFACE,
468 .bInterfaceNumber = GS_CONTROL_INTERFACE_ID,
469 .bNumEndpoints = 1,
470 .bInterfaceClass = USB_CLASS_COMM,
471 .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
472 .bInterfaceProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
473 .iInterface = GS_CONTROL_STR_ID,
474};
475
476static const struct usb_interface_descriptor gs_data_interface_desc = {
477 .bLength = USB_DT_INTERFACE_SIZE,
478 .bDescriptorType = USB_DT_INTERFACE,
479 .bInterfaceNumber = GS_DATA_INTERFACE_ID,
480 .bNumEndpoints = 2,
481 .bInterfaceClass = USB_CLASS_CDC_DATA,
482 .bInterfaceSubClass = 0,
483 .bInterfaceProtocol = 0,
484 .iInterface = GS_DATA_STR_ID,
485};
486
487static const struct usb_cdc_header_desc gs_header_desc = {
488 .bLength = sizeof(gs_header_desc),
489 .bDescriptorType = USB_DT_CS_INTERFACE,
490 .bDescriptorSubType = USB_CDC_HEADER_TYPE,
491 .bcdCDC = __constant_cpu_to_le16(0x0110),
492};
493
494static const struct usb_cdc_call_mgmt_descriptor gs_call_mgmt_descriptor = {
495 .bLength = sizeof(gs_call_mgmt_descriptor),
496 .bDescriptorType = USB_DT_CS_INTERFACE,
497 .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
498 .bmCapabilities = 0,
499 .bDataInterface = 1, /* index of data interface */
500};
501
502static struct usb_cdc_acm_descriptor gs_acm_descriptor = {
503 .bLength = sizeof(gs_acm_descriptor),
504 .bDescriptorType = USB_DT_CS_INTERFACE,
505 .bDescriptorSubType = USB_CDC_ACM_TYPE,
506 .bmCapabilities = 0,
507};
508
509static const struct usb_cdc_union_desc gs_union_desc = {
510 .bLength = sizeof(gs_union_desc),
511 .bDescriptorType = USB_DT_CS_INTERFACE,
512 .bDescriptorSubType = USB_CDC_UNION_TYPE,
513 .bMasterInterface0 = 0, /* index of control interface */
514 .bSlaveInterface0 = 1, /* index of data interface */
515};
516
517static struct usb_endpoint_descriptor gs_fullspeed_notify_desc = {
518 .bLength = USB_DT_ENDPOINT_SIZE,
519 .bDescriptorType = USB_DT_ENDPOINT,
520 .bEndpointAddress = USB_DIR_IN,
521 .bmAttributes = USB_ENDPOINT_XFER_INT,
522 .wMaxPacketSize = __constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
523 .bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
524};
525
526static struct usb_endpoint_descriptor gs_fullspeed_in_desc = {
527 .bLength = USB_DT_ENDPOINT_SIZE,
528 .bDescriptorType = USB_DT_ENDPOINT,
529 .bEndpointAddress = USB_DIR_IN,
530 .bmAttributes = USB_ENDPOINT_XFER_BULK,
531};
532
533static struct usb_endpoint_descriptor gs_fullspeed_out_desc = {
534 .bLength = USB_DT_ENDPOINT_SIZE,
535 .bDescriptorType = USB_DT_ENDPOINT,
536 .bEndpointAddress = USB_DIR_OUT,
537 .bmAttributes = USB_ENDPOINT_XFER_BULK,
538};
539
540static const struct usb_descriptor_header *gs_bulk_fullspeed_function[] = {
541 (struct usb_descriptor_header *) &gs_otg_descriptor,
542 (struct usb_descriptor_header *) &gs_bulk_interface_desc,
543 (struct usb_descriptor_header *) &gs_fullspeed_in_desc,
544 (struct usb_descriptor_header *) &gs_fullspeed_out_desc,
545 NULL,
546};
547
548static const struct usb_descriptor_header *gs_acm_fullspeed_function[] = {
549 (struct usb_descriptor_header *) &gs_otg_descriptor,
550 (struct usb_descriptor_header *) &gs_control_interface_desc,
551 (struct usb_descriptor_header *) &gs_header_desc,
552 (struct usb_descriptor_header *) &gs_call_mgmt_descriptor,
553 (struct usb_descriptor_header *) &gs_acm_descriptor,
554 (struct usb_descriptor_header *) &gs_union_desc,
555 (struct usb_descriptor_header *) &gs_fullspeed_notify_desc,
556 (struct usb_descriptor_header *) &gs_data_interface_desc,
557 (struct usb_descriptor_header *) &gs_fullspeed_in_desc,
558 (struct usb_descriptor_header *) &gs_fullspeed_out_desc,
559 NULL,
560};
561
562#ifdef CONFIG_USB_GADGET_DUALSPEED
563static struct usb_endpoint_descriptor gs_highspeed_notify_desc = {
564 .bLength = USB_DT_ENDPOINT_SIZE,
565 .bDescriptorType = USB_DT_ENDPOINT,
566 .bEndpointAddress = USB_DIR_IN,
567 .bmAttributes = USB_ENDPOINT_XFER_INT,
568 .wMaxPacketSize = __constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
569 .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
570};
571
572static struct usb_endpoint_descriptor gs_highspeed_in_desc = {
573 .bLength = USB_DT_ENDPOINT_SIZE,
574 .bDescriptorType = USB_DT_ENDPOINT,
575 .bmAttributes = USB_ENDPOINT_XFER_BULK,
576 .wMaxPacketSize = __constant_cpu_to_le16(512),
577};
578
579static struct usb_endpoint_descriptor gs_highspeed_out_desc = {
580 .bLength = USB_DT_ENDPOINT_SIZE,
581 .bDescriptorType = USB_DT_ENDPOINT,
582 .bmAttributes = USB_ENDPOINT_XFER_BULK,
583 .wMaxPacketSize = __constant_cpu_to_le16(512),
584};
585
586static struct usb_qualifier_descriptor gs_qualifier_desc = {
587 .bLength = sizeof(struct usb_qualifier_descriptor),
588 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
589 .bcdUSB = __constant_cpu_to_le16 (0x0200),
590 /* assumes ep0 uses the same value for both speeds ... */
591 .bNumConfigurations = GS_NUM_CONFIGS,
592};
593
594static const struct usb_descriptor_header *gs_bulk_highspeed_function[] = {
595 (struct usb_descriptor_header *) &gs_otg_descriptor,
596 (struct usb_descriptor_header *) &gs_bulk_interface_desc,
597 (struct usb_descriptor_header *) &gs_highspeed_in_desc,
598 (struct usb_descriptor_header *) &gs_highspeed_out_desc,
599 NULL,
600};
601
602static const struct usb_descriptor_header *gs_acm_highspeed_function[] = {
603 (struct usb_descriptor_header *) &gs_otg_descriptor,
604 (struct usb_descriptor_header *) &gs_control_interface_desc,
605 (struct usb_descriptor_header *) &gs_header_desc,
606 (struct usb_descriptor_header *) &gs_call_mgmt_descriptor,
607 (struct usb_descriptor_header *) &gs_acm_descriptor,
608 (struct usb_descriptor_header *) &gs_union_desc,
609 (struct usb_descriptor_header *) &gs_highspeed_notify_desc,
610 (struct usb_descriptor_header *) &gs_data_interface_desc,
611 (struct usb_descriptor_header *) &gs_highspeed_in_desc,
612 (struct usb_descriptor_header *) &gs_highspeed_out_desc,
613 NULL,
614};
615
616#endif /* CONFIG_USB_GADGET_DUALSPEED */
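
The full-speed and (optional) high-speed descriptor sets above are meant to be chosen at run time with GS_SPEED_SELECT; in a full-speed-only build the high-speed argument is discarded by the preprocessor, so it need not exist. A small sketch of that pattern (my_pick_in_desc is a hypothetical helper, not part of this driver):

	static const struct usb_endpoint_descriptor *
	my_pick_in_desc(struct usb_gadget *gadget)
	{
		/* returns the bulk-in descriptor matching the current speed */
		return GS_SPEED_SELECT(gadget->speed == USB_SPEED_HIGH,
				&gs_highspeed_in_desc,
				&gs_fullspeed_in_desc);
	}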
617
618
619/* Module */
620MODULE_DESCRIPTION(GS_LONG_NAME);
621MODULE_AUTHOR("Al Borchers");
622MODULE_LICENSE("GPL");
623
624#ifdef GS_DEBUG
625module_param(debug, int, S_IRUGO|S_IWUSR);
626MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
627#endif
628
629module_param(read_q_size, uint, S_IRUGO);
630MODULE_PARM_DESC(read_q_size, "Read request queue size, default=32");
631
632module_param(write_q_size, uint, S_IRUGO);
633MODULE_PARM_DESC(write_q_size, "Write request queue size, default=32");
634
635module_param(write_buf_size, uint, S_IRUGO);
636MODULE_PARM_DESC(write_buf_size, "Write buffer size, default=8192");
637
638module_param(use_acm, uint, S_IRUGO);
639MODULE_PARM_DESC(use_acm, "Use CDC ACM, 0=no, 1=yes, default=no");
640
641module_init(gs_module_init);
642module_exit(gs_module_exit);
643
644/*
645* gs_module_init
646*
647* Register as a USB gadget driver and a tty driver.
648*/
649static int __init gs_module_init(void)
650{
651 int i;
652 int retval;
653
654 retval = usb_gadget_register_driver(&gs_gadget_driver);
655 if (retval) {
656 printk(KERN_ERR "gs_module_init: cannot register gadget driver, ret=%d\n", retval);
657 return retval;
658 }
659
660 gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
661 if (!gs_tty_driver)
662 return -ENOMEM;
663 gs_tty_driver->owner = THIS_MODULE;
664 gs_tty_driver->driver_name = GS_SHORT_NAME;
665 gs_tty_driver->name = "ttygs";
666 gs_tty_driver->devfs_name = "usb/ttygs/";
667 gs_tty_driver->major = GS_MAJOR;
668 gs_tty_driver->minor_start = GS_MINOR_START;
669 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
670 gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
671 gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
672 gs_tty_driver->init_termios = tty_std_termios;
673 gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
674 tty_set_operations(gs_tty_driver, &gs_tty_ops);
675
676 for (i=0; i < GS_NUM_PORTS; i++)
677 sema_init(&gs_open_close_sem[i], 1);
678
679 retval = tty_register_driver(gs_tty_driver);
680 if (retval) {
681 usb_gadget_unregister_driver(&gs_gadget_driver);
682 put_tty_driver(gs_tty_driver);
683 printk(KERN_ERR "gs_module_init: cannot register tty driver, ret=%d\n", retval);
684 return retval;
685 }
686
687 printk(KERN_INFO "gs_module_init: %s %s loaded\n", GS_LONG_NAME, GS_VERSION_STR);
688 return 0;
689}
690
691/*
692* gs_module_exit
693*
694* Unregister as a tty driver and a USB gadget driver.
695*/
696static void __exit gs_module_exit(void)
697{
698 tty_unregister_driver(gs_tty_driver);
699 put_tty_driver(gs_tty_driver);
700 usb_gadget_unregister_driver(&gs_gadget_driver);
701
702 printk(KERN_INFO "gs_module_exit: %s %s unloaded\n", GS_LONG_NAME, GS_VERSION_STR);
703}
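
Once the module is loaded and a host enumerates the gadget, each port behaves as a normal tty (major 127, driver name "ttygs"). A user-space sketch of opening the first port raw and writing a line (the /dev/ttygs0 node name is an assumption; it depends on how the device node is created on the target):

	#include <fcntl.h>
	#include <termios.h>
	#include <unistd.h>

	int main(void)
	{
		struct termios tio;
		const char msg[] = "hello from the gadget side\n";
		int fd = open("/dev/ttygs0", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return 1;

		tcgetattr(fd, &tio);
		cfmakeraw(&tio);		/* raw 8-bit path, no echo */
		tcsetattr(fd, TCSANOW, &tio);

		(void) write(fd, msg, sizeof msg - 1);
		close(fd);
		return 0;
	}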
704
705/* TTY Driver */
706
707/*
708 * gs_open
709 */
710static int gs_open(struct tty_struct *tty, struct file *file)
711{
712 int port_num;
713 unsigned long flags;
714 struct gs_port *port;
715 struct gs_dev *dev;
716 struct gs_buf *buf;
717 struct semaphore *sem;
718 int ret;
719
720 port_num = tty->index;
721
722 gs_debug("gs_open: (%d,%p,%p)\n", port_num, tty, file);
723
724 if (port_num < 0 || port_num >= GS_NUM_PORTS) {
725 printk(KERN_ERR "gs_open: (%d,%p,%p) invalid port number\n",
726 port_num, tty, file);
727 return -ENODEV;
728 }
729
730 dev = gs_device;
731
732 if (dev == NULL) {
733 printk(KERN_ERR "gs_open: (%d,%p,%p) NULL device pointer\n",
734 port_num, tty, file);
735 return -ENODEV;
736 }
737
738 sem = &gs_open_close_sem[port_num];
739 if (down_interruptible(sem)) {
740 printk(KERN_ERR
741 "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n",
742 port_num, tty, file);
743 return -ERESTARTSYS;
744 }
745
746 spin_lock_irqsave(&dev->dev_lock, flags);
747
748 if (dev->dev_config == GS_NO_CONFIG_ID) {
749 printk(KERN_ERR
750 "gs_open: (%d,%p,%p) device is not connected\n",
751 port_num, tty, file);
752 ret = -ENODEV;
753 goto exit_unlock_dev;
754 }
755
756 port = dev->dev_port[port_num];
757
758 if (port == NULL) {
759 printk(KERN_ERR "gs_open: (%d,%p,%p) NULL port pointer\n",
760 port_num, tty, file);
761 ret = -ENODEV;
762 goto exit_unlock_dev;
763 }
764
765 spin_lock(&port->port_lock);
766 spin_unlock(&dev->dev_lock);
767
768 if (port->port_dev == NULL) {
769 printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (1)\n",
770 port_num, tty, file);
771 ret = -EIO;
772 goto exit_unlock_port;
773 }
774
775 if (port->port_open_count > 0) {
776 ++port->port_open_count;
777 gs_debug("gs_open: (%d,%p,%p) already open\n",
778 port_num, tty, file);
779 ret = 0;
780 goto exit_unlock_port;
781 }
782
783 tty->driver_data = NULL;
784
785 /* mark port as in use, we can drop port lock and sleep if necessary */
786 port->port_in_use = 1;
787
788 /* allocate write buffer on first open */
789 if (port->port_write_buf == NULL) {
790 spin_unlock_irqrestore(&port->port_lock, flags);
791 buf = gs_buf_alloc(write_buf_size, GFP_KERNEL);
792 spin_lock_irqsave(&port->port_lock, flags);
793
794 /* might have been disconnected while asleep, check */
795 if (port->port_dev == NULL) {
796 printk(KERN_ERR
797 "gs_open: (%d,%p,%p) port disconnected (2)\n",
798 port_num, tty, file);
799 port->port_in_use = 0;
800 ret = -EIO;
801 goto exit_unlock_port;
802 }
803
804 if ((port->port_write_buf=buf) == NULL) {
805 printk(KERN_ERR "gs_open: (%d,%p,%p) cannot allocate port write buffer\n",
806 port_num, tty, file);
807 port->port_in_use = 0;
808 ret = -ENOMEM;
809 goto exit_unlock_port;
810 }
811
812 }
813
814 /* wait for carrier detect (not implemented) */
815
816 /* might have been disconnected while asleep, check */
817 if (port->port_dev == NULL) {
818 printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (3)\n",
819 port_num, tty, file);
820 port->port_in_use = 0;
821 ret = -EIO;
822 goto exit_unlock_port;
823 }
824
825 tty->driver_data = port;
826 port->port_tty = tty;
827 port->port_open_count = 1;
828 port->port_in_use = 0;
829
830 gs_debug("gs_open: (%d,%p,%p) completed\n", port_num, tty, file);
831
832 ret = 0;
833
834exit_unlock_port:
835 spin_unlock_irqrestore(&port->port_lock, flags);
836 up(sem);
837 return ret;
838
839exit_unlock_dev:
840 spin_unlock_irqrestore(&dev->dev_lock, flags);
841 up(sem);
842 return ret;
843
844}
845
846/*
847 * gs_close
848 */
849static void gs_close(struct tty_struct *tty, struct file *file)
850{
851 unsigned long flags;
852 struct gs_port *port = tty->driver_data;
853 struct semaphore *sem;
854
855 if (port == NULL) {
856 printk(KERN_ERR "gs_close: NULL port pointer\n");
857 return;
858 }
859
860 gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file);
861
862 sem = &gs_open_close_sem[port->port_num];
863 down(sem);
864
865 spin_lock_irqsave(&port->port_lock, flags);
866
867 if (port->port_open_count == 0) {
868 printk(KERN_ERR
869 "gs_close: (%d,%p,%p) port is already closed\n",
870 port->port_num, tty, file);
871 goto exit;
872 }
873
874 if (port->port_open_count > 1) {
875 --port->port_open_count;
876 goto exit;
877 }
878
879 /* free disconnected port on final close */
880 if (port->port_dev == NULL) {
881 kfree(port);
882 goto exit;
883 }
884
885 /* mark port as closed but in use, we can drop port lock */
886 /* and sleep if necessary */
887 port->port_in_use = 1;
888 port->port_open_count = 0;
889
890 /* wait for write buffer to drain, or */
891 /* at most GS_CLOSE_TIMEOUT seconds */
892 if (gs_buf_data_avail(port->port_write_buf) > 0) {
893 wait_cond_interruptible_timeout(port->port_write_wait,
894 port->port_dev == NULL
895 || gs_buf_data_avail(port->port_write_buf) == 0,
896 &port->port_lock, flags, GS_CLOSE_TIMEOUT * HZ);
897 }
898
899 /* free disconnected port on final close */
900 /* (might have happened during the above sleep) */
901 if (port->port_dev == NULL) {
902 kfree(port);
903 goto exit;
904 }
905
906 gs_buf_clear(port->port_write_buf);
907
908 tty->driver_data = NULL;
909 port->port_tty = NULL;
910 port->port_in_use = 0;
911
912 gs_debug("gs_close: (%d,%p,%p) completed\n",
913 port->port_num, tty, file);
914
915exit:
916 spin_unlock_irqrestore(&port->port_lock, flags);
917 up(sem);
918}
919
920/*
921 * gs_write
922 */
923static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
924{
925 unsigned long flags;
926 struct gs_port *port = tty->driver_data;
927 int ret;
928
929 if (port == NULL) {
930 printk(KERN_ERR "gs_write: NULL port pointer\n");
931 return -EIO;
932 }
933
934 gs_debug("gs_write: (%d,%p) writing %d bytes\n", port->port_num, tty,
935 count);
936
937 if (count == 0)
938 return 0;
939
940 spin_lock_irqsave(&port->port_lock, flags);
941
942 if (port->port_dev == NULL) {
943 printk(KERN_ERR "gs_write: (%d,%p) port is not connected\n",
944 port->port_num, tty);
945 ret = -EIO;
946 goto exit;
947 }
948
949 if (port->port_open_count == 0) {
950 printk(KERN_ERR "gs_write: (%d,%p) port is closed\n",
951 port->port_num, tty);
952 ret = -EBADF;
953 goto exit;
954 }
955
956 count = gs_buf_put(port->port_write_buf, buf, count);
957
958 spin_unlock_irqrestore(&port->port_lock, flags);
959
960 gs_send(gs_device);
961
962 gs_debug("gs_write: (%d,%p) wrote %d bytes\n", port->port_num, tty,
963 count);
964
965 return count;
966
967exit:
968 spin_unlock_irqrestore(&port->port_lock, flags);
969 return ret;
970}
971
972/*
973 * gs_put_char
974 */
975static void gs_put_char(struct tty_struct *tty, unsigned char ch)
976{
977 unsigned long flags;
978 struct gs_port *port = tty->driver_data;
979
980 if (port == NULL) {
981 printk(KERN_ERR "gs_put_char: NULL port pointer\n");
982 return;
983 }
984
 985	gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p, %p, %p\n",
		port->port_num, tty, ch, __builtin_return_address(0),
		__builtin_return_address(1), __builtin_return_address(2));
986
987 spin_lock_irqsave(&port->port_lock, flags);
988
989 if (port->port_dev == NULL) {
990 printk(KERN_ERR "gs_put_char: (%d,%p) port is not connected\n",
991 port->port_num, tty);
992 goto exit;
993 }
994
995 if (port->port_open_count == 0) {
996 printk(KERN_ERR "gs_put_char: (%d,%p) port is closed\n",
997 port->port_num, tty);
998 goto exit;
999 }
1000
1001 gs_buf_put(port->port_write_buf, &ch, 1);
1002
1003exit:
1004 spin_unlock_irqrestore(&port->port_lock, flags);
1005}
1006
1007/*
1008 * gs_flush_chars
1009 */
1010static void gs_flush_chars(struct tty_struct *tty)
1011{
1012 unsigned long flags;
1013 struct gs_port *port = tty->driver_data;
1014
1015 if (port == NULL) {
1016 printk(KERN_ERR "gs_flush_chars: NULL port pointer\n");
1017 return;
1018 }
1019
1020 gs_debug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
1021
1022 spin_lock_irqsave(&port->port_lock, flags);
1023
1024 if (port->port_dev == NULL) {
1025 printk(KERN_ERR
1026 "gs_flush_chars: (%d,%p) port is not connected\n",
1027 port->port_num, tty);
1028 goto exit;
1029 }
1030
1031 if (port->port_open_count == 0) {
1032 printk(KERN_ERR "gs_flush_chars: (%d,%p) port is closed\n",
1033 port->port_num, tty);
1034 goto exit;
1035 }
1036
1037 spin_unlock_irqrestore(&port->port_lock, flags);
1038
1039 gs_send(gs_device);
1040
1041 return;
1042
1043exit:
1044 spin_unlock_irqrestore(&port->port_lock, flags);
1045}
1046
1047/*
1048 * gs_write_room
1049 */
1050static int gs_write_room(struct tty_struct *tty)
1051{
1052
1053 int room = 0;
1054 unsigned long flags;
1055 struct gs_port *port = tty->driver_data;
1056
1057
1058 if (port == NULL)
1059 return 0;
1060
1061 spin_lock_irqsave(&port->port_lock, flags);
1062
1063 if (port->port_dev != NULL && port->port_open_count > 0
1064 && port->port_write_buf != NULL)
1065 room = gs_buf_space_avail(port->port_write_buf);
1066
1067 spin_unlock_irqrestore(&port->port_lock, flags);
1068
1069 gs_debug("gs_write_room: (%d,%p) room=%d\n",
1070 port->port_num, tty, room);
1071
1072 return room;
1073}
1074
1075/*
1076 * gs_chars_in_buffer
1077 */
1078static int gs_chars_in_buffer(struct tty_struct *tty)
1079{
1080 int chars = 0;
1081 unsigned long flags;
1082 struct gs_port *port = tty->driver_data;
1083
1084 if (port == NULL)
1085 return 0;
1086
1087 spin_lock_irqsave(&port->port_lock, flags);
1088
1089 if (port->port_dev != NULL && port->port_open_count > 0
1090 && port->port_write_buf != NULL)
1091 chars = gs_buf_data_avail(port->port_write_buf);
1092
1093 spin_unlock_irqrestore(&port->port_lock, flags);
1094
1095 gs_debug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
1096 port->port_num, tty, chars);
1097
1098 return chars;
1099}
1100
1101/*
1102 * gs_throttle
1103 */
1104static void gs_throttle(struct tty_struct *tty)
1105{
1106}
1107
1108/*
1109 * gs_unthrottle
1110 */
1111static void gs_unthrottle(struct tty_struct *tty)
1112{
1113}
1114
1115/*
1116 * gs_break
1117 */
1118static void gs_break(struct tty_struct *tty, int break_state)
1119{
1120}
1121
1122/*
1123 * gs_ioctl
1124 */
1125static int gs_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
1126{
1127 struct gs_port *port = tty->driver_data;
1128
1129 if (port == NULL) {
1130 printk(KERN_ERR "gs_ioctl: NULL port pointer\n");
1131 return -EIO;
1132 }
1133
1134 gs_debug("gs_ioctl: (%d,%p,%p) cmd=0x%4.4x, arg=%lu\n",
1135 port->port_num, tty, file, cmd, arg);
1136
1137 /* handle ioctls */
1138
1139 /* could not handle ioctl */
1140 return -ENOIOCTLCMD;
1141}
1142
1143/*
1144 * gs_set_termios
1145 */
1146static void gs_set_termios(struct tty_struct *tty, struct termios *old)
1147{
1148}
1149
1150/*
1151 * gs_send
1152 *
1153 * This function finds available write requests, calls
1154 * gs_send_packet to fill them with data, and continues until
1155 * either there are no more write requests available or there
1156 * is no more data to send.  It runs whenever data arrives or
1157 * write requests become available.
1158 */
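/*
 * In short, the write path looks like this: gs_write() and gs_put_char()
 * copy data into port->port_write_buf via gs_buf_put(); gs_send() then
 * pulls free requests off dev->dev_req_list, has gs_send_packet() fill
 * each one from that buffer, and queues it on the bulk IN endpoint.
 * When a request completes, gs_write_complete() returns it to the list
 * and calls gs_send() again, so transmission continues until the write
 * buffer drains.
 */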
1159static int gs_send(struct gs_dev *dev)
1160{
1161 int ret,len;
1162 unsigned long flags;
1163 struct usb_ep *ep;
1164 struct usb_request *req;
1165 struct gs_req_entry *req_entry;
1166
1167 if (dev == NULL) {
1168 printk(KERN_ERR "gs_send: NULL device pointer\n");
1169 return -ENODEV;
1170 }
1171
1172 spin_lock_irqsave(&dev->dev_lock, flags);
1173
1174 ep = dev->dev_in_ep;
1175
1176 while(!list_empty(&dev->dev_req_list)) {
1177
1178 req_entry = list_entry(dev->dev_req_list.next,
1179 struct gs_req_entry, re_entry);
1180
1181 req = req_entry->re_req;
1182
1183 len = gs_send_packet(dev, req->buf, ep->maxpacket);
1184
1185 if (len > 0) {
1186 gs_debug_level(3, "gs_send: len=%d, 0x%2.2x 0x%2.2x 0x%2.2x ...\n", len, *((unsigned char *)req->buf), *((unsigned char *)req->buf+1), *((unsigned char *)req->buf+2));
1187 list_del(&req_entry->re_entry);
1188 req->length = len;
1189 if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
1190 printk(KERN_ERR
1191 "gs_send: cannot queue read request, ret=%d\n",
1192 ret);
1193 break;
1194 }
1195 } else {
1196 break;
1197 }
1198
1199 }
1200
1201 spin_unlock_irqrestore(&dev->dev_lock, flags);
1202
1203 return 0;
1204}
1205
1206/*
1207 * gs_send_packet
1208 *
1209 * If there is data to send, a packet is built in the given
1210 * buffer and the size is returned. If there is no data to
1211 * send, 0 is returned. If there is any error a negative
1212 * error number is returned.
1213 *
1214 * Called during USB completion routine, on interrupt time.
1215 *
1216 * We assume that disconnect will not happen until all completion
1217 * routines have completed, so we can assume that the dev_port
1218 * array does not change during the lifetime of this function.
1219 */
1220static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size)
1221{
1222 unsigned int len;
1223 struct gs_port *port;
1224
1225 /* TEMPORARY -- only port 0 is supported right now */
1226 port = dev->dev_port[0];
1227
1228 if (port == NULL) {
1229 printk(KERN_ERR
1230 "gs_send_packet: port=%d, NULL port pointer\n",
1231 0);
1232 return -EIO;
1233 }
1234
1235 spin_lock(&port->port_lock);
1236
1237 len = gs_buf_data_avail(port->port_write_buf);
1238 if (len < size)
1239 size = len;
1240
1241 if (size == 0)
1242 goto exit;
1243
1244 size = gs_buf_get(port->port_write_buf, packet, size);
1245
1246 if (port->port_tty)
1247 wake_up_interruptible(&port->port_tty->write_wait);
1248
1249exit:
1250 spin_unlock(&port->port_lock);
1251 return size;
1252}
1253
1254/*
1255 * gs_recv_packet
1256 *
1257 * Called for each USB packet received. Reads the packet
1258 * header and stuffs the data in the appropriate tty buffer.
1259 * Returns 0 if successful, or a negative error number.
1260 *
1261 * Called during USB completion routine, on interrupt time.
1262 *
1263 * We assume that disconnect will not happen until all completion
1264 * routines have completed, so we can assume that the dev_port
1265 * array does not change during the lifetime of this function.
1266 */
1267static int gs_recv_packet(struct gs_dev *dev, char *packet, unsigned int size)
1268{
1269 unsigned int len;
1270 struct gs_port *port;
1271 int ret;
1272
1273 /* TEMPORARY -- only port 0 is supported right now */
1274 port = dev->dev_port[0];
1275
1276 if (port == NULL) {
1277 printk(KERN_ERR "gs_recv_packet: port=%d, NULL port pointer\n",
1278 0);
1279 return -EIO;
1280 }
1281
1282 spin_lock(&port->port_lock);
1283
1284 if (port->port_open_count == 0) {
1285 printk(KERN_ERR "gs_recv_packet: port=%d, port is closed\n",
1286 port->port_num);
1287 ret = -EIO;
1288 goto exit;
1289 }
1290
1291 if (port->port_tty == NULL) {
1292 printk(KERN_ERR "gs_recv_packet: port=%d, NULL tty pointer\n",
1293 port->port_num);
1294 ret = -EIO;
1295 goto exit;
1296 }
1297
1298 if (port->port_tty->magic != TTY_MAGIC) {
1299 printk(KERN_ERR "gs_recv_packet: port=%d, bad tty magic\n",
1300 port->port_num);
1301 ret = -EIO;
1302 goto exit;
1303 }
1304
1305 len = (unsigned int)(TTY_FLIPBUF_SIZE - port->port_tty->flip.count);
1306 if (len < size)
1307 size = len;
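	/* any packet bytes beyond the available flip buffer space are dropped */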
1308
1309 if (size > 0) {
1310 memcpy(port->port_tty->flip.char_buf_ptr, packet, size);
1311 port->port_tty->flip.char_buf_ptr += size;
1312 port->port_tty->flip.count += size;
1313 tty_flip_buffer_push(port->port_tty);
1314 wake_up_interruptible(&port->port_tty->read_wait);
1315 }
1316
1317 ret = 0;
1318
1319exit:
1320 spin_unlock(&port->port_lock);
1321 return ret;
1322}
1323
1324/*
1325 * gs_read_complete
1326 */
1327static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
1328{
1329 int ret;
1330 struct gs_dev *dev = ep->driver_data;
1331
1332 if (dev == NULL) {
1333 printk(KERN_ERR "gs_read_complete: NULL device pointer\n");
1334 return;
1335 }
1336
1337 switch(req->status) {
1338 case 0:
1339 /* normal completion */
1340 gs_recv_packet(dev, req->buf, req->actual);
1341requeue:
1342 req->length = ep->maxpacket;
1343 if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
1344 printk(KERN_ERR
1345 "gs_read_complete: cannot queue read request, ret=%d\n",
1346 ret);
1347 }
1348 break;
1349
1350 case -ESHUTDOWN:
1351 /* disconnect */
1352 gs_debug("gs_read_complete: shutdown\n");
1353 gs_free_req(ep, req);
1354 break;
1355
1356 default:
1357 /* unexpected */
1358 printk(KERN_ERR
1359 "gs_read_complete: unexpected status error, status=%d\n",
1360 req->status);
1361 goto requeue;
1362 break;
1363 }
1364}
1365
1366/*
1367 * gs_write_complete
1368 */
1369static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
1370{
1371 struct gs_dev *dev = ep->driver_data;
1372 struct gs_req_entry *gs_req = req->context;
1373
1374 if (dev == NULL) {
1375 printk(KERN_ERR "gs_write_complete: NULL device pointer\n");
1376 return;
1377 }
1378
1379 switch(req->status) {
1380 case 0:
1381 /* normal completion */
1382requeue:
1383 if (gs_req == NULL) {
1384 printk(KERN_ERR
1385 "gs_write_complete: NULL request pointer\n");
1386 return;
1387 }
1388
1389 spin_lock(&dev->dev_lock);
1390 list_add(&gs_req->re_entry, &dev->dev_req_list);
1391 spin_unlock(&dev->dev_lock);
1392
1393 gs_send(dev);
1394
1395 break;
1396
1397 case -ESHUTDOWN:
1398 /* disconnect */
1399 gs_debug("gs_write_complete: shutdown\n");
1400 gs_free_req(ep, req);
1401 break;
1402
1403 default:
1404 printk(KERN_ERR
1405 "gs_write_complete: unexpected status error, status=%d\n",
1406 req->status);
1407 goto requeue;
1408 break;
1409 }
1410}
1411
1412/* Gadget Driver */
1413
1414/*
1415 * gs_bind
1416 *
1417 * Called on module load. Allocates and initializes the device
1418 * structure and a control request.
1419 */
1420static int gs_bind(struct usb_gadget *gadget)
1421{
1422 int ret;
1423 struct usb_ep *ep;
1424 struct gs_dev *dev;
1425
1426 /* device specific */
1427 if (gadget_is_net2280(gadget)) {
1428 gs_device_desc.bcdDevice =
1429 __constant_cpu_to_le16(GS_VERSION_NUM|0x0001);
1430 } else if (gadget_is_pxa(gadget)) {
1431 gs_device_desc.bcdDevice =
1432 __constant_cpu_to_le16(GS_VERSION_NUM|0x0002);
1433 } else if (gadget_is_sh(gadget)) {
1434 gs_device_desc.bcdDevice =
1435 __constant_cpu_to_le16(GS_VERSION_NUM|0x0003);
1436 /* sh doesn't support multiple interfaces or configs */
1437 use_acm = 0;
1438 } else if (gadget_is_sa1100(gadget)) {
1439 gs_device_desc.bcdDevice =
1440 __constant_cpu_to_le16(GS_VERSION_NUM|0x0004);
1441 /* sa1100 doesn't support necessary endpoints */
1442 use_acm = 0;
1443 } else if (gadget_is_goku(gadget)) {
1444 gs_device_desc.bcdDevice =
1445 __constant_cpu_to_le16(GS_VERSION_NUM|0x0005);
1446 } else if (gadget_is_mq11xx(gadget)) {
1447 gs_device_desc.bcdDevice =
1448 __constant_cpu_to_le16(GS_VERSION_NUM|0x0006);
1449 } else if (gadget_is_omap(gadget)) {
1450 gs_device_desc.bcdDevice =
1451 __constant_cpu_to_le16(GS_VERSION_NUM|0x0007);
1452 } else if (gadget_is_lh7a40x(gadget)) {
1453 gs_device_desc.bcdDevice =
1454 __constant_cpu_to_le16(GS_VERSION_NUM|0x0008);
1455 } else if (gadget_is_n9604(gadget)) {
1456 gs_device_desc.bcdDevice =
1457 __constant_cpu_to_le16(GS_VERSION_NUM|0x0009);
1458 } else if (gadget_is_pxa27x(gadget)) {
1459 gs_device_desc.bcdDevice =
1460 __constant_cpu_to_le16(GS_VERSION_NUM|0x0011);
1461 } else if (gadget_is_s3c2410(gadget)) {
1462 gs_device_desc.bcdDevice =
1463 __constant_cpu_to_le16(GS_VERSION_NUM|0x0012);
1464 } else if (gadget_is_at91(gadget)) {
1465 gs_device_desc.bcdDevice =
1466 __constant_cpu_to_le16(GS_VERSION_NUM|0x0013);
1467 } else {
1468 printk(KERN_WARNING "gs_bind: controller '%s' not recognized\n",
1469 gadget->name);
1470 /* unrecognized, but safe unless bulk is REALLY quirky */
1471 gs_device_desc.bcdDevice =
1472 __constant_cpu_to_le16(GS_VERSION_NUM|0x0099);
1473 }
1474
1475 usb_ep_autoconfig_reset(gadget);
1476
1477 ep = usb_ep_autoconfig(gadget, &gs_fullspeed_in_desc);
1478 if (!ep)
1479 goto autoconf_fail;
1480 EP_IN_NAME = ep->name;
1481 ep->driver_data = ep; /* claim the endpoint */
1482
1483 ep = usb_ep_autoconfig(gadget, &gs_fullspeed_out_desc);
1484 if (!ep)
1485 goto autoconf_fail;
1486 EP_OUT_NAME = ep->name;
1487 ep->driver_data = ep; /* claim the endpoint */
1488
1489 if (use_acm) {
1490 ep = usb_ep_autoconfig(gadget, &gs_fullspeed_notify_desc);
1491 if (!ep) {
1492 printk(KERN_ERR "gs_bind: cannot run ACM on %s\n", gadget->name);
1493 goto autoconf_fail;
1494 }
1495 gs_device_desc.idProduct = __constant_cpu_to_le16(
1496 GS_CDC_PRODUCT_ID);
1497 EP_NOTIFY_NAME = ep->name;
1498 ep->driver_data = ep; /* claim the endpoint */
1499 }
1500
1501 gs_device_desc.bDeviceClass = use_acm
1502 ? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC;
1503 gs_device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
1504
1505#ifdef CONFIG_USB_GADGET_DUALSPEED
1506 gs_qualifier_desc.bDeviceClass = use_acm
1507 ? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC;
1508 /* assume ep0 uses the same packet size for both speeds */
1509 gs_qualifier_desc.bMaxPacketSize0 = gs_device_desc.bMaxPacketSize0;
1510 /* assume endpoints are dual-speed */
1511 gs_highspeed_notify_desc.bEndpointAddress =
1512 gs_fullspeed_notify_desc.bEndpointAddress;
1513 gs_highspeed_in_desc.bEndpointAddress =
1514 gs_fullspeed_in_desc.bEndpointAddress;
1515 gs_highspeed_out_desc.bEndpointAddress =
1516 gs_fullspeed_out_desc.bEndpointAddress;
1517#endif /* CONFIG_USB_GADGET_DUALSPEED */
1518
1519 usb_gadget_set_selfpowered(gadget);
1520
1521 if (gadget->is_otg) {
1522 gs_otg_descriptor.bmAttributes |= USB_OTG_HNP;
1523 gs_bulk_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1524 gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1525 }
1526
1527 gs_device = dev = kmalloc(sizeof(struct gs_dev), GFP_KERNEL);
1528 if (dev == NULL)
1529 return -ENOMEM;
1530
1531 snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s",
1532 system_utsname.sysname, system_utsname.release,
1533 gadget->name);
1534
1535 memset(dev, 0, sizeof(struct gs_dev));
1536 dev->dev_gadget = gadget;
1537 spin_lock_init(&dev->dev_lock);
1538 INIT_LIST_HEAD(&dev->dev_req_list);
1539 set_gadget_data(gadget, dev);
1540
1541 if ((ret=gs_alloc_ports(dev, GFP_KERNEL)) != 0) {
1542 printk(KERN_ERR "gs_bind: cannot allocate ports\n");
1543 gs_unbind(gadget);
1544 return ret;
1545 }
1546
1547 /* preallocate control response and buffer */
1548 dev->dev_ctrl_req = gs_alloc_req(gadget->ep0, GS_MAX_DESC_LEN,
1549 GFP_KERNEL);
1550 if (dev->dev_ctrl_req == NULL) {
1551 gs_unbind(gadget);
1552 return -ENOMEM;
1553 }
1554 dev->dev_ctrl_req->complete = gs_setup_complete;
1555
1556 gadget->ep0->driver_data = dev;
1557
1558 printk(KERN_INFO "gs_bind: %s %s bound\n",
1559 GS_LONG_NAME, GS_VERSION_STR);
1560
1561 return 0;
1562
1563autoconf_fail:
1564 printk(KERN_ERR "gs_bind: cannot autoconfigure on %s\n", gadget->name);
1565 return -ENODEV;
1566}
1567
1568/*
1569 * gs_unbind
1570 *
1571 * Called on module unload. Frees the control request and device
1572 * structure.
1573 */
1574static void gs_unbind(struct usb_gadget *gadget)
1575{
1576 struct gs_dev *dev = get_gadget_data(gadget);
1577
1578 gs_device = NULL;
1579
1580 /* read/write requests already freed, only control request remains */
1581 if (dev != NULL) {
1582 if (dev->dev_ctrl_req != NULL) {
1583 gs_free_req(gadget->ep0, dev->dev_ctrl_req);
1584 dev->dev_ctrl_req = NULL;
1585 }
1586 gs_free_ports(dev);
1587 kfree(dev);
1588 set_gadget_data(gadget, NULL);
1589 }
1590
1591 printk(KERN_INFO "gs_unbind: %s %s unbound\n", GS_LONG_NAME,
1592 GS_VERSION_STR);
1593}
1594
1595/*
1596 * gs_setup
1597 *
1598 * Implements all the control endpoint functionality that's not
1599 * handled in hardware or the hardware driver.
1600 *
1601 * Returns the size of the data sent to the host, or a negative
1602 * error number.
1603 */
1604static int gs_setup(struct usb_gadget *gadget,
1605 const struct usb_ctrlrequest *ctrl)
1606{
1607 int ret = -EOPNOTSUPP;
1608 struct gs_dev *dev = get_gadget_data(gadget);
1609 struct usb_request *req = dev->dev_ctrl_req;
1610 u16 wIndex = ctrl->wIndex;
1611 u16 wValue = ctrl->wValue;
1612 u16 wLength = ctrl->wLength;
1613
1614 switch (ctrl->bRequestType & USB_TYPE_MASK) {
1615 case USB_TYPE_STANDARD:
1616 ret = gs_setup_standard(gadget,ctrl);
1617 break;
1618
1619 case USB_TYPE_CLASS:
1620 ret = gs_setup_class(gadget,ctrl);
1621 break;
1622
1623 default:
1624 printk(KERN_ERR "gs_setup: unknown request, type=%02x, request=%02x, value=%04x, index=%04x, length=%d\n",
1625 ctrl->bRequestType, ctrl->bRequest,
1626 wValue, wIndex, wLength);
1627 break;
1628 }
1629
1630 /* respond with data transfer before status phase? */
1631 if (ret >= 0) {
1632 req->length = ret;
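		/* ask the controller driver to append a zero length packet
		 * when the IN data stage is shorter than wLength but ends on
		 * an exact multiple of ep0's maxpacket, so the host can tell
		 * that the transfer is complete
		 */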
1633 req->zero = ret < wLength
1634 && (ret % gadget->ep0->maxpacket) == 0;
1635 ret = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
1636 if (ret < 0) {
1637 printk(KERN_ERR "gs_setup: cannot queue response, ret=%d\n",
1638 ret);
1639 req->status = 0;
1640 gs_setup_complete(gadget->ep0, req);
1641 }
1642 }
1643
1644 /* device either stalls (ret < 0) or reports success */
1645 return ret;
1646}
1647
1648static int gs_setup_standard(struct usb_gadget *gadget,
1649 const struct usb_ctrlrequest *ctrl)
1650{
1651 int ret = -EOPNOTSUPP;
1652 struct gs_dev *dev = get_gadget_data(gadget);
1653 struct usb_request *req = dev->dev_ctrl_req;
1654 u16 wIndex = ctrl->wIndex;
1655 u16 wValue = ctrl->wValue;
1656 u16 wLength = ctrl->wLength;
1657
1658 switch (ctrl->bRequest) {
1659 case USB_REQ_GET_DESCRIPTOR:
1660 if (ctrl->bRequestType != USB_DIR_IN)
1661 break;
1662
1663 switch (wValue >> 8) {
1664 case USB_DT_DEVICE:
1665 ret = min(wLength,
1666 (u16)sizeof(struct usb_device_descriptor));
1667 memcpy(req->buf, &gs_device_desc, ret);
1668 break;
1669
1670#ifdef CONFIG_USB_GADGET_DUALSPEED
1671 case USB_DT_DEVICE_QUALIFIER:
1672 if (!gadget->is_dualspeed)
1673 break;
1674 ret = min(wLength,
1675 (u16)sizeof(struct usb_qualifier_descriptor));
1676 memcpy(req->buf, &gs_qualifier_desc, ret);
1677 break;
1678
1679 case USB_DT_OTHER_SPEED_CONFIG:
1680 if (!gadget->is_dualspeed)
1681 break;
1682 /* fall through */
1683#endif /* CONFIG_USB_GADGET_DUALSPEED */
1684 case USB_DT_CONFIG:
1685 ret = gs_build_config_buf(req->buf, gadget->speed,
1686 wValue >> 8, wValue & 0xff,
1687 gadget->is_otg);
1688 if (ret >= 0)
1689 ret = min(wLength, (u16)ret);
1690 break;
1691
1692 case USB_DT_STRING:
1693 /* wIndex == language code. */
1694 ret = usb_gadget_get_string(&gs_string_table,
1695 wValue & 0xff, req->buf);
1696 if (ret >= 0)
1697 ret = min(wLength, (u16)ret);
1698 break;
1699 }
1700 break;
1701
1702 case USB_REQ_SET_CONFIGURATION:
1703 if (ctrl->bRequestType != 0)
1704 break;
1705 spin_lock(&dev->dev_lock);
1706 ret = gs_set_config(dev, wValue);
1707 spin_unlock(&dev->dev_lock);
1708 break;
1709
1710 case USB_REQ_GET_CONFIGURATION:
1711 if (ctrl->bRequestType != USB_DIR_IN)
1712 break;
1713 *(u8 *)req->buf = dev->dev_config;
1714 ret = min(wLength, (u16)1);
1715 break;
1716
1717 case USB_REQ_SET_INTERFACE:
1718 if (ctrl->bRequestType != USB_RECIP_INTERFACE
1719 || !dev->dev_config
1720 || wIndex >= GS_MAX_NUM_INTERFACES)
1721 break;
1722 if (dev->dev_config == GS_BULK_CONFIG_ID
1723 && wIndex != GS_BULK_INTERFACE_ID)
1724 break;
1725 /* no alternate interface settings */
1726 if (wValue != 0)
1727 break;
1728 spin_lock(&dev->dev_lock);
1729 /* PXA hardware partially handles SET_INTERFACE;
1730 * we need to kluge around that interference. */
1731 if (gadget_is_pxa(gadget)) {
1732 ret = gs_set_config(dev, use_acm ?
1733 GS_ACM_CONFIG_ID : GS_BULK_CONFIG_ID);
1734 goto set_interface_done;
1735 }
1736 if (dev->dev_config != GS_BULK_CONFIG_ID
1737 && wIndex == GS_CONTROL_INTERFACE_ID) {
1738 if (dev->dev_notify_ep) {
1739 usb_ep_disable(dev->dev_notify_ep);
1740 usb_ep_enable(dev->dev_notify_ep, dev->dev_notify_ep_desc);
1741 }
1742 } else {
1743 usb_ep_disable(dev->dev_in_ep);
1744 usb_ep_disable(dev->dev_out_ep);
1745 usb_ep_enable(dev->dev_in_ep, dev->dev_in_ep_desc);
1746 usb_ep_enable(dev->dev_out_ep, dev->dev_out_ep_desc);
1747 }
1748 ret = 0;
1749set_interface_done:
1750 spin_unlock(&dev->dev_lock);
1751 break;
1752
1753 case USB_REQ_GET_INTERFACE:
1754 if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)
1755 || dev->dev_config == GS_NO_CONFIG_ID)
1756 break;
1757 if (wIndex >= GS_MAX_NUM_INTERFACES
1758 || (dev->dev_config == GS_BULK_CONFIG_ID
1759 && wIndex != GS_BULK_INTERFACE_ID)) {
1760 ret = -EDOM;
1761 break;
1762 }
1763 /* no alternate interface settings */
1764 *(u8 *)req->buf = 0;
1765 ret = min(wLength, (u16)1);
1766 break;
1767
1768 default:
1769 printk(KERN_ERR "gs_setup: unknown standard request, type=%02x, request=%02x, value=%04x, index=%04x, length=%d\n",
1770 ctrl->bRequestType, ctrl->bRequest,
1771 wValue, wIndex, wLength);
1772 break;
1773 }
1774
1775 return ret;
1776}
1777
1778static int gs_setup_class(struct usb_gadget *gadget,
1779 const struct usb_ctrlrequest *ctrl)
1780{
1781 int ret = -EOPNOTSUPP;
1782 struct gs_dev *dev = get_gadget_data(gadget);
1783 struct gs_port *port = dev->dev_port[0]; /* ACM only has one port */
1784 struct usb_request *req = dev->dev_ctrl_req;
1785 u16 wIndex = ctrl->wIndex;
1786 u16 wValue = ctrl->wValue;
1787 u16 wLength = ctrl->wLength;
1788
1789 switch (ctrl->bRequest) {
1790 case USB_CDC_REQ_SET_LINE_CODING:
1791 ret = min(wLength,
1792 (u16)sizeof(struct usb_cdc_line_coding));
1793 if (port) {
1794 spin_lock(&port->port_lock);
1795 memcpy(&port->port_line_coding, req->buf, ret);
1796 spin_unlock(&port->port_lock);
1797 }
1798 break;
1799
1800 case USB_CDC_REQ_GET_LINE_CODING:
1801 port = dev->dev_port[0]; /* ACM only has one port */
1802 ret = min(wLength,
1803 (u16)sizeof(struct usb_cdc_line_coding));
1804 if (port) {
1805 spin_lock(&port->port_lock);
1806 memcpy(req->buf, &port->port_line_coding, ret);
1807 spin_unlock(&port->port_lock);
1808 }
1809 break;
1810
1811 case USB_CDC_REQ_SET_CONTROL_LINE_STATE:
1812 ret = 0;
1813 break;
1814
1815 default:
1816 printk(KERN_ERR "gs_setup: unknown class request, type=%02x, request=%02x, value=%04x, index=%04x, length=%d\n",
1817 ctrl->bRequestType, ctrl->bRequest,
1818 wValue, wIndex, wLength);
1819 break;
1820 }
1821
1822 return ret;
1823}
1824
1825/*
1826 * gs_setup_complete
1827 */
1828static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req)
1829{
1830 if (req->status || req->actual != req->length) {
1831 printk(KERN_ERR "gs_setup_complete: status error, status=%d, actual=%d, length=%d\n",
1832 req->status, req->actual, req->length);
1833 }
1834}
1835
1836/*
1837 * gs_disconnect
1838 *
1839 * Called when the device is disconnected. Frees the closed
1840 * ports and disconnects open ports. Open ports will be freed
1841 * on close. Then reallocates the ports for the next connection.
1842 */
1843static void gs_disconnect(struct usb_gadget *gadget)
1844{
1845 unsigned long flags;
1846 struct gs_dev *dev = get_gadget_data(gadget);
1847
1848 spin_lock_irqsave(&dev->dev_lock, flags);
1849
1850 gs_reset_config(dev);
1851
1852 /* free closed ports and disconnect open ports */
1853 /* (open ports will be freed when closed) */
1854 gs_free_ports(dev);
1855
1856 /* re-allocate ports for the next connection */
1857 if (gs_alloc_ports(dev, GFP_ATOMIC) != 0)
1858 printk(KERN_ERR "gs_disconnect: cannot re-allocate ports\n");
1859
1860 spin_unlock_irqrestore(&dev->dev_lock, flags);
1861
1862 printk(KERN_INFO "gs_disconnect: %s disconnected\n", GS_LONG_NAME);
1863}
1864
1865/*
1866 * gs_set_config
1867 *
1868 * Configures the device by enabling device specific
1869 * optimizations, setting up the endpoints, allocating
1870 * read and write requests and queuing read requests.
1871 *
1872 * The device lock must be held when calling this function.
1873 */
1874static int gs_set_config(struct gs_dev *dev, unsigned config)
1875{
1876 int i;
1877 int ret = 0;
1878 struct usb_gadget *gadget = dev->dev_gadget;
1879 struct usb_ep *ep;
1880 struct usb_endpoint_descriptor *ep_desc;
1881 struct usb_request *req;
1882 struct gs_req_entry *req_entry;
1883
1884 if (dev == NULL) {
1885 printk(KERN_ERR "gs_set_config: NULL device pointer\n");
1886 return 0;
1887 }
1888
1889 if (config == dev->dev_config)
1890 return 0;
1891
1892 gs_reset_config(dev);
1893
1894 switch (config) {
1895 case GS_NO_CONFIG_ID:
1896 return 0;
1897 case GS_BULK_CONFIG_ID:
1898 if (use_acm)
1899 return -EINVAL;
1900 /* device specific optimizations */
1901 if (gadget_is_net2280(gadget))
1902 net2280_set_fifo_mode(gadget, 1);
1903 break;
1904 case GS_ACM_CONFIG_ID:
1905 if (!use_acm)
1906 return -EINVAL;
1907 /* device specific optimizations */
1908 if (gadget_is_net2280(gadget))
1909 net2280_set_fifo_mode(gadget, 1);
1910 break;
1911 default:
1912 return -EINVAL;
1913 }
1914
1915 dev->dev_config = config;
1916
1917 gadget_for_each_ep(ep, gadget) {
1918
1919 if (EP_NOTIFY_NAME
1920 && strcmp(ep->name, EP_NOTIFY_NAME) == 0) {
1921 ep_desc = GS_SPEED_SELECT(
1922 gadget->speed == USB_SPEED_HIGH,
1923 &gs_highspeed_notify_desc,
1924 &gs_fullspeed_notify_desc);
1925 ret = usb_ep_enable(ep,ep_desc);
1926 if (ret == 0) {
1927 ep->driver_data = dev;
1928 dev->dev_notify_ep = ep;
1929 dev->dev_notify_ep_desc = ep_desc;
1930 } else {
1931 printk(KERN_ERR "gs_set_config: cannot enable notify endpoint %s, ret=%d\n",
1932 ep->name, ret);
1933 goto exit_reset_config;
1934 }
1935 }
1936
1937 else if (strcmp(ep->name, EP_IN_NAME) == 0) {
1938 ep_desc = GS_SPEED_SELECT(
1939 gadget->speed == USB_SPEED_HIGH,
1940 &gs_highspeed_in_desc,
1941 &gs_fullspeed_in_desc);
1942 ret = usb_ep_enable(ep,ep_desc);
1943 if (ret == 0) {
1944 ep->driver_data = dev;
1945 dev->dev_in_ep = ep;
1946 dev->dev_in_ep_desc = ep_desc;
1947 } else {
1948 printk(KERN_ERR "gs_set_config: cannot enable in endpoint %s, ret=%d\n",
1949 ep->name, ret);
1950 goto exit_reset_config;
1951 }
1952 }
1953
1954 else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
1955 ep_desc = GS_SPEED_SELECT(
1956 gadget->speed == USB_SPEED_HIGH,
1957 &gs_highspeed_out_desc,
1958 &gs_fullspeed_out_desc);
1959 ret = usb_ep_enable(ep,ep_desc);
1960 if (ret == 0) {
1961 ep->driver_data = dev;
1962 dev->dev_out_ep = ep;
1963 dev->dev_out_ep_desc = ep_desc;
1964 } else {
1965 printk(KERN_ERR "gs_set_config: cannot enable out endpoint %s, ret=%d\n",
1966 ep->name, ret);
1967 goto exit_reset_config;
1968 }
1969 }
1970
1971 }
1972
1973 if (dev->dev_in_ep == NULL || dev->dev_out_ep == NULL
1974 || (config != GS_BULK_CONFIG_ID && dev->dev_notify_ep == NULL)) {
1975 printk(KERN_ERR "gs_set_config: cannot find endpoints\n");
1976 ret = -ENODEV;
1977 goto exit_reset_config;
1978 }
1979
1980 /* allocate and queue read requests */
1981 ep = dev->dev_out_ep;
1982 for (i=0; i<read_q_size && ret == 0; i++) {
1983 if ((req=gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC))) {
1984 req->complete = gs_read_complete;
1985 if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
1986 printk(KERN_ERR "gs_set_config: cannot queue read request, ret=%d\n",
1987 ret);
1988 }
1989 } else {
1990 printk(KERN_ERR "gs_set_config: cannot allocate read requests\n");
1991 ret = -ENOMEM;
1992 goto exit_reset_config;
1993 }
1994 }
1995
1996 /* allocate write requests, and put on free list */
1997 ep = dev->dev_in_ep;
1998 for (i=0; i<write_q_size; i++) {
1999 if ((req_entry=gs_alloc_req_entry(ep, ep->maxpacket, GFP_ATOMIC))) {
2000 req_entry->re_req->complete = gs_write_complete;
2001 list_add(&req_entry->re_entry, &dev->dev_req_list);
2002 } else {
2003 printk(KERN_ERR "gs_set_config: cannot allocate write requests\n");
2004 ret = -ENOMEM;
2005 goto exit_reset_config;
2006 }
2007 }
2008
2009 printk(KERN_INFO "gs_set_config: %s configured, %s speed %s config\n",
2010 GS_LONG_NAME,
2011 gadget->speed == USB_SPEED_HIGH ? "high" : "full",
2012 config == GS_BULK_CONFIG_ID ? "BULK" : "CDC-ACM");
2013
2014 return 0;
2015
2016exit_reset_config:
2017 gs_reset_config(dev);
2018 return ret;
2019}
2020
2021/*
2022 * gs_reset_config
2023 *
2024 * Mark the device as not configured, disable all endpoints,
2025 * which forces completion of pending I/O and frees queued
2026 * requests, and free the remaining write requests on the
2027 * free list.
2028 *
2029 * The device lock must be held when calling this function.
2030 */
2031static void gs_reset_config(struct gs_dev *dev)
2032{
2033 struct gs_req_entry *req_entry;
2034
2035 if (dev == NULL) {
2036 printk(KERN_ERR "gs_reset_config: NULL device pointer\n");
2037 return;
2038 }
2039
2040 if (dev->dev_config == GS_NO_CONFIG_ID)
2041 return;
2042
2043 dev->dev_config = GS_NO_CONFIG_ID;
2044
2045 /* free write requests on the free list */
2046 while(!list_empty(&dev->dev_req_list)) {
2047 req_entry = list_entry(dev->dev_req_list.next,
2048 struct gs_req_entry, re_entry);
2049 list_del(&req_entry->re_entry);
2050 gs_free_req_entry(dev->dev_in_ep, req_entry);
2051 }
2052
2053 /* disable endpoints, forcing completion of pending i/o; */
2054 /* completion handlers free their requests in this case */
2055 if (dev->dev_notify_ep) {
2056 usb_ep_disable(dev->dev_notify_ep);
2057 dev->dev_notify_ep = NULL;
2058 }
2059 if (dev->dev_in_ep) {
2060 usb_ep_disable(dev->dev_in_ep);
2061 dev->dev_in_ep = NULL;
2062 }
2063 if (dev->dev_out_ep) {
2064 usb_ep_disable(dev->dev_out_ep);
2065 dev->dev_out_ep = NULL;
2066 }
2067}
2068
2069/*
2070 * gs_build_config_buf
2071 *
2072 * Builds the config descriptors in the given buffer and returns the
2073 * length, or a negative error number.
2074 */
2075static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
2076 u8 type, unsigned int index, int is_otg)
2077{
2078 int len;
2079 int high_speed;
2080 const struct usb_config_descriptor *config_desc;
2081 const struct usb_descriptor_header **function;
2082
2083 if (index >= gs_device_desc.bNumConfigurations)
2084 return -EINVAL;
2085
2086 /* other speed switches high and full speed */
2087 high_speed = (speed == USB_SPEED_HIGH);
2088 if (type == USB_DT_OTHER_SPEED_CONFIG)
2089 high_speed = !high_speed;
2090
2091 if (use_acm) {
2092 config_desc = &gs_acm_config_desc;
2093 function = GS_SPEED_SELECT(high_speed,
2094 gs_acm_highspeed_function,
2095 gs_acm_fullspeed_function);
2096 } else {
2097 config_desc = &gs_bulk_config_desc;
2098 function = GS_SPEED_SELECT(high_speed,
2099 gs_bulk_highspeed_function,
2100 gs_bulk_fullspeed_function);
2101 }
2102
2103 /* for now, don't advertise srp-only devices */
2104 if (!is_otg)
2105 function++;
2106
2107 len = usb_gadget_config_buf(config_desc, buf, GS_MAX_DESC_LEN, function);
2108 if (len < 0)
2109 return len;
2110
2111 ((struct usb_config_descriptor *)buf)->bDescriptorType = type;
2112
2113 return len;
2114}
2115
2116/*
2117 * gs_alloc_req
2118 *
2119 * Allocate a usb_request and its buffer. Returns a pointer to the
2120 * usb_request or NULL if there is an error.
2121 */
2122static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len, int kmalloc_flags)
2123{
2124 struct usb_request *req;
2125
2126 if (ep == NULL)
2127 return NULL;
2128
2129 req = usb_ep_alloc_request(ep, kmalloc_flags);
2130
2131 if (req != NULL) {
2132 req->length = len;
2133 req->buf = kmalloc(len, kmalloc_flags);
2134 if (req->buf == NULL) {
2135 usb_ep_free_request(ep, req);
2136 return NULL;
2137 }
2138 }
2139
2140 return req;
2141}
2142
2143/*
2144 * gs_free_req
2145 *
2146 * Free a usb_request and its buffer.
2147 */
2148static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
2149{
2150 if (ep != NULL && req != NULL) {
2151 kfree(req->buf);
2152 usb_ep_free_request(ep, req);
2153 }
2154}
2155
2156/*
2157 * gs_alloc_req_entry
2158 *
2159 * Allocates a request and its buffer, using the given
2160 * endpoint, buffer len, and kmalloc flags.
2161 */
2162static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len, int kmalloc_flags)
2163{
2164 struct gs_req_entry *req;
2165
2166 req = kmalloc(sizeof(struct gs_req_entry), kmalloc_flags);
2167 if (req == NULL)
2168 return NULL;
2169
2170 req->re_req = gs_alloc_req(ep, len, kmalloc_flags);
2171 if (req->re_req == NULL) {
2172 kfree(req);
2173 return NULL;
2174 }
2175
2176 req->re_req->context = req;
2177
2178 return req;
2179}
2180
2181/*
2182 * gs_free_req_entry
2183 *
2184 * Frees a request and its buffer.
2185 */
2186static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req)
2187{
2188 if (ep != NULL && req != NULL) {
2189 if (req->re_req != NULL)
2190 gs_free_req(ep, req->re_req);
2191 kfree(req);
2192 }
2193}
2194
2195/*
2196 * gs_alloc_ports
2197 *
2198 * Allocate all ports and set the gs_dev struct to point to them.
2199 * Return 0 if successful, or a negative error number.
2200 *
2201 * The device lock is normally held when calling this function.
2202 */
2203static int gs_alloc_ports(struct gs_dev *dev, int kmalloc_flags)
2204{
2205 int i;
2206 struct gs_port *port;
2207
2208 if (dev == NULL)
2209 return -EIO;
2210
2211 for (i=0; i<GS_NUM_PORTS; i++) {
2212 if ((port=(struct gs_port *)kmalloc(sizeof(struct gs_port), kmalloc_flags)) == NULL)
2213 return -ENOMEM;
2214
2215 memset(port, 0, sizeof(struct gs_port));
2216 port->port_dev = dev;
2217 port->port_num = i;
2218 port->port_line_coding.dwDTERate = cpu_to_le32(GS_DEFAULT_DTE_RATE);
2219 port->port_line_coding.bCharFormat = GS_DEFAULT_CHAR_FORMAT;
2220 port->port_line_coding.bParityType = GS_DEFAULT_PARITY;
2221 port->port_line_coding.bDataBits = GS_DEFAULT_DATA_BITS;
2222 spin_lock_init(&port->port_lock);
2223 init_waitqueue_head(&port->port_write_wait);
2224
2225 dev->dev_port[i] = port;
2226 }
2227
2228 return 0;
2229}
2230
2231/*
2232 * gs_free_ports
2233 *
2234 * Free all closed ports. Open ports are disconnected by
2235 * freeing their write buffers, setting their device pointers
2236 * and the pointers to them in the device to NULL. These
2237 * ports will be freed when closed.
2238 *
2239 * The device lock is normally held when calling this function.
2240 */
2241static void gs_free_ports(struct gs_dev *dev)
2242{
2243 int i;
2244 unsigned long flags;
2245 struct gs_port *port;
2246
2247 if (dev == NULL)
2248 return;
2249
2250 for (i=0; i<GS_NUM_PORTS; i++) {
2251 if ((port=dev->dev_port[i]) != NULL) {
2252 dev->dev_port[i] = NULL;
2253
2254 spin_lock_irqsave(&port->port_lock, flags);
2255
2256 if (port->port_write_buf != NULL) {
2257 gs_buf_free(port->port_write_buf);
2258 port->port_write_buf = NULL;
2259 }
2260
2261 if (port->port_open_count > 0 || port->port_in_use) {
2262 port->port_dev = NULL;
2263 wake_up_interruptible(&port->port_write_wait);
2264 if (port->port_tty) {
2265 wake_up_interruptible(&port->port_tty->read_wait);
2266 wake_up_interruptible(&port->port_tty->write_wait);
2267 }
2268 spin_unlock_irqrestore(&port->port_lock, flags);
2269 } else {
2270 spin_unlock_irqrestore(&port->port_lock, flags);
2271 kfree(port);
2272 }
2273
2274 }
2275 }
2276}
2277
2278/* Circular Buffer */
2279
2280/*
2281 * gs_buf_alloc
2282 *
2283 * Allocate a circular buffer and all associated memory.
2284 */
2285static struct gs_buf *gs_buf_alloc(unsigned int size, int kmalloc_flags)
2286{
2287 struct gs_buf *gb;
2288
2289 if (size == 0)
2290 return NULL;
2291
2292 gb = (struct gs_buf *)kmalloc(sizeof(struct gs_buf), kmalloc_flags);
2293 if (gb == NULL)
2294 return NULL;
2295
2296 gb->buf_buf = kmalloc(size, kmalloc_flags);
2297 if (gb->buf_buf == NULL) {
2298 kfree(gb);
2299 return NULL;
2300 }
2301
2302 gb->buf_size = size;
2303 gb->buf_get = gb->buf_put = gb->buf_buf;
2304
2305 return gb;
2306}
2307
2308/*
2309 * gs_buf_free
2310 *
2311 * Free the buffer and all associated memory.
2312 */
2313void gs_buf_free(struct gs_buf *gb)
2314{
2315 if (gb != NULL) {
2316 if (gb->buf_buf != NULL)
2317 kfree(gb->buf_buf);
2318 kfree(gb);
2319 }
2320}
2321
2322/*
2323 * gs_buf_clear
2324 *
2325 * Clear out all data in the circular buffer.
2326 */
2327void gs_buf_clear(struct gs_buf *gb)
2328{
2329 if (gb != NULL)
2330 gb->buf_get = gb->buf_put;
2331 /* equivalent to a get of all data available */
2332}
2333
2334/*
2335 * gs_buf_data_avail
2336 *
2337 * Return the number of bytes of data available in the circular
2338 * buffer.
2339 */
2340unsigned int gs_buf_data_avail(struct gs_buf *gb)
2341{
2342 if (gb != NULL)
2343 return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
2344 else
2345 return 0;
2346}
2347
2348/*
2349 * gs_buf_space_avail
2350 *
2351 * Return the number of bytes of space available in the circular
2352 * buffer.
2353 */
2354unsigned int gs_buf_space_avail(struct gs_buf *gb)
2355{
2356 if (gb != NULL)
2357 return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
2358 else
2359 return 0;
2360}
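/*
 * A quick worked example of the two calculations above, for a
 * hypothetical buffer with buf_size = 8: if buf_put is 5 bytes past
 * buf_buf and buf_get is 2 bytes past it, data_avail is
 * (8 + 5 - 2) % 8 = 3 and space_avail is (8 + 2 - 5 - 1) % 8 = 4.
 * One byte is always kept unused so a full buffer (put just behind get)
 * can be told apart from an empty one (put == get).
 */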
2361
2362/*
2363 * gs_buf_put
2364 *
2365 * Copy data from a user buffer and put it into the circular buffer.
2366 * Restrict to the amount of space available.
2367 *
2368 * Return the number of bytes copied.
2369 */
2370unsigned int gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
2371{
2372 unsigned int len;
2373
2374 if (gb == NULL)
2375 return 0;
2376
2377 len = gs_buf_space_avail(gb);
2378 if (count > len)
2379 count = len;
2380
2381 if (count == 0)
2382 return 0;
2383
2384 len = gb->buf_buf + gb->buf_size - gb->buf_put;
2385 if (count > len) {
2386 memcpy(gb->buf_put, buf, len);
2387 memcpy(gb->buf_buf, buf+len, count - len);
2388 gb->buf_put = gb->buf_buf + count - len;
2389 } else {
2390 memcpy(gb->buf_put, buf, count);
2391 if (count < len)
2392 gb->buf_put += count;
2393 else /* count == len */
2394 gb->buf_put = gb->buf_buf;
2395 }
2396
2397 return count;
2398}
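/*
 * Wraparound sketch for gs_buf_put() above, again assuming a buffer of
 * size 8 with enough free space: if buf_put is 6 bytes past buf_buf,
 * putting 4 bytes copies 2 bytes to the tail of the buffer, 2 bytes to
 * the start, and leaves buf_put 2 bytes past buf_buf.
 */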
2399
2400/*
2401 * gs_buf_get
2402 *
2403 * Get data from the circular buffer and copy to the given buffer.
2404 * Restrict to the amount of data available.
2405 *
2406 * Return the number of bytes copied.
2407 */
2408unsigned int gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
2409{
2410 unsigned int len;
2411
2412 if (gb == NULL)
2413 return 0;
2414
2415 len = gs_buf_data_avail(gb);
2416 if (count > len)
2417 count = len;
2418
2419 if (count == 0)
2420 return 0;
2421
2422 len = gb->buf_buf + gb->buf_size - gb->buf_get;
2423 if (count > len) {
2424 memcpy(buf, gb->buf_get, len);
2425 memcpy(buf+len, gb->buf_buf, count - len);
2426 gb->buf_get = gb->buf_buf + count - len;
2427 } else {
2428 memcpy(buf, gb->buf_get, count);
2429 if (count < len)
2430 gb->buf_get += count;
2431 else /* count == len */
2432 gb->buf_get = gb->buf_buf;
2433 }
2434
2435 return count;
2436}
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
new file mode 100644
index 000000000000..b1735767660b
--- /dev/null
+++ b/drivers/usb/gadget/usbstring.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright (C) 2003 David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU Lesser General Public License as published
6 * by the Free Software Foundation; either version 2.1 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/errno.h>
11#include <linux/kernel.h>
12#include <linux/list.h>
13#include <linux/string.h>
14#include <linux/device.h>
15#include <linux/init.h>
16
17#include <linux/usb_ch9.h>
18#include <linux/usb_gadget.h>
19
20#include <asm/unaligned.h>
21
22
23static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
24{
25 int count = 0;
26 u8 c;
27 u16 uchar;
28
29 /* this insists on correct encodings, though not minimal ones.
30 * BUT it currently rejects legit 4-byte UTF-8 code points,
31 * which need surrogate pairs. (Unicode 3.1 can use them.)
32 */
33 while (len != 0 && (c = (u8) *s++) != 0) {
34 if (unlikely(c & 0x80)) {
35 // 2-byte sequence:
36 // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
37 if ((c & 0xe0) == 0xc0) {
38 uchar = (c & 0x1f) << 6;
39
40 c = (u8) *s++;
41 if ((c & 0xc0) != 0x80)
42 goto fail;
43 c &= 0x3f;
44 uchar |= c;
45
46 // 3-byte sequence (most CJKV characters):
47 // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
48 } else if ((c & 0xf0) == 0xe0) {
49 uchar = (c & 0x0f) << 12;
50
51 c = (u8) *s++;
52 if ((c & 0xc0) != 0x80)
53 goto fail;
54 c &= 0x3f;
55 uchar |= c << 6;
56
57 c = (u8) *s++;
58 if ((c & 0xc0) != 0x80)
59 goto fail;
60 c &= 0x3f;
61 uchar |= c;
62
63 /* no bogus surrogates */
64 if (0xd800 <= uchar && uchar <= 0xdfff)
65 goto fail;
66
67 // 4-byte sequence (surrogate pairs, currently rare):
68 // 110110wwwwzzzzyy + 110111yyyyxxxxxx
69 // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
70 // (uuuuu = wwww + 1)
71 // FIXME accept the surrogate code points (only)
72
73 } else
74 goto fail;
75 } else
76 uchar = c;
77 put_unaligned (cpu_to_le16 (uchar), cp++);
78 count++;
79 len--;
80 }
81 return count;
82fail:
83 return -1;
84}
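/*
 * Example of the conversion above: the two-byte UTF-8 sequence
 * 0xc3 0xa9 (U+00E9, LATIN SMALL LETTER E WITH ACUTE) decodes as
 * uchar = (0x03 << 6) | 0x29 = 0x00e9, which is stored as the
 * little-endian 16-bit value 0xe9 0x00.
 */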
85
86
87/**
88 * usb_gadget_get_string - fill out a string descriptor
89 * @table: of c strings encoded using UTF-8
90 * @id: string id, from low byte of wValue in get string descriptor
91 * @buf: at least 256 bytes
92 *
93 * Finds the UTF-8 string matching the ID, and converts it into a
94 * string descriptor in utf16-le.
95 * Returns length of descriptor (always even) or negative errno
96 *
97 * If your driver needs strings in multiple languages, you'll probably
98 * "switch (wIndex) { ... }" in your ep0 string descriptor logic,
99 * using this routine after choosing which set of UTF-8 strings to use.
100 * Note that US-ASCII is a strict subset of UTF-8; any string bytes with
101 * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
102 * characters (which are also widely used in C strings).
103 */
104int
105usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
106{
107 struct usb_string *s;
108 int len;
109
110 /* descriptor 0 has the language id */
111 if (id == 0) {
112 buf [0] = 4;
113 buf [1] = USB_DT_STRING;
114 buf [2] = (u8) table->language;
115 buf [3] = (u8) (table->language >> 8);
116 return 4;
117 }
118 for (s = table->strings; s && s->s; s++)
119 if (s->id == id)
120 break;
121
122 /* unrecognized: stall. */
123 if (!s || !s->s)
124 return -EINVAL;
125
126 /* string descriptors have length, tag, then UTF16-LE text */
127 len = min ((size_t) 126, strlen (s->s));
128 memset (buf + 2, 0, 2 * len); /* zero all the bytes */
129 len = utf8_to_utf16le(s->s, (__le16 *)&buf[2], len);
130 if (len < 0)
131 return -EINVAL;
132 buf [0] = (len + 1) * 2;
133 buf [1] = USB_DT_STRING;
134 return buf [0];
135}
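/*
 * A minimal usage sketch (illustrative only, not built): a gadget
 * driver's ep0 GET_DESCRIPTOR(USB_DT_STRING) handling can call
 * usb_gadget_get_string() as below, much as gs_setup_standard() does
 * in serial.c above.  The table contents, string ids, and helper name
 * are assumptions made up for this example.
 */
#if 0
static struct usb_string example_strings [] = {
	{ 1, "Example Manufacturer", },
	{ 2, "Example Product", },
	{  }			/* end of list */
};

static struct usb_gadget_strings example_stringtab = {
	.language	= 0x0409,	/* en-us */
	.strings	= example_strings,
};

/* fill buf with the string descriptor for the low byte of wValue,
 * clamped to wLength; a negative return means "stall ep0"
 */
static int example_get_string_desc (u16 wValue, u16 wLength, u8 *buf)
{
	int len;

	len = usb_gadget_get_string (&example_stringtab, wValue & 0xff, buf);
	if (len < 0)
		return len;
	return min (wLength, (u16) len);
}
#endif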
136
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
new file mode 100644
index 000000000000..6e49432071a1
--- /dev/null
+++ b/drivers/usb/gadget/zero.c
@@ -0,0 +1,1357 @@
1/*
2 * zero.c -- Gadget Zero, for USB development
3 *
4 * Copyright (C) 2003-2004 David Brownell
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39/*
40 * Gadget Zero only needs two bulk endpoints, and is an example of how you
41 * can write a hardware-agnostic gadget driver running inside a USB device.
42 *
43 * Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't
44 * affect most of the driver.
45 *
46 * Use it with the Linux host/master side "usbtest" driver to get a basic
47 * functional test of your device-side usb stack, or with "usb-skeleton".
48 *
49 * It supports two similar configurations. One sinks whatever the usb host
50 * writes, and in return sources zeroes. The other loops whatever the host
51 * writes back, so the host can read it. Module options include:
52 *
53 * buflen=N default N=4096, buffer size used
54 * qlen=N default N=32, how many buffers in the loopback queue
55 * loopdefault default false, list loopback config first
56 *
57 * Many drivers will only have one configuration, letting them be much
58 * simpler if they also don't support high speed operation (which this
59 * driver does support).
60 */
61
62#define DEBUG 1
63// #define VERBOSE
64
65#include <linux/config.h>
66#include <linux/module.h>
67#include <linux/kernel.h>
68#include <linux/delay.h>
69#include <linux/ioport.h>
70#include <linux/sched.h>
71#include <linux/slab.h>
72#include <linux/smp_lock.h>
73#include <linux/errno.h>
74#include <linux/init.h>
75#include <linux/timer.h>
76#include <linux/list.h>
77#include <linux/interrupt.h>
78#include <linux/utsname.h>
79#include <linux/device.h>
80#include <linux/moduleparam.h>
81
82#include <asm/byteorder.h>
83#include <asm/io.h>
84#include <asm/irq.h>
85#include <asm/system.h>
86#include <asm/unaligned.h>
87
88#include <linux/usb_ch9.h>
89#include <linux/usb_gadget.h>
90
91#include "gadget_chips.h"
92
93
94/*-------------------------------------------------------------------------*/
95
96#define DRIVER_VERSION "St Patrick's Day 2004"
97
98static const char shortname [] = "zero";
99static const char longname [] = "Gadget Zero";
100
101static const char source_sink [] = "source and sink data";
102static const char loopback [] = "loop input to output";
103
104/*-------------------------------------------------------------------------*/
105
106/*
107 * driver assumes self-powered hardware, and
108 * has no way for users to trigger remote wakeup.
109 *
110 * this version autoconfigures as much as possible,
111 * which is reasonable for most "bulk-only" drivers.
112 */
113static const char *EP_IN_NAME; /* source */
114static const char *EP_OUT_NAME; /* sink */
115
116/*-------------------------------------------------------------------------*/
117
118/* big enough to hold our biggest descriptor */
119#define USB_BUFSIZ 256
120
121struct zero_dev {
122 spinlock_t lock;
123 struct usb_gadget *gadget;
124 struct usb_request *req; /* for control responses */
125
126 /* when configured, we have one of two configs:
127 * - source data (in to host) and sink it (out from host)
128 * - or loop it back (out from host back in to host)
129 */
130 u8 config;
131 struct usb_ep *in_ep, *out_ep;
132
133 /* autoresume timer */
134 struct timer_list resume;
135};
136
137#define xprintk(d,level,fmt,args...) \
138 dev_printk(level , &(d)->gadget->dev , fmt , ## args)
139
140#ifdef DEBUG
141#define DBG(dev,fmt,args...) \
142 xprintk(dev , KERN_DEBUG , fmt , ## args)
143#else
144#define DBG(dev,fmt,args...) \
145 do { } while (0)
146#endif /* DEBUG */
147
148#ifdef VERBOSE
149#define VDBG DBG
150#else
151#define VDBG(dev,fmt,args...) \
152 do { } while (0)
153#endif /* VERBOSE */
154
155#define ERROR(dev,fmt,args...) \
156 xprintk(dev , KERN_ERR , fmt , ## args)
157#define WARN(dev,fmt,args...) \
158 xprintk(dev , KERN_WARNING , fmt , ## args)
159#define INFO(dev,fmt,args...) \
160 xprintk(dev , KERN_INFO , fmt , ## args)
161
162/*-------------------------------------------------------------------------*/
163
164static unsigned buflen = 4096;
165static unsigned qlen = 32;
166static unsigned pattern = 0;
167
168module_param (buflen, uint, S_IRUGO|S_IWUSR);
169module_param (qlen, uint, S_IRUGO|S_IWUSR);
170module_param (pattern, uint, S_IRUGO|S_IWUSR);
171
172/*
173 * if it's nonzero, autoresume says how many seconds to wait
174 * before trying to wake up the host after suspend.
175 */
176static unsigned autoresume = 0;
177module_param (autoresume, uint, 0);
178
179/*
180 * Normally the "loopback" configuration is second (index 1) so
181 * it's not the default. Here's where to change that order, to
182 * work better with hosts where config changes are problematic.
183 * Or controllers (like superh) that only support one config.
184 */
185static int loopdefault = 0;
186
187module_param (loopdefault, bool, S_IRUGO|S_IWUSR);
188
189/*-------------------------------------------------------------------------*/
190
191/* Thanks to NetChip Technologies for donating this product ID.
192 *
193 * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
194 * Instead: allocate your own, using normal USB-IF procedures.
195 */
196#ifndef CONFIG_USB_ZERO_HNPTEST
197#define DRIVER_VENDOR_NUM 0x0525 /* NetChip */
198#define DRIVER_PRODUCT_NUM 0xa4a0 /* Linux-USB "Gadget Zero" */
199#else
200#define DRIVER_VENDOR_NUM 0x1a0a /* OTG test device IDs */
201#define DRIVER_PRODUCT_NUM 0xbadd
202#endif
203
204/*-------------------------------------------------------------------------*/
205
206/*
207 * DESCRIPTORS ... most are static, but strings and (full)
208 * configuration descriptors are built on demand.
209 */
210
211#define STRING_MANUFACTURER 25
212#define STRING_PRODUCT 42
213#define STRING_SERIAL 101
214#define STRING_SOURCE_SINK 250
215#define STRING_LOOPBACK 251
216
217/*
218 * This device advertises two configurations; these numbers work
219 * on a pxa250 as well as more flexible hardware.
220 */
221#define CONFIG_SOURCE_SINK 3
222#define CONFIG_LOOPBACK 2
223
224static struct usb_device_descriptor
225device_desc = {
226 .bLength = sizeof device_desc,
227 .bDescriptorType = USB_DT_DEVICE,
228
229 .bcdUSB = __constant_cpu_to_le16 (0x0200),
230 .bDeviceClass = USB_CLASS_VENDOR_SPEC,
231
232 .idVendor = __constant_cpu_to_le16 (DRIVER_VENDOR_NUM),
233 .idProduct = __constant_cpu_to_le16 (DRIVER_PRODUCT_NUM),
234 .iManufacturer = STRING_MANUFACTURER,
235 .iProduct = STRING_PRODUCT,
236 .iSerialNumber = STRING_SERIAL,
237 .bNumConfigurations = 2,
238};
239
240static struct usb_config_descriptor
241source_sink_config = {
242 .bLength = sizeof source_sink_config,
243 .bDescriptorType = USB_DT_CONFIG,
244
245 /* compute wTotalLength on the fly */
246 .bNumInterfaces = 1,
247 .bConfigurationValue = CONFIG_SOURCE_SINK,
248 .iConfiguration = STRING_SOURCE_SINK,
249 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
250 .bMaxPower = 1, /* self-powered */
251};
252
253static struct usb_config_descriptor
254loopback_config = {
255 .bLength = sizeof loopback_config,
256 .bDescriptorType = USB_DT_CONFIG,
257
258 /* compute wTotalLength on the fly */
259 .bNumInterfaces = 1,
260 .bConfigurationValue = CONFIG_LOOPBACK,
261 .iConfiguration = STRING_LOOPBACK,
262 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
263 .bMaxPower = 1, /* self-powered */
264};
265
266static struct usb_otg_descriptor
267otg_descriptor = {
268 .bLength = sizeof otg_descriptor,
269 .bDescriptorType = USB_DT_OTG,
270
271 .bmAttributes = USB_OTG_SRP,
272};
273
274/* one interface in each configuration */
275
276static const struct usb_interface_descriptor
277source_sink_intf = {
278 .bLength = sizeof source_sink_intf,
279 .bDescriptorType = USB_DT_INTERFACE,
280
281 .bNumEndpoints = 2,
282 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
283 .iInterface = STRING_SOURCE_SINK,
284};
285
286static const struct usb_interface_descriptor
287loopback_intf = {
288 .bLength = sizeof loopback_intf,
289 .bDescriptorType = USB_DT_INTERFACE,
290
291 .bNumEndpoints = 2,
292 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
293 .iInterface = STRING_LOOPBACK,
294};
295
296/* two full speed bulk endpoints; their use is config-dependent */
297
298static struct usb_endpoint_descriptor
299fs_source_desc = {
300 .bLength = USB_DT_ENDPOINT_SIZE,
301 .bDescriptorType = USB_DT_ENDPOINT,
302
303 .bEndpointAddress = USB_DIR_IN,
304 .bmAttributes = USB_ENDPOINT_XFER_BULK,
305};
306
307static struct usb_endpoint_descriptor
308fs_sink_desc = {
309 .bLength = USB_DT_ENDPOINT_SIZE,
310 .bDescriptorType = USB_DT_ENDPOINT,
311
312 .bEndpointAddress = USB_DIR_OUT,
313 .bmAttributes = USB_ENDPOINT_XFER_BULK,
314};
315
316static const struct usb_descriptor_header *fs_source_sink_function [] = {
317 (struct usb_descriptor_header *) &otg_descriptor,
318 (struct usb_descriptor_header *) &source_sink_intf,
319 (struct usb_descriptor_header *) &fs_sink_desc,
320 (struct usb_descriptor_header *) &fs_source_desc,
321 NULL,
322};
323
324static const struct usb_descriptor_header *fs_loopback_function [] = {
325 (struct usb_descriptor_header *) &otg_descriptor,
326 (struct usb_descriptor_header *) &loopback_intf,
327 (struct usb_descriptor_header *) &fs_sink_desc,
328 (struct usb_descriptor_header *) &fs_source_desc,
329 NULL,
330};
331
332#ifdef CONFIG_USB_GADGET_DUALSPEED
333
334/*
335 * usb 2.0 devices need to expose both high speed and full speed
336 * descriptors, unless they only run at full speed.
337 *
338 * that means alternate endpoint descriptors (bigger packets)
339 * and a "device qualifier" ... plus more construction options
340 * for the config descriptor.
341 */
342
343static struct usb_endpoint_descriptor
344hs_source_desc = {
345 .bLength = USB_DT_ENDPOINT_SIZE,
346 .bDescriptorType = USB_DT_ENDPOINT,
347
348 .bmAttributes = USB_ENDPOINT_XFER_BULK,
349 .wMaxPacketSize = __constant_cpu_to_le16 (512),
350};
351
352static struct usb_endpoint_descriptor
353hs_sink_desc = {
354 .bLength = USB_DT_ENDPOINT_SIZE,
355 .bDescriptorType = USB_DT_ENDPOINT,
356
357 .bmAttributes = USB_ENDPOINT_XFER_BULK,
358 .wMaxPacketSize = __constant_cpu_to_le16 (512),
359};
360
361static struct usb_qualifier_descriptor
362dev_qualifier = {
363 .bLength = sizeof dev_qualifier,
364 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
365
366 .bcdUSB = __constant_cpu_to_le16 (0x0200),
367 .bDeviceClass = USB_CLASS_VENDOR_SPEC,
368
369 .bNumConfigurations = 2,
370};
371
372static const struct usb_descriptor_header *hs_source_sink_function [] = {
373 (struct usb_descriptor_header *) &otg_descriptor,
374 (struct usb_descriptor_header *) &source_sink_intf,
375 (struct usb_descriptor_header *) &hs_source_desc,
376 (struct usb_descriptor_header *) &hs_sink_desc,
377 NULL,
378};
379
380static const struct usb_descriptor_header *hs_loopback_function [] = {
381 (struct usb_descriptor_header *) &otg_descriptor,
382 (struct usb_descriptor_header *) &loopback_intf,
383 (struct usb_descriptor_header *) &hs_source_desc,
384 (struct usb_descriptor_header *) &hs_sink_desc,
385 NULL,
386};
387
388/* maxpacket and other transfer characteristics vary by speed. */
389#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs))
390
391#else
392
393/* if there's no high speed support, maxpacket doesn't change. */
394#define ep_desc(g,hs,fs) fs
395
396#endif /* !CONFIG_USB_GADGET_DUALSPEED */
397
398static char manufacturer [50];
399static char serial [40];
400
401/* static strings, in UTF-8 */
402static struct usb_string strings [] = {
403 { STRING_MANUFACTURER, manufacturer, },
404 { STRING_PRODUCT, longname, },
405 { STRING_SERIAL, serial, },
406 { STRING_LOOPBACK, loopback, },
407 { STRING_SOURCE_SINK, source_sink, },
408 { } /* end of list */
409};
410
411static struct usb_gadget_strings stringtab = {
412 .language = 0x0409, /* en-us */
413 .strings = strings,
414};
415
416/*
417 * config descriptors are also handcrafted. these must agree with code
418 * that sets configurations, and with code managing interfaces and their
419 * altsettings. other complexity may come from:
420 *
421 * - high speed support, including "other speed config" rules
422 * - multiple configurations
423 * - interfaces with alternate settings
424 * - embedded class or vendor-specific descriptors
425 *
426 * this handles high speed, and has a second config that could as easily
427 * have been an alternate interface setting (on most hardware).
428 *
429 * NOTE: to demonstrate (and test) more USB capabilities, this driver
430 * should include an altsetting to test interrupt transfers, including
431 * high bandwidth modes at high speed. (Maybe work like Intel's test
432 * device?)
433 */
434static int
435config_buf (struct usb_gadget *gadget,
436 u8 *buf, u8 type, unsigned index)
437{
438 int is_source_sink;
439 int len;
440 const struct usb_descriptor_header **function;
441#ifdef CONFIG_USB_GADGET_DUALSPEED
442 int hs = (gadget->speed == USB_SPEED_HIGH);
443#endif
444
445 /* two configurations will always be index 0 and index 1 */
446 if (index > 1)
447 return -EINVAL;
448 is_source_sink = loopdefault ? (index == 1) : (index == 0);
449
450#ifdef CONFIG_USB_GADGET_DUALSPEED
451 if (type == USB_DT_OTHER_SPEED_CONFIG)
452 hs = !hs;
453 if (hs)
454 function = is_source_sink
455 ? hs_source_sink_function
456 : hs_loopback_function;
457 else
458#endif
459 function = is_source_sink
460 ? fs_source_sink_function
461 : fs_loopback_function;
462
463 /* for now, don't advertise srp-only devices */
464 if (!gadget->is_otg)
465 function++;
466
467 len = usb_gadget_config_buf (is_source_sink
468 ? &source_sink_config
469 : &loopback_config,
470 buf, USB_BUFSIZ, function);
471 if (len < 0)
472 return len;
473 ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
474 return len;
475}
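/*
 * Sketch of a hypothetical helper, not in the original driver: this is how
 * zero_setup() below uses config_buf() to answer GET_DESCRIPTOR requests.
 * The wValue high byte carries the descriptor type, the low byte the
 * configuration index, and the reply is clamped to wLength.
 */
static int get_config_reply (struct usb_gadget *gadget,
		struct usb_request *req, u16 w_value, u16 w_length)
{
	int	len;

	len = config_buf (gadget, req->buf,
			w_value >> 8,		/* USB_DT_CONFIG or OTHER_SPEED */
			w_value & 0xff);	/* index 0 or 1; else -EINVAL */
	if (len < 0)
		return len;
	return min (w_length, (u16) len);
}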
476
477/*-------------------------------------------------------------------------*/
478
479static struct usb_request *
480alloc_ep_req (struct usb_ep *ep, unsigned length)
481{
482 struct usb_request *req;
483
484 req = usb_ep_alloc_request (ep, GFP_ATOMIC);
485 if (req) {
486 req->length = length;
487 req->buf = usb_ep_alloc_buffer (ep, length,
488 &req->dma, GFP_ATOMIC);
489 if (!req->buf) {
490 usb_ep_free_request (ep, req);
491 req = NULL;
492 }
493 }
494 return req;
495}
496
497static void free_ep_req (struct usb_ep *ep, struct usb_request *req)
498{
499 if (req->buf)
500 usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
501 usb_ep_free_request (ep, req);
502}
503
504/*-------------------------------------------------------------------------*/
505
506/* optionally require specific source/sink data patterns */
507
508static int
509check_read_data (
510 struct zero_dev *dev,
511 struct usb_ep *ep,
512 struct usb_request *req
513)
514{
515 unsigned i;
516 u8 *buf = req->buf;
517
518 for (i = 0; i < req->actual; i++, buf++) {
519 switch (pattern) {
520 /* all-zeroes has no synchronization issues */
521 case 0:
522 if (*buf == 0)
523 continue;
524 break;
525 /* mod63 stays in sync with short-terminated transfers,
526 * or otherwise when host and gadget agree on how large
527 * each usb transfer request should be. resync is done
528 * with set_interface or set_config.
529 */
530 case 1:
531 if (*buf == (u8)(i % 63))
532 continue;
533 break;
534 }
535 ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf);
536 usb_ep_set_halt (ep);
537 return -EINVAL;
538 }
539 return 0;
540}
541
542static void
543reinit_write_data (
544 struct zero_dev *dev,
545 struct usb_ep *ep,
546 struct usb_request *req
547)
548{
549 unsigned i;
550 u8 *buf = req->buf;
551
552 switch (pattern) {
553 case 0:
554 memset (req->buf, 0, req->length);
555 break;
556 case 1:
557 for (i = 0; i < req->length; i++)
558 *buf++ = (u8) (i % 63);
559 break;
560 }
561}
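/*
 * Worked illustration, not driver code: with pattern 1, byte i of every
 * request is (i % 63), so a 512-byte high speed packet carries 0..62 eight
 * times followed by 0..7, and the next request restarts at zero.  A host
 * side test could generate matching data with a hypothetical helper like
 * this before writing it to the OUT endpoint.
 */
static void fill_mod63 (u8 *buf, unsigned length)
{
	unsigned	i;

	for (i = 0; i < length; i++)
		buf [i] = (u8) (i % 63);
}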
562
563/* if there is only one request in the queue, there'll always be an
564 * irq delay between end of one request and start of the next.
565 * that prevents using hardware dma queues.
566 */
567static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
568{
569 struct zero_dev *dev = ep->driver_data;
570 int status = req->status;
571
572 switch (status) {
573
574 case 0: /* normal completion? */
575 if (ep == dev->out_ep)
576 check_read_data (dev, ep, req);
577 else
578 reinit_write_data (dev, ep, req);
579 break;
580
581 /* this endpoint is normally active while we're configured */
582 case -ECONNABORTED: /* hardware forced ep reset */
583 case -ECONNRESET: /* request dequeued */
584 case -ESHUTDOWN: /* disconnect from host */
585 VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status,
586 req->actual, req->length);
587 if (ep == dev->out_ep)
588 check_read_data (dev, ep, req);
589 free_ep_req (ep, req);
590 return;
591
592 case -EOVERFLOW: /* buffer overrun on read means that
593 * we didn't provide a big enough
594 * buffer.
595 */
596 default:
597#if 1
598 DBG (dev, "%s complete --> %d, %d/%d\n", ep->name,
599 status, req->actual, req->length);
600#endif
601 case -EREMOTEIO: /* short read */
602 break;
603 }
604
605 status = usb_ep_queue (ep, req, GFP_ATOMIC);
606 if (status) {
607 ERROR (dev, "kill %s: resubmit %d bytes --> %d\n",
608 ep->name, req->length, status);
609 usb_ep_set_halt (ep);
610 /* FIXME recover later ... somehow */
611 }
612}
613
614static struct usb_request *
615source_sink_start_ep (struct usb_ep *ep, int gfp_flags)
616{
617 struct usb_request *req;
618 int status;
619
620 req = alloc_ep_req (ep, buflen);
621 if (!req)
622 return NULL;
623
624 memset (req->buf, 0, req->length);
625 req->complete = source_sink_complete;
626
627 if (strcmp (ep->name, EP_IN_NAME) == 0)
628 reinit_write_data (ep->driver_data, ep, req);
629
630 status = usb_ep_queue (ep, req, gfp_flags);
631 if (status) {
632 struct zero_dev *dev = ep->driver_data;
633
634 ERROR (dev, "start %s --> %d\n", ep->name, status);
635 free_ep_req (ep, req);
636 req = NULL;
637 }
638
639 return req;
640}
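/*
 * Sketch only, not part of the driver: as the comment above
 * source_sink_complete() notes, one outstanding request per endpoint leaves
 * an irq-sized gap between transfers.  A deeper queue could be primed like
 * this; "depth" is an illustrative parameter, not a module option of this
 * driver, and whether the data-pattern checks still make sense at depth > 1
 * depends on how the host sizes its transfers (see the mod63 comment).
 */
static int source_sink_start_ep_deep (struct usb_ep *ep, int gfp_flags,
		unsigned depth)
{
	unsigned	i;

	for (i = 0; i < depth; i++)
		if (!source_sink_start_ep (ep, gfp_flags))
			/* allocation or queueing failed; sketch doesn't distinguish */
			return -ENOMEM;
	return 0;
}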
641
642static int
643set_source_sink_config (struct zero_dev *dev, int gfp_flags)
644{
645 int result = 0;
646 struct usb_ep *ep;
647 struct usb_gadget *gadget = dev->gadget;
648
649 gadget_for_each_ep (ep, gadget) {
650 const struct usb_endpoint_descriptor *d;
651
652 /* one endpoint writes (sources) zeroes in (to the host) */
653 if (strcmp (ep->name, EP_IN_NAME) == 0) {
654 d = ep_desc (gadget, &hs_source_desc, &fs_source_desc);
655 result = usb_ep_enable (ep, d);
656 if (result == 0) {
657 ep->driver_data = dev;
658 if (source_sink_start_ep (ep, gfp_flags) != 0) {
659 dev->in_ep = ep;
660 continue;
661 }
662 usb_ep_disable (ep);
663 result = -EIO;
664 }
665
666 /* one endpoint reads (sinks) anything out (from the host) */
667 } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
668 d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc);
669 result = usb_ep_enable (ep, d);
670 if (result == 0) {
671 ep->driver_data = dev;
672 if (source_sink_start_ep (ep, gfp_flags) != 0) {
673 dev->out_ep = ep;
674 continue;
675 }
676 usb_ep_disable (ep);
677 result = -EIO;
678 }
679
680 /* ignore any other endpoints */
681 } else
682 continue;
683
684 /* stop on error */
685 ERROR (dev, "can't start %s, result %d\n", ep->name, result);
686 break;
687 }
688 if (result == 0)
689 DBG (dev, "buflen %d\n", buflen);
690
691 /* caller is responsible for cleanup on error */
692 return result;
693}
694
695/*-------------------------------------------------------------------------*/
696
697static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
698{
699 struct zero_dev *dev = ep->driver_data;
700 int status = req->status;
701
702 switch (status) {
703
704 case 0: /* normal completion? */
705 if (ep == dev->out_ep) {
706 /* loop this OUT packet back IN to the host */
707 req->zero = (req->actual < req->length);
708 req->length = req->actual;
709 status = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
710 if (status == 0)
711 return;
712
713 /* "should never get here" */
714 ERROR (dev, "can't loop %s to %s: %d\n",
715 ep->name, dev->in_ep->name,
716 status);
717 }
718
719 /* queue the buffer for some later OUT packet */
720 req->length = buflen;
721 status = usb_ep_queue (dev->out_ep, req, GFP_ATOMIC);
722 if (status == 0)
723 return;
724
725 /* "should never get here" */
726 /* FALLTHROUGH */
727
728 default:
729 ERROR (dev, "%s loop complete --> %d, %d/%d\n", ep->name,
730 status, req->actual, req->length);
731 /* FALLTHROUGH */
732
733 /* NOTE: since this driver doesn't maintain an explicit record
734 * of requests it submitted (just maintains qlen count), we
735 * rely on the hardware driver to clean up on disconnect or
736 * endpoint disable.
737 */
738 case -ECONNABORTED: /* hardware forced ep reset */
739 case -ECONNRESET: /* request dequeued */
740 case -ESHUTDOWN: /* disconnect from host */
741 free_ep_req (ep, req);
742 return;
743 }
744}
745
746static int
747set_loopback_config (struct zero_dev *dev, int gfp_flags)
748{
749 int result = 0;
750 struct usb_ep *ep;
751 struct usb_gadget *gadget = dev->gadget;
752
753 gadget_for_each_ep (ep, gadget) {
754 const struct usb_endpoint_descriptor *d;
755
756 /* one endpoint writes data back IN to the host */
757 if (strcmp (ep->name, EP_IN_NAME) == 0) {
758 d = ep_desc (gadget, &hs_source_desc, &fs_source_desc);
759 result = usb_ep_enable (ep, d);
760 if (result == 0) {
761 ep->driver_data = dev;
762 dev->in_ep = ep;
763 continue;
764 }
765
766 /* one endpoint just reads OUT packets */
767 } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
768 d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc);
769 result = usb_ep_enable (ep, d);
770 if (result == 0) {
771 ep->driver_data = dev;
772 dev->out_ep = ep;
773 continue;
774 }
775
776 /* ignore any other endpoints */
777 } else
778 continue;
779
780 /* stop on error */
781 ERROR (dev, "can't enable %s, result %d\n", ep->name, result);
782 break;
783 }
784
785 /* allocate a bunch of read buffers and queue them all at once.
786 * we buffer at most 'qlen' transfers; fewer if any need more
787 * than 'buflen' bytes each.
788 */
789 if (result == 0) {
790 struct usb_request *req;
791 unsigned i;
792
793 ep = dev->out_ep;
794 for (i = 0; i < qlen && result == 0; i++) {
795 req = alloc_ep_req (ep, buflen);
796 if (req) {
797 req->complete = loopback_complete;
798 result = usb_ep_queue (ep, req, GFP_ATOMIC);
799 if (result)
800 DBG (dev, "%s queue req --> %d\n",
801 ep->name, result);
802 } else
803 result = -ENOMEM;
804 }
805 }
806 if (result == 0)
807 DBG (dev, "qlen %d, buflen %d\n", qlen, buflen);
808
809 /* caller is responsible for cleanup on error */
810 return result;
811}
812
813/*-------------------------------------------------------------------------*/
814
815static void zero_reset_config (struct zero_dev *dev)
816{
817 if (dev->config == 0)
818 return;
819
820 DBG (dev, "reset config\n");
821
822 /* just disable endpoints, forcing completion of pending i/o.
823 * all our completion handlers free their requests in this case.
824 */
825 if (dev->in_ep) {
826 usb_ep_disable (dev->in_ep);
827 dev->in_ep = NULL;
828 }
829 if (dev->out_ep) {
830 usb_ep_disable (dev->out_ep);
831 dev->out_ep = NULL;
832 }
833 dev->config = 0;
834 del_timer (&dev->resume);
835}
836
837/* change our operational config. this code must agree with the code
838 * that returns config descriptors, and altsetting code.
839 *
840 * it's also responsible for power management interactions. some
841 * configurations might not work with our current power sources.
842 *
843 * note that some device controller hardware will constrain what this
844 * code can do, perhaps by disallowing more than one configuration or
845 * by limiting configuration choices (like the pxa2xx).
846 */
847static int
848zero_set_config (struct zero_dev *dev, unsigned number, int gfp_flags)
849{
850 int result = 0;
851 struct usb_gadget *gadget = dev->gadget;
852
853 if (number == dev->config)
854 return 0;
855
856 if (gadget_is_sa1100 (gadget) && dev->config) {
857 /* tx fifo is full, but we can't clear it...*/
858 INFO (dev, "can't change configurations\n");
859 return -ESPIPE;
860 }
861 zero_reset_config (dev);
862
863 switch (number) {
864 case CONFIG_SOURCE_SINK:
865 result = set_source_sink_config (dev, gfp_flags);
866 break;
867 case CONFIG_LOOPBACK:
868 result = set_loopback_config (dev, gfp_flags);
869 break;
870 default:
871 result = -EINVAL;
872 /* FALL THROUGH */
873 case 0:
874 return result;
875 }
876
877 if (!result && (!dev->in_ep || !dev->out_ep))
878 result = -ENODEV;
879 if (result)
880 zero_reset_config (dev);
881 else {
882 char *speed;
883
884 switch (gadget->speed) {
885 case USB_SPEED_LOW: speed = "low"; break;
886 case USB_SPEED_FULL: speed = "full"; break;
887 case USB_SPEED_HIGH: speed = "high"; break;
888 default: speed = "?"; break;
889 }
890
891 dev->config = number;
892 INFO (dev, "%s speed config #%d: %s\n", speed, number,
893 (number == CONFIG_SOURCE_SINK)
894 ? source_sink : loopback);
895 }
896 return result;
897}
898
899/*-------------------------------------------------------------------------*/
900
901static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req)
902{
903 if (req->status || req->actual != req->length)
904 DBG ((struct zero_dev *) ep->driver_data,
905 "setup complete --> %d, %d/%d\n",
906 req->status, req->actual, req->length);
907}
908
909/*
910 * The setup() callback implements all the ep0 functionality that's
911 * not handled lower down, in hardware or the hardware driver (like
912 * device and endpoint feature flags, and their status). It's all
913 * housekeeping for the gadget function we're implementing. Most of
914 * the work is in config-specific setup.
915 */
916static int
917zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
918{
919 struct zero_dev *dev = get_gadget_data (gadget);
920 struct usb_request *req = dev->req;
921 int value = -EOPNOTSUPP;
922 u16 w_index = ctrl->wIndex;
923 u16 w_value = ctrl->wValue;
924 u16 w_length = ctrl->wLength;
925
926 /* usually this stores reply data in the pre-allocated ep0 buffer,
927 * but config change events will reconfigure hardware.
928 */
929 req->zero = 0;
930 switch (ctrl->bRequest) {
931
932 case USB_REQ_GET_DESCRIPTOR:
933 if (ctrl->bRequestType != USB_DIR_IN)
934 goto unknown;
935 switch (w_value >> 8) {
936
937 case USB_DT_DEVICE:
938 value = min (w_length, (u16) sizeof device_desc);
939 memcpy (req->buf, &device_desc, value);
940 break;
941#ifdef CONFIG_USB_GADGET_DUALSPEED
942 case USB_DT_DEVICE_QUALIFIER:
943 if (!gadget->is_dualspeed)
944 break;
945 value = min (w_length, (u16) sizeof dev_qualifier);
946 memcpy (req->buf, &dev_qualifier, value);
947 break;
948
949 case USB_DT_OTHER_SPEED_CONFIG:
950 if (!gadget->is_dualspeed)
951 break;
952 // FALLTHROUGH
953#endif /* CONFIG_USB_GADGET_DUALSPEED */
954 case USB_DT_CONFIG:
955 value = config_buf (gadget, req->buf,
956 w_value >> 8,
957 w_value & 0xff);
958 if (value >= 0)
959 value = min (w_length, (u16) value);
960 break;
961
962 case USB_DT_STRING:
963 /* wIndex == language code.
964 * this driver only handles one language, you can
965 * add string tables for other languages, using
966 * any UTF-8 characters
967 */
968 value = usb_gadget_get_string (&stringtab,
969 w_value & 0xff, req->buf);
970 if (value >= 0)
971 value = min (w_length, (u16) value);
972 break;
973 }
974 break;
975
976 /* currently two configs, two speeds */
977 case USB_REQ_SET_CONFIGURATION:
978 if (ctrl->bRequestType != 0)
979 goto unknown;
980 if (gadget->a_hnp_support)
981 DBG (dev, "HNP available\n");
982 else if (gadget->a_alt_hnp_support)
983 DBG (dev, "HNP needs a different root port\n");
984 else
985 VDBG (dev, "HNP inactive\n");
986 spin_lock (&dev->lock);
987 value = zero_set_config (dev, w_value, GFP_ATOMIC);
988 spin_unlock (&dev->lock);
989 break;
990 case USB_REQ_GET_CONFIGURATION:
991 if (ctrl->bRequestType != USB_DIR_IN)
992 goto unknown;
993 *(u8 *)req->buf = dev->config;
994 value = min (w_length, (u16) 1);
995 break;
996
997 /* until we add altsetting support, or other interfaces,
998 * only 0/0 are possible. pxa2xx only supports 0/0 (poorly)
999 * and already killed pending endpoint I/O.
1000 */
1001 case USB_REQ_SET_INTERFACE:
1002 if (ctrl->bRequestType != USB_RECIP_INTERFACE)
1003 goto unknown;
1004 spin_lock (&dev->lock);
1005 if (dev->config && w_index == 0 && w_value == 0) {
1006 u8 config = dev->config;
1007
1008 /* resets interface configuration, forgets about
1009 * previous transaction state (queued bufs, etc)
1010 * and re-inits endpoint state (toggle etc)
1011 * no response queued, just zero status == success.
1012 * if we had more than one interface we couldn't
1013 * use this "reset the config" shortcut.
1014 */
1015 zero_reset_config (dev);
1016 zero_set_config (dev, config, GFP_ATOMIC);
1017 value = 0;
1018 }
1019 spin_unlock (&dev->lock);
1020 break;
1021 case USB_REQ_GET_INTERFACE:
1022 if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
1023 goto unknown;
1024 if (!dev->config)
1025 break;
1026 if (w_index != 0) {
1027 value = -EDOM;
1028 break;
1029 }
1030 *(u8 *)req->buf = 0;
1031 value = min (w_length, (u16) 1);
1032 break;
1033
1034 /*
1035 * These are the same vendor-specific requests supported by
1036 * Intel's USB 2.0 compliance test devices. We exceed that
1037 * device spec by allowing multiple-packet requests.
1038 */
1039 case 0x5b: /* control WRITE test -- fill the buffer */
1040 if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR))
1041 goto unknown;
1042 if (w_value || w_index)
1043 break;
1044 /* just read that many bytes into the buffer */
1045 if (w_length > USB_BUFSIZ)
1046 break;
1047 value = w_length;
1048 break;
1049 case 0x5c: /* control READ test -- return the buffer */
1050 if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR))
1051 goto unknown;
1052 if (w_value || w_index)
1053 break;
1054 /* expect those bytes are still in the buffer; send back */
1055 if (w_length > USB_BUFSIZ
1056 || w_length != req->length)
1057 break;
1058 value = w_length;
1059 break;
1060
1061 default:
1062unknown:
1063 VDBG (dev,
1064 "unknown control req%02x.%02x v%04x i%04x l%d\n",
1065 ctrl->bRequestType, ctrl->bRequest,
1066 w_value, w_index, w_length);
1067 }
1068
1069 /* respond with data transfer before status phase? */
1070 if (value >= 0) {
1071 req->length = value;
1072 req->zero = value < w_length;
1073 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
1074 if (value < 0) {
1075 DBG (dev, "ep_queue --> %d\n", value);
1076 req->status = 0;
1077 zero_setup_complete (gadget->ep0, req);
1078 }
1079 }
1080
1081 /* device either stalls (value < 0) or reports success */
1082 return value;
1083}
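/*
 * Host-side sketch (user space, libusb-0.1), hypothetical and not shipped
 * with this driver: one way to exercise the 0x5b/0x5c vendor requests
 * handled above.  "dh" is an already-opened handle to the gadget; buffer
 * size, fill byte and timeout are arbitrary choices for illustration.
 */
#include <string.h>
#include <usb.h>

static int control_write_read_check (usb_dev_handle *dh, int len)
{
	char	out [256], in [256];
	int	status;

	if (len > (int) sizeof out)
		return -1;
	memset (out, 0x55, sizeof out);

	/* 0x5b: control WRITE -- the gadget just buffers these bytes */
	status = usb_control_msg (dh, USB_TYPE_VENDOR | USB_ENDPOINT_OUT,
			0x5b, 0, 0, out, len, 3000 /* ms */);
	if (status < 0)
		return status;

	/* 0x5c: control READ -- the gadget echoes the buffer back;
	 * the length must match what was just written
	 */
	status = usb_control_msg (dh, USB_TYPE_VENDOR | USB_ENDPOINT_IN,
			0x5c, 0, 0, in, len, 3000 /* ms */);
	if (status < 0)
		return status;

	return memcmp (out, in, len) ? -1 : 0;
}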
1084
1085static void
1086zero_disconnect (struct usb_gadget *gadget)
1087{
1088 struct zero_dev *dev = get_gadget_data (gadget);
1089 unsigned long flags;
1090
1091 spin_lock_irqsave (&dev->lock, flags);
1092 zero_reset_config (dev);
1093
1094 /* a more significant application might have some non-usb
1095 * activities to quiesce here, saving resources like power
1096 * or pushing the notification up a network stack.
1097 */
1098 spin_unlock_irqrestore (&dev->lock, flags);
1099
1100 /* next we may get setup() calls to enumerate new connections;
1101 * or an unbind() during shutdown (including removing module).
1102 */
1103}
1104
1105static void
1106zero_autoresume (unsigned long _dev)
1107{
1108 struct zero_dev *dev = (struct zero_dev *) _dev;
1109 int status;
1110
1111 /* normally the host would be woken up for something
1112 * more significant than just a timer firing...
1113 */
1114 if (dev->gadget->speed != USB_SPEED_UNKNOWN) {
1115 status = usb_gadget_wakeup (dev->gadget);
1116 DBG (dev, "wakeup --> %d\n", status);
1117 }
1118}
1119
1120/*-------------------------------------------------------------------------*/
1121
1122static void
1123zero_unbind (struct usb_gadget *gadget)
1124{
1125 struct zero_dev *dev = get_gadget_data (gadget);
1126
1127 DBG (dev, "unbind\n");
1128
1129 /* we've already been disconnected ... no i/o is active */
1130 if (dev->req)
1131 free_ep_req (gadget->ep0, dev->req);
1132 del_timer_sync (&dev->resume);
1133 kfree (dev);
1134 set_gadget_data (gadget, NULL);
1135}
1136
1137static int
1138zero_bind (struct usb_gadget *gadget)
1139{
1140 struct zero_dev *dev;
1141 struct usb_ep *ep;
1142
1143 /* Bulk-only drivers like this one SHOULD be able to
1144 * autoconfigure on any sane usb controller driver,
1145 * but there may also be important quirks to address.
1146 */
1147 usb_ep_autoconfig_reset (gadget);
1148 ep = usb_ep_autoconfig (gadget, &fs_source_desc);
1149 if (!ep) {
1150autoconf_fail:
1151 printk (KERN_ERR "%s: can't autoconfigure on %s\n",
1152 shortname, gadget->name);
1153 return -ENODEV;
1154 }
1155 EP_IN_NAME = ep->name;
1156 ep->driver_data = ep; /* claim */
1157
1158 ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
1159 if (!ep)
1160 goto autoconf_fail;
1161 EP_OUT_NAME = ep->name;
1162 ep->driver_data = ep; /* claim */
1163
1164
1165 /*
1166 * DRIVER POLICY CHOICE: you may want to do this differently.
1167 * One thing to avoid is reusing a bcdDevice revision code
1168 * with different host-visible configurations or behavior
1169 * restrictions -- using ep1in/ep2out vs ep1out/ep3in, etc
1170 */
1171 if (gadget_is_net2280 (gadget)) {
1172 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201);
1173 } else if (gadget_is_pxa (gadget)) {
1174 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0203);
1175#if 0
1176 } else if (gadget_is_sh(gadget)) {
1177 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0204);
1178 /* SH has only one configuration; see "loopdefault" */
1179 device_desc.bNumConfigurations = 1;
1180 /* FIXME make 1 == default.bConfigurationValue */
1181#endif
1182 } else if (gadget_is_sa1100 (gadget)) {
1183 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0205);
1184 } else if (gadget_is_goku (gadget)) {
1185 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0206);
1186 } else if (gadget_is_mq11xx (gadget)) {
1187 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0207);
1188 } else if (gadget_is_omap (gadget)) {
1189 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
1190 } else if (gadget_is_lh7a40x(gadget)) {
1191 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
1192 } else if (gadget_is_n9604(gadget)) {
1193 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0210);
1194 } else if (gadget_is_pxa27x(gadget)) {
1195 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0211);
1196 } else if (gadget_is_s3c2410(gadget)) {
1197 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0212);
1198 } else if (gadget_is_at91(gadget)) {
1199 device_desc.bcdDevice = __constant_cpu_to_le16 (0x0213);
1200 } else {
1201 /* gadget zero is so simple (for now, no altsettings) that
1202 * it SHOULD NOT have problems with bulk-capable hardware.
1203	 * so warn about unrecognized controllers, don't panic.
1204 *
1205 * things like configuration and altsetting numbering
1206 * can need hardware-specific attention though.
1207 */
1208 printk (KERN_WARNING "%s: controller '%s' not recognized\n",
1209 shortname, gadget->name);
1210 device_desc.bcdDevice = __constant_cpu_to_le16 (0x9999);
1211 }
1212
1213
1214 /* ok, we made sense of the hardware ... */
1215 dev = kmalloc (sizeof *dev, SLAB_KERNEL);
1216 if (!dev)
1217 return -ENOMEM;
1218 memset (dev, 0, sizeof *dev);
1219 spin_lock_init (&dev->lock);
1220 dev->gadget = gadget;
1221 set_gadget_data (gadget, dev);
1222
1223 /* preallocate control response and buffer */
1224 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1225 if (!dev->req)
1226 goto enomem;
1227 dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ,
1228 &dev->req->dma, GFP_KERNEL);
1229 if (!dev->req->buf)
1230 goto enomem;
1231
1232 dev->req->complete = zero_setup_complete;
1233
1234 device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
1235
1236#ifdef CONFIG_USB_GADGET_DUALSPEED
1237 /* assume ep0 uses the same value for both speeds ... */
1238 dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
1239
1240 /* and that all endpoints are dual-speed */
1241 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
1242 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
1243#endif
1244
1245 if (gadget->is_otg) {
1246		otg_descriptor.bmAttributes |= USB_OTG_HNP;
1247 source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1248 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1249 }
1250
1257 usb_gadget_set_selfpowered (gadget);
1258
1259 init_timer (&dev->resume);
1260 dev->resume.function = zero_autoresume;
1261 dev->resume.data = (unsigned long) dev;
1262 if (autoresume) {
1263 source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1264 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1265 }
1266
1267 gadget->ep0->driver_data = dev;
1268
1269 INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname);
1270 INFO (dev, "using %s, OUT %s IN %s\n", gadget->name,
1271 EP_OUT_NAME, EP_IN_NAME);
1272
1273 snprintf (manufacturer, sizeof manufacturer, "%s %s with %s",
1274 system_utsname.sysname, system_utsname.release,
1275 gadget->name);
1276
1277 return 0;
1278
1279enomem:
1280 zero_unbind (gadget);
1281 return -ENOMEM;
1282}
1283
1284/*-------------------------------------------------------------------------*/
1285
1286static void
1287zero_suspend (struct usb_gadget *gadget)
1288{
1289 struct zero_dev *dev = get_gadget_data (gadget);
1290
1291 if (gadget->speed == USB_SPEED_UNKNOWN)
1292 return;
1293
1294 if (autoresume) {
1295 mod_timer (&dev->resume, jiffies + (HZ * autoresume));
1296 DBG (dev, "suspend, wakeup in %d seconds\n", autoresume);
1297 } else
1298 DBG (dev, "suspend\n");
1299}
1300
1301static void
1302zero_resume (struct usb_gadget *gadget)
1303{
1304 struct zero_dev *dev = get_gadget_data (gadget);
1305
1306 DBG (dev, "resume\n");
1307 del_timer (&dev->resume);
1308}
1309
1310
1311/*-------------------------------------------------------------------------*/
1312
1313static struct usb_gadget_driver zero_driver = {
1314#ifdef CONFIG_USB_GADGET_DUALSPEED
1315 .speed = USB_SPEED_HIGH,
1316#else
1317 .speed = USB_SPEED_FULL,
1318#endif
1319 .function = (char *) longname,
1320 .bind = zero_bind,
1321 .unbind = zero_unbind,
1322
1323 .setup = zero_setup,
1324 .disconnect = zero_disconnect,
1325
1326 .suspend = zero_suspend,
1327 .resume = zero_resume,
1328
1329 .driver = {
1330 .name = (char *) shortname,
1331 // .shutdown = ...
1332 // .suspend = ...
1333 // .resume = ...
1334 },
1335};
1336
1337MODULE_AUTHOR ("David Brownell");
1338MODULE_LICENSE ("Dual BSD/GPL");
1339
1340
1341static int __init init (void)
1342{
1343 /* a real value would likely come through some id prom
1344 * or module option. this one takes at least two packets.
1345 */
1346 strlcpy (serial, "0123456789.0123456789.0123456789", sizeof serial);
1347
1348 return usb_gadget_register_driver (&zero_driver);
1349}
1350module_init (init);
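/*
 * Sketch, not in the driver as written: if the serial number should come
 * from a module option instead of the constant used in init() above, the
 * usual idiom would be ("serial" is the static buffer declared next to the
 * string table):
 */
module_param_string (serial, serial, sizeof serial, S_IRUGO);
MODULE_PARM_DESC (serial, "iSerialNumber string");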
1351
1352static void __exit cleanup (void)
1353{
1354 usb_gadget_unregister_driver (&zero_driver);
1355}
1356module_exit (cleanup);
1357