Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/wusbcore/Makefile   |    6
-rw-r--r--  drivers/usb/wusbcore/wa-hc.c    |   95
-rw-r--r--  drivers/usb/wusbcore/wa-hc.h    |  417
-rw-r--r--  drivers/usb/wusbcore/wa-nep.c   |  310
-rw-r--r--  drivers/usb/wusbcore/wa-rpipe.c |  562
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c  | 1709
6 files changed, 3099 insertions, 0 deletions
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile
index 6504f42ca367..7a4d00724039 100644
--- a/drivers/usb/wusbcore/Makefile
+++ b/drivers/usb/wusbcore/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_USB_WUSB) += wusbcore.o wusb-cbaf.o
2obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
3
4wusbcore-objs := \
5	crypto.o \
@@ -12,3 +13,8 @@ wusbcore-objs := \
13	wusbhc.o
14
15wusb-cbaf-objs := cbaf.o
16
17wusb-wa-objs := wa-hc.o \
18 wa-nep.o \
19 wa-rpipe.o \
20 wa-xfer.o
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
new file mode 100644
index 000000000000..9d04722415bb
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -0,0 +1,95 @@
1/*
2 * Wire Adapter Host Controller Driver
3 * Common items to HWA and DWA based HCDs
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25#include "wusbhc.h"
26#include "wa-hc.h"
27
28/**
29 * Assumes:
30 *
31 * wa->usb_dev and wa->usb_iface are initialized and refcounted,
32 * wa->wa_descr is initialized.
33 */
34int wa_create(struct wahc *wa, struct usb_interface *iface)
35{
36 int result;
37 struct device *dev = &iface->dev;
38
39 result = wa_rpipes_create(wa);
40 if (result < 0)
41 goto error_rpipes_create;
42 /* Fill up Data Transfer EP pointers */
43 wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
44 wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
45 wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
46 wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
47	result = -ENOMEM;
48	if (wa->xfer_result == NULL)
		goto error_xfer_result_alloc;
49 result = wa_nep_create(wa, iface);
50 if (result < 0) {
51 dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
52 result);
53 goto error_nep_create;
54 }
55 return 0;
56
57error_nep_create:
58 kfree(wa->xfer_result);
59error_xfer_result_alloc:
60 wa_rpipes_destroy(wa);
61error_rpipes_create:
62 return result;
63}
64EXPORT_SYMBOL_GPL(wa_create);
65
66
67void __wa_destroy(struct wahc *wa)
68{
69 if (wa->dti_urb) {
70 usb_kill_urb(wa->dti_urb);
71 usb_put_urb(wa->dti_urb);
72 usb_kill_urb(wa->buf_in_urb);
73 usb_put_urb(wa->buf_in_urb);
74 }
75 kfree(wa->xfer_result);
76 wa_nep_destroy(wa);
77 wa_rpipes_destroy(wa);
78}
79EXPORT_SYMBOL_GPL(__wa_destroy);
80
81/**
82 * wa_reset_all - reset the WA device
83 * @wa: the WA to be reset
84 *
85 * For HWAs the radio controller and all other PALs are also reset.
86 */
87void wa_reset_all(struct wahc *wa)
88{
89 /* FIXME: assuming HWA. */
90 wusbhc_reset_all(wa->wusb);
91}
92
93MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
94MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
95MODULE_LICENSE("GPL");
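
A minimal usage sketch, illustration only and not part of this patch: wa_create() assumes the caller already holds references on wa->usb_dev and wa->usb_iface and has cached wa->wa_descr, so a hypothetical glue driver (the example_* names below are made up) might pair it with __wa_destroy() roughly like this:

/* Hypothetical glue; real drivers embed struct wahc in their HCD private data. */
static int example_probe(struct usb_interface *iface,
			 const struct usb_device_id *id)
{
	int result = -ENOMEM;
	struct wahc *wa;

	wa = kzalloc(sizeof(*wa), GFP_KERNEL);
	if (wa == NULL)
		goto error_alloc;
	wa->usb_dev = usb_get_dev(interface_to_usbdev(iface));
	wa->usb_iface = usb_get_intf(iface);
	wa_init(wa);			/* lists, locks, work items (wa-hc.h) */
	/* ... locate the WA class descriptor and point wa->wa_descr at it ... */
	result = wa_create(wa, iface);	/* rpipes, DTI/DTO endpoints, notif EP */
	if (result < 0)
		goto error_create;
	usb_set_intfdata(iface, wa);
	return 0;

error_create:
	usb_put_intf(wa->usb_iface);
	usb_put_dev(wa->usb_dev);
	kfree(wa);
error_alloc:
	return result;
}

static void example_disconnect(struct usb_interface *iface)
{
	struct wahc *wa = usb_get_intfdata(iface);

	__wa_destroy(wa);		/* kills URBs, frees rpipes and buffers */
	usb_put_intf(wa->usb_iface);
	usb_put_dev(wa->usb_dev);
	kfree(wa);
}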
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
new file mode 100644
index 000000000000..586d350cdb4d
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -0,0 +1,417 @@
1/*
2 * HWA Host Controller Driver
3 * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This driver implements a USB Host Controller (struct usb_hcd) for a
24 * Wireless USB Host Controller based on the Wireless USB 1.0
25 * Host-Wire-Adapter specification (in layman's terms, a USB-dongle that
26 * implements a Wireless USB host).
27 *
28 * Check out the Design-overview.txt file in the source documentation
29 * for other details on the implementation.
30 *
31 * Main blocks:
32 *
33 * driver glue with the driver API, workqueue daemon
34 *
35 * lc RC instance life cycle management (create, destroy...)
36 *
37 * hcd glue with the USB API Host Controller Interface API.
38 *
39 * nep	Notification EndPoint management: collect notifications
40 * and queue them with the workqueue daemon.
41 *
42 * Handles notifications coming from the NEP and sends them
43 * off to their respective modules (eg: connect,
44 * disconnect and reset go to devconnect).
45 *
46 * rpipe Remote Pipe management; rpipe is what we use to write
47 * to an endpoint on a WUSB device that is connected to a
48 * HWA RC.
49 *
50 * xfer	Transfer management -- this is all the code that gets a
51 * buffer and pushes it to a device (or vice versa).
52 *
53 * Some day a lot of this code will be shared between this driver and
54 * the drivers for DWA (xfer, rpipe).
55 *
56 * It all starts at driver.c:hwahc_probe(), when one of these devices is
57 * connected. hwahc_disconnect() stops it.
58 *
59 * During operation, the main activity is devices connecting or
60 * disconnecting. They cause the HWA RC to send notifications into
61 * nep.c:hwahc_nep_cb() that will dispatch them to
62 * notif.c:wa_notif_dispatch(). From there they will fan out to cause
63 * device connects, disconnects, etc.
64 *
65 * Note much of the activity is difficult to follow. For example a
66 * device connect goes to devconnect, which will cause the "fake" root
67 * hub port to show a connect and stop there. Then khubd will notice
68 * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
69 * the device (and this might require user intervention) and enable
70 * the port.
71 *
72 * We also have a timer workqueue going from devconnect.c that
73 * schedules in hwahc_devconnect_create().
74 *
75 * The rest of the traffic is in the usual entry points of a USB HCD,
76 * which are hooked up in driver.c:hwahc_rc_driver, and defined in
77 * hcd.c.
78 */
79
80#ifndef __HWAHC_INTERNAL_H__
81#define __HWAHC_INTERNAL_H__
82
83#include <linux/completion.h>
84#include <linux/usb.h>
85#include <linux/mutex.h>
86#include <linux/spinlock.h>
87#include <linux/uwb.h>
88#include <linux/usb/wusb.h>
89#include <linux/usb/wusb-wa.h>
90
91struct wusbhc;
92struct wahc;
93extern void wa_urb_enqueue_run(struct work_struct *ws);
94
95/**
96 * RPipe instance
97 *
98 * @descr's fields are kept in LE, as we need to send it back and
99 * forth.
100 *
101 * @wa is referenced when set
102 *
103 * @segs_available is the number of request segments that can still
104 * be submitted to the controller without overloading
105 * it. It is initialized to descr->wRequests when
106 * aiming.
107 *
108 * An rpipe supports a max of descr->wRequests segments at a time; before
109 * submitting, seg_lock has to be taken. If segs_avail > 0, then we can
110 * submit; if not, we have to queue them.
111 */
112struct wa_rpipe {
113 struct kref refcnt;
114 struct usb_rpipe_descriptor descr;
115 struct usb_host_endpoint *ep;
116 struct wahc *wa;
117 spinlock_t seg_lock;
118 struct list_head seg_list;
119 atomic_t segs_available;
120 u8 buffer[1]; /* For reads/writes on USB */
121};
122
123
124/**
125 * Instance of a HWA Host Controller
126 *
127 * Except where a more specific lock/mutex applies or atomic, all
128 * fields protected by @mutex.
129 *
130 * @wa_descr Can be accessed without locking because it is in
131 * the same area where the device descriptors were
132 * read, so it is guaranteed to exist unmodified while
133 * the device exists.
134 *
135 * Endianness has been converted to the CPU's.
136 *
137 * @nep_* can be accessed without locking as its processing is
138 * serialized; we submit a NEP URB and it comes to
139 * hwahc_nep_cb(), which won't issue another URB until it is
140 * done processing it.
141 *
142 * @xfer_list:
143 *
144 * List of active transfers, used to verify existence from an xfer id
145 * taken from the xfer result message. Can't use urb->list because
146 * it goes by endpoint, and we don't know the endpoint at the time
147 * when we get the xfer result message. We can't really rely on the
148 * pointer (will have to change for 64 bits) as the xfer id is 32 bits.
149 *
150 * @xfer_delayed_list: List of transfers that need to be started
151 * (with a workqueue, because they were
152 * submitted from an atomic context).
153 *
154 * FIXME: this needs to be layered up: a wusbhc layer (for sharing
155 * commonalities with WHCI), a wa layer (for sharing
156 * commonalities with DWA-RC).
157 */
158struct wahc {
159 struct usb_device *usb_dev;
160 struct usb_interface *usb_iface;
161
162 /* HC to deliver notifications */
163 union {
164 struct wusbhc *wusb;
165 struct dwahc *dwa;
166 };
167
168 const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
169 const struct usb_wa_descriptor *wa_descr;
170
171 struct urb *nep_urb; /* Notification EndPoint [lockless] */
172 struct edc nep_edc;
173 void *nep_buffer;
174 size_t nep_buffer_size;
175
176 atomic_t notifs_queued;
177
178 u16 rpipes;
179 unsigned long *rpipe_bm; /* rpipe usage bitmap */
180 spinlock_t rpipe_bm_lock; /* protect rpipe_bm */
181 struct mutex rpipe_mutex; /* assigning resources to endpoints */
182
183 struct urb *dti_urb; /* URB for reading xfer results */
184 struct urb *buf_in_urb; /* URB for reading data in */
185 struct edc dti_edc; /* DTI error density counter */
186 struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
187 size_t xfer_result_size;
188
189 s32 status; /* For reading status */
190
191 struct list_head xfer_list;
192 struct list_head xfer_delayed_list;
193 spinlock_t xfer_list_lock;
194 struct work_struct xfer_work;
195 atomic_t xfer_id_count;
196};
197
198
199extern int wa_create(struct wahc *wa, struct usb_interface *iface);
200extern void __wa_destroy(struct wahc *wa);
201void wa_reset_all(struct wahc *wa);
202
203
204/* Miscellaneous constants */
205enum {
206 /** Max number of EPROTO errors we tolerate on the NEP in a
207 * period of time */
208 HWAHC_EPROTO_MAX = 16,
209 /** Period of time for EPROTO errors (in jiffies) */
210 HWAHC_EPROTO_PERIOD = 4 * HZ,
211};
212
213
214/* Notification endpoint handling */
215extern int wa_nep_create(struct wahc *, struct usb_interface *);
216extern void wa_nep_destroy(struct wahc *);
217
218static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
219{
220 struct urb *urb = wa->nep_urb;
221 urb->transfer_buffer = wa->nep_buffer;
222 urb->transfer_buffer_length = wa->nep_buffer_size;
223 return usb_submit_urb(urb, gfp_mask);
224}
225
226static inline void wa_nep_disarm(struct wahc *wa)
227{
228 usb_kill_urb(wa->nep_urb);
229}
230
231
232/* RPipes */
233static inline void wa_rpipe_init(struct wahc *wa)
234{
235 spin_lock_init(&wa->rpipe_bm_lock);
236 mutex_init(&wa->rpipe_mutex);
237}
238
239static inline void wa_init(struct wahc *wa)
240{
241 edc_init(&wa->nep_edc);
242 atomic_set(&wa->notifs_queued, 0);
243 wa_rpipe_init(wa);
244 edc_init(&wa->dti_edc);
245 INIT_LIST_HEAD(&wa->xfer_list);
246 INIT_LIST_HEAD(&wa->xfer_delayed_list);
247 spin_lock_init(&wa->xfer_list_lock);
248 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
249 atomic_set(&wa->xfer_id_count, 1);
250}
251
252/**
253 * Destroy a pipe (when refcount drops to zero)
254 *
255 * Assumes it has been moved to the "QUIESCING" state.
256 */
257struct wa_xfer;
258extern void rpipe_destroy(struct kref *_rpipe);
259static inline
260void __rpipe_get(struct wa_rpipe *rpipe)
261{
262 kref_get(&rpipe->refcnt);
263}
264extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
265 struct urb *, gfp_t);
266static inline void rpipe_put(struct wa_rpipe *rpipe)
267{
268 kref_put(&rpipe->refcnt, rpipe_destroy);
269
270}
271extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
272extern int wa_rpipes_create(struct wahc *);
273extern void wa_rpipes_destroy(struct wahc *);
274static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
275{
276 atomic_dec(&rpipe->segs_available);
277}
278
279/**
280 * Returns true if the rpipe is ready to submit more segments.
281 */
282static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
283{
284 return atomic_inc_return(&rpipe->segs_available) > 0
285 && !list_empty(&rpipe->seg_list);
286}
287
288
289/* Transferring data */
290extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
291 struct urb *, gfp_t);
292extern int wa_urb_dequeue(struct wahc *, struct urb *);
293extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
294
295
296/* Misc
297 *
298 * FIXME: Refcounting for the actual @hwahc object is not correct; I
299 * mean, this should be refcounting on the HCD underneath, but
300 * it is not. In any case, the semantics for HCD refcounting
301 * are *weird*...on refcount reaching zero it just frees
302 * it...no RC specific function is called...unless I miss
303 * something.
304 *
305 * FIXME: has to go away in favour of a 'struct hcd' based solution
306 */
307static inline struct wahc *wa_get(struct wahc *wa)
308{
309 usb_get_intf(wa->usb_iface);
310 return wa;
311}
312
313static inline void wa_put(struct wahc *wa)
314{
315 usb_put_intf(wa->usb_iface);
316}
317
318
319static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
320{
321 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
322 op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
323 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
324 feature,
325 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
326 NULL, 0, 1000 /* FIXME: arbitrary */);
327}
328
329
330static inline int __wa_set_feature(struct wahc *wa, u16 feature)
331{
332 return __wa_feature(wa, 1, feature);
333}
334
335
336static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
337{
338 return __wa_feature(wa, 0, feature);
339}
340
341
342/**
343 * Return the status of a Wire Adapter
344 *
345 * @wa: Wire Adapter instance
346 * @returns < 0 errno code on error, or status bitmap as described
347 * in WUSB1.0[8.3.1.6].
348 *
349 * NOTE: needs a non-stack buffer (we use wa->status); some arches
350 *       can't do USB transfers from the stack
350 */
351static inline
352s32 __wa_get_status(struct wahc *wa)
353{
354 s32 result;
355 result = usb_control_msg(
356 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
357 USB_REQ_GET_STATUS,
358 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
359 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
360 &wa->status, sizeof(wa->status),
361 1000 /* FIXME: arbitrary */);
362 if (result >= 0)
363 result = wa->status;
364 return result;
365}
366
367
368/**
369 * Waits until the Wire Adapter's status matches @mask/@value
370 *
371 * @wa: Wire Adapter instance.
372 * @returns < 0 errno code on error, otherwise status.
373 *
374 * Loop until the WA's status matches the mask and value (status & mask
375 * == value). Timeout if it doesn't happen.
376 *
377 * FIXME: is there an official specification on how long status
378 * changes can take?
379 */
380static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
381{
382 s32 result;
383 unsigned loops = 10;
384 do {
385 msleep(50);
386 result = __wa_get_status(wa);
387 if ((result & mask) == value)
388 break;
389 if (loops-- == 0) {
390 result = -ETIMEDOUT;
391 break;
392 }
393 } while (result >= 0);
394 return result;
395}
396
397
398/** Command @hwahc to stop, @returns 0 if ok, < 0 errno code on error */
399static inline int __wa_stop(struct wahc *wa)
400{
401 int result;
402 struct device *dev = &wa->usb_iface->dev;
403
404 result = __wa_clear_feature(wa, WA_ENABLE);
405 if (result < 0 && result != -ENODEV) {
406 dev_err(dev, "error commanding HC to stop: %d\n", result);
407 goto out;
408 }
409 result = __wa_wait_status(wa, WA_ENABLE, 0);
410 if (result < 0 && result != -ENODEV)
411 dev_err(dev, "error waiting for HC to stop: %d\n", result);
412out:
413 return 0;
414}
415
416
417#endif /* #ifndef __HWAHC_INTERNAL_H__ */
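
For illustration only (not part of this header): the start sequence would be the mirror image of __wa_stop() above, built from the same feature/status helpers. A sketch of a hypothetical __wa_start() counterpart:

/* Hypothetical mirror of __wa_stop(): enable the WA and wait until it reports so. */
static inline int __wa_start_example(struct wahc *wa)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;

	result = __wa_set_feature(wa, WA_ENABLE);
	if (result < 0) {
		dev_err(dev, "error commanding HC to start: %d\n", result);
		return result;
	}
	result = __wa_wait_status(wa, WA_ENABLE, WA_ENABLE);
	if (result < 0) {
		dev_err(dev, "error waiting for HC to start: %d\n", result);
		return result;
	}
	return 0;
}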
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
new file mode 100644
index 000000000000..3f542990c73f
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -0,0 +1,310 @@
1/*
2 * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
3 * Notification EndPoint support
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This part takes care of getting the notification from the hw
24 * only and dispatching through wusbwad into
25 * wa_notif_dispatch. Handling is done there.
26 *
27 * WA notifications are limited in size; most of them are three or
28 * four bytes long, and the longest is the HWA Device Notification,
29 * which would not exceed 38 bytes (DNs are limited in payload to 32
30 * bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA
31 * header (WUSB1.0[8.5.4.2]).
32 *
33 * It is not clear if more than one Device Notification can be packed
34 * in a HWA Notification; I assume not, because of the wording in
35 * WUSB1.0[8.5.4.2]. In any case, the biggest any notification could
36 * get is 256 bytes (as the bLength field is a byte).
37 *
38 * So what we do is we have this buffer and read into it; when a
39 * notification arrives we schedule work to a specific, single thread
40 * workqueue (so notifications are serialized) and copy the
41 * notification data. After scheduling the work, we rearm the read from
42 * the notification endpoint.
43 *
44 * Entry points here are:
45 *
46 * wa_nep_[create|destroy]() To initialize/release this subsystem
47 *
48 * wa_nep_cb() Callback for the notification
49 * endpoint; when data is ready, this
50 * does the dispatching.
51 */
52#include <linux/workqueue.h>
53#include <linux/ctype.h>
54#include <linux/uwb/debug.h>
55#include "wa-hc.h"
56#include "wusbhc.h"
57
58/* Structure for queueing notifications to the workqueue */
59struct wa_notif_work {
60 struct work_struct work;
61 struct wahc *wa;
62 size_t size;
63 u8 data[];
64};
65
66/*
67 * Process incoming notifications from the WA's Notification EndPoint
68 * [the wuswad daemon, basically]
69 *
70 * @_nw: Pointer to a descriptor which has the pointer to the
71 * @wa, the size of the buffer and the work queue
72 * structure (so we can free all when done).
73 * @returns 0 if ok, < 0 errno code on error.
74 *
75 * All notifications follow the same format; they need to start with a
76 * 'struct wa_notif_hdr' header, so it is easy to parse through
77 * them. We just break the buffer into individual notifications (the
78 * standard doesn't say if it can be done or is forbidden, so we are
79 * cautious) and dispatch each.
80 *
81 * So the handling layers are:
82 *
83 * WA specific notification (from NEP)
84 * Device Notification Received -> wa_handle_notif_dn()
85 * WUSB Device notification generic handling
86 * BPST Adjustment -> wa_handle_notif_bpst_adj()
87 * ... -> ...
88 *
89 * @wa has to be referenced
90 */
91static void wa_notif_dispatch(struct work_struct *ws)
92{
93 void *itr;
94 u8 missing = 0;
95 struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
96 struct wahc *wa = nw->wa;
97 struct wa_notif_hdr *notif_hdr;
98 size_t size;
99
100 struct device *dev = &wa->usb_iface->dev;
101
102#if 0
103 /* FIXME: need to check for this??? */
104 if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */
105 goto out; /* screw it */
106#endif
107 atomic_dec(&wa->notifs_queued); /* Throttling ctl */
108 dev = &wa->usb_iface->dev;
109 size = nw->size;
110 itr = nw->data;
111
112 while (size) {
113 if (size < sizeof(*notif_hdr)) {
114 missing = sizeof(*notif_hdr) - size;
115 goto exhausted_buffer;
116 }
117 notif_hdr = itr;
118 if (size < notif_hdr->bLength)
119 goto exhausted_buffer;
120 itr += notif_hdr->bLength;
121 size -= notif_hdr->bLength;
122 /* Dispatch the notification [don't use itr or size!] */
123 switch (notif_hdr->bNotifyType) {
124 case HWA_NOTIF_DN: {
125 struct hwa_notif_dn *hwa_dn;
126 hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
127 hdr);
128 wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
129 hwa_dn->dndata,
130 notif_hdr->bLength - sizeof(*hwa_dn));
131 break;
132 }
133 case WA_NOTIF_TRANSFER:
134 wa_handle_notif_xfer(wa, notif_hdr);
135 break;
136 case DWA_NOTIF_RWAKE:
137 case DWA_NOTIF_PORTSTATUS:
138 case HWA_NOTIF_BPST_ADJ:
139 /* FIXME: unimplemented WA NOTIFs */
140 /* fallthru */
141 default:
142 if (printk_ratelimit()) {
143 dev_err(dev, "HWA: unknown notification 0x%x, "
144 "%zu bytes; discarding\n",
145 notif_hdr->bNotifyType,
146 (size_t)notif_hdr->bLength);
147 dump_bytes(dev, notif_hdr, 16);
148 }
149 break;
150 }
151 }
152out:
153 wa_put(wa);
154 kfree(nw);
155 return;
156
157 /* THIS SHOULD NOT HAPPEN
158 *
159 * Buffer exhausted with partial data remaining; just warn and
160 * discard the data, as this should not happen.
161 */
162exhausted_buffer:
163 if (!printk_ratelimit())
164 goto out;
165 dev_warn(dev, "HWA: device sent short notification, "
166 "%d bytes missing; discarding %d bytes.\n",
167 missing, (int)size);
168 dump_bytes(dev, itr, size);
169 goto out;
170}
171
172/*
173 * Deliver incoming WA notifications to the wusbwa workqueue
174 *
175 * @wa: Pointer the Wire Adapter Controller Data Streaming
176 * instance (part of an 'struct usb_hcd').
177 * @size: Size of the received buffer
178 * @returns 0 if ok, < 0 errno code on error.
179 *
180 * The input buffer is @wa->nep_buffer, with @size bytes
181 * (guaranteed to fit in the allocated space,
182 * @wa->nep_buffer_size).
183 */
184static int wa_nep_queue(struct wahc *wa, size_t size)
185{
186 int result = 0;
187 struct device *dev = &wa->usb_iface->dev;
188 struct wa_notif_work *nw;
189
190 /* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
191 BUG_ON(size > wa->nep_buffer_size);
192 if (size == 0)
193 goto out;
194 if (atomic_read(&wa->notifs_queued) > 200) {
195 if (printk_ratelimit())
196 dev_err(dev, "Too many notifications queued, "
197 "throttling back\n");
198 goto out;
199 }
200 nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
201 if (nw == NULL) {
202 if (printk_ratelimit())
203 dev_err(dev, "No memory to queue notification\n");
204 goto out;
205 }
206 INIT_WORK(&nw->work, wa_notif_dispatch);
207 nw->wa = wa_get(wa);
208 nw->size = size;
209 memcpy(nw->data, wa->nep_buffer, size);
210 atomic_inc(&wa->notifs_queued); /* Throttling ctl */
211 queue_work(wusbd, &nw->work);
212out:
213 /* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
214 return result;
215}
216
217/*
218 * Callback for the notification event endpoint
219 *
220 * Checks that everything is fine and then passes the data to be
221 * queued to the workqueue.
222 */
223static void wa_nep_cb(struct urb *urb)
224{
225 int result;
226 struct wahc *wa = urb->context;
227 struct device *dev = &wa->usb_iface->dev;
228
229 switch (result = urb->status) {
230 case 0:
231 result = wa_nep_queue(wa, urb->actual_length);
232 if (result < 0)
233 dev_err(dev, "NEP: unable to process notification(s): "
234 "%d\n", result);
235 break;
236 case -ECONNRESET: /* Not an error, but a controlled situation; */
237 case -ENOENT: /* (we killed the URB)...so, no broadcast */
238 case -ESHUTDOWN:
239 dev_dbg(dev, "NEP: going down %d\n", urb->status);
240 goto out;
241 default: /* On general errors, we retry unless it gets ugly */
242 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
243 EDC_ERROR_TIMEFRAME)) {
244 dev_err(dev, "NEP: URB max acceptable errors "
245 "exceeded, resetting device\n");
246 wa_reset_all(wa);
247 goto out;
248 }
249 dev_err(dev, "NEP: URB error %d\n", urb->status);
250 }
251 result = wa_nep_arm(wa, GFP_ATOMIC);
252 if (result < 0) {
253 dev_err(dev, "NEP: cannot submit URB: %d\n", result);
254 wa_reset_all(wa);
255 }
256out:
257 return;
258}
259
260/*
261 * Initialize @wa's notification and event's endpoint stuff
262 *
263 * This includes allocating the read buffer, the context ID
264 * allocation bitmap, the URB and submitting the URB.
265 */
266int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
267{
268 int result;
269 struct usb_endpoint_descriptor *epd;
270 struct usb_device *usb_dev = interface_to_usbdev(iface);
271 struct device *dev = &iface->dev;
272
273 edc_init(&wa->nep_edc);
274 epd = &iface->cur_altsetting->endpoint[0].desc;
275 wa->nep_buffer_size = 1024;
276 wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
277 if (wa->nep_buffer == NULL) {
278 dev_err(dev, "Unable to allocate notification's read buffer\n");
279 goto error_nep_buffer;
280 }
281 wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
282 if (wa->nep_urb == NULL) {
283 dev_err(dev, "Unable to allocate notification URB\n");
284 goto error_urb_alloc;
285 }
286 usb_fill_int_urb(wa->nep_urb, usb_dev,
287 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
288 wa->nep_buffer, wa->nep_buffer_size,
289 wa_nep_cb, wa, epd->bInterval);
290 result = wa_nep_arm(wa, GFP_KERNEL);
291 if (result < 0) {
292 dev_err(dev, "Cannot submit notification URB: %d\n", result);
293 goto error_nep_arm;
294 }
295 return 0;
296
297error_nep_arm:
298 usb_free_urb(wa->nep_urb);
299error_urb_alloc:
300 kfree(wa->nep_buffer);
301error_nep_buffer:
302 return -ENOMEM;
303}
304
305void wa_nep_destroy(struct wahc *wa)
306{
307 wa_nep_disarm(wa);
308 usb_free_urb(wa->nep_urb);
309 kfree(wa->nep_buffer);
310}
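
To make the parsing contract of wa_notif_dispatch() above concrete, here is a stripped-down sketch (illustration only, not part of the patch) of the same bLength walk over a buffer of back-to-back notifications, with an extra sanity check on bLength; it relies only on the struct wa_notif_hdr fields the dispatcher itself uses:

/* Illustration: walk a buffer of concatenated WA notifications. */
static void example_walk_notifications(struct device *dev, void *buf, size_t size)
{
	struct wa_notif_hdr *hdr;

	while (size >= sizeof(*hdr)) {
		hdr = buf;
		if (hdr->bLength < sizeof(*hdr) || hdr->bLength > size)
			break;		/* malformed or truncated notification */
		dev_info(dev, "notification type 0x%02x, %u bytes\n",
			 hdr->bNotifyType, hdr->bLength);
		buf += hdr->bLength;
		size -= hdr->bLength;
	}
}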
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
new file mode 100644
index 000000000000..bfe3752e7c9a
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -0,0 +1,562 @@
1/*
2 * WUSB Wire Adapter
3 * rpipe management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * RPIPE
26 *
27 * Targeted at different downstream endpoints
28 *
29 * Descriptor: used to configure the remote pipe.
30 *
31 * The number of blocks could be dynamic (wBlocks in descriptor is
32 * 0)--need to schedule them then.
33 *
34 * Each bit in wa->rpipe_bm represents if an rpipe is being used or
35 * not. Rpipes are represented with a 'struct wa_rpipe' that is
36 * attached to the hcpriv member of a 'struct usb_host_endpoint'.
37 *
38 * When you need to xfer data to an endpoint, you get an rpipe for it
39 * with rpipe_get_by_ep(), which gives you a reference to the rpipe
40 * and keeps a single one (the first one) with the endpoint. When you
41 * are done transferring, you drop that reference. At the end the
42 * rpipe is always allocated and bound to the endpoint. There it might
43 * be recycled when not used.
44 *
45 * Addresses:
46 *
47 * We use a 1:1 mapping mechanism between port index (0 based) and
48 * the device address. The USB stack knows about this.
49 *
50 * USB Stack port number 4 (1 based)
51 * WUSB code port index 3 (0 based)
52 * USB Address		5 (2 based -- 0 is for default, 1 for root hub)
53 *
54 * Now, because we don't use the concept of a default address exactly
55 * like the (wired) USB code does, we need to kind of skip it. So we
56 * never take addresses from the urb->pipe, but from the
57 * urb->dev->devnum, to make sure that we always have the right
58 * destination address.
59 */
60#include <linux/init.h>
61#include <asm/atomic.h>
62#include <linux/bitmap.h>
63#include "wusbhc.h"
64#include "wa-hc.h"
65
66#define D_LOCAL 0
67#include <linux/uwb/debug.h>
68
69
70static int __rpipe_get_descr(struct wahc *wa,
71 struct usb_rpipe_descriptor *descr, u16 index)
72{
73 ssize_t result;
74 struct device *dev = &wa->usb_iface->dev;
75
76 /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
77 * function because the arguments are different.
78 */
79 d_printf(1, dev, "rpipe %u: get descr\n", index);
80 result = usb_control_msg(
81 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
82 USB_REQ_GET_DESCRIPTOR,
83 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
84 USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
85 1000 /* FIXME: arbitrary */);
86 if (result < 0) {
87 dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
88 index, (int)result);
89 goto error;
90 }
91 if (result < sizeof(*descr)) {
92 dev_err(dev, "rpipe %u: got short descriptor "
93 "(%zd vs %zd bytes needed)\n",
94 index, result, sizeof(*descr));
95 result = -EINVAL;
96 goto error;
97 }
98 result = 0;
99
100error:
101 return result;
102}
103
104/*
105 *
106 * The descriptor is assumed to be properly initialized (ie: you got
107 * it through __rpipe_get_descr()).
108 */
109static int __rpipe_set_descr(struct wahc *wa,
110 struct usb_rpipe_descriptor *descr, u16 index)
111{
112 ssize_t result;
113 struct device *dev = &wa->usb_iface->dev;
114
115 /* we cannot use the usb_get_descriptor() function because the
116 * arguments are different.
117 */
118 d_printf(1, dev, "rpipe %u: set descr\n", index);
119 result = usb_control_msg(
120 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
121 USB_REQ_SET_DESCRIPTOR,
122 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
123 USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
124 HZ / 10);
125 if (result < 0) {
126 dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
127 index, (int)result);
128 goto error;
129 }
130 if (result < sizeof(*descr)) {
131 dev_err(dev, "rpipe %u: sent short descriptor "
132 "(%zd vs %zd bytes required)\n",
133 index, result, sizeof(*descr));
134 result = -EINVAL;
135 goto error;
136 }
137 result = 0;
138
139error:
140 return result;
141
142}
143
144static void rpipe_init(struct wa_rpipe *rpipe)
145{
146 kref_init(&rpipe->refcnt);
147 spin_lock_init(&rpipe->seg_lock);
148 INIT_LIST_HEAD(&rpipe->seg_list);
149}
150
151static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
152{
153 unsigned long flags;
154
155 spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
156 rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
157 if (rpipe_idx < wa->rpipes)
158 set_bit(rpipe_idx, wa->rpipe_bm);
159 spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
160
161 return rpipe_idx;
162}
163
164static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
165{
166 unsigned long flags;
167
168 spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
169 clear_bit(rpipe_idx, wa->rpipe_bm);
170 spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
171}
172
173void rpipe_destroy(struct kref *_rpipe)
174{
175 struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
176 u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
177 d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index);
178 if (rpipe->ep)
179 rpipe->ep->hcpriv = NULL;
180 rpipe_put_idx(rpipe->wa, index);
181 wa_put(rpipe->wa);
182 kfree(rpipe);
183 d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index);
184}
185EXPORT_SYMBOL_GPL(rpipe_destroy);
186
187/*
188 * Locate an idle rpipe, create a structure for it and return it
189 *
190 * @wa is referenced and unlocked
191 * @crs enum rpipe_attr, required endpoint characteristics
192 *
193 * The rpipe can be used only sequentially (not in parallel).
194 *
195 * The rpipe is moved into the "ready" state.
196 */
197static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
198 gfp_t gfp)
199{
200 int result;
201 unsigned rpipe_idx;
202 struct wa_rpipe *rpipe;
203 struct device *dev = &wa->usb_iface->dev;
204
205 d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs);
206 rpipe = kzalloc(sizeof(*rpipe), gfp);
207 if (rpipe == NULL)
208 return -ENOMEM;
209 rpipe_init(rpipe);
210
211 /* Look for an idle pipe */
212 for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
213 rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
214 if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
215 break;
216 result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
217 if (result < 0)
218 dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
219 rpipe_idx, result);
220 else if ((rpipe->descr.bmCharacteristics & crs) != 0)
221 goto found;
222 rpipe_put_idx(wa, rpipe_idx);
223 }
224 *prpipe = NULL;
225 kfree(rpipe);
226 d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs);
227 return -ENXIO;
228
229found:
230 set_bit(rpipe_idx, wa->rpipe_bm);
231 rpipe->wa = wa_get(wa);
232 *prpipe = rpipe;
233 d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs);
234 return 0;
235}
236
237static int __rpipe_reset(struct wahc *wa, unsigned index)
238{
239 int result;
240 struct device *dev = &wa->usb_iface->dev;
241
242 d_printf(1, dev, "rpipe %u: reset\n", index);
243 result = usb_control_msg(
244 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
245 USB_REQ_RPIPE_RESET,
246 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
247 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
248 if (result < 0)
249 dev_err(dev, "rpipe %u: reset failed: %d\n",
250 index, result);
251 return result;
252}
253
254/*
255 * Fake companion descriptor for ep0
256 *
257 * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
258 */
259static struct usb_wireless_ep_comp_descriptor epc0 = {
260 .bLength = sizeof(epc0),
261 .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
262/* .bMaxBurst = 1, */
263 .bMaxSequence = 31,
264};
265
266/*
267 * Look for EP companion descriptor
268 *
269 * Get there, look for Inara in the endpoint's extra descriptors
270 */
271static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
272 struct device *dev, struct usb_host_endpoint *ep)
273{
274 void *itr;
275 size_t itr_size;
276 struct usb_descriptor_header *hdr;
277 struct usb_wireless_ep_comp_descriptor *epcd;
278
279 d_fnstart(3, dev, "(ep %p)\n", ep);
280 if (ep->desc.bEndpointAddress == 0) {
281 epcd = &epc0;
282 goto out;
283 }
284 itr = ep->extra;
285 itr_size = ep->extralen;
286 epcd = NULL;
287 while (itr_size > 0) {
288 if (itr_size < sizeof(*hdr)) {
289 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
290 "at offset %zu: only %zu bytes left\n",
291 ep->desc.bEndpointAddress,
292 itr - (void *) ep->extra, itr_size);
293 break;
294 }
295 hdr = itr;
296 if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
297 epcd = itr;
298 break;
299 }
300 if (hdr->bLength > itr_size) {
301 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
302 "at offset %zu (type 0x%02x) "
303 "length %d but only %zu bytes left\n",
304 ep->desc.bEndpointAddress,
305 itr - (void *) ep->extra, hdr->bDescriptorType,
306 hdr->bLength, itr_size);
307 break;
308 }
309 itr += hdr->bLength;
310		itr_size -= hdr->bLength;
311 }
312out:
313 d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd);
314 return epcd;
315}
316
317/*
318 * Aim an rpipe to its device & endpoint destination
319 *
320 * Make sure we change the address to unauthenticated if the device
321 * is WUSB and it is not authenticated.
322 */
323static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
324 struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
325{
326 int result = -ENOMSG; /* better code for lack of companion? */
327 struct device *dev = &wa->usb_iface->dev;
328 struct usb_device *usb_dev = urb->dev;
329 struct usb_wireless_ep_comp_descriptor *epcd;
330 u8 unauth;
331
332 d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
333 rpipe, wa, ep, urb);
334 epcd = rpipe_epc_find(dev, ep);
335 if (epcd == NULL) {
336 dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
337 ep->desc.bEndpointAddress);
338 goto error;
339 }
340 unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
341 __rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
342 atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
343 /* FIXME: block allocation system; request with queuing and timeout */
344 /* FIXME: compute so seg_size > ep->maxpktsize */
345 rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
346 /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
347 rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
348 rpipe->descr.bHSHubAddress = 0; /* reserved: zero */
349 rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
350 /* FIXME: use maximum speed as supported or recommended by device */
351 rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
352 UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
353 d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
354 urb->dev->devnum, urb->dev->devnum | unauth,
355 le16_to_cpu(rpipe->descr.wRPipeIndex),
356 usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
357 /* see security.c:wusb_update_address() */
358 if (unlikely(urb->dev->devnum == 0x80))
359 rpipe->descr.bDeviceAddress = 0;
360 else
361 rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
362 rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
363 /* FIXME: bDataSequence */
364 rpipe->descr.bDataSequence = 0;
365 /* FIXME: dwCurrentWindow */
366 rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
367 /* FIXME: bMaxDataSequence */
368 rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
369 rpipe->descr.bInterval = ep->desc.bInterval;
370 /* FIXME: bOverTheAirInterval */
371 rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
372 /* FIXME: xmit power & preamble blah blah */
373 rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
374 /* rpipe->descr.bmCharacteristics RO */
375 /* FIXME: bmRetryOptions */
376 rpipe->descr.bmRetryOptions = 15;
377 /* FIXME: use for assessing link quality? */
378 rpipe->descr.wNumTransactionErrors = 0;
379 result = __rpipe_set_descr(wa, &rpipe->descr,
380 le16_to_cpu(rpipe->descr.wRPipeIndex));
381 if (result < 0) {
382 dev_err(dev, "Cannot aim rpipe: %d\n", result);
383 goto error;
384 }
385 result = 0;
386error:
387 d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n",
388 rpipe, wa, ep, urb, result);
389 return result;
390}
391
392/*
393 * Check an aimed rpipe to make sure it points to where we want
394 *
395 * If the device is WUSB and not yet authenticated, we OR 0x80 into
396 * the device address to form the unauthenticated address.
397 */
398static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
399 const struct usb_host_endpoint *ep,
400 const struct urb *urb, gfp_t gfp)
401{
402 int result = 0; /* better code for lack of companion? */
403 struct device *dev = &wa->usb_iface->dev;
404 struct usb_device *usb_dev = urb->dev;
405 u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
406 u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
407
408 d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
409 rpipe, wa, ep, urb);
410#define AIM_CHECK(rdf, val, text) \
411 do { \
412 if (rpipe->descr.rdf != (val)) { \
413 dev_err(dev, \
414 "rpipe aim discrepancy: " #rdf " " text "\n", \
415 rpipe->descr.rdf, (val)); \
416 result = -EINVAL; \
417 WARN_ON(1); \
418 } \
419 } while (0)
420 AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
421 "(%u vs %u)");
422 AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
423 AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
424 UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
425 "(%u vs %u)");
426 AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
427 AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
428 AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
429 AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
430#undef AIM_CHECK
431 return result;
432}
433
434#ifndef CONFIG_BUG
435#define CONFIG_BUG 0
436#endif
437
438/*
439 * Make sure there is an rpipe allocated for an endpoint
440 *
441 * If already allocated, we just refcount it; if not, we get an
442 * idle one, aim it to the right location and take it.
443 *
444 * Attaches to ep->hcpriv and rpipe->ep to ep.
445 */
446int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
447 struct urb *urb, gfp_t gfp)
448{
449 int result = 0;
450 struct device *dev = &wa->usb_iface->dev;
451 struct wa_rpipe *rpipe;
452 u8 eptype;
453
454 d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb,
455 gfp);
456 mutex_lock(&wa->rpipe_mutex);
457 rpipe = ep->hcpriv;
458 if (rpipe != NULL) {
459 if (CONFIG_BUG == 1) {
460 result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
461 if (result < 0)
462 goto error;
463 }
464 __rpipe_get(rpipe);
465 d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n",
466 ep->desc.bEndpointAddress,
467 le16_to_cpu(rpipe->descr.wRPipeIndex));
468 } else {
469 /* hmm, assign idle rpipe, aim it */
470 result = -ENOBUFS;
471 eptype = ep->desc.bmAttributes & 0x03;
472 result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
473 if (result < 0)
474 goto error;
475 result = rpipe_aim(rpipe, wa, ep, urb, gfp);
476 if (result < 0) {
477 rpipe_put(rpipe);
478 goto error;
479 }
480 ep->hcpriv = rpipe;
481 rpipe->ep = ep;
482 __rpipe_get(rpipe); /* for caching into ep->hcpriv */
483 d_printf(2, dev, "ep 0x%02x: using rpipe %u\n",
484 ep->desc.bEndpointAddress,
485 le16_to_cpu(rpipe->descr.wRPipeIndex));
486 }
487 d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr));
488error:
489 mutex_unlock(&wa->rpipe_mutex);
490 d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp);
491 return result;
492}
493
494/*
495 * Allocate the bitmap for each rpipe.
496 */
497int wa_rpipes_create(struct wahc *wa)
498{
499 wa->rpipes = wa->wa_descr->wNumRPipes;
500 wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
501 GFP_KERNEL);
502 if (wa->rpipe_bm == NULL)
503 return -ENOMEM;
504 return 0;
505}
506
507void wa_rpipes_destroy(struct wahc *wa)
508{
509 struct device *dev = &wa->usb_iface->dev;
510 d_fnstart(3, dev, "(wa %p)\n", wa);
511 if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
512 char buf[256];
513 WARN_ON(1);
514 bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
515 dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
516 }
517 kfree(wa->rpipe_bm);
518 d_fnend(3, dev, "(wa %p)\n", wa);
519}
520
521/*
522 * Release resources allocated for an endpoint
523 *
524 * If there is an rpipe associated with this endpoint, abort any pending
525 * transfers and put it. If the rpipe ends up being destroyed,
526 * __rpipe_destroy() will cleanup ep->hcpriv.
527 *
528 * This is called before calling hcd->stop(), so you don't need to do
529 * anything else in there.
530 */
531void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
532{
533 struct device *dev = &wa->usb_iface->dev;
534 struct wa_rpipe *rpipe;
535 d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep);
536 mutex_lock(&wa->rpipe_mutex);
537 rpipe = ep->hcpriv;
538 if (rpipe != NULL) {
539 unsigned rc = atomic_read(&rpipe->refcnt.refcount);
540 int result;
541 u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
542
543 if (rc != 1)
544 d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n",
545 wa, ep, rpipe, rc);
546
547 d_printf(1, dev, "rpipe %u: abort\n", index);
548 result = usb_control_msg(
549 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
550 USB_REQ_RPIPE_ABORT,
551 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
552 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
553 if (result < 0 && result != -ENODEV /* dev is gone */)
554 d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n",
555 wa, index, result);
556 rpipe_put(rpipe);
557 }
558 mutex_unlock(&wa->rpipe_mutex);
559 d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep);
560 return;
561}
562EXPORT_SYMBOL_GPL(rpipe_ep_disable);
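
A worked example of the addressing rules described in the header comment of this file (illustration only, not from the patch): for a WUSB device on 1-based root port 4 with devnum 5 that has not yet been authenticated, rpipe_aim() ends up programming port index 3 and device address 0x85. The sketch below isolates just that computation, reusing wusb_port_no_to_idx() and the 0x80 unauthenticated bit used above; the devnum == 0x80 default-address special case is omitted.

/* Illustration: how rpipe_aim() derives the rpipe's target address fields. */
static void example_rpipe_addressing(struct wa_rpipe *rpipe, struct urb *urb)
{
	u8 unauth = (urb->dev->wusb && !urb->dev->authenticated) ? 0x80 : 0;

	/* port 4 (1 based) -> port index 3 (0 based) */
	rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
	/* devnum 5, unauthenticated -> bDeviceAddress 0x85 */
	rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
}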
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
new file mode 100644
index 000000000000..7d192f3e6742
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -0,0 +1,1709 @@
1/*
2 * WUSB Wire Adapter
3 * Data transfer and URB enqueuing
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * How transfers work: get a buffer, break it up in segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
27 *
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
32 *
33 * Sounds simple, it is a pain to implement.
34 *
35 *
36 * ENTRY POINTS
37 *
38 * FIXME
39 *
40 * LIFE CYCLE / STATE DIAGRAM
41 *
42 * FIXME
43 *
44 * THIS CODE IS DISGUSTING
45 *
46 * Warned you are; it's my second try and still not happy with it.
47 *
48 * NOTES:
49 *
50 * - No iso
51 *
52 * - Supports DMA xfers, control, bulk and maybe interrupt
53 *
54 * - Does not recycle unused rpipes
55 *
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_mutex
60 * (see rpipe_get_by_ep()).
61 *
62 * Two ways it could be done:
63 *
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when a xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()].
68 *
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds) take it
73 *
74 * However, due to the fact that we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
77 * we are going to have to rebuild all this based on a scheduler,
78 * where we have a list of transactions to do and based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
81 */
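/*
 * Worked sizing example (illustration only; the numbers are hypothetical):
 * __wa_xfer_setup_sizes() below rounds the rpipe's block allocation down
 * to a multiple of wMaxPacketSize and then splits the URB into segments.
 * E.g. if the rounded seg_size comes out as 3584 bytes (7 * 512) and the
 * URB carries 9000 bytes, the transfer becomes 3 segments of 3584, 3584
 * and 1832 bytes, i.e. segs = DIV_ROUND_UP(9000, 3584) = 3.
 */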
82#include <linux/init.h>
83#include <linux/spinlock.h>
84#include <linux/hash.h>
85#include "wa-hc.h"
86#include "wusbhc.h"
87
88#undef D_LOCAL
89#define D_LOCAL 0 /* 0 disabled, > 0 different levels... */
90#include <linux/uwb/debug.h>
91
92enum {
93 WA_SEGS_MAX = 255,
94};
95
96enum wa_seg_status {
97 WA_SEG_NOTREADY,
98 WA_SEG_READY,
99 WA_SEG_DELAYED,
100 WA_SEG_SUBMITTED,
101 WA_SEG_PENDING,
102 WA_SEG_DTI_PENDING,
103 WA_SEG_DONE,
104 WA_SEG_ERROR,
105 WA_SEG_ABORTED,
106};
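/*
 * Rough segment life cycle, as inferred from the submit and completion
 * paths in this file (illustrative summary, not normative):
 *
 *   NOTREADY -> READY      segment allocated and filled in
 *   READY -> SUBMITTED     request (plus DTO data if outbound) sent to the WA
 *   READY -> DELAYED       submission deferred to the workqueue (atomic
 *                          context or no free slots on the rpipe)
 *   SUBMITTED -> PENDING   accepted by the WA, waiting for the xfer result
 *   PENDING -> DTI_PENDING inbound data still to be read over DTI
 *   ... -> DONE/ERROR/ABORTED  terminal states accounted in segs_done
 */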
107
108static void wa_xfer_delayed_run(struct wa_rpipe *);
109
110/*
111 * Life cycle governed by 'struct urb' (the refcount of the struct is
112 * that of the 'struct urb' and usb_free_urb() would free the whole
113 * struct).
114 */
115struct wa_seg {
116 struct urb urb;
117 struct urb *dto_urb; /* for data output? */
118 struct list_head list_node; /* for rpipe->req_list */
119 struct wa_xfer *xfer; /* out xfer */
120 u8 index; /* which segment we are */
121 enum wa_seg_status status;
122 ssize_t result; /* bytes xfered or error */
123 struct wa_xfer_hdr xfer_hdr;
124 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
125};
126
127static void wa_seg_init(struct wa_seg *seg)
128{
129 /* usb_init_urb() repeats a lot of work, so we do it here */
130 kref_init(&seg->urb.kref);
131}
132
133/*
134 * Protected by xfer->lock
135 *
136 */
137struct wa_xfer {
138 struct kref refcnt;
139 struct list_head list_node;
140 spinlock_t lock;
141 u32 id;
142
143 struct wahc *wa; /* Wire adapter we are plugged to */
144 struct usb_host_endpoint *ep;
145	struct urb *urb;		/* URB we are transferring for */
146 struct wa_seg **seg; /* transfer segments */
147 u8 segs, segs_submitted, segs_done;
148 unsigned is_inbound:1;
149 unsigned is_dma:1;
150 size_t seg_size;
151 int result;
152
153 gfp_t gfp; /* allocation mask */
154
155 struct wusb_dev *wusb_dev; /* for activity timestamps */
156};
157
158static inline void wa_xfer_init(struct wa_xfer *xfer)
159{
160 kref_init(&xfer->refcnt);
161 INIT_LIST_HEAD(&xfer->list_node);
162 spin_lock_init(&xfer->lock);
163}
164
165/*
166 * Destroy a transfer structure
167 *
168 * Note that the xfer->seg[index] thingies follow the URB life cycle,
169 * so we need to put them, not free them.
170 */
171static void wa_xfer_destroy(struct kref *_xfer)
172{
173 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
174 if (xfer->seg) {
175 unsigned cnt;
176 for (cnt = 0; cnt < xfer->segs; cnt++) {
177 if (xfer->is_inbound)
178 usb_put_urb(xfer->seg[cnt]->dto_urb);
179 usb_put_urb(&xfer->seg[cnt]->urb);
180 }
181 }
182 kfree(xfer);
183 d_printf(2, NULL, "xfer %p destroyed\n", xfer);
184}
185
186static void wa_xfer_get(struct wa_xfer *xfer)
187{
188 kref_get(&xfer->refcnt);
189}
190
191static void wa_xfer_put(struct wa_xfer *xfer)
192{
193 d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n",
194 xfer, atomic_read(&xfer->refcnt.refcount));
195 kref_put(&xfer->refcnt, wa_xfer_destroy);
196 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
197}
198
199/*
200 * xfer is referenced
201 *
202 * xfer->lock has to be unlocked
203 *
204 * We take xfer->lock for setting the result; this is a barrier
205 * against drivers/usb/core/hcd.c:unlink1() being called after we call
206 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
207 * reference to the transfer.
208 */
209static void wa_xfer_giveback(struct wa_xfer *xfer)
210{
211 unsigned long flags;
212 d_fnstart(3, NULL, "(xfer %p)\n", xfer);
213 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
214 list_del_init(&xfer->list_node);
215 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
216 /* FIXME: segmentation broken -- kills DWA */
217 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
218 wa_put(xfer->wa);
219 wa_xfer_put(xfer);
220 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
221}
222
223/*
224 * xfer is referenced
225 *
226 * xfer->lock has to be unlocked
227 */
228static void wa_xfer_completion(struct wa_xfer *xfer)
229{
230 d_fnstart(3, NULL, "(xfer %p)\n", xfer);
231 if (xfer->wusb_dev)
232 wusb_dev_put(xfer->wusb_dev);
233 rpipe_put(xfer->ep->hcpriv);
234 wa_xfer_giveback(xfer);
235 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
236 return;
237}
238
239/*
240 * If transfer is done, wrap it up and return true
241 *
242 * xfer->lock has to be locked
243 */
244static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
245{
246 unsigned result, cnt;
247 struct wa_seg *seg;
248 struct urb *urb = xfer->urb;
249 unsigned found_short = 0;
250
251 d_fnstart(3, NULL, "(xfer %p)\n", xfer);
252 result = xfer->segs_done == xfer->segs_submitted;
253 if (result == 0)
254 goto out;
255 urb->actual_length = 0;
256 for (cnt = 0; cnt < xfer->segs; cnt++) {
257 seg = xfer->seg[cnt];
258 switch (seg->status) {
259 case WA_SEG_DONE:
260 if (found_short && seg->result > 0) {
261 if (printk_ratelimit())
262 printk(KERN_ERR "xfer %p#%u: bad short "
263 "segments (%zu)\n", xfer, cnt,
264 seg->result);
265 urb->status = -EINVAL;
266 goto out;
267 }
268 urb->actual_length += seg->result;
269 if (seg->result < xfer->seg_size
270 && cnt != xfer->segs-1)
271 found_short = 1;
272 d_printf(2, NULL, "xfer %p#%u: DONE short %d "
273 "result %zu urb->actual_length %d\n",
274 xfer, seg->index, found_short, seg->result,
275 urb->actual_length);
276 break;
277 case WA_SEG_ERROR:
278 xfer->result = seg->result;
279 d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n",
280 xfer, seg->index, seg->result);
281 goto out;
282 case WA_SEG_ABORTED:
283 WARN_ON(urb->status != -ECONNRESET
284 && urb->status != -ENOENT);
285 d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n",
286 xfer, seg->index, urb->status);
287 xfer->result = urb->status;
288 goto out;
289 default:
290 /* if (printk_ratelimit()) */
291 printk(KERN_ERR "xfer %p#%u: "
292 "is_done bad state %d\n",
293 xfer, cnt, seg->status);
294 xfer->result = -EINVAL;
295 WARN_ON(1);
296 goto out;
297 }
298 }
299 xfer->result = 0;
300out:
301 d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
302 return result;
303}
304
305/*
306 * Initialize a transfer's ID
307 *
308 * We need to use a sequential number; if we use the pointer or the
309 * hash of the pointer, it can repeat over sequential transfers and
310 * then it will confuse the HWA....wonder why in hell they put a 32
311 * bit handle in there then.
312 */
313static void wa_xfer_id_init(struct wa_xfer *xfer)
314{
315 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
316}
317
318/*
319 * Return the xfer's ID associated with xfer
320 *
321 * Need to generate a
322 */
323static u32 wa_xfer_id(struct wa_xfer *xfer)
324{
325 return xfer->id;
326}
327
328/*
328 * Search for a transfer by ID on the WA's active transfer list
329 *
330 * The ID is the sequential number assigned in wa_xfer_id_init(); we
331 * walk wa->xfer_list under wa->xfer_list_lock looking for it.
333 *
334 * @returns NULL if not found.
335 */
336static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
337{
338 unsigned long flags;
339 struct wa_xfer *xfer_itr;
340 spin_lock_irqsave(&wa->xfer_list_lock, flags);
341 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
342 if (id == xfer_itr->id) {
343 wa_xfer_get(xfer_itr);
344 goto out;
345 }
346 }
347 xfer_itr = NULL;
348out:
349 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
350 return xfer_itr;
351}
352
353struct wa_xfer_abort_buffer {
354 struct urb urb;
355 struct wa_xfer_abort cmd;
356};
357
358static void __wa_xfer_abort_cb(struct urb *urb)
359{
360 struct wa_xfer_abort_buffer *b = urb->context;
361 usb_put_urb(&b->urb);
362}
363
364/*
365 * Aborts an ongoing transaction
366 *
367 * Assumes the transfer is referenced and locked and in a submitted
368 * state (mainly that there is an endpoint/rpipe assigned).
369 *
370 * The callback (see above) does nothing but freeing up the data by
371 * putting the URB. Because the URB is allocated at the head of the
372 * struct, the whole space we allocated is kfreed.
373 *
374 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
375 * politely ignore because at that point the transaction will already
376 * have been marked as aborted.
377 */
378static void __wa_xfer_abort(struct wa_xfer *xfer)
379{
380 int result;
381 struct device *dev = &xfer->wa->usb_iface->dev;
382 struct wa_xfer_abort_buffer *b;
383 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
384
385 b = kmalloc(sizeof(*b), GFP_ATOMIC);
386 if (b == NULL)
387 goto error_kmalloc;
388 b->cmd.bLength = sizeof(b->cmd);
389 b->cmd.bRequestType = WA_XFER_ABORT;
390 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
391 b->cmd.dwTransferID = wa_xfer_id(xfer);
392
393 usb_init_urb(&b->urb);
394 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
395 usb_sndbulkpipe(xfer->wa->usb_dev,
396 xfer->wa->dto_epd->bEndpointAddress),
397 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
398 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
399 if (result < 0)
400 goto error_submit;
401 return; /* callback frees! */
402
403
404error_submit:
405 if (printk_ratelimit())
406 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
407 xfer, result);
408 kfree(b);
409error_kmalloc:
410 return;
411
412}
413
414/*
415 * Compute how a transfer shall be segmented and which request type to use
416 * @returns < 0 on error, transfer segment request size if ok
417 */
418static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
419 enum wa_xfer_type *pxfer_type)
420{
421 ssize_t result;
422 struct device *dev = &xfer->wa->usb_iface->dev;
423 size_t maxpktsize;
424 struct urb *urb = xfer->urb;
425 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
426
427 d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
428 xfer, rpipe, urb);
429 switch (rpipe->descr.bmAttribute & 0x3) {
430 case USB_ENDPOINT_XFER_CONTROL:
431 *pxfer_type = WA_XFER_TYPE_CTL;
432 result = sizeof(struct wa_xfer_ctl);
433 break;
434 case USB_ENDPOINT_XFER_INT:
435 case USB_ENDPOINT_XFER_BULK:
436 *pxfer_type = WA_XFER_TYPE_BI;
437 result = sizeof(struct wa_xfer_bi);
438 break;
439 case USB_ENDPOINT_XFER_ISOC:
440 dev_err(dev, "FIXME: ISOC not implemented\n");
441 result = -ENOSYS;
442 goto error;
443 default:
444 /* never happens */
445 BUG();
446 result = -EINVAL; /* shut gcc up */
447	}
448 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
449 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
450 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
451 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
452 /* Compute the segment size and make sure it is a multiple of
453 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
454 * a check (FIXME) */
455 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
456 if (xfer->seg_size < maxpktsize) {
457 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
458 "%zu\n", xfer->seg_size, maxpktsize);
459 result = -EINVAL;
460 goto error;
461 }
462 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
463 xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
464 / xfer->seg_size;
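	/*
	 * Illustrative numbers only (not from the spec): with wBlocks = 4,
	 * bRPipeBlockSize = 10 and wMaxPacketSize = 512, seg_size is
	 * 4 * (1 << 9) = 2048 bytes (already a multiple of 512), so a
	 * 5000 byte urb->transfer_buffer_length yields 3 segments.
	 */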
465 if (xfer->segs >= WA_SEGS_MAX) {
466		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
467 (int)(urb->transfer_buffer_length / xfer->seg_size),
468 WA_SEGS_MAX);
469 result = -EINVAL;
470 goto error;
471 }
472 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
473 xfer->segs = 1;
474error:
475 d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
476 xfer, rpipe, urb, (int)result);
477 return result;
478}
479
480/** Fill in the common request header and xfer-type specific data. */
481static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
482 struct wa_xfer_hdr *xfer_hdr0,
483 enum wa_xfer_type xfer_type,
484 size_t xfer_hdr_size)
485{
486 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
487
488 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
489 xfer_hdr0->bLength = xfer_hdr_size;
490 xfer_hdr0->bRequestType = xfer_type;
491 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
492 xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
493 xfer_hdr0->bTransferSegment = 0;
494 switch (xfer_type) {
495 case WA_XFER_TYPE_CTL: {
496 struct wa_xfer_ctl *xfer_ctl =
497 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
498 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
499 BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
500 && xfer->urb->setup_packet == NULL);
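		/* The control setup packet travels inside the request
		 * header itself (baSetupData); it is not sent as a
		 * separate stage to the wire adapter. */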
501 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
502 sizeof(xfer_ctl->baSetupData));
503 break;
504 }
505 case WA_XFER_TYPE_BI:
506 break;
507 case WA_XFER_TYPE_ISO:
508 printk(KERN_ERR "FIXME: ISOC not implemented\n");
509 default:
510 BUG();
511	}
512}
513
514/*
515 * Callback for the OUT data phase of the segment request
516 *
517 * Check wa_seg_cb(); most comments also apply here because this
518 * function does almost the same thing and they work closely
519 * together.
520 *
521 * If the seg request has failed but this DTO phase has succeeded,
522 * wa_seg_cb() has already failed the segment and moved the
523 * status to WA_SEG_ERROR, so this will go through 'case 0' and
524 * effectively do nothing.
525 */
526static void wa_seg_dto_cb(struct urb *urb)
527{
528 struct wa_seg *seg = urb->context;
529 struct wa_xfer *xfer = seg->xfer;
530 struct wahc *wa;
531 struct device *dev;
532 struct wa_rpipe *rpipe;
533 unsigned long flags;
534 unsigned rpipe_ready = 0;
535 u8 done = 0;
536
537 d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
538 switch (urb->status) {
539 case 0:
540 spin_lock_irqsave(&xfer->lock, flags);
541 wa = xfer->wa;
542 dev = &wa->usb_iface->dev;
543 d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n",
544 xfer, seg->index, urb->actual_length);
545 if (seg->status < WA_SEG_PENDING)
546 seg->status = WA_SEG_PENDING;
547 seg->result = urb->actual_length;
548 spin_unlock_irqrestore(&xfer->lock, flags);
549 break;
550 case -ECONNRESET: /* URB unlinked; no need to do anything */
551	case -ENOENT:		/* as it was done by whoever unlinked us */
552 break;
553 default: /* Other errors ... */
554 spin_lock_irqsave(&xfer->lock, flags);
555 wa = xfer->wa;
556 dev = &wa->usb_iface->dev;
557 rpipe = xfer->ep->hcpriv;
558 if (printk_ratelimit())
559 dev_err(dev, "xfer %p#%u: data out error %d\n",
560 xfer, seg->index, urb->status);
561 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
562 EDC_ERROR_TIMEFRAME)){
563 dev_err(dev, "DTO: URB max acceptable errors "
564 "exceeded, resetting device\n");
565 wa_reset_all(wa);
566 }
567 if (seg->status != WA_SEG_ERROR) {
568 seg->status = WA_SEG_ERROR;
569 seg->result = urb->status;
570 xfer->segs_done++;
571 __wa_xfer_abort(xfer);
572 rpipe_ready = rpipe_avail_inc(rpipe);
573 done = __wa_xfer_is_done(xfer);
574 }
575 spin_unlock_irqrestore(&xfer->lock, flags);
576 if (done)
577 wa_xfer_completion(xfer);
578 if (rpipe_ready)
579 wa_xfer_delayed_run(rpipe);
580 }
581 d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
582}
583
584/*
585 * Callback for the segment request
586 *
587 * If successful, transition state (unless already transitioned or an
588 * outbound transfer); otherwise, take note of the error, mark this
589 * segment done and try completion.
590 *
591 * Note we don't access the xfer data until we are sure the transfer
592 * hasn't been cancelled (ECONNRESET, ENOENT), in which case
593 * seg->xfer might already be gone.
594 *
595 * We have to check before setting the status to WA_SEG_PENDING
596 * because sometimes the xfer result callback arrives before this
597 * callback (geeeeeeze), so it might happen that we are already in
598 * another state. As well, we don't set it if the transfer is inbound,
599 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
600 * finishes.
601 */
602static void wa_seg_cb(struct urb *urb)
603{
604 struct wa_seg *seg = urb->context;
605 struct wa_xfer *xfer = seg->xfer;
606 struct wahc *wa;
607 struct device *dev;
608 struct wa_rpipe *rpipe;
609 unsigned long flags;
610 unsigned rpipe_ready;
611 u8 done = 0;
612
613 d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
614 switch (urb->status) {
615 case 0:
616 spin_lock_irqsave(&xfer->lock, flags);
617 wa = xfer->wa;
618 dev = &wa->usb_iface->dev;
619 d_printf(2, dev, "xfer %p#%u: request done\n",
620 xfer, seg->index);
621 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
622 seg->status = WA_SEG_PENDING;
623 spin_unlock_irqrestore(&xfer->lock, flags);
624 break;
625 case -ECONNRESET: /* URB unlinked; no need to do anything */
626	case -ENOENT:		/* as it was done by whoever unlinked us */
627 break;
628 default: /* Other errors ... */
629 spin_lock_irqsave(&xfer->lock, flags);
630 wa = xfer->wa;
631 dev = &wa->usb_iface->dev;
632 rpipe = xfer->ep->hcpriv;
633 if (printk_ratelimit())
634 dev_err(dev, "xfer %p#%u: request error %d\n",
635 xfer, seg->index, urb->status);
636 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
637 EDC_ERROR_TIMEFRAME)){
638 dev_err(dev, "DTO: URB max acceptable errors "
639 "exceeded, resetting device\n");
640 wa_reset_all(wa);
641 }
642 usb_unlink_urb(seg->dto_urb);
643 seg->status = WA_SEG_ERROR;
644 seg->result = urb->status;
645 xfer->segs_done++;
646 __wa_xfer_abort(xfer);
647 rpipe_ready = rpipe_avail_inc(rpipe);
648 done = __wa_xfer_is_done(xfer);
649 spin_unlock_irqrestore(&xfer->lock, flags);
650 if (done)
651 wa_xfer_completion(xfer);
652 if (rpipe_ready)
653 wa_xfer_delayed_run(rpipe);
654 }
655 d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
656}
657
658/*
659 * Allocate the segs array and initialize each of them
660 *
661 * The segments are freed by wa_xfer_destroy() when the xfer use count
662 * drops to zero; however, because each segment is given the same life
663 * cycle as the USB URB it contains, it is actually freed by
664 * usb_put_urb() on the contained USB URB (twisted, eh?).
665 */
666static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
667{
668 int result, cnt;
669 size_t alloc_size = sizeof(*xfer->seg[0])
670 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
671 struct usb_device *usb_dev = xfer->wa->usb_dev;
672 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
673 struct wa_seg *seg;
674 size_t buf_itr, buf_size, buf_itr_size;
675
676 result = -ENOMEM;
677 xfer->seg = kzalloc(xfer->segs * sizeof(xfer->seg[0]), GFP_ATOMIC);
678 if (xfer->seg == NULL)
679 goto error_segs_kzalloc;
680 buf_itr = 0;
681 buf_size = xfer->urb->transfer_buffer_length;
682 for (cnt = 0; cnt < xfer->segs; cnt++) {
683 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
684 if (seg == NULL)
685 goto error_seg_kzalloc;
686 wa_seg_init(seg);
687 seg->xfer = xfer;
688 seg->index = cnt;
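		/* The request header is sent to the DTO bulk endpoint; its
		 * storage occupies the tail of the wa_seg allocation (see
		 * alloc_size above). */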
689 usb_fill_bulk_urb(&seg->urb, usb_dev,
690 usb_sndbulkpipe(usb_dev,
691 dto_epd->bEndpointAddress),
692 &seg->xfer_hdr, xfer_hdr_size,
693 wa_seg_cb, seg);
694 buf_itr_size = buf_size > xfer->seg_size ?
695 xfer->seg_size : buf_size;
696 if (xfer->is_inbound == 0 && buf_size > 0) {
697 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
698 if (seg->dto_urb == NULL)
699 goto error_dto_alloc;
700 usb_fill_bulk_urb(
701 seg->dto_urb, usb_dev,
702 usb_sndbulkpipe(usb_dev,
703 dto_epd->bEndpointAddress),
704 NULL, 0, wa_seg_dto_cb, seg);
705 if (xfer->is_dma) {
706 seg->dto_urb->transfer_dma =
707 xfer->urb->transfer_dma + buf_itr;
708 seg->dto_urb->transfer_flags |=
709 URB_NO_TRANSFER_DMA_MAP;
710 } else
711 seg->dto_urb->transfer_buffer =
712 xfer->urb->transfer_buffer + buf_itr;
713 seg->dto_urb->transfer_buffer_length = buf_itr_size;
714 }
715 seg->status = WA_SEG_READY;
716 buf_itr += buf_itr_size;
717 buf_size -= buf_itr_size;
718 }
719 return 0;
720
721error_dto_alloc:
722	kfree(xfer->seg[cnt]);
723error_seg_kzalloc:
724	/* cnt is left at the index that failed to set up */
725	while (cnt > 0) {
726		cnt--;
727		if (xfer->is_inbound == 0)
728			usb_free_urb(xfer->seg[cnt]->dto_urb);
729		kfree(xfer->seg[cnt]);
730	}
731error_segs_kzalloc:
732 return result;
733}
734
735/*
736 * Allocates all the stuff needed to submit a transfer
737 *
738 * Breaks the whole data buffer in a list of segments, each one has a
739 * structure allocated to it and linked in xfer->seg[index]
740 *
741 * FIXME: merge setup_segs() and the last part of this function, no
742 * need to do two for loops when we could run everything in a
743 * single one
744 */
745static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
746{
747 int result;
748 struct device *dev = &xfer->wa->usb_iface->dev;
749 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
750 size_t xfer_hdr_size, cnt, transfer_size;
751 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
752
753 d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
754 xfer, xfer->ep->hcpriv, urb);
755
756 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
757 if (result < 0)
758 goto error_setup_sizes;
759 xfer_hdr_size = result;
760 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
761 if (result < 0) {
762 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
763 xfer, xfer->segs, result);
764 goto error_setup_segs;
765 }
766 /* Fill the first header */
767 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
768 wa_xfer_id_init(xfer);
769 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
770
771	/* Fill the remaining headers */
772 xfer_hdr = xfer_hdr0;
773 transfer_size = urb->transfer_buffer_length;
774	xfer_hdr0->dwTransferLength = cpu_to_le32(transfer_size > xfer->seg_size ?
775		xfer->seg_size : transfer_size);
776 transfer_size -= xfer->seg_size;
777 for (cnt = 1; cnt < xfer->segs; cnt++) {
778 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
779 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
780 xfer_hdr->bTransferSegment = cnt;
781 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
782 cpu_to_le32(xfer->seg_size)
783 : cpu_to_le32(transfer_size);
784 xfer->seg[cnt]->status = WA_SEG_READY;
785 transfer_size -= xfer->seg_size;
786 }
787 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
788 result = 0;
789error_setup_segs:
790error_setup_sizes:
791 d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
792 xfer, xfer->ep->hcpriv, urb, result);
793 return result;
794}
795
796/*
797 * Submit a transfer segment: post its request URB and, for outbound
798 * data, the DTO URB carrying the payload.
799 * rpipe->seg_lock is held!
800 */
801static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
802 struct wa_seg *seg)
803{
804 int result;
805 result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
806 if (result < 0) {
807 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
808 xfer, seg->index, result);
809 goto error_seg_submit;
810 }
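	/* For outbound transfers the data payload follows the request on
	 * the same DTO bulk pipe, carried by the segment's dto_urb. */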
811 if (seg->dto_urb) {
812 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
813 if (result < 0) {
814 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
815 xfer, seg->index, result);
816 goto error_dto_submit;
817 }
818 }
819 seg->status = WA_SEG_SUBMITTED;
820 rpipe_avail_dec(rpipe);
821 return 0;
822
823error_dto_submit:
824 usb_unlink_urb(&seg->urb);
825error_seg_submit:
826 seg->status = WA_SEG_ERROR;
827 seg->result = result;
828 return result;
829}
830
831/*
832 * Execute more queued request segments until the maximum concurrent allowed
833 * is reached
834 * The ugly unlock/lock sequence on the error path is needed as the
835 * xfer->lock normally nests outside the seg_lock and not vice versa.
836 *
837 */
838static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
839{
840 int result;
841 struct device *dev = &rpipe->wa->usb_iface->dev;
842 struct wa_seg *seg;
843 struct wa_xfer *xfer;
844 unsigned long flags;
845
846 d_fnstart(1, dev, "(rpipe #%d) %d segments available\n",
847 le16_to_cpu(rpipe->descr.wRPipeIndex),
848 atomic_read(&rpipe->segs_available));
849 spin_lock_irqsave(&rpipe->seg_lock, flags);
850 while (atomic_read(&rpipe->segs_available) > 0
851 && !list_empty(&rpipe->seg_list)) {
852 seg = list_entry(rpipe->seg_list.next, struct wa_seg,
853 list_node);
854 list_del(&seg->list_node);
855 xfer = seg->xfer;
856 result = __wa_seg_submit(rpipe, xfer, seg);
857 d_printf(1, dev, "xfer %p#%u submitted from delayed "
858 "[%d segments available] %d\n",
859 xfer, seg->index,
860 atomic_read(&rpipe->segs_available), result);
861 if (unlikely(result < 0)) {
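			/* Lock ordering: xfer->lock normally nests outside
			 * seg_lock, so drop seg_lock before taking
			 * xfer->lock for the abort and reacquire it after. */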
862 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
863 spin_lock_irqsave(&xfer->lock, flags);
864 __wa_xfer_abort(xfer);
865 xfer->segs_done++;
866 spin_unlock_irqrestore(&xfer->lock, flags);
867 spin_lock_irqsave(&rpipe->seg_lock, flags);
868 }
869 }
870 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
871 d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n",
872 le16_to_cpu(rpipe->descr.wRPipeIndex),
873 atomic_read(&rpipe->segs_available));
874
875}
876
877/*
878 * Submit a transfer's segments to its rpipe, delaying those for
879 * which the rpipe has no room yet; xfer->lock is taken
880 *
881 * On submit failure we just stop submitting and return the error;
882 * wa_urb_enqueue_b() will execute the completion path
883 */
884static int __wa_xfer_submit(struct wa_xfer *xfer)
885{
886 int result;
887 struct wahc *wa = xfer->wa;
888 struct device *dev = &wa->usb_iface->dev;
889 unsigned cnt;
890 struct wa_seg *seg;
891 unsigned long flags;
892 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
893 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
894 u8 available;
895 u8 empty;
896
897 d_fnstart(3, dev, "(xfer %p [rpipe %p])\n",
898 xfer, xfer->ep->hcpriv);
899
900 spin_lock_irqsave(&wa->xfer_list_lock, flags);
901 list_add_tail(&xfer->list_node, &wa->xfer_list);
902 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
903
904 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
905 result = 0;
906 spin_lock_irqsave(&rpipe->seg_lock, flags);
907 for (cnt = 0; cnt < xfer->segs; cnt++) {
908 available = atomic_read(&rpipe->segs_available);
909 empty = list_empty(&rpipe->seg_list);
910 seg = xfer->seg[cnt];
911 d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n",
912 xfer, cnt, available, empty,
913 available == 0 || !empty ? "delayed" : "submitted");
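		/* Keep segments in order: if the rpipe has no free request
		 * slots or older segments are already waiting, queue this
		 * one for wa_xfer_delayed_run() instead of submitting. */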
914 if (available == 0 || !empty) {
915 d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt);
916 seg->status = WA_SEG_DELAYED;
917 list_add_tail(&seg->list_node, &rpipe->seg_list);
918 } else {
919 result = __wa_seg_submit(rpipe, xfer, seg);
920 if (result < 0)
921 goto error_seg_submit;
922 }
923 xfer->segs_submitted++;
924 }
925 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
926 d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
927 xfer->ep->hcpriv);
928 return result;
929
930error_seg_submit:
931 __wa_xfer_abort(xfer);
932 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
933 d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
934 xfer->ep->hcpriv);
935 return result;
936}
937
938/*
939 * Second part of a URB/transfer enqueue operation
940 *
941 * Assumes this comes from wa_urb_enqueue() [maybe through
942 * wa_urb_enqueue_run()]. At this point:
943 *
944 * xfer->wa filled and refcounted
945 * xfer->ep filled with rpipe refcounted if
946 * delayed == 0
947 * xfer->urb filled and refcounted (this is the case when called
948 * from wa_urb_enqueue() as we come from usb_submit_urb()
949 * and when called by wa_urb_enqueue_run(), as we took an
950 * extra ref dropped by _run() after we return).
951 * xfer->gfp filled
952 *
953 * If we fail at __wa_xfer_submit(), then we just check if we are done
954 * and if so, we run the completion procedure. However, if we are not
955 * yet done, we do nothing and wait for the completion handlers from
956 * the submitted URBs or from the xfer-result path to kick in. If xfer
957 * result never kicks in, the xfer will timeout from the USB code and
958 * dequeue() will be called.
959 */
960static void wa_urb_enqueue_b(struct wa_xfer *xfer)
961{
962 int result;
963 unsigned long flags;
964 struct urb *urb = xfer->urb;
965 struct wahc *wa = xfer->wa;
966 struct wusbhc *wusbhc = wa->wusb;
967 struct device *dev = &wa->usb_iface->dev;
968 struct wusb_dev *wusb_dev;
969 unsigned done;
970
971 d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb);
972 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
973 if (result < 0)
974 goto error_rpipe_get;
975 result = -ENODEV;
976 /* FIXME: segmentation broken -- kills DWA */
977 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
978 if (urb->dev == NULL)
979 goto error_dev_gone;
980 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
981 if (wusb_dev == NULL) {
982 mutex_unlock(&wusbhc->mutex);
983 goto error_dev_gone;
984 }
985 mutex_unlock(&wusbhc->mutex);
986
987 spin_lock_irqsave(&xfer->lock, flags);
988 xfer->wusb_dev = wusb_dev;
989 result = urb->status;
990 if (urb->status != -EINPROGRESS)
991 goto error_dequeued;
992
993 result = __wa_xfer_setup(xfer, urb);
994 if (result < 0)
995 goto error_xfer_setup;
996 result = __wa_xfer_submit(xfer);
997 if (result < 0)
998 goto error_xfer_submit;
999 spin_unlock_irqrestore(&xfer->lock, flags);
1000 d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb);
1001 return;
1002
1003	/* this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
1004	 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
1005	 * up/undo setup().
1006 */
1007error_xfer_setup:
1008error_dequeued:
1009 spin_unlock_irqrestore(&xfer->lock, flags);
1010 /* FIXME: segmentation broken, kills DWA */
1011 if (wusb_dev)
1012 wusb_dev_put(wusb_dev);
1013error_dev_gone:
1014 rpipe_put(xfer->ep->hcpriv);
1015error_rpipe_get:
1016 xfer->result = result;
1017 wa_xfer_giveback(xfer);
1018 d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1019 return;
1020
1021error_xfer_submit:
1022 done = __wa_xfer_is_done(xfer);
1023 xfer->result = result;
1024 spin_unlock_irqrestore(&xfer->lock, flags);
1025 if (done)
1026 wa_xfer_completion(xfer);
1027 d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1028 return;
1029}
1030
1031/*
1032 * Execute the delayed transfers in the Wire Adapter @wa
1033 *
1034 * We need to be careful here, as dequeue() could be called in the
1035 * middle. That's why we do the whole thing under the
1036 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1037 * and then checks the list -- so as we would be acquiring in inverse
1038 * order, we just drop the lock once we have the xfer and reacquire it
1039 * later.
1040 */
1041void wa_urb_enqueue_run(struct work_struct *ws)
1042{
1043 struct wahc *wa = container_of(ws, struct wahc, xfer_work);
1044 struct device *dev = &wa->usb_iface->dev;
1045 struct wa_xfer *xfer, *next;
1046 struct urb *urb;
1047
1048 d_fnstart(3, dev, "(wa %p)\n", wa);
1049 spin_lock_irq(&wa->xfer_list_lock);
1050 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
1051 list_node) {
1052 list_del_init(&xfer->list_node);
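		/* wa_urb_enqueue_b() may sleep (rpipe lookup, mutex), so
		 * drop the list lock while running it; see the locking
		 * note above. */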
1053 spin_unlock_irq(&wa->xfer_list_lock);
1054
1055 urb = xfer->urb;
1056 wa_urb_enqueue_b(xfer);
1057 usb_put_urb(urb); /* taken when queuing */
1058
1059 spin_lock_irq(&wa->xfer_list_lock);
1060 }
1061 spin_unlock_irq(&wa->xfer_list_lock);
1062 d_fnend(3, dev, "(wa %p) = void\n", wa);
1063}
1064EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1065
1066/*
1067 * Submit a transfer to the Wire Adapter in a delayed way
1068 *
1069 * The process of enqueuing may sleep [see wa_urb_enqueue_b(), for
1070 * the rpipe_get() and the mutex_lock()]. If we are in an atomic
1071 * section, we defer the enqueue_b() call to a workqueue; else we call it directly.
1072 *
1073 * @urb: We own a reference to it, taken by the Linux USB HCD stack,
1074 *       that will be given up by calling usb_hcd_giveback_urb() or by
1075 *       returning an error from this function -> ergo we don't have to
1076 * refcount it.
1077 */
1078int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1079 struct urb *urb, gfp_t gfp)
1080{
1081 int result;
1082 struct device *dev = &wa->usb_iface->dev;
1083 struct wa_xfer *xfer;
1084 unsigned long my_flags;
1085 unsigned cant_sleep = irqs_disabled() | in_atomic();
1086
1087 d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n",
1088 wa, ep, urb, urb->transfer_buffer_length, gfp);
1089
1090 if (urb->transfer_buffer == NULL
1091 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1092 && urb->transfer_buffer_length != 0) {
1093 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1094 dump_stack();
1095 }
1096
1097 result = -ENOMEM;
1098 xfer = kzalloc(sizeof(*xfer), gfp);
1099 if (xfer == NULL)
1100 goto error_kmalloc;
1101
1102 result = -ENOENT;
1103 if (urb->status != -EINPROGRESS) /* cancelled */
1104 goto error_dequeued; /* before starting? */
1105 wa_xfer_init(xfer);
1106 xfer->wa = wa_get(wa);
1107 xfer->urb = urb;
1108 xfer->gfp = gfp;
1109 xfer->ep = ep;
1110 urb->hcpriv = xfer;
1111 d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1112 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1113 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1114 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1115 cant_sleep ? "deferred" : "inline");
1116 if (cant_sleep) {
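		/* Atomic context: take a reference on the URB and defer the
		 * real enqueue to the wusbd workqueue, which runs
		 * wa_urb_enqueue_run(). */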
1117 usb_get_urb(urb);
1118 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1119 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1120 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1121 queue_work(wusbd, &wa->xfer_work);
1122 } else {
1123 wa_urb_enqueue_b(xfer);
1124 }
1125 d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n",
1126 wa, ep, urb, urb->transfer_buffer_length, gfp);
1127 return 0;
1128
1129error_dequeued:
1130 kfree(xfer);
1131error_kmalloc:
1132 d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n",
1133 wa, ep, urb, urb->transfer_buffer_length, gfp, result);
1134 return result;
1135}
1136EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1137
1138/*
1139 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1140 * handler] is called.
1141 *
1142 * Until a transfer goes successfully through wa_urb_enqueue() it
1143 * needs to be dequeued with the completion called by us; when it is
1144 * stuck delayed, or before wa_xfer_setup() is called, we do the completion.
1145 *
1146 * not setup	If there is no hcpriv yet, that means that enqueue
1147 * still had no time to set the xfer up. Because
1148 * urb->status should be other than -EINPROGRESS,
1149 * enqueue() will catch that and bail out.
1150 *
1151 * If the transfer has gone through setup, we just need to clean it
1152 * up. If it has gone through submit(), we have to abort it [with an
1153 * asynch request] and then make sure we cancel each segment.
1154 *
1155 */
1156int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1157{
1158 struct device *dev = &wa->usb_iface->dev;
1159 unsigned long flags, flags2;
1160 struct wa_xfer *xfer;
1161 struct wa_seg *seg;
1162 struct wa_rpipe *rpipe;
1163 unsigned cnt;
1164 unsigned rpipe_ready = 0;
1165
1166 d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb);
1167
1168 d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb);
1169 xfer = urb->hcpriv;
1170 if (xfer == NULL) {
1171		/* Nothing has been set up yet; enqueue() will see urb->status !=
1172		 * -EINPROGRESS (set by the hcd layer) and bail out with an
1173		 * error, so there is no need to do completion here
1174 */
1175 BUG_ON(urb->status == -EINPROGRESS);
1176 goto out;
1177 }
1178 spin_lock_irqsave(&xfer->lock, flags);
1179 rpipe = xfer->ep->hcpriv;
1180 /* Check the delayed list -> if there, release and complete */
1181 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1182 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1183 goto dequeue_delayed;
1184 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1185 if (xfer->seg == NULL) /* still hasn't reached */
1186 goto out_unlock; /* setup(), enqueue_b() completes */
1187 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1188 __wa_xfer_abort(xfer);
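	/* The abort request is on its way; now push every segment into a
	 * terminal state so __wa_xfer_is_done()'s accounting adds up. */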
1189 for (cnt = 0; cnt < xfer->segs; cnt++) {
1190 seg = xfer->seg[cnt];
1191 switch (seg->status) {
1192 case WA_SEG_NOTREADY:
1193 case WA_SEG_READY:
1194 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1195 xfer, cnt, seg->status);
1196 WARN_ON(1);
1197 break;
1198 case WA_SEG_DELAYED:
1199 seg->status = WA_SEG_ABORTED;
1200 spin_lock_irqsave(&rpipe->seg_lock, flags2);
1201 list_del(&seg->list_node);
1202 xfer->segs_done++;
1203 rpipe_ready = rpipe_avail_inc(rpipe);
1204 spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1205 break;
1206 case WA_SEG_SUBMITTED:
1207 seg->status = WA_SEG_ABORTED;
1208 usb_unlink_urb(&seg->urb);
1209 if (xfer->is_inbound == 0)
1210 usb_unlink_urb(seg->dto_urb);
1211 xfer->segs_done++;
1212 rpipe_ready = rpipe_avail_inc(rpipe);
1213 break;
1214 case WA_SEG_PENDING:
1215 seg->status = WA_SEG_ABORTED;
1216 xfer->segs_done++;
1217 rpipe_ready = rpipe_avail_inc(rpipe);
1218 break;
1219 case WA_SEG_DTI_PENDING:
1220 usb_unlink_urb(wa->dti_urb);
1221 seg->status = WA_SEG_ABORTED;
1222 xfer->segs_done++;
1223 rpipe_ready = rpipe_avail_inc(rpipe);
1224 break;
1225 case WA_SEG_DONE:
1226 case WA_SEG_ERROR:
1227 case WA_SEG_ABORTED:
1228 break;
1229 }
1230 }
1231 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
1232 __wa_xfer_is_done(xfer);
1233 spin_unlock_irqrestore(&xfer->lock, flags);
1234 wa_xfer_completion(xfer);
1235 if (rpipe_ready)
1236 wa_xfer_delayed_run(rpipe);
1237 d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1238 return 0;
1239
1240out_unlock:
1241 spin_unlock_irqrestore(&xfer->lock, flags);
1242out:
1243 d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1244 return 0;
1245
1246dequeue_delayed:
1247 list_del_init(&xfer->list_node);
1248 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1249 xfer->result = urb->status;
1250 spin_unlock_irqrestore(&xfer->lock, flags);
1251 wa_xfer_giveback(xfer);
1252 usb_put_urb(urb); /* we got a ref in enqueue() */
1253 d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1254 return 0;
1255}
1256EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1257
1258/*
1259 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1260 * codes
1261 *
1262 * Positive errno values are internal inconsistencies and should be
1263 * flagged louder. Negative are to be passed up to the user in the
1264 * normal way.
1265 *
1266 * @status: USB WA status code -- high two bits are stripped.
1267 */
1268static int wa_xfer_status_to_errno(u8 status)
1269{
1270 int errno;
1271 u8 real_status = status;
1272 static int xlat[] = {
1273 [WA_XFER_STATUS_SUCCESS] = 0,
1274 [WA_XFER_STATUS_HALTED] = -EPIPE,
1275 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1276 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1277 [WA_XFER_RESERVED] = EINVAL,
1278 [WA_XFER_STATUS_NOT_FOUND] = 0,
1279 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1280 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1281 [WA_XFER_STATUS_ABORTED] = -EINTR,
1282 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1283 [WA_XFER_INVALID_FORMAT] = EINVAL,
1284 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1285 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1286 };
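	/* The top two bits of bTransferStatus are the error/warning flags
	 * (the caller checks those); keep only the status code itself. */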
1287 status &= 0x3f;
1288
1289 if (status == 0)
1290 return 0;
1291 if (status >= ARRAY_SIZE(xlat)) {
1292 if (printk_ratelimit())
1293 printk(KERN_ERR "%s(): BUG? "
1294 "Unknown WA transfer status 0x%02x\n",
1295 __func__, real_status);
1296 return -EINVAL;
1297 }
1298 errno = xlat[status];
1299 if (unlikely(errno > 0)) {
1300 if (printk_ratelimit())
1301 printk(KERN_ERR "%s(): BUG? "
1302 "Inconsistent WA status: 0x%02x\n",
1303 __func__, real_status);
1304 errno = -errno;
1305 }
1306 return errno;
1307}
1308
1309/*
1310 * Process a xfer result completion message
1311 *
1312 * inbound transfers: need to schedule a DTI read
1313 *
1314 * FIXME: this function needs to be broken up into parts
1315 */
1316static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1317{
1318 int result;
1319 struct device *dev = &wa->usb_iface->dev;
1320 unsigned long flags;
1321 u8 seg_idx;
1322 struct wa_seg *seg;
1323 struct wa_rpipe *rpipe;
1324 struct wa_xfer_result *xfer_result = wa->xfer_result;
1325 u8 done = 0;
1326 u8 usb_status;
1327 unsigned rpipe_ready = 0;
1328
1329 d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer);
1330 spin_lock_irqsave(&xfer->lock, flags);
1331 seg_idx = xfer_result->bTransferSegment & 0x7f;
1332 if (unlikely(seg_idx >= xfer->segs))
1333 goto error_bad_seg;
1334 seg = xfer->seg[seg_idx];
1335 rpipe = xfer->ep->hcpriv;
1336 usb_status = xfer_result->bTransferStatus;
1337 d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1338 xfer, seg_idx, usb_status, seg->status);
1339 if (seg->status == WA_SEG_ABORTED
1340 || seg->status == WA_SEG_ERROR) /* already handled */
1341 goto segment_aborted;
1342	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
1343 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
1344 if (seg->status != WA_SEG_PENDING) {
1345 if (printk_ratelimit())
1346 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1347 xfer, seg_idx, seg->status);
1348 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
1349 }
1350 if (usb_status & 0x80) {
1351 seg->result = wa_xfer_status_to_errno(usb_status);
1352 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1353 xfer, seg->index, usb_status);
1354 goto error_complete;
1355 }
1356 /* FIXME: we ignore warnings, tally them for stats */
1357 if (usb_status & 0x40) /* Warning?... */
1358 usb_status = 0; /* ... pass */
1359 if (xfer->is_inbound) { /* IN data phase: read to buffer */
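		/* Point the (single, shared) BUF-IN URB at this segment's
		 * slice of the caller's buffer and submit it; wa_buf_in_cb()
		 * will complete the segment. */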
1360 seg->status = WA_SEG_DTI_PENDING;
1361 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1362 if (xfer->is_dma) {
1363 wa->buf_in_urb->transfer_dma =
1364 xfer->urb->transfer_dma
1365 + seg_idx * xfer->seg_size;
1366 wa->buf_in_urb->transfer_flags
1367 |= URB_NO_TRANSFER_DMA_MAP;
1368 } else {
1369 wa->buf_in_urb->transfer_buffer =
1370 xfer->urb->transfer_buffer
1371 + seg_idx * xfer->seg_size;
1372 wa->buf_in_urb->transfer_flags
1373 &= ~URB_NO_TRANSFER_DMA_MAP;
1374 }
1375 wa->buf_in_urb->transfer_buffer_length =
1376 le32_to_cpu(xfer_result->dwTransferLength);
1377 wa->buf_in_urb->context = seg;
1378 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1379 if (result < 0)
1380 goto error_submit_buf_in;
1381 } else {
1382 /* OUT data phase, complete it -- */
1383 seg->status = WA_SEG_DONE;
1384 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1385 xfer->segs_done++;
1386 rpipe_ready = rpipe_avail_inc(rpipe);
1387 done = __wa_xfer_is_done(xfer);
1388 }
1389 spin_unlock_irqrestore(&xfer->lock, flags);
1390 if (done)
1391 wa_xfer_completion(xfer);
1392 if (rpipe_ready)
1393 wa_xfer_delayed_run(rpipe);
1394 d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer);
1395 return;
1396
1397
1398error_submit_buf_in:
1399 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1400 dev_err(dev, "DTI: URB max acceptable errors "
1401 "exceeded, resetting device\n");
1402 wa_reset_all(wa);
1403 }
1404 if (printk_ratelimit())
1405 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1406 xfer, seg_idx, result);
1407 seg->result = result;
1408error_complete:
1409 seg->status = WA_SEG_ERROR;
1410 xfer->segs_done++;
1411 rpipe_ready = rpipe_avail_inc(rpipe);
1412 __wa_xfer_abort(xfer);
1413 done = __wa_xfer_is_done(xfer);
1414 spin_unlock_irqrestore(&xfer->lock, flags);
1415 if (done)
1416 wa_xfer_completion(xfer);
1417 if (rpipe_ready)
1418 wa_xfer_delayed_run(rpipe);
1419 d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n",
1420 wa, xfer);
1421 return;
1422
1423
1424error_bad_seg:
1425 spin_unlock_irqrestore(&xfer->lock, flags);
1426 wa_urb_dequeue(wa, xfer->urb);
1427 if (printk_ratelimit())
1428 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1429 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1430 dev_err(dev, "DTI: URB max acceptable errors "
1431 "exceeded, resetting device\n");
1432 wa_reset_all(wa);
1433 }
1434 d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer);
1435 return;
1436
1437
1438segment_aborted:
1439 /* nothing to do, as the aborter did the completion */
1440 spin_unlock_irqrestore(&xfer->lock, flags);
1441 d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n",
1442 wa, xfer);
1443 return;
1444
1445}
1446
1447/*
1448 * Callback for the IN data phase
1449 *
1450 * If successful, transition state; otherwise, take note of the
1451 * error, mark this segment done and try completion.
1452 *
1453 * Note we don't access the xfer data until we are sure the transfer
1454 * hasn't been cancelled (ECONNRESET, ENOENT), in which case
1455 * seg->xfer might already be gone.
1456 */
1457static void wa_buf_in_cb(struct urb *urb)
1458{
1459 struct wa_seg *seg = urb->context;
1460 struct wa_xfer *xfer = seg->xfer;
1461 struct wahc *wa;
1462 struct device *dev;
1463 struct wa_rpipe *rpipe;
1464 unsigned rpipe_ready;
1465 unsigned long flags;
1466 u8 done = 0;
1467
1468 d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
1469 switch (urb->status) {
1470 case 0:
1471 spin_lock_irqsave(&xfer->lock, flags);
1472 wa = xfer->wa;
1473 dev = &wa->usb_iface->dev;
1474 rpipe = xfer->ep->hcpriv;
1475 d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n",
1476 xfer, seg->index, (size_t)urb->actual_length);
1477 seg->status = WA_SEG_DONE;
1478 seg->result = urb->actual_length;
1479 xfer->segs_done++;
1480 rpipe_ready = rpipe_avail_inc(rpipe);
1481 done = __wa_xfer_is_done(xfer);
1482 spin_unlock_irqrestore(&xfer->lock, flags);
1483 if (done)
1484 wa_xfer_completion(xfer);
1485 if (rpipe_ready)
1486 wa_xfer_delayed_run(rpipe);
1487 break;
1488 case -ECONNRESET: /* URB unlinked; no need to do anything */
1489	case -ENOENT:		/* as it was done by whoever unlinked us */
1490 break;
1491 default: /* Other errors ... */
1492 spin_lock_irqsave(&xfer->lock, flags);
1493 wa = xfer->wa;
1494 dev = &wa->usb_iface->dev;
1495 rpipe = xfer->ep->hcpriv;
1496 if (printk_ratelimit())
1497 dev_err(dev, "xfer %p#%u: data in error %d\n",
1498 xfer, seg->index, urb->status);
1499 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1500 EDC_ERROR_TIMEFRAME)){
1501 dev_err(dev, "DTO: URB max acceptable errors "
1502 "exceeded, resetting device\n");
1503 wa_reset_all(wa);
1504 }
1505 seg->status = WA_SEG_ERROR;
1506 seg->result = urb->status;
1507 xfer->segs_done++;
1508 rpipe_ready = rpipe_avail_inc(rpipe);
1509 __wa_xfer_abort(xfer);
1510 done = __wa_xfer_is_done(xfer);
1511 spin_unlock_irqrestore(&xfer->lock, flags);
1512 if (done)
1513 wa_xfer_completion(xfer);
1514 if (rpipe_ready)
1515 wa_xfer_delayed_run(rpipe);
1516 }
1517 d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
1518}
1519
1520/*
1521 * Handle an incoming transfer result buffer
1522 *
1523 * Given a transfer result buffer, it completes the transfer (possibly
1524 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1525 * new transfer result read.
1526 *
1527 *
1528 * The xfer_result DTI URB state machine
1529 *
1530 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1531 *
1532 * We start in OFF mode, the first xfer_result notification [through
1533 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1534 * read.
1535 *
1536 * We receive a buffer -- if it is not an xfer_result, we complain and
1537 * repost the DTI-URB. If it is an xfer_result then we do the xfer seg
1538 * request accounting. If it is an IN segment, we move to RBI and post
1539 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1540 * repost the DTI-URB and move to the RXR state. If there was no IN
1541 * segment, it will repost the DTI-URB.
1542 *
1543 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
1544 * errors) in the URBs.
1545 */
1546static void wa_xfer_result_cb(struct urb *urb)
1547{
1548 int result;
1549 struct wahc *wa = urb->context;
1550 struct device *dev = &wa->usb_iface->dev;
1551 struct wa_xfer_result *xfer_result;
1552 u32 xfer_id;
1553 struct wa_xfer *xfer;
1554 u8 usb_status;
1555
1556 d_fnstart(3, dev, "(%p)\n", wa);
1557 BUG_ON(wa->dti_urb != urb);
1558 switch (wa->dti_urb->status) {
1559 case 0:
1560 /* We have a xfer result buffer; check it */
1561 d_printf(2, dev, "DTI: xfer result %d bytes at %p\n",
1562 urb->actual_length, urb->transfer_buffer);
1563 d_dump(3, dev, urb->transfer_buffer, urb->actual_length);
1564 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1565 dev_err(dev, "DTI Error: xfer result--bad size "
1566 "xfer result (%d bytes vs %zu needed)\n",
1567 urb->actual_length, sizeof(*xfer_result));
1568 break;
1569 }
1570 xfer_result = wa->xfer_result;
1571 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1572 dev_err(dev, "DTI Error: xfer result--"
1573 "bad header length %u\n",
1574 xfer_result->hdr.bLength);
1575 break;
1576 }
1577 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1578 dev_err(dev, "DTI Error: xfer result--"
1579 "bad header type 0x%02x\n",
1580 xfer_result->hdr.bNotifyType);
1581 break;
1582 }
1583 usb_status = xfer_result->bTransferStatus & 0x3f;
1584 if (usb_status == WA_XFER_STATUS_ABORTED
1585 || usb_status == WA_XFER_STATUS_NOT_FOUND)
1586 /* taken care of already */
1587 break;
1588 xfer_id = xfer_result->dwTransferID;
1589 xfer = wa_xfer_get_by_id(wa, xfer_id);
1590 if (xfer == NULL) {
1591 /* FIXME: transaction might have been cancelled */
1592 dev_err(dev, "DTI Error: xfer result--"
1593 "unknown xfer 0x%08x (status 0x%02x)\n",
1594 xfer_id, usb_status);
1595 break;
1596 }
1597 wa_xfer_result_chew(wa, xfer);
1598 wa_xfer_put(xfer);
1599 break;
1600 case -ENOENT: /* (we killed the URB)...so, no broadcast */
1601 case -ESHUTDOWN: /* going away! */
1602 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1603 goto out;
1604 default:
1605 /* Unknown error */
1606 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1607 EDC_ERROR_TIMEFRAME)) {
1608 dev_err(dev, "DTI: URB max acceptable errors "
1609 "exceeded, resetting device\n");
1610 wa_reset_all(wa);
1611 goto out;
1612 }
1613 if (printk_ratelimit())
1614 dev_err(dev, "DTI: URB error %d\n", urb->status);
1615 break;
1616 }
1617 /* Resubmit the DTI URB */
1618 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1619 if (result < 0) {
1620 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1621 "resetting\n", result);
1622 wa_reset_all(wa);
1623 }
1624out:
1625 d_fnend(3, dev, "(%p) = void\n", wa);
1626 return;
1627}
1628
1629/*
1630 * Transfer complete notification
1631 *
1632 * Called from the notif.c code. We get a notification on EP2 saying
1633 * that some endpoint has some transfer result data available. We are
1634 * about to read it.
1635 *
1636 * To speed things up, we always keep a URB ready to read from the DTI
1637 * endpoint; we don't really set it up and start it until the first
1638 * xfer complete notification arrives, which is what we do here.
1639 *
1640 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1641 * machine starts.
1642 *
1643 * So here we just initialize the DTI URB for reading transfer result
1644 * notifications and also the buffer-in URB, for reading buffers. Then
1645 * we just submit the DTI URB.
1646 *
1647 * @wa shall be referenced
1648 */
1649void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1650{
1651 int result;
1652 struct device *dev = &wa->usb_iface->dev;
1653 struct wa_notif_xfer *notif_xfer;
1654 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1655
1656 d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr);
1657 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1658 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1659
1660 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1661 /* FIXME: hardcoded limitation, adapt */
1662 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1663 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1664 goto error;
1665 }
1666 if (wa->dti_urb != NULL) /* DTI URB already started */
1667 goto out;
1668
1669 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1670 if (wa->dti_urb == NULL) {
1671 dev_err(dev, "Can't allocate DTI URB\n");
1672 goto error_dti_urb_alloc;
1673 }
1674 usb_fill_bulk_urb(
1675 wa->dti_urb, wa->usb_dev,
1676 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1677 wa->xfer_result, wa->xfer_result_size,
1678 wa_xfer_result_cb, wa);
1679
1680 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1681 if (wa->buf_in_urb == NULL) {
1682 dev_err(dev, "Can't allocate BUF-IN URB\n");
1683 goto error_buf_in_urb_alloc;
1684 }
1685 usb_fill_bulk_urb(
1686 wa->buf_in_urb, wa->usb_dev,
1687 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1688 NULL, 0, wa_buf_in_cb, wa);
1689 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1690 if (result < 0) {
1691 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1692 "resetting\n", result);
1693 goto error_dti_urb_submit;
1694 }
1695out:
1696 d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1697 return;
1698
1699error_dti_urb_submit:
1700 usb_put_urb(wa->buf_in_urb);
1701error_buf_in_urb_alloc:
1702 usb_put_urb(wa->dti_urb);
1703 wa->dti_urb = NULL;
1704error_dti_urb_alloc:
1705error:
1706 wa_reset_all(wa);
1707 d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1708 return;
1709}