author		Oliver Neukum <oliver@neukum.org>	2009-09-09 11:06:53 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-09-23 09:46:40 -0400
commit		e7389cc9a7ff7c6e760e741c81a751c834f7d145 (patch)
tree		c32bb55858fd792ac10b90a2a0539d163ad46136 /drivers/usb
parent		b356b7c7696b289dda99022d71e3979c6134af52 (diff)
USB: skel_read really sucks royally
The read code path of the skeleton driver really sucks:
- skel_read works only for devices which always send data
- the timeout comes out of thin air
- it blocks signals for the duration of the timeout
- it disallows nonblocking IO by design
This patch fixes these problems by using a real urb, a completion, and interruptible waits.
Signed-off-by: Oliver Neukum <oliver@neukum.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
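
For illustration, a minimal user-space sketch (not part of the patch) of the behavioral change: with interruptible waits, a read() blocked on the device can be aborted by a signal instead of sleeping out the old hard-coded 10-second timeout. It assumes the skeleton driver is bound and its node appears as /dev/skel0 (from the driver's "skel%d" name template); adjust the path for your system.

/*
 * Sketch only; assumes the device node is /dev/skel0.
 * With the patched read path, a blocked read() returns -1 with
 * errno == EINTR when a signal arrives, rather than sleeping out
 * the old fixed 10 s usb_bulk_msg() timeout.
 */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* handler exists only to interrupt read() */
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_alarm };
	char buf[512];
	ssize_t n;
	int fd;

	fd = open("/dev/skel0", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* no SA_RESTART, so a blocked read() fails with EINTR */
	sigaction(SIGALRM, &sa, NULL);
	alarm(2);

	n = read(fd, buf, sizeof(buf));
	if (n < 0 && errno == EINTR)
		fprintf(stderr, "read interrupted by signal\n");
	else if (n < 0)
		perror("read");	/* EPIPE after a stall, otherwise EIO */
	else
		fprintf(stderr, "read %zd bytes\n", n);

	close(fd);
	return 0;
}

Against the unpatched driver the same program would sit in the 10-second usb_bulk_msg() call, with the signal having no effect until the timeout expired.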
Diffstat (limited to 'drivers/usb')
-rw-r--r--	drivers/usb/usb-skeleton.c	194
1 file changed, 176 insertions(+), 18 deletions(-)
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 60ba631e99c2..5ffa3e246856 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -52,15 +52,21 @@ struct usb_skel {
 	struct usb_interface	*interface;		/* the interface for this device */
 	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
 	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
+	struct urb		*bulk_in_urb;		/* the urb to read data with */
 	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
 	size_t			bulk_in_size;		/* the size of the receive buffer */
+	size_t			bulk_in_filled;		/* number of bytes in the buffer */
+	size_t			bulk_in_copied;		/* already copied to user space */
 	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
 	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
 	int			errors;			/* the last request tanked */
 	int			open_count;		/* count the number of openers */
+	bool			ongoing_read;		/* a read is going on */
+	bool			processed_urb;		/* indicates we haven't processed the urb */
 	spinlock_t		err_lock;		/* lock for errors */
 	struct kref		kref;
 	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
+	struct completion	bulk_in_completion;	/* to wait for an ongoing read */
 };
 #define to_skel_dev(d) container_of(d, struct usb_skel, kref)
 
@@ -71,6 +77,7 @@ static void skel_delete(struct kref *kref)
 {
 	struct usb_skel *dev = to_skel_dev(kref);
 
+	usb_free_urb(dev->bulk_in_urb);
 	usb_put_dev(dev->udev);
 	kfree(dev->bulk_in_buffer);
 	kfree(dev);
@@ -174,38 +181,182 @@ static int skel_flush(struct file *file, fl_owner_t id)
 	return res;
 }
 
+static void skel_read_bulk_callback(struct urb *urb)
+{
+	struct usb_skel *dev;
+
+	dev = urb->context;
+
+	spin_lock(&dev->err_lock);
+	/* sync/async unlink faults aren't errors */
+	if (urb->status) {
+		if (!(urb->status == -ENOENT ||
+		      urb->status == -ECONNRESET ||
+		      urb->status == -ESHUTDOWN))
+			err("%s - nonzero read bulk status received: %d",
+			    __func__, urb->status);
+
+		dev->errors = urb->status;
+	} else {
+		dev->bulk_in_filled = urb->actual_length;
+	}
+	dev->ongoing_read = 0;
+	spin_unlock(&dev->err_lock);
+
+	complete(&dev->bulk_in_completion);
+}
+
+static int skel_do_read_io(struct usb_skel *dev, size_t count)
+{
+	int rv;
+
+	/* prepare a read */
+	usb_fill_bulk_urb(dev->bulk_in_urb,
+			dev->udev,
+			usb_rcvbulkpipe(dev->udev,
+				dev->bulk_in_endpointAddr),
+			dev->bulk_in_buffer,
+			min(dev->bulk_in_size, count),
+			skel_read_bulk_callback,
+			dev);
+	/* tell everybody to leave the URB alone */
+	spin_lock_irq(&dev->err_lock);
+	dev->ongoing_read = 1;
+	spin_unlock_irq(&dev->err_lock);
+
+	/* do it */
+	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
+	if (rv < 0) {
+		err("%s - failed submitting read urb, error %d",
+		    __func__, rv);
+		dev->bulk_in_filled = 0;
+		rv = (rv == -ENOMEM) ? rv : -EIO;
+		spin_lock_irq(&dev->err_lock);
+		dev->ongoing_read = 0;
+		spin_unlock_irq(&dev->err_lock);
+	}
+
+	return rv;
+}
+
 static ssize_t skel_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
 {
 	struct usb_skel *dev;
-	int retval;
-	int bytes_read;
+	int rv;
+	bool ongoing_io;
 
 	dev = (struct usb_skel *)file->private_data;
 
-	mutex_lock(&dev->io_mutex);
+	/* if we cannot read at all, return EOF */
+	if (!dev->bulk_in_urb || !count)
+		return 0;
+
+	/* no concurrent readers */
+	rv = mutex_lock_interruptible(&dev->io_mutex);
+	if (rv < 0)
+		return rv;
+
 	if (!dev->interface) {		/* disconnect() was called */
-		retval = -ENODEV;
+		rv = -ENODEV;
 		goto exit;
 	}
 
-	/* do a blocking bulk read to get data from the device */
-	retval = usb_bulk_msg(dev->udev,
-			      usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
-			      dev->bulk_in_buffer,
-			      min(dev->bulk_in_size, count),
-			      &bytes_read, 10000);
-
-	/* if the read was successful, copy the data to userspace */
-	if (!retval) {
-		if (copy_to_user(buffer, dev->bulk_in_buffer, bytes_read))
-			retval = -EFAULT;
-		else
-			retval = bytes_read;
+	/* if IO is under way, we must not touch things */
+retry:
+	spin_lock_irq(&dev->err_lock);
+	ongoing_io = dev->ongoing_read;
+	spin_unlock_irq(&dev->err_lock);
+
+	if (ongoing_io) {
+		/*
+		 * IO may take forever
+		 * hence wait in an interruptible state
+		 */
+		rv = wait_for_completion_interruptible(&dev->bulk_in_completion);
+		if (rv < 0)
+			goto exit;
+		/*
+		 * by waiting we also semiprocessed the urb
+		 * we must finish now
+		 */
+		dev->bulk_in_copied = 0;
+		dev->processed_urb = 1;
+	}
+
+	if (!dev->processed_urb) {
+		/*
+		 * the URB hasn't been processed
+		 * do it now
+		 */
+		wait_for_completion(&dev->bulk_in_completion);
+		dev->bulk_in_copied = 0;
+		dev->processed_urb = 1;
 	}
 
+	/* errors must be reported */
+	if ((rv = dev->errors) < 0) {
+		/* any error is reported once */
+		dev->errors = 0;
+		/* to preserve notifications about reset */
+		rv = (rv == -EPIPE) ? rv : -EIO;
+		/* no data to deliver */
+		dev->bulk_in_filled = 0;
+		/* report it */
+		goto exit;
+	}
+
+	/*
+	 * if the buffer is filled we may satisfy the read
+	 * else we need to start IO
+	 */
+
+	if (dev->bulk_in_filled) {
+		/* we had read data */
+		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
+		size_t chunk = min(available, count);
+
+		if (!available) {
+			/*
+			 * all data has been used
+			 * actual IO needs to be done
+			 */
+			rv = skel_do_read_io(dev, count);
+			if (rv < 0)
+				goto exit;
+			else
+				goto retry;
+		}
+		/*
+		 * data is available
+		 * chunk tells us how much shall be copied
+		 */
+
+		if (copy_to_user(buffer,
+				 dev->bulk_in_buffer + dev->bulk_in_copied,
+				 chunk))
+			rv = -EFAULT;
+		else
+			rv = chunk;
+
+		dev->bulk_in_copied += chunk;
+
+		/*
+		 * if we are asked for more than we have,
+		 * we start IO but don't wait
+		 */
+		if (available < count)
+			skel_do_read_io(dev, count - chunk);
+	} else {
+		/* no data in the buffer */
+		rv = skel_do_read_io(dev, count);
+		if (rv < 0)
+			goto exit;
+		else
+			goto retry;
+	}
 exit:
 	mutex_unlock(&dev->io_mutex);
-	return retval;
+	return rv;
 }
 
 static void skel_write_bulk_callback(struct urb *urb)
@@ -363,6 +514,7 @@ static int skel_probe(struct usb_interface *interface, const struct usb_device_i
 	mutex_init(&dev->io_mutex);
 	spin_lock_init(&dev->err_lock);
 	init_usb_anchor(&dev->submitted);
+	init_completion(&dev->bulk_in_completion);
 
 	dev->udev = usb_get_dev(interface_to_usbdev(interface));
 	dev->interface = interface;
@@ -384,6 +536,11 @@ static int skel_probe(struct usb_interface *interface, const struct usb_device_i
 			err("Could not allocate bulk_in_buffer");
 			goto error;
 		}
+		dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!dev->bulk_in_urb) {
+			err("Could not allocate bulk_in_urb");
+			goto error;
+		}
 	}
 
 	if (!dev->bulk_out_endpointAddr &&
@@ -453,6 +610,7 @@ static void skel_draw_down(struct usb_skel *dev)
 	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
 	if (!time)
 		usb_kill_anchored_urbs(&dev->submitted);
+	usb_kill_urb(dev->bulk_in_urb);
 }
 
 static int skel_suspend(struct usb_interface *intf, pm_message_t message)