Diffstat (limited to 'drivers/usb/host')
 drivers/usb/host/u132-hcd.c | 62 ++++++++++++++++++++++++--------------------------------------
 1 file changed, 24 insertions(+), 38 deletions(-)
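This commit converts u132-hcd from the old work_struct callback convention, where INIT_WORK() took a void *data cookie, to the delayed_work API: the scheduler and monitor members become struct delayed_work, the work functions take a struct work_struct * and recover their container via container_of(), and the queue helpers collapse onto queue_delayed_work(), since a delay of 0 queues for immediate execution. A minimal sketch of the pattern, using hypothetical names (my_dev, my_dev_monitor_work, my_dev_init) rather than the driver's own:

#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/kernel.h>

struct my_dev {                         /* hypothetical container */
        struct kref kref;
        int state;
        struct delayed_work monitor;    /* was: struct work_struct monitor */
};

/* New-style callback: receives the work item, not a void *data cookie. */
static void my_dev_monitor_work(struct work_struct *work)
{
        /*
         * The workqueue hands the callback the embedded monitor.work
         * member, so container_of() recovers the enclosing device.
         */
        struct my_dev *dev = container_of(work, struct my_dev, monitor.work);

        pr_debug("monitor sees state %d\n", dev->state);
}

static void my_dev_init(struct my_dev *dev)
{
        kref_init(&dev->kref);
        /* was: INIT_WORK(&dev->monitor, my_dev_monitor_work, (void *)dev); */
        INIT_DELAYED_WORK(&dev->monitor, my_dev_monitor_work);
}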
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ef54e310bfc4..a9d7119e3176 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -163,7 +163,7 @@ struct u132_endp {
         u16 queue_next;
         struct urb *urb_list[ENDP_QUEUE_SIZE];
         struct list_head urb_more;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 struct u132_ring {
         unsigned in_use:1;
@@ -171,7 +171,7 @@ struct u132_ring {
         u8 number;
         struct u132 *u132;
         struct u132_endp *curr_endp;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 #define OHCI_QUIRK_AMD756 0x01
 #define OHCI_QUIRK_SUPERIO 0x02
@@ -198,7 +198,7 @@ struct u132 {
         u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
         int flags;
         unsigned long next_statechange;
-        struct work_struct monitor;
+        struct delayed_work monitor;
         int num_endpoints;
         struct u132_addr addr[MAX_U132_ADDRS];
         struct u132_udev udev[MAX_U132_UDEVS];
@@ -310,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
         if (delta > 0) {
                 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                         return;
-        } else if (queue_work(workqueue, &ring->scheduler))
+        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                 return;
         kref_put(&u132->kref, u132_hcd_delete);
         return;
@@ -389,12 +389,8 @@ static inline void u132_endp_init_kref(struct u132 *u132,
 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &endp->scheduler, delta))
-                        kref_get(&endp->kref);
-        } else if (queue_work(workqueue, &endp->scheduler))
-                kref_get(&endp->kref);
-        return;
+        if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+                kref_get(&endp->kref);
 }
 
 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -410,24 +406,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132)
 
 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
-                        kref_get(&u132->kref);
-                }
-        } else if (queue_work(workqueue, &u132->monitor))
-                kref_get(&u132->kref);
-        return;
+        if (queue_delayed_work(workqueue, &u132->monitor, delta))
+                kref_get(&u132->kref);
 }
 
 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta))
-                        return;
-        } else if (queue_work(workqueue, &u132->monitor))
-                return;
-        kref_put(&u132->kref, u132_hcd_delete);
-        return;
+        if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+                kref_put(&u132->kref, u132_hcd_delete);
 }
 
 static void u132_monitor_cancel_work(struct u132 *u132)
@@ -489,9 +475,9 @@ static int read_roothub_info(struct u132 *u132)
         return 0;
 }
 
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
 {
-        struct u132 *u132 = data;
+        struct u132 *u132 = container_of(work, struct u132, monitor.work);
         if (u132->going > 1) {
                 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
                         , u132->going);
@@ -1315,15 +1301,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
         }
 }
 
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
 /*
  * this work function is only executed from the work queue
  *
  */
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
 {
-        struct u132_ring *ring = data;
+        struct u132_ring *ring =
+                container_of(work, struct u132_ring, scheduler.work);
         struct u132 *u132 = ring->u132;
         down(&u132->scheduler_lock);
         if (ring->in_use) {
@@ -1382,10 +1367,11 @@ static void u132_hcd_ring_work_scheduler(void *data)
         }
 }
 
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
 {
         struct u132_ring *ring;
-        struct u132_endp *endp = data;
+        struct u132_endp *endp =
+                container_of(work, struct u132_endp, scheduler.work);
         struct u132 *u132 = endp->u132;
         down(&u132->scheduler_lock);
         ring = endp->ring;
@@ -1943,7 +1929,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -2032,7 +2018,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         endp->dequeueing = 0;
@@ -2117,7 +2103,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -3096,10 +3082,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
                 ring->number = rings + 1;
                 ring->length = 0;
                 ring->curr_endp = NULL;
-                INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
-                        (void *)ring);
+                INIT_DELAYED_WORK(&ring->scheduler,
+                        u132_hcd_ring_work_scheduler);
         } down(&u132->sw_lock);
-        INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+        INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
         while (ports-- > 0) {
                 struct u132_port *port = &u132->port[ports];
                 port->u132 = u132;
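The simplification in the queue/requeue helpers above rests on queue_delayed_work() returning nonzero only when the work item was not already pending, and on a zero delay meaning "queue now"; a single call therefore replaces the old delta > 0 split. A hedged sketch of the same kref-balanced queueing idiom, reusing the hypothetical my_dev from the sketch above and an assumed workqueue my_wq:

static struct workqueue_struct *my_wq;  /* assumption: created elsewhere */

static void my_dev_queue_monitor(struct my_dev *dev, unsigned int delta)
{
        /*
         * A nonzero return means the work was newly queued, so take a
         * reference for it; the work function is expected to drop it.
         */
        if (queue_delayed_work(my_wq, &dev->monitor, delta))
                kref_get(&dev->kref);
}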