author    David Howells <dhowells@redhat.com>  2006-11-22 09:57:56 -0500
committer David Howells <dhowells@redhat.com>  2006-11-22 09:57:56 -0500
commit    c4028958b6ecad064b1a6303a6a5906d4fe48d73 (patch)
tree      1c4c89652c62a75da09f9b9442012007e4ac6250 /drivers/usb/host/u132-hcd.c
parent    65f27f38446e1976cc98fd3004b110fedcddd189 (diff)
WorkStruct: make allyesconfig
Fix up for make allyesconfig.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/usb/host/u132-hcd.c')
-rw-r--r--  drivers/usb/host/u132-hcd.c | 62
1 file changed, 24 insertions(+), 38 deletions(-)
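This file is one slice of the tree-wide 2006 workqueue rework: work handlers change from taking an opaque void * to taking the struct work_struct * itself, and items that are ever queued with a delay become struct delayed_work. A minimal sketch of the pattern, using a hypothetical struct foo rather than this driver's types:

#include <linux/workqueue.h>

struct foo {
	struct delayed_work monitor;	/* was: struct work_struct monitor; */
};

/* Old style: the handler received the pointer passed at init time:
 *	static void foo_monitor_work(void *data)
 *	{ struct foo *foo = data; ... }
 *	INIT_WORK(&foo->monitor, foo_monitor_work, (void *)foo);
 */

/* New style: the handler receives the embedded work_struct and walks
 * back to its container; for delayed work the embedded member is the
 * ".work" field, hence the nested name in container_of(). */
static void foo_monitor_work(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, monitor.work);
	/* ... periodic processing ... */
}

/* Initialization no longer takes a data argument:
 *	INIT_DELAYED_WORK(&foo->monitor, foo_monitor_work);
 */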
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 32c635ecbf31..4f95a249c913 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -163,7 +163,7 @@ struct u132_endp {
 	u16 queue_next;
 	struct urb *urb_list[ENDP_QUEUE_SIZE];
 	struct list_head urb_more;
-	struct work_struct scheduler;
+	struct delayed_work scheduler;
 };
 struct u132_ring {
 	unsigned in_use:1;
@@ -171,7 +171,7 @@ struct u132_ring {
 	u8 number;
 	struct u132 *u132;
 	struct u132_endp *curr_endp;
-	struct work_struct scheduler;
+	struct delayed_work scheduler;
 };
 #define OHCI_QUIRK_AMD756 0x01
 #define OHCI_QUIRK_SUPERIO 0x02
@@ -198,7 +198,7 @@ struct u132 {
 	u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
 	int flags;
 	unsigned long next_statechange;
-	struct work_struct monitor;
+	struct delayed_work monitor;
 	int num_endpoints;
 	struct u132_addr addr[MAX_U132_ADDRS];
 	struct u132_udev udev[MAX_U132_UDEVS];
@@ -314,7 +314,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
 	if (delta > 0) {
 		if (queue_delayed_work(workqueue, &ring->scheduler, delta))
 			return;
-	} else if (queue_work(workqueue, &ring->scheduler))
+	} else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
 		return;
 	kref_put(&u132->kref, u132_hcd_delete);
 	return;
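A delayed_work item can no longer be handed to queue_work() directly; instead, queue_delayed_work() with a delay of 0 queues the item for immediate execution, which is why only the else-branch changes here. A hedged sketch of the collapsed form the later hunks adopt (the function name is hypothetical; the driver keeps the explicit delta test in this helper):

static void u132_ring_requeue_work_collapsed(struct u132 *u132,
	struct u132_ring *ring, unsigned int delta)
{
	/* delta == 0 runs as soon as a worker picks the item up, the
	 * same effect the old queue_work() call had */
	if (queue_delayed_work(workqueue, &ring->scheduler, delta))
		return;
	kref_put(&u132->kref, u132_hcd_delete);
}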
@@ -393,12 +393,8 @@ static inline void u132_endp_init_kref(struct u132 *u132,
 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
 	unsigned int delta)
 {
-	if (delta > 0) {
-		if (queue_delayed_work(workqueue, &endp->scheduler, delta))
-			kref_get(&endp->kref);
-	} else if (queue_work(workqueue, &endp->scheduler))
+	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
 		kref_get(&endp->kref);
-	return;
 }
 
 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -414,24 +410,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132)
 
 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
 {
-	if (delta > 0) {
-		if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
-			kref_get(&u132->kref);
-		}
-	} else if (queue_work(workqueue, &u132->monitor))
+	if (queue_delayed_work(workqueue, &u132->monitor, delta))
 		kref_get(&u132->kref);
-	return;
 }
 
 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
 {
-	if (delta > 0) {
-		if (queue_delayed_work(workqueue, &u132->monitor, delta))
-			return;
-	} else if (queue_work(workqueue, &u132->monitor))
-		return;
-	kref_put(&u132->kref, u132_hcd_delete);
-	return;
+	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+		kref_put(&u132->kref, u132_hcd_delete);
 }
 
 static void u132_monitor_cancel_work(struct u132 *u132)
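Both monitor helpers collapse to a single call because the return value of queue_delayed_work() already encodes what the old nested branches tested: it returns nonzero only when the item was not yet pending, i.e. when a new reference was actually handed to the workqueue (queue_work() follows the same convention). A commented restatement of the two new bodies:

/* queue side: each pending work item pins one reference on u132 */
if (queue_delayed_work(workqueue, &u132->monitor, delta))
	kref_get(&u132->kref);		/* newly pending: take the ref */

/* requeue side: called from the handler, which already holds the ref
 * the pending item owned; keep it only if requeueing succeeded */
if (!queue_delayed_work(workqueue, &u132->monitor, delta))
	kref_put(&u132->kref, u132_hcd_delete);	/* drop the handler's ref */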
@@ -493,9 +479,9 @@ static int read_roothub_info(struct u132 *u132)
 	return 0;
 }
 
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
 {
-	struct u132 *u132 = data;
+	struct u132 *u132 = container_of(work, struct u132, monitor.work);
 	if (u132->going > 1) {
 		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
 			, u132->going);
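The handler conversion is mechanical: the workqueue now invokes the function with a pointer to the embedded work_struct, and for a struct delayed_work that embedded item is its .work member, so the containing object comes back through a nested container_of(). A skeleton of the converted handler:

static void u132_hcd_monitor_work(struct work_struct *work)
{
	/* "work" points at u132->monitor.work; walk back to the u132 */
	struct u132 *u132 = container_of(work, struct u132, monitor.work);
	/* ... body unchanged from the void * version ... */
}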
@@ -1319,15 +1305,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
 	}
 }
 
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
 /*
 * this work function is only executed from the work queue
 *
 */
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
 {
-	struct u132_ring *ring = data;
+	struct u132_ring *ring =
+		container_of(work, struct u132_ring, scheduler.work);
 	struct u132 *u132 = ring->u132;
 	down(&u132->scheduler_lock);
 	if (ring->in_use) {
@@ -1386,10 +1371,11 @@ static void u132_hcd_ring_work_scheduler(void *data)
 	}
 }
 
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
 {
 	struct u132_ring *ring;
-	struct u132_endp *endp = data;
+	struct u132_endp *endp =
+		container_of(work, struct u132_endp, scheduler.work);
 	struct u132 *u132 = endp->u132;
 	down(&u132->scheduler_lock);
 	ring = endp->ring;
@@ -1947,7 +1933,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
 	if (!endp) {
 		return -ENOMEM;
 	}
-	INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
 	spin_lock_init(&endp->queue_lock.slock);
 	INIT_LIST_HEAD(&endp->urb_more);
 	ring = endp->ring = &u132->ring[0];
@@ -2036,7 +2022,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
 	if (!endp) {
 		return -ENOMEM;
 	}
-	INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
 	spin_lock_init(&endp->queue_lock.slock);
 	INIT_LIST_HEAD(&endp->urb_more);
 	endp->dequeueing = 0;
@@ -2121,7 +2107,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
 	if (!endp) {
 		return -ENOMEM;
 	}
-	INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
 	spin_lock_init(&endp->queue_lock.slock);
 	INIT_LIST_HEAD(&endp->urb_more);
 	ring = endp->ring = &u132->ring[0];
@@ -3100,10 +3086,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
 		ring->number = rings + 1;
 		ring->length = 0;
 		ring->curr_endp = NULL;
-		INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
-			(void *)ring);
+		INIT_DELAYED_WORK(&ring->scheduler,
+			u132_hcd_ring_work_scheduler);
 	} down(&u132->sw_lock);
-	INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+	INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
 	while (ports-- > 0) {
 		struct u132_port *port = &u132->port[ports];
 		port->u132 = u132;
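The initialization side completes the conversion: INIT_DELAYED_WORK() takes only the item and the handler, the old third data argument having become redundant once context recovery moved into the handler via container_of(). A hedged init-and-arm sketch (the 500 ms figure is illustrative, not taken from this driver):

INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
/* arm the first run; u132_monitor_queue_work() takes the matching kref */
u132_monitor_queue_work(u132, msecs_to_jiffies(500));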