author    Dmitry Torokhov <dtor@insightbb.com>  2006-12-08 01:07:56 -0500
committer Dmitry Torokhov <dtor@insightbb.com>  2006-12-08 01:07:56 -0500
commit    bef986502fa398b1785a3979b1aa17cd902d3527 (patch)
tree      b59c1afe7b1dfcc001b86e54863f550d7ddc8c34 /drivers/usb/host/u132-hcd.c
parent    4bdbd2807deeccc0793d57fb5120d7a53f2c0b3c (diff)
parent    c99767974ebd2a719d849fdeaaa1674456f5283f (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: drivers/usb/input/hid.h
Diffstat (limited to 'drivers/usb/host/u132-hcd.c')
-rw-r--r--  drivers/usb/host/u132-hcd.c  76
1 file changed, 29 insertions(+), 47 deletions(-)
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 32c635ecbf31..a9d7119e3176 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -71,7 +71,7 @@ static int distrust_firmware = 1;
 module_param(distrust_firmware, bool, 0);
 MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
         "t setup");
-DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
+static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
 /*
 * u132_module_lock exists to protect access to global variables
 *
@@ -163,7 +163,7 @@ struct u132_endp {
         u16 queue_next;
         struct urb *urb_list[ENDP_QUEUE_SIZE];
         struct list_head urb_more;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 struct u132_ring {
         unsigned in_use:1;
@@ -171,7 +171,7 @@ struct u132_ring {
         u8 number;
         struct u132 *u132;
         struct u132_endp *curr_endp;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 #define OHCI_QUIRK_AMD756 0x01
 #define OHCI_QUIRK_SUPERIO 0x02
@@ -198,20 +198,16 @@ struct u132 {
         u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
         int flags;
         unsigned long next_statechange;
-        struct work_struct monitor;
+        struct delayed_work monitor;
         int num_endpoints;
         struct u132_addr addr[MAX_U132_ADDRS];
         struct u132_udev udev[MAX_U132_UDEVS];
         struct u132_port port[MAX_U132_PORTS];
         struct u132_endp *endp[MAX_U132_ENDPS];
 };
-int usb_ftdi_elan_read_reg(struct platform_device *pdev, u32 *data);
-int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, u8 addressofs,
-        u8 width, u32 *data);
-int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, u8 addressofs,
-        u8 width, u32 data);
+
 /*
-* these can not be inlines because we need the structure offset!!
+* these cannot be inlines because we need the structure offset!!
 * Does anyone have a better way?????
 */
 #define u132_read_pcimem(u132, member, data) \
@@ -314,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
         if (delta > 0) {
                 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                         return;
-        } else if (queue_work(workqueue, &ring->scheduler))
+        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                 return;
         kref_put(&u132->kref, u132_hcd_delete);
         return;
@@ -393,12 +389,8 @@ static inline void u132_endp_init_kref(struct u132 *u132,
 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &endp->scheduler, delta))
-                        kref_get(&endp->kref);
-        } else if (queue_work(workqueue, &endp->scheduler))
-                kref_get(&endp->kref);
-        return;
+        if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+                kref_get(&endp->kref);
 }

 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -414,24 +406,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132)

 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
-                        kref_get(&u132->kref);
-                }
-        } else if (queue_work(workqueue, &u132->monitor))
-                kref_get(&u132->kref);
-        return;
+        if (queue_delayed_work(workqueue, &u132->monitor, delta))
+                kref_get(&u132->kref);
 }

 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta))
-                        return;
-        } else if (queue_work(workqueue, &u132->monitor))
-                return;
-        kref_put(&u132->kref, u132_hcd_delete);
-        return;
+        if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+                kref_put(&u132->kref, u132_hcd_delete);
 }

 static void u132_monitor_cancel_work(struct u132 *u132)
@@ -493,9 +475,9 @@ static int read_roothub_info(struct u132 *u132)
         return 0;
 }

-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
 {
-        struct u132 *u132 = data;
+        struct u132 *u132 = container_of(work, struct u132, monitor.work);
         if (u132->going > 1) {
                 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
                         , u132->going);
@@ -1319,15 +1301,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
         }
 }

-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
 /*
 * this work function is only executed from the work queue
 *
 */
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
 {
-        struct u132_ring *ring = data;
+        struct u132_ring *ring =
+                container_of(work, struct u132_ring, scheduler.work);
         struct u132 *u132 = ring->u132;
         down(&u132->scheduler_lock);
         if (ring->in_use) {
@@ -1386,10 +1367,11 @@ static void u132_hcd_ring_work_scheduler(void *data)
         }
 }

-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
 {
         struct u132_ring *ring;
-        struct u132_endp *endp = data;
+        struct u132_endp *endp =
+                container_of(work, struct u132_endp, scheduler.work);
         struct u132 *u132 = endp->u132;
         down(&u132->scheduler_lock);
         ring = endp->ring;
@@ -1947,7 +1929,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -2036,7 +2018,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         endp->dequeueing = 0;
@@ -2121,7 +2103,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -3045,7 +3027,7 @@ static struct hc_driver u132_hc_driver = {
 * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
 * is held for writing, thus this module must not call usb_remove_hcd()
 * synchronously - but instead should immediately stop activity to the
-* device and ansynchronously call usb_remove_hcd()
+* device and asynchronously call usb_remove_hcd()
 */
 static int __devexit u132_remove(struct platform_device *pdev)
 {
@@ -3100,10 +3082,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
                 ring->number = rings + 1;
                 ring->length = 0;
                 ring->curr_endp = NULL;
-                INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
-                        (void *)ring);
+                INIT_DELAYED_WORK(&ring->scheduler,
+                        u132_hcd_ring_work_scheduler);
         } down(&u132->sw_lock);
-        INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+        INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
         while (ports-- > 0) {
                 struct u132_port *port = &u132->port[ports];
                 port->u132 = u132;
@@ -3241,7 +3223,7 @@ static int u132_resume(struct platform_device *pdev)
 #define u132_resume NULL
 #endif
 /*
-* this driver is loaded explicitely by ftdi_u132
+* this driver is loaded explicitly by ftdi_u132
 *
 * the platform_driver struct is static because it is per type of module
 */
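
For context, the changes in this diff follow the workqueue API conversion in which a work handler takes a struct work_struct * and recovers its enclosing object with container_of(), INIT_WORK() loses its third "data" argument, and the old queue_work() fast path becomes queue_delayed_work() with a delay of 0. Below is a minimal sketch of that pattern, not taken from u132-hcd.c; the example_dev, example_work_handler, and example_init names are hypothetical and shown only to illustrate the conversion.

/* Sketch of the delayed_work + container_of() pattern used by this diff. */
#include <linux/workqueue.h>
#include <linux/kernel.h>

struct example_dev {
        int state;
        struct delayed_work scheduler;  /* was: struct work_struct */
};

static void example_work_handler(struct work_struct *work)
{
        /* delayed_work embeds a work_struct member named "work",
         * hence the ".work" in the container_of() expression. */
        struct example_dev *dev =
                container_of(work, struct example_dev, scheduler.work);

        dev->state++;
}

static void example_init(struct example_dev *dev)
{
        /* no data pointer argument any more; the handler uses container_of() */
        INIT_DELAYED_WORK(&dev->scheduler, example_work_handler);

        /* a delay of 0 queues the work immediately, as queue_work() did */
        schedule_delayed_work(&dev->scheduler, 0);
}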