author		Alan Stern <stern@rowland.harvard.edu>		2007-08-08 11:48:02 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>		2007-10-12 17:55:10 -0400
commit		e9df41c5c5899259541dc928872cad4d07b82076
tree		12bb0917eeecbe62b2b5d3dc576806c7f2728550 /drivers/usb/host/u132-hcd.c
parent		b0e396e3097ce4914c643bc3f0c2fe0098f551eb
USB: make HCDs responsible for managing endpoint queues
This patch (as954) implements a suggestion of David Brownell's. Now
the host controller drivers are responsible for linking and unlinking
URBs to/from their endpoint queues. This eliminates the possibility of
strange situations where usbcore thinks an URB is linked but the HCD
thinks it isn't. It also means HCDs no longer have to check for URBs
being dequeued before they were fully enqueued.
In addition to the core changes, this requires changing every host
controller driver and the root-hub URB handler. For the most part the
required changes are fairly small; drivers have to call
usb_hcd_link_urb_to_ep() in their urb_enqueue method,
usb_hcd_check_unlink_urb() in their urb_dequeue method, and
usb_hcd_unlink_urb_from_ep() before giving URBs back. A few HCDs make
matters more complicated by the way they split up the flow of control.
In addition, some method interfaces change. The endpoint argument to
urb_enqueue is now redundant, so it is removed. The unlink status is
required by usb_hcd_check_unlink_urb(), so it has been added to
urb_dequeue.
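
For illustration, the per-URB pattern an HCD ends up with looks roughly like
the sketch below. This is not code from the patch: the example_* names,
struct example_endp, its lock, and example_hw_start()/example_hw_cancel()
are hypothetical stand-ins for a driver's own machinery; only the
usb_hcd_link_urb_to_ep(), usb_hcd_check_unlink_urb(), and
usb_hcd_unlink_urb_from_ep() helpers are the real usbcore ones described
above (usb_hcd_giveback_urb() is shown in its two-argument form, as used at
the time of this patch).

/*
 * Illustrative sketch only -- hypothetical example_* driver, not this patch.
 */
static int example_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	struct example_endp *endp = urb->ep->hcpriv;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&endp->lock, flags);
	rc = usb_hcd_link_urb_to_ep(hcd, urb);	/* add URB to the ep queue */
	if (rc == 0) {
		rc = example_hw_start(endp, urb);
		if (rc)
			usb_hcd_unlink_urb_from_ep(hcd, urb);	/* undo link */
	}
	spin_unlock_irqrestore(&endp->lock, flags);
	return rc;
}

static int example_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct example_endp *endp = urb->ep->hcpriv;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&endp->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);	/* still queued? */
	if (rc == 0) {
		example_hw_cancel(endp, urb);
		usb_hcd_unlink_urb_from_ep(hcd, urb);	/* before giveback */
		urb->status = status;
	}
	spin_unlock_irqrestore(&endp->lock, flags);
	if (rc == 0)
		usb_hcd_giveback_urb(hcd, urb);
	return rc;
}

Because the link and unlink happen under the HCD's own endpoint lock,
usbcore's idea of which URBs are queued can no longer drift from the HCD's,
which is exactly the mismatch described in the first paragraph.
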
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: David Brownell <david-b@pacbell.net>
CC: Olav Kongas <ok@artecdesign.ee>
CC: Tony Olech <tony.olech@elandigitalsystems.com>
CC: Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/u132-hcd.c')
-rw-r--r--	drivers/usb/host/u132-hcd.c	170
1 file changed, 119 insertions, 51 deletions
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 598ad098aeeb..c87660b5edc3 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -521,6 +521,7 @@ static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
 	urb->status = status;
 	urb->hcpriv = NULL;
 	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
 	endp->queue_next += 1;
 	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
 		endp->active = 0;
@@ -561,6 +562,7 @@ static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
 	urb->status = status;
 	urb->hcpriv = NULL;
 	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
 	endp->queue_next += 1;
 	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
 		endp->active = 0;
@@ -1876,20 +1878,32 @@ static int u132_hcd_reset(struct usb_hcd *hcd)
 }
 
 static int create_endpoint_and_queue_int(struct u132 *u132,
-	struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb,
+	struct u132_udev *udev, struct urb *urb,
 	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
 	gfp_t mem_flags)
 {
 	struct u132_ring *ring;
 	unsigned long irqs;
-	u8 endp_number = ++u132->num_endpoints;
-	struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
-		kmalloc(sizeof(struct u132_endp), mem_flags);
+	int rc;
+	u8 endp_number;
+	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
 	if (!endp) {
 		return -ENOMEM;
 	}
+
+	spin_lock_init(&endp->queue_lock.slock);
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(endp);
+		return rc;
+	}
+
+	endp_number = ++u132->num_endpoints;
+	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
 	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
-	spin_lock_init(&endp->queue_lock.slock);
 	INIT_LIST_HEAD(&endp->urb_more);
 	ring = endp->ring = &u132->ring[0];
 	if (ring->curr_endp) {
@@ -1905,7 +1919,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
 	endp->delayed = 0;
 	endp->endp_number = endp_number;
 	endp->u132 = u132;
-	endp->hep = hep;
+	endp->hep = urb->ep;
 	endp->pipetype = usb_pipetype(urb->pipe);
 	u132_endp_init_kref(u132, endp);
 	if (usb_pipein(urb->pipe)) {
@@ -1924,7 +1938,6 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
 		u132_udev_get_kref(u132, udev);
 	}
 	urb->hcpriv = u132;
-	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
 	endp->delayed = 1;
 	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
 	endp->udev_number = address;
@@ -1939,8 +1952,8 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
 	return 0;
 }
 
-static int queue_int_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
-	struct usb_host_endpoint *hep, struct urb *urb,
+static int queue_int_on_old_endpoint(struct u132 *u132,
+	struct u132_udev *udev, struct urb *urb,
 	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
 	u8 usb_endp, u8 address)
 {
@@ -1964,21 +1977,33 @@ static int queue_int_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
 }
 
 static int create_endpoint_and_queue_bulk(struct u132 *u132,
-	struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb,
+	struct u132_udev *udev, struct urb *urb,
 	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
 	gfp_t mem_flags)
 {
 	int ring_number;
 	struct u132_ring *ring;
 	unsigned long irqs;
-	u8 endp_number = ++u132->num_endpoints;
-	struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
-		kmalloc(sizeof(struct u132_endp), mem_flags);
+	int rc;
+	u8 endp_number;
+	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
 	if (!endp) {
 		return -ENOMEM;
 	}
+
+	spin_lock_init(&endp->queue_lock.slock);
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(endp);
+		return rc;
+	}
+
+	endp_number = ++u132->num_endpoints;
+	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
 	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
-	spin_lock_init(&endp->queue_lock.slock);
 	INIT_LIST_HEAD(&endp->urb_more);
 	endp->dequeueing = 0;
 	endp->edset_flush = 0;
@@ -1986,7 +2011,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
 	endp->delayed = 0;
 	endp->endp_number = endp_number;
 	endp->u132 = u132;
-	endp->hep = hep;
+	endp->hep = urb->ep;
 	endp->pipetype = usb_pipetype(urb->pipe);
 	u132_endp_init_kref(u132, endp);
 	if (usb_pipein(urb->pipe)) {
@@ -2015,7 +2040,6 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
 	}
 	ring->length += 1;
 	urb->hcpriv = u132;
-	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
 	endp->udev_number = address;
 	endp->usb_addr = usb_addr;
 	endp->usb_endp = usb_endp;
@@ -2029,7 +2053,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
 }
 
 static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
-	struct usb_host_endpoint *hep, struct urb *urb,
+	struct urb *urb,
 	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
 	u8 usb_endp, u8 address)
 {
@@ -2051,19 +2075,32 @@ static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
 }
 
 static int create_endpoint_and_queue_control(struct u132 *u132,
-	struct usb_host_endpoint *hep, struct urb *urb,
+	struct urb *urb,
 	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
 	gfp_t mem_flags)
 {
 	struct u132_ring *ring;
-	u8 endp_number = ++u132->num_endpoints;
-	struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
-		kmalloc(sizeof(struct u132_endp), mem_flags);
+	unsigned long irqs;
+	int rc;
+	u8 endp_number;
+	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
 	if (!endp) {
 		return -ENOMEM;
 	}
+
+	spin_lock_init(&endp->queue_lock.slock);
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(endp);
+		return rc;
+	}
+
+	endp_number = ++u132->num_endpoints;
+	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
 	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
-	spin_lock_init(&endp->queue_lock.slock);
 	INIT_LIST_HEAD(&endp->urb_more);
 	ring = endp->ring = &u132->ring[0];
 	if (ring->curr_endp) {
@@ -2079,11 +2116,10 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
 	endp->delayed = 0;
 	endp->endp_number = endp_number;
 	endp->u132 = u132;
-	endp->hep = hep;
+	endp->hep = urb->ep;
 	u132_endp_init_kref(u132, endp);
 	u132_endp_get_kref(u132, endp);
 	if (usb_addr == 0) {
-		unsigned long irqs;
 		u8 address = u132->addr[usb_addr].address;
 		struct u132_udev *udev = &u132->udev[address];
 		endp->udev_number = address;
@@ -2097,7 +2133,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
 		udev->endp_number_in[usb_endp] = endp_number;
 		udev->endp_number_out[usb_endp] = endp_number;
 		urb->hcpriv = u132;
-		spin_lock_irqsave(&endp->queue_lock.slock, irqs);
 		endp->queue_size = 1;
 		endp->queue_last = 0;
 		endp->queue_next = 0;
@@ -2106,7 +2141,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
 		u132_endp_queue_work(u132, endp, 0);
 		return 0;
 	} else { /*(usb_addr > 0) */
-		unsigned long irqs;
 		u8 address = u132->addr[usb_addr].address;
 		struct u132_udev *udev = &u132->udev[address];
 		endp->udev_number = address;
@@ -2120,7 +2154,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
 		udev->endp_number_in[usb_endp] = endp_number;
 		udev->endp_number_out[usb_endp] = endp_number;
 		urb->hcpriv = u132;
-		spin_lock_irqsave(&endp->queue_lock.slock, irqs);
 		endp->queue_size = 1;
 		endp->queue_last = 0;
 		endp->queue_next = 0;
@@ -2132,7 +2165,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
 }
 
 static int queue_control_on_old_endpoint(struct u132 *u132,
-	struct usb_host_endpoint *hep, struct urb *urb,
+	struct urb *urb,
 	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
 	u8 usb_endp)
 {
@@ -2232,8 +2265,8 @@ static int queue_control_on_old_endpoint(struct u132 *u132,
 	}
 }
 
-static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
-	struct urb *urb, gfp_t mem_flags)
+static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+	gfp_t mem_flags)
 {
 	struct u132 *u132 = hcd_to_u132(hcd);
 	if (irqs_disabled()) {
@@ -2258,16 +2291,24 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
 		if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
 			u8 address = u132->addr[usb_addr].address;
 			struct u132_udev *udev = &u132->udev[address];
-			struct u132_endp *endp = hep->hcpriv;
+			struct u132_endp *endp = urb->ep->hcpriv;
 			urb->actual_length = 0;
 			if (endp) {
 				unsigned long irqs;
 				int retval;
 				spin_lock_irqsave(&endp->queue_lock.slock,
 					irqs);
-				retval = queue_int_on_old_endpoint(u132, udev,
-					hep, urb, usb_dev, endp, usb_addr,
-					usb_endp, address);
+				retval = usb_hcd_link_urb_to_ep(hcd, urb);
+				if (retval == 0) {
+					retval = queue_int_on_old_endpoint(
+							u132, udev, urb,
+							usb_dev, endp,
+							usb_addr, usb_endp,
+							address);
+					if (retval)
+						usb_hcd_unlink_urb_from_ep(
+								hcd, urb);
+				}
 				spin_unlock_irqrestore(&endp->queue_lock.slock,
 					irqs);
 				if (retval) {
@@ -2282,8 +2323,8 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
 				return -EINVAL;
 			} else { /*(endp == NULL) */
 				return create_endpoint_and_queue_int(u132, udev,
-					hep, urb, usb_dev, usb_addr, usb_endp,
-					address, mem_flags);
+					urb, usb_dev, usb_addr,
+					usb_endp, address, mem_flags);
 			}
 		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
 			dev_err(&u132->platform_dev->dev, "the hardware does no"
@@ -2292,16 +2333,24 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
 		} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
 			u8 address = u132->addr[usb_addr].address;
 			struct u132_udev *udev = &u132->udev[address];
-			struct u132_endp *endp = hep->hcpriv;
+			struct u132_endp *endp = urb->ep->hcpriv;
 			urb->actual_length = 0;
 			if (endp) {
 				unsigned long irqs;
 				int retval;
 				spin_lock_irqsave(&endp->queue_lock.slock,
 					irqs);
-				retval = queue_bulk_on_old_endpoint(u132, udev,
-					hep, urb, usb_dev, endp, usb_addr,
-					usb_endp, address);
+				retval = usb_hcd_link_urb_to_ep(hcd, urb);
+				if (retval == 0) {
+					retval = queue_bulk_on_old_endpoint(
+							u132, udev, urb,
+							usb_dev, endp,
+							usb_addr, usb_endp,
+							address);
+					if (retval)
+						usb_hcd_unlink_urb_from_ep(
+								hcd, urb);
+				}
 				spin_unlock_irqrestore(&endp->queue_lock.slock,
 					irqs);
 				if (retval) {
@@ -2314,10 +2363,10 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
 				return -EINVAL;
 			} else
 				return create_endpoint_and_queue_bulk(u132,
-					udev, hep, urb, usb_dev, usb_addr,
+					udev, urb, usb_dev, usb_addr,
 					usb_endp, address, mem_flags);
 		} else {
-			struct u132_endp *endp = hep->hcpriv;
+			struct u132_endp *endp = urb->ep->hcpriv;
 			u16 urb_size = 8;
 			u8 *b = urb->setup_packet;
 			int i = 0;
@@ -2340,9 +2389,16 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
 				int retval;
 				spin_lock_irqsave(&endp->queue_lock.slock,
 					irqs);
-				retval = queue_control_on_old_endpoint(u132,
-					hep, urb, usb_dev, endp, usb_addr,
-					usb_endp);
+				retval = usb_hcd_link_urb_to_ep(hcd, urb);
+				if (retval == 0) {
+					retval = queue_control_on_old_endpoint(
+							u132, urb, usb_dev,
+							endp, usb_addr,
+							usb_endp);
+					if (retval)
+						usb_hcd_unlink_urb_from_ep(
+								hcd, urb);
+				}
 				spin_unlock_irqrestore(&endp->queue_lock.slock,
 					irqs);
 				if (retval) {
@@ -2355,7 +2411,7 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
 				return -EINVAL;
 			} else
 				return create_endpoint_and_queue_control(u132,
-					hep, urb, usb_dev, usb_addr, usb_endp,
+					urb, usb_dev, usb_addr, usb_endp,
 					mem_flags);
 		}
 	}
@@ -2390,10 +2446,17 @@ static int dequeue_from_overflow_chain(struct u132 *u132,
 }
 
 static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
-	struct urb *urb)
+	struct urb *urb, int status)
 {
 	unsigned long irqs;
+	int rc;
+
 	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		return rc;
+	}
 	if (endp->queue_size == 0) {
 		dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
 			"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
@@ -2438,6 +2501,8 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
 		}
 		if (urb_slot) {
 			struct usb_hcd *hcd = u132_to_hcd(u132);
+
+			usb_hcd_unlink_urb_from_ep(hcd, urb);
 			endp->queue_size -= 1;
 			if (list_empty(&endp->urb_more)) {
 				spin_unlock_irqrestore(&endp->queue_lock.slock,
@@ -2467,7 +2532,10 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
 			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
 			return -EINVAL;
 		} else {
-			int retval = dequeue_from_overflow_chain(u132, endp,
+			int retval;
+
+			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
+			retval = dequeue_from_overflow_chain(u132, endp,
 				urb);
 			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
 			return retval;
@@ -2475,7 +2543,7 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
 	}
 }
 
-static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	struct u132 *u132 = hcd_to_u132(hcd);
 	if (u132->going > 2) {
@@ -2490,11 +2558,11 @@ static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
 		if (usb_pipein(urb->pipe)) {
 			u8 endp_number = udev->endp_number_in[usb_endp];
 			struct u132_endp *endp = u132->endp[endp_number - 1];
-			return u132_endp_urb_dequeue(u132, endp, urb);
+			return u132_endp_urb_dequeue(u132, endp, urb, status);
 		} else {
 			u8 endp_number = udev->endp_number_out[usb_endp];
 			struct u132_endp *endp = u132->endp[endp_number - 1];
-			return u132_endp_urb_dequeue(u132, endp, urb);
+			return u132_endp_urb_dequeue(u132, endp, urb, status);
 		}
 	}
 }