diff options
author | Robert Jarzmik <robert.jarzmik@free.fr> | 2010-01-27 12:38:03 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2010-03-02 17:54:47 -0500 |
commit | 5e23e90f33888769ffe253663cc5f3ea0bb6da49 (patch) | |
tree | 9a59d36fdaaecea5eb7fa2d39c655032dc6920a2 /drivers/usb | |
parent | fb088e335d78f866be2e56eac6d500112a96aa11 (diff) |
USB: pxa27x_udc: Fix deadlocks on request queueing
As reported by Antonio, there are cases where the ep->lock
can be taken twice, triggering a deadlock.
The typical sequence is:
irq_handler
\
-> gadget.complete()
\
-> pxa27x_udc.pxa_ep_queue() : ep->lock is taken
\
-> gadget.complete()
\
-> pxa27x_udc.pxa_ep_queue() : ep->lock is taken
==> *deadlock*
The patch fixes this by:
- releasing the lock each time gadget.complete() is called
- adding a check in handle_ep() to detect a recursive call,
in which case the function becomes a no-op.
The patch is still not good enough for ep0. For this unique
endpoint, another well thought over patch will be needed.
Reported-by: Antonio Ospite <ospite@studenti.unina.it>
Tested-by: Antonio Ospite <ospite@studenti.unina.it>
Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
Cc: David Brownell <dbrownell@users.sourceforge.net>
Cc: Eric Miao <eric.y.miao@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb')
-rw-r--r-- | drivers/usb/gadget/pxa27x_udc.c | 114 | ||||
-rw-r--r-- | drivers/usb/gadget/pxa27x_udc.h | 6 |
2 files changed, 85 insertions, 35 deletions
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c index e8b4b6992a9d..05b892c3d686 100644 --- a/drivers/usb/gadget/pxa27x_udc.c +++ b/drivers/usb/gadget/pxa27x_udc.c | |||
@@ -742,13 +742,17 @@ static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req) | |||
742 | * @ep: pxa physical endpoint | 742 | * @ep: pxa physical endpoint |
743 | * @req: pxa request | 743 | * @req: pxa request |
744 | * @status: usb request status sent to gadget API | 744 | * @status: usb request status sent to gadget API |
745 | * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held | ||
745 | * | 746 | * |
746 | * Context: ep->lock held | 747 | * Context: ep->lock held if flags not NULL, else ep->lock released |
747 | * | 748 | * |
748 | * Retire a pxa27x usb request. Endpoint must be locked. | 749 | * Retire a pxa27x usb request. Endpoint must be locked. |
749 | */ | 750 | */ |
750 | static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status) | 751 | static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status, |
752 | unsigned long *pflags) | ||
751 | { | 753 | { |
754 | unsigned long flags; | ||
755 | |||
752 | ep_del_request(ep, req); | 756 | ep_del_request(ep, req); |
753 | if (likely(req->req.status == -EINPROGRESS)) | 757 | if (likely(req->req.status == -EINPROGRESS)) |
754 | req->req.status = status; | 758 | req->req.status = status; |
@@ -760,38 +764,48 @@ static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status) | |||
760 | &req->req, status, | 764 | &req->req, status, |
761 | req->req.actual, req->req.length); | 765 | req->req.actual, req->req.length); |
762 | 766 | ||
767 | if (pflags) | ||
768 | spin_unlock_irqrestore(&ep->lock, *pflags); | ||
769 | local_irq_save(flags); | ||
763 | req->req.complete(&req->udc_usb_ep->usb_ep, &req->req); | 770 | req->req.complete(&req->udc_usb_ep->usb_ep, &req->req); |
771 | local_irq_restore(flags); | ||
772 | if (pflags) | ||
773 | spin_lock_irqsave(&ep->lock, *pflags); | ||
764 | } | 774 | } |
765 | 775 | ||
766 | /** | 776 | /** |
767 | * ep_end_out_req - Ends endpoint OUT request | 777 | * ep_end_out_req - Ends endpoint OUT request |
768 | * @ep: physical endpoint | 778 | * @ep: physical endpoint |
769 | * @req: pxa request | 779 | * @req: pxa request |
780 | * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held | ||
770 | * | 781 | * |
771 | * Context: ep->lock held | 782 | * Context: ep->lock held or released (see req_done()) |
772 | * | 783 | * |
773 | * Ends endpoint OUT request (completes usb request). | 784 | * Ends endpoint OUT request (completes usb request). |
774 | */ | 785 | */ |
775 | static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) | 786 | static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, |
787 | unsigned long *pflags) | ||
776 | { | 788 | { |
777 | inc_ep_stats_reqs(ep, !USB_DIR_IN); | 789 | inc_ep_stats_reqs(ep, !USB_DIR_IN); |
778 | req_done(ep, req, 0); | 790 | req_done(ep, req, 0, pflags); |
779 | } | 791 | } |
780 | 792 | ||
781 | /** | 793 | /** |
782 | * ep0_end_out_req - Ends control endpoint OUT request (ends data stage) | 794 | * ep0_end_out_req - Ends control endpoint OUT request (ends data stage) |
783 | * @ep: physical endpoint | 795 | * @ep: physical endpoint |
784 | * @req: pxa request | 796 | * @req: pxa request |
797 | * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held | ||
785 | * | 798 | * |
786 | * Context: ep->lock held | 799 | * Context: ep->lock held or released (see req_done()) |
787 | * | 800 | * |
788 | * Ends control endpoint OUT request (completes usb request), and puts | 801 | * Ends control endpoint OUT request (completes usb request), and puts |
789 | * control endpoint into idle state | 802 | * control endpoint into idle state |
790 | */ | 803 | */ |
791 | static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) | 804 | static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, |
805 | unsigned long *pflags) | ||
792 | { | 806 | { |
793 | set_ep0state(ep->dev, OUT_STATUS_STAGE); | 807 | set_ep0state(ep->dev, OUT_STATUS_STAGE); |
794 | ep_end_out_req(ep, req); | 808 | ep_end_out_req(ep, req, pflags); |
795 | ep0_idle(ep->dev); | 809 | ep0_idle(ep->dev); |
796 | } | 810 | } |
797 | 811 | ||
@@ -799,31 +813,35 @@ static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) | |||
799 | * ep_end_in_req - Ends endpoint IN request | 813 | * ep_end_in_req - Ends endpoint IN request |
800 | * @ep: physical endpoint | 814 | * @ep: physical endpoint |
801 | * @req: pxa request | 815 | * @req: pxa request |
816 | * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held | ||
802 | * | 817 | * |
803 | * Context: ep->lock held | 818 | * Context: ep->lock held or released (see req_done()) |
804 | * | 819 | * |
805 | * Ends endpoint IN request (completes usb request). | 820 | * Ends endpoint IN request (completes usb request). |
806 | */ | 821 | */ |
807 | static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) | 822 | static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, |
823 | unsigned long *pflags) | ||
808 | { | 824 | { |
809 | inc_ep_stats_reqs(ep, USB_DIR_IN); | 825 | inc_ep_stats_reqs(ep, USB_DIR_IN); |
810 | req_done(ep, req, 0); | 826 | req_done(ep, req, 0, pflags); |
811 | } | 827 | } |
812 | 828 | ||
813 | /** | 829 | /** |
814 | * ep0_end_in_req - Ends control endpoint IN request (ends data stage) | 830 | * ep0_end_in_req - Ends control endpoint IN request (ends data stage) |
815 | * @ep: physical endpoint | 831 | * @ep: physical endpoint |
816 | * @req: pxa request | 832 | * @req: pxa request |
833 | * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held | ||
817 | * | 834 | * |
818 | * Context: ep->lock held | 835 | * Context: ep->lock held or released (see req_done()) |
819 | * | 836 | * |
820 | * Ends control endpoint IN request (completes usb request), and puts | 837 | * Ends control endpoint IN request (completes usb request), and puts |
821 | * control endpoint into status state | 838 | * control endpoint into status state |
822 | */ | 839 | */ |
823 | static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) | 840 | static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, |
841 | unsigned long *pflags) | ||
824 | { | 842 | { |
825 | set_ep0state(ep->dev, IN_STATUS_STAGE); | 843 | set_ep0state(ep->dev, IN_STATUS_STAGE); |
826 | ep_end_in_req(ep, req); | 844 | ep_end_in_req(ep, req, pflags); |
827 | } | 845 | } |
828 | 846 | ||
829 | /** | 847 | /** |
@@ -831,19 +849,22 @@ static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) | |||
831 | * @ep: pxa endpoint | 849 | * @ep: pxa endpoint |
832 | * @status: usb request status | 850 | * @status: usb request status |
833 | * | 851 | * |
834 | * Context: ep->lock held | 852 | * Context: ep->lock released |
835 | * | 853 | * |
836 | * Dequeues all requests on an endpoint. As a side effect, interrupts will be | 854 | * Dequeues all requests on an endpoint. As a side effect, interrupts will be |
837 | * disabled on that endpoint (because no more requests). | 855 | * disabled on that endpoint (because no more requests). |
838 | */ | 856 | */ |
839 | static void nuke(struct pxa_ep *ep, int status) | 857 | static void nuke(struct pxa_ep *ep, int status) |
840 | { | 858 | { |
841 | struct pxa27x_request *req; | 859 | struct pxa27x_request *req; |
860 | unsigned long flags; | ||
842 | 861 | ||
862 | spin_lock_irqsave(&ep->lock, flags); | ||
843 | while (!list_empty(&ep->queue)) { | 863 | while (!list_empty(&ep->queue)) { |
844 | req = list_entry(ep->queue.next, struct pxa27x_request, queue); | 864 | req = list_entry(ep->queue.next, struct pxa27x_request, queue); |
845 | req_done(ep, req, status); | 865 | req_done(ep, req, status, &flags); |
846 | } | 866 | } |
867 | spin_unlock_irqrestore(&ep->lock, flags); | ||
847 | } | 868 | } |
848 | 869 | ||
849 | /** | 870 | /** |
@@ -1123,6 +1144,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, | |||
1123 | int rc = 0; | 1144 | int rc = 0; |
1124 | int is_first_req; | 1145 | int is_first_req; |
1125 | unsigned length; | 1146 | unsigned length; |
1147 | int recursion_detected; | ||
1126 | 1148 | ||
1127 | req = container_of(_req, struct pxa27x_request, req); | 1149 | req = container_of(_req, struct pxa27x_request, req); |
1128 | udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); | 1150 | udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); |
@@ -1152,6 +1174,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, | |||
1152 | return -EMSGSIZE; | 1174 | return -EMSGSIZE; |
1153 | 1175 | ||
1154 | spin_lock_irqsave(&ep->lock, flags); | 1176 | spin_lock_irqsave(&ep->lock, flags); |
1177 | recursion_detected = ep->in_handle_ep; | ||
1155 | 1178 | ||
1156 | is_first_req = list_empty(&ep->queue); | 1179 | is_first_req = list_empty(&ep->queue); |
1157 | ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", | 1180 | ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", |
@@ -1161,12 +1184,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, | |||
1161 | if (!ep->enabled) { | 1184 | if (!ep->enabled) { |
1162 | _req->status = -ESHUTDOWN; | 1185 | _req->status = -ESHUTDOWN; |
1163 | rc = -ESHUTDOWN; | 1186 | rc = -ESHUTDOWN; |
1164 | goto out; | 1187 | goto out_locked; |
1165 | } | 1188 | } |
1166 | 1189 | ||
1167 | if (req->in_use) { | 1190 | if (req->in_use) { |
1168 | ep_err(ep, "refusing to queue req %p (already queued)\n", req); | 1191 | ep_err(ep, "refusing to queue req %p (already queued)\n", req); |
1169 | goto out; | 1192 | goto out_locked; |
1170 | } | 1193 | } |
1171 | 1194 | ||
1172 | length = _req->length; | 1195 | length = _req->length; |
@@ -1174,12 +1197,13 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, | |||
1174 | _req->actual = 0; | 1197 | _req->actual = 0; |
1175 | 1198 | ||
1176 | ep_add_request(ep, req); | 1199 | ep_add_request(ep, req); |
1200 | spin_unlock_irqrestore(&ep->lock, flags); | ||
1177 | 1201 | ||
1178 | if (is_ep0(ep)) { | 1202 | if (is_ep0(ep)) { |
1179 | switch (dev->ep0state) { | 1203 | switch (dev->ep0state) { |
1180 | case WAIT_ACK_SET_CONF_INTERF: | 1204 | case WAIT_ACK_SET_CONF_INTERF: |
1181 | if (length == 0) { | 1205 | if (length == 0) { |
1182 | ep_end_in_req(ep, req); | 1206 | ep_end_in_req(ep, req, NULL); |
1183 | } else { | 1207 | } else { |
1184 | ep_err(ep, "got a request of %d bytes while" | 1208 | ep_err(ep, "got a request of %d bytes while" |
1185 | "in state WAIT_ACK_SET_CONF_INTERF\n", | 1209 | "in state WAIT_ACK_SET_CONF_INTERF\n", |
@@ -1192,12 +1216,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, | |||
1192 | case IN_DATA_STAGE: | 1216 | case IN_DATA_STAGE: |
1193 | if (!ep_is_full(ep)) | 1217 | if (!ep_is_full(ep)) |
1194 | if (write_ep0_fifo(ep, req)) | 1218 | if (write_ep0_fifo(ep, req)) |
1195 | ep0_end_in_req(ep, req); | 1219 | ep0_end_in_req(ep, req, NULL); |
1196 | break; | 1220 | break; |
1197 | case OUT_DATA_STAGE: | 1221 | case OUT_DATA_STAGE: |
1198 | if ((length == 0) || !epout_has_pkt(ep)) | 1222 | if ((length == 0) || !epout_has_pkt(ep)) |
1199 | if (read_ep0_fifo(ep, req)) | 1223 | if (read_ep0_fifo(ep, req)) |
1200 | ep0_end_out_req(ep, req); | 1224 | ep0_end_out_req(ep, req, NULL); |
1201 | break; | 1225 | break; |
1202 | default: | 1226 | default: |
1203 | ep_err(ep, "odd state %s to send me a request\n", | 1227 | ep_err(ep, "odd state %s to send me a request\n", |
@@ -1207,12 +1231,15 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, | |||
1207 | break; | 1231 | break; |
1208 | } | 1232 | } |
1209 | } else { | 1233 | } else { |
1210 | handle_ep(ep); | 1234 | if (!recursion_detected) |
1235 | handle_ep(ep); | ||
1211 | } | 1236 | } |
1212 | 1237 | ||
1213 | out: | 1238 | out: |
1214 | spin_unlock_irqrestore(&ep->lock, flags); | ||
1215 | return rc; | 1239 | return rc; |
1240 | out_locked: | ||
1241 | spin_unlock_irqrestore(&ep->lock, flags); | ||
1242 | goto out; | ||
1216 | } | 1243 | } |
1217 | 1244 | ||
1218 | /** | 1245 | /** |
@@ -1242,13 +1269,14 @@ static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) | |||
1242 | /* make sure it's actually queued on this endpoint */ | 1269 | /* make sure it's actually queued on this endpoint */ |
1243 | list_for_each_entry(req, &ep->queue, queue) { | 1270 | list_for_each_entry(req, &ep->queue, queue) { |
1244 | if (&req->req == _req) { | 1271 | if (&req->req == _req) { |
1245 | req_done(ep, req, -ECONNRESET); | ||
1246 | rc = 0; | 1272 | rc = 0; |
1247 | break; | 1273 | break; |
1248 | } | 1274 | } |
1249 | } | 1275 | } |
1250 | 1276 | ||
1251 | spin_unlock_irqrestore(&ep->lock, flags); | 1277 | spin_unlock_irqrestore(&ep->lock, flags); |
1278 | if (!rc) | ||
1279 | req_done(ep, req, -ECONNRESET, NULL); | ||
1252 | return rc; | 1280 | return rc; |
1253 | } | 1281 | } |
1254 | 1282 | ||
@@ -1445,7 +1473,6 @@ static int pxa_ep_disable(struct usb_ep *_ep) | |||
1445 | { | 1473 | { |
1446 | struct pxa_ep *ep; | 1474 | struct pxa_ep *ep; |
1447 | struct udc_usb_ep *udc_usb_ep; | 1475 | struct udc_usb_ep *udc_usb_ep; |
1448 | unsigned long flags; | ||
1449 | 1476 | ||
1450 | if (!_ep) | 1477 | if (!_ep) |
1451 | return -EINVAL; | 1478 | return -EINVAL; |
@@ -1455,10 +1482,8 @@ static int pxa_ep_disable(struct usb_ep *_ep) | |||
1455 | if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) | 1482 | if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) |
1456 | return -EINVAL; | 1483 | return -EINVAL; |
1457 | 1484 | ||
1458 | spin_lock_irqsave(&ep->lock, flags); | ||
1459 | ep->enabled = 0; | 1485 | ep->enabled = 0; |
1460 | nuke(ep, -ESHUTDOWN); | 1486 | nuke(ep, -ESHUTDOWN); |
1461 | spin_unlock_irqrestore(&ep->lock, flags); | ||
1462 | 1487 | ||
1463 | pxa_ep_fifo_flush(_ep); | 1488 | pxa_ep_fifo_flush(_ep); |
1464 | udc_usb_ep->pxa_ep = NULL; | 1489 | udc_usb_ep->pxa_ep = NULL; |
@@ -1907,8 +1932,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc, | |||
1907 | } u; | 1932 | } u; |
1908 | int i; | 1933 | int i; |
1909 | int have_extrabytes = 0; | 1934 | int have_extrabytes = 0; |
1935 | unsigned long flags; | ||
1910 | 1936 | ||
1911 | nuke(ep, -EPROTO); | 1937 | nuke(ep, -EPROTO); |
1938 | spin_lock_irqsave(&ep->lock, flags); | ||
1912 | 1939 | ||
1913 | /* | 1940 | /* |
1914 | * In the PXA320 manual, in the section about Back-to-Back setup | 1941 | * In the PXA320 manual, in the section about Back-to-Back setup |
@@ -1947,10 +1974,13 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc, | |||
1947 | /* Tell UDC to enter Data Stage */ | 1974 | /* Tell UDC to enter Data Stage */ |
1948 | ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); | 1975 | ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); |
1949 | 1976 | ||
1977 | spin_unlock_irqrestore(&ep->lock, flags); | ||
1950 | i = udc->driver->setup(&udc->gadget, &u.r); | 1978 | i = udc->driver->setup(&udc->gadget, &u.r); |
1979 | spin_lock_irqsave(&ep->lock, flags); | ||
1951 | if (i < 0) | 1980 | if (i < 0) |
1952 | goto stall; | 1981 | goto stall; |
1953 | out: | 1982 | out: |
1983 | spin_unlock_irqrestore(&ep->lock, flags); | ||
1954 | return; | 1984 | return; |
1955 | stall: | 1985 | stall: |
1956 | ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", | 1986 | ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", |
@@ -2055,13 +2085,13 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq) | |||
2055 | if (req && !ep_is_full(ep)) | 2085 | if (req && !ep_is_full(ep)) |
2056 | completed = write_ep0_fifo(ep, req); | 2086 | completed = write_ep0_fifo(ep, req); |
2057 | if (completed) | 2087 | if (completed) |
2058 | ep0_end_in_req(ep, req); | 2088 | ep0_end_in_req(ep, req, NULL); |
2059 | break; | 2089 | break; |
2060 | case OUT_DATA_STAGE: /* SET_DESCRIPTOR */ | 2090 | case OUT_DATA_STAGE: /* SET_DESCRIPTOR */ |
2061 | if (epout_has_pkt(ep) && req) | 2091 | if (epout_has_pkt(ep) && req) |
2062 | completed = read_ep0_fifo(ep, req); | 2092 | completed = read_ep0_fifo(ep, req); |
2063 | if (completed) | 2093 | if (completed) |
2064 | ep0_end_out_req(ep, req); | 2094 | ep0_end_out_req(ep, req, NULL); |
2065 | break; | 2095 | break; |
2066 | case STALL: | 2096 | case STALL: |
2067 | ep_write_UDCCSR(ep, UDCCSR0_FST); | 2097 | ep_write_UDCCSR(ep, UDCCSR0_FST); |
@@ -2091,7 +2121,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq) | |||
2091 | * Tries to transfer all pending request data into the endpoint and/or | 2121 | * Tries to transfer all pending request data into the endpoint and/or |
2092 | * transfer all pending data in the endpoint into usb requests. | 2122 | * transfer all pending data in the endpoint into usb requests. |
2093 | * | 2123 | * |
2094 | * Is always called when in_interrupt() or with ep->lock held. | 2124 | * Is always called when in_interrupt() and with ep->lock released. |
2095 | */ | 2125 | */ |
2096 | static void handle_ep(struct pxa_ep *ep) | 2126 | static void handle_ep(struct pxa_ep *ep) |
2097 | { | 2127 | { |
@@ -2100,10 +2130,17 @@ static void handle_ep(struct pxa_ep *ep) | |||
2100 | u32 udccsr; | 2130 | u32 udccsr; |
2101 | int is_in = ep->dir_in; | 2131 | int is_in = ep->dir_in; |
2102 | int loop = 0; | 2132 | int loop = 0; |
2133 | unsigned long flags; | ||
2134 | |||
2135 | spin_lock_irqsave(&ep->lock, flags); | ||
2136 | if (ep->in_handle_ep) | ||
2137 | goto recursion_detected; | ||
2138 | ep->in_handle_ep = 1; | ||
2103 | 2139 | ||
2104 | do { | 2140 | do { |
2105 | completed = 0; | 2141 | completed = 0; |
2106 | udccsr = udc_ep_readl(ep, UDCCSR); | 2142 | udccsr = udc_ep_readl(ep, UDCCSR); |
2143 | |||
2107 | if (likely(!list_empty(&ep->queue))) | 2144 | if (likely(!list_empty(&ep->queue))) |
2108 | req = list_entry(ep->queue.next, | 2145 | req = list_entry(ep->queue.next, |
2109 | struct pxa27x_request, queue); | 2146 | struct pxa27x_request, queue); |
@@ -2122,15 +2159,22 @@ static void handle_ep(struct pxa_ep *ep) | |||
2122 | if (unlikely(is_in)) { | 2159 | if (unlikely(is_in)) { |
2123 | if (likely(!ep_is_full(ep))) | 2160 | if (likely(!ep_is_full(ep))) |
2124 | completed = write_fifo(ep, req); | 2161 | completed = write_fifo(ep, req); |
2125 | if (completed) | ||
2126 | ep_end_in_req(ep, req); | ||
2127 | } else { | 2162 | } else { |
2128 | if (likely(epout_has_pkt(ep))) | 2163 | if (likely(epout_has_pkt(ep))) |
2129 | completed = read_fifo(ep, req); | 2164 | completed = read_fifo(ep, req); |
2130 | if (completed) | 2165 | } |
2131 | ep_end_out_req(ep, req); | 2166 | |
2167 | if (completed) { | ||
2168 | if (is_in) | ||
2169 | ep_end_in_req(ep, req, &flags); | ||
2170 | else | ||
2171 | ep_end_out_req(ep, req, &flags); | ||
2132 | } | 2172 | } |
2133 | } while (completed); | 2173 | } while (completed); |
2174 | |||
2175 | ep->in_handle_ep = 0; | ||
2176 | recursion_detected: | ||
2177 | spin_unlock_irqrestore(&ep->lock, flags); | ||
2134 | } | 2178 | } |
2135 | 2179 | ||
2136 | /** | 2180 | /** |
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h index e25225e26586..ff61e4866e8a 100644 --- a/drivers/usb/gadget/pxa27x_udc.h +++ b/drivers/usb/gadget/pxa27x_udc.h | |||
@@ -318,6 +318,11 @@ struct udc_usb_ep { | |||
318 | * @queue: requests queue | 318 | * @queue: requests queue |
319 | * @lock: lock to pxa_ep data (queues and stats) | 319 | * @lock: lock to pxa_ep data (queues and stats) |
320 | * @enabled: true when endpoint enabled (not stopped by gadget layer) | 320 | * @enabled: true when endpoint enabled (not stopped by gadget layer) |
321 | * @in_handle_ep: number of recursions of handle_ep() function | ||
322 | * Prevents deadlocks or infinite recursions of types : | ||
323 | * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep() | ||
324 | * or | ||
325 | * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue() | ||
321 | * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX) | 326 | * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX) |
322 | * @name: endpoint name (for trace/debug purpose) | 327 | * @name: endpoint name (for trace/debug purpose) |
323 | * @dir_in: 1 if IN endpoint, 0 if OUT endpoint | 328 | * @dir_in: 1 if IN endpoint, 0 if OUT endpoint |
@@ -346,6 +351,7 @@ struct pxa_ep { | |||
346 | spinlock_t lock; /* Protects this structure */ | 351 | spinlock_t lock; /* Protects this structure */ |
347 | /* (queues, stats) */ | 352 | /* (queues, stats) */ |
348 | unsigned enabled:1; | 353 | unsigned enabled:1; |
354 | unsigned in_handle_ep:1; | ||
349 | 355 | ||
350 | unsigned idx:5; | 356 | unsigned idx:5; |
351 | char *name; | 357 | char *name; |