Diffstat (limited to 'drivers/usb/gadget/pxa27x_udc.c')
-rw-r--r--	drivers/usb/gadget/pxa27x_udc.c	138
1 files changed, 95 insertions, 43 deletions
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 1937d8c7b433..85b0d8921eae 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
+#include <linux/slab.h>
 
 #include <asm/byteorder.h>
 #include <mach/hardware.h>
@@ -742,13 +743,17 @@ static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
  * @ep: pxa physical endpoint
  * @req: pxa request
  * @status: usb request status sent to gadget API
+ * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
  *
- * Context: ep->lock held
+ * Context: ep->lock held if flags not NULL, else ep->lock released
  *
  * Retire a pxa27x usb request. Endpoint must be locked.
  */
-static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
+static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
+	unsigned long *pflags)
 {
+	unsigned long flags;
+
 	ep_del_request(ep, req);
 	if (likely(req->req.status == -EINPROGRESS))
 		req->req.status = status;
@@ -760,38 +765,48 @@ static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
 			&req->req, status,
 			req->req.actual, req->req.length);
 
+	if (pflags)
+		spin_unlock_irqrestore(&ep->lock, *pflags);
+	local_irq_save(flags);
 	req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
+	local_irq_restore(flags);
+	if (pflags)
+		spin_lock_irqsave(&ep->lock, *pflags);
 }
 
 /**
  * ep_end_out_req - Ends endpoint OUT request
  * @ep: physical endpoint
  * @req: pxa request
+ * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
  *
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
  *
  * Ends endpoint OUT request (completes usb request).
  */
-static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
 {
 	inc_ep_stats_reqs(ep, !USB_DIR_IN);
-	req_done(ep, req, 0);
+	req_done(ep, req, 0, pflags);
 }
 
 /**
  * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
  * @ep: physical endpoint
  * @req: pxa request
+ * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
  *
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
  *
  * Ends control endpoint OUT request (completes usb request), and puts
  * control endpoint into idle state
  */
-static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
 {
 	set_ep0state(ep->dev, OUT_STATUS_STAGE);
-	ep_end_out_req(ep, req);
+	ep_end_out_req(ep, req, pflags);
 	ep0_idle(ep->dev);
 }
 
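The req_done() change above is the heart of the patch: when the caller passes its saved irq flags, the endpoint lock is dropped around the gadget's ->complete() callback and re-taken afterwards, so a completion handler that immediately queues another request does not self-deadlock on ep->lock. A minimal sketch of that pattern, using hypothetical my_ep/my_req types rather than the driver's own:

#include <linux/spinlock.h>

struct my_req { void (*complete)(struct my_req *req); };
struct my_ep  { spinlock_t lock; };

/* Complete a request without holding the endpoint lock; pflags is the
 * caller's saved irq flags, or NULL if the caller holds no lock. */
static void finish_req(struct my_ep *ep, struct my_req *req,
		unsigned long *pflags)
{
	unsigned long flags;

	if (pflags)
		spin_unlock_irqrestore(&ep->lock, *pflags);
	local_irq_save(flags);		/* callback still runs with irqs off */
	req->complete(req);		/* may re-enter the queueing path */
	local_irq_restore(flags);
	if (pflags)
		spin_lock_irqsave(&ep->lock, *pflags);
}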
@@ -799,31 +814,35 @@ static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
  * ep_end_in_req - Ends endpoint IN request
  * @ep: physical endpoint
  * @req: pxa request
+ * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
  *
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
  *
  * Ends endpoint IN request (completes usb request).
  */
-static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
 {
 	inc_ep_stats_reqs(ep, USB_DIR_IN);
-	req_done(ep, req, 0);
+	req_done(ep, req, 0, pflags);
 }
 
 /**
  * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
  * @ep: physical endpoint
  * @req: pxa request
+ * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
  *
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
  *
  * Ends control endpoint IN request (completes usb request), and puts
  * control endpoint into status state
  */
-static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+	unsigned long *pflags)
 {
 	set_ep0state(ep->dev, IN_STATUS_STAGE);
-	ep_end_in_req(ep, req);
+	ep_end_in_req(ep, req, pflags);
 }
 
 /**
@@ -831,19 +850,22 @@ static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
  * @ep: pxa endpoint
  * @status: usb request status
  *
- * Context: ep->lock held
+ * Context: ep->lock released
  *
  * Dequeues all requests on an endpoint. As a side effect, interrupts will be
  * disabled on that endpoint (because no more requests).
  */
 static void nuke(struct pxa_ep *ep, int status)
 {
 	struct pxa27x_request *req;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ep->lock, flags);
 	while (!list_empty(&ep->queue)) {
 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
-		req_done(ep, req, status);
+		req_done(ep, req, status, &flags);
 	}
+	spin_unlock_irqrestore(&ep->lock, flags);
 }
 
 /**
@@ -1123,6 +1145,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
 	int rc = 0;
 	int is_first_req;
 	unsigned length;
+	int recursion_detected;
 
 	req = container_of(_req, struct pxa27x_request, req);
 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
@@ -1152,6 +1175,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
 		return -EMSGSIZE;
 
 	spin_lock_irqsave(&ep->lock, flags);
+	recursion_detected = ep->in_handle_ep;
 
 	is_first_req = list_empty(&ep->queue);
 	ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
@@ -1161,12 +1185,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
 	if (!ep->enabled) {
 		_req->status = -ESHUTDOWN;
 		rc = -ESHUTDOWN;
-		goto out;
+		goto out_locked;
 	}
 
 	if (req->in_use) {
 		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
-		goto out;
+		goto out_locked;
 	}
 
 	length = _req->length;
@@ -1174,12 +1198,13 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
 	_req->actual = 0;
 
 	ep_add_request(ep, req);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	if (is_ep0(ep)) {
 		switch (dev->ep0state) {
 		case WAIT_ACK_SET_CONF_INTERF:
 			if (length == 0) {
-				ep_end_in_req(ep, req);
+				ep_end_in_req(ep, req, NULL);
 			} else {
 				ep_err(ep, "got a request of %d bytes while"
 					"in state WAIT_ACK_SET_CONF_INTERF\n",
@@ -1192,12 +1217,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
 		case IN_DATA_STAGE:
 			if (!ep_is_full(ep))
 				if (write_ep0_fifo(ep, req))
-					ep0_end_in_req(ep, req);
+					ep0_end_in_req(ep, req, NULL);
 			break;
 		case OUT_DATA_STAGE:
 			if ((length == 0) || !epout_has_pkt(ep))
 				if (read_ep0_fifo(ep, req))
-					ep0_end_out_req(ep, req);
+					ep0_end_out_req(ep, req, NULL);
 			break;
 		default:
 			ep_err(ep, "odd state %s to send me a request\n",
@@ -1207,12 +1232,15 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
 			break;
 		}
 	} else {
-		handle_ep(ep);
+		if (!recursion_detected)
+			handle_ep(ep);
 	}
 
 out:
-	spin_unlock_irqrestore(&ep->lock, flags);
 	return rc;
+out_locked:
+	spin_unlock_irqrestore(&ep->lock, flags);
+	goto out;
 }
 
 /**
@@ -1242,13 +1270,14 @@ static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 	/* make sure it's actually queued on this endpoint */
 	list_for_each_entry(req, &ep->queue, queue) {
 		if (&req->req == _req) {
-			req_done(ep, req, -ECONNRESET);
 			rc = 0;
 			break;
 		}
 	}
 
 	spin_unlock_irqrestore(&ep->lock, flags);
+	if (!rc)
+		req_done(ep, req, -ECONNRESET, NULL);
 	return rc;
 }
 
@@ -1445,7 +1474,6 @@ static int pxa_ep_disable(struct usb_ep *_ep)
 {
 	struct pxa_ep *ep;
 	struct udc_usb_ep *udc_usb_ep;
-	unsigned long flags;
 
 	if (!_ep)
 		return -EINVAL;
@@ -1455,10 +1483,8 @@ static int pxa_ep_disable(struct usb_ep *_ep)
 	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
 		return -EINVAL;
 
-	spin_lock_irqsave(&ep->lock, flags);
 	ep->enabled = 0;
 	nuke(ep, -ESHUTDOWN);
-	spin_unlock_irqrestore(&ep->lock, flags);
 
 	pxa_ep_fifo_flush(_ep);
 	udc_usb_ep->pxa_ep = NULL;
@@ -1524,7 +1550,7 @@ static int pxa_udc_get_frame(struct usb_gadget *_gadget)
  * pxa_udc_wakeup - Force udc device out of suspend
  * @_gadget: usb gadget
  *
- * Returns 0 if succesfull, error code otherwise
+ * Returns 0 if successfull, error code otherwise
  */
 static int pxa_udc_wakeup(struct usb_gadget *_gadget)
 {
@@ -1907,8 +1933,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
 	} u;
 	int i;
 	int have_extrabytes = 0;
+	unsigned long flags;
 
 	nuke(ep, -EPROTO);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	/*
 	 * In the PXA320 manual, in the section about Back-to-Back setup
@@ -1947,10 +1975,13 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
 	/* Tell UDC to enter Data Stage */
 	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
 
+	spin_unlock_irqrestore(&ep->lock, flags);
 	i = udc->driver->setup(&udc->gadget, &u.r);
+	spin_lock_irqsave(&ep->lock, flags);
 	if (i < 0)
 		goto stall;
 out:
+	spin_unlock_irqrestore(&ep->lock, flags);
 	return;
 stall:
 	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
@@ -2055,13 +2086,13 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
 		if (req && !ep_is_full(ep))
 			completed = write_ep0_fifo(ep, req);
 		if (completed)
-			ep0_end_in_req(ep, req);
+			ep0_end_in_req(ep, req, NULL);
 		break;
 	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
 		if (epout_has_pkt(ep) && req)
 			completed = read_ep0_fifo(ep, req);
 		if (completed)
-			ep0_end_out_req(ep, req);
+			ep0_end_out_req(ep, req, NULL);
 		break;
 	case STALL:
 		ep_write_UDCCSR(ep, UDCCSR0_FST);
@@ -2091,7 +2122,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
 * Tries to transfer all pending request data into the endpoint and/or
 * transfer all pending data in the endpoint into usb requests.
 *
- * Is always called when in_interrupt() or with ep->lock held.
+ * Is always called when in_interrupt() and with ep->lock released.
 */
 static void handle_ep(struct pxa_ep *ep)
 {
@@ -2100,10 +2131,17 @@ static void handle_ep(struct pxa_ep *ep)
 	u32 udccsr;
 	int is_in = ep->dir_in;
 	int loop = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ep->lock, flags);
+	if (ep->in_handle_ep)
+		goto recursion_detected;
+	ep->in_handle_ep = 1;
 
 	do {
 		completed = 0;
 		udccsr = udc_ep_readl(ep, UDCCSR);
+
 		if (likely(!list_empty(&ep->queue)))
 			req = list_entry(ep->queue.next,
 					struct pxa27x_request, queue);
@@ -2122,15 +2160,22 @@ static void handle_ep(struct pxa_ep *ep)
 		if (unlikely(is_in)) {
 			if (likely(!ep_is_full(ep)))
 				completed = write_fifo(ep, req);
-			if (completed)
-				ep_end_in_req(ep, req);
 		} else {
 			if (likely(epout_has_pkt(ep)))
 				completed = read_fifo(ep, req);
-			if (completed)
-				ep_end_out_req(ep, req);
+		}
+
+		if (completed) {
+			if (is_in)
+				ep_end_in_req(ep, req, &flags);
+			else
+				ep_end_out_req(ep, req, &flags);
 		}
 	} while (completed);
+
+	ep->in_handle_ep = 0;
+recursion_detected:
+	spin_unlock_irqrestore(&ep->lock, flags);
 }
 
 /**
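Together with the recursion_detected test added to pxa_ep_queue() above, handle_ep() now takes ep->lock itself and refuses to nest by setting ep->in_handle_ep for the duration of the service loop; a request queued from a completion callback is then picked up by the outer loop instead of recursing. A standalone sketch of that guard, with hypothetical names rather than the driver's code:

#include <linux/spinlock.h>

struct my_ep { spinlock_t lock; int in_service; };

/* IRQ-driven service loop that must not run twice on the same stack:
 * a completion callback invoked from inside it may try to call it again. */
static void service_ep(struct my_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->in_service)		/* already active further up the stack */
		goto out_unlock;
	ep->in_service = 1;

	/* ... move data between the FIFO and queued requests; completions
	 * temporarily drop ep->lock as in the finish_req() sketch above ... */

	ep->in_service = 0;
out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);
}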
@@ -2218,9 +2263,13 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
 			continue;
 
 		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
-		ep = &udc->pxa_ep[i];
-		ep->stats.irqs++;
-		handle_ep(ep);
+
+		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+		if (i < ARRAY_SIZE(udc->pxa_ep)) {
+			ep = &udc->pxa_ep[i];
+			ep->stats.irqs++;
+			handle_ep(ep);
+		}
 	}
 
 	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
@@ -2228,9 +2277,12 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
 		if (!(udcisr1 & UDCISR_INT_MASK))
 			continue;
 
-		ep = &udc->pxa_ep[i];
-		ep->stats.irqs++;
-		handle_ep(ep);
+		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+		if (i < ARRAY_SIZE(udc->pxa_ep)) {
+			ep = &udc->pxa_ep[i];
+			ep->stats.irqs++;
+			handle_ep(ep);
+		}
 	}
 
 }
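The two irq_handle_data() hunks above add a defensive bound check: the endpoint index decoded from UDCISR0/UDCISR1 is validated against the size of udc->pxa_ep[] before use, with a WARN_ON so an unexpected index is reported rather than silently indexing past the array. Schematically, with hypothetical names and array size:

#include <linux/bug.h>
#include <linux/kernel.h>

struct my_ep { unsigned long irqs; };
static struct my_ep eps[24];	/* assumed number of hardware endpoints */

/* Validate an index derived from a hardware status register before use. */
static void count_ep_irq(unsigned int idx)
{
	WARN_ON(idx >= ARRAY_SIZE(eps));	/* make the bad index visible */
	if (idx < ARRAY_SIZE(eps))		/* and never dereference it */
		eps[idx].irqs++;
}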
@@ -2439,7 +2491,7 @@ static int __init pxa_udc_probe(struct platform_device *pdev)
 	}
 
 	retval = -ENOMEM;
-	udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	udc->regs = ioremap(regs->start, resource_size(regs));
 	if (!udc->regs) {
 		dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
 		goto err_map;