 drivers/usb/wusbcore/wa-hc.h    |  10 ++++++++--
 drivers/usb/wusbcore/wa-rpipe.c |   9 +++++----
 drivers/usb/wusbcore/wa-xfer.c  | 154 +++++++++++++++++++++++++++++++--------
 3 files changed, 152 insertions(+), 21 deletions(-)
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index b44aca3f25dd..41afaa6d01d2 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -117,6 +117,7 @@ struct wa_rpipe {
 	struct wahc *wa;
 	spinlock_t seg_lock;
 	struct list_head seg_list;
+	struct list_head list_node;
 	atomic_t segs_available;
 	u8 buffer[1];	/* For reads/writes on USB */
 };
@@ -183,7 +184,8 @@ struct wahc {
 
 	u16 rpipes;
 	unsigned long *rpipe_bm;	/* rpipe usage bitmap */
-	spinlock_t rpipe_bm_lock;	/* protect rpipe_bm */
+	struct list_head rpipe_delayed_list;	/* delayed RPIPES. */
+	spinlock_t rpipe_lock;		/* protect rpipe_bm and delayed list */
 	struct mutex rpipe_mutex;	/* assigning resources to endpoints */
 
 	/*
@@ -201,6 +203,8 @@ struct wahc {
 	void *dti_buf;
 	size_t dti_buf_size;
 
+	unsigned long dto_in_use;	/* protect dto endpoint serialization. */
+
 	s32 status;			/* For reading status */
 
 	struct list_head xfer_list;
@@ -253,7 +257,8 @@ static inline void wa_nep_disarm(struct wahc *wa)
 /* RPipes */
 static inline void wa_rpipe_init(struct wahc *wa)
 {
-	spin_lock_init(&wa->rpipe_bm_lock);
+	INIT_LIST_HEAD(&wa->rpipe_delayed_list);
+	spin_lock_init(&wa->rpipe_lock);
 	mutex_init(&wa->rpipe_mutex);
 }
 
@@ -270,6 +275,7 @@ static inline void wa_init(struct wahc *wa)
 	spin_lock_init(&wa->xfer_list_lock);
 	INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
 	INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
+	wa->dto_in_use = 0;
 	atomic_set(&wa->xfer_id_count, 1);
 }
 
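
The new dto_in_use word is used as a single-bit try-lock rather than a sleeping
lock, so a submit path that cannot get the DTO endpoint can bail out and queue
itself instead of blocking (the callers run under spinlocks). A minimal sketch
of the same pattern with hypothetical names (struct foo, foo_try_get and
foo_put are not part of this driver):

/* Sketch: single-bit try-lock, as used for wahc.dto_in_use above. */
#include <linux/bitops.h>
#include <linux/types.h>

struct foo {
	unsigned long busy;	/* bit 0 set => resource owned */
};

/* Non-blocking: returns true if the caller now owns the resource. */
static inline bool foo_try_get(struct foo *f)
{
	/* test_and_set_bit() atomically sets bit 0 and returns its old
	 * value; a zero return means we won the race. */
	return test_and_set_bit(0, &f->busy) == 0;
}

static inline void foo_put(struct foo *f)
{
	/* clear_bit_unlock() provides release ordering, so work done
	 * while holding the bit is visible to the next owner. */
	clear_bit_unlock(0, &f->busy);
}
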
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 554b16bd22f1..50de1d2c7b72 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -143,17 +143,18 @@ static void rpipe_init(struct wa_rpipe *rpipe)
 	kref_init(&rpipe->refcnt);
 	spin_lock_init(&rpipe->seg_lock);
 	INIT_LIST_HEAD(&rpipe->seg_list);
+	INIT_LIST_HEAD(&rpipe->list_node);
 }
 
 static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+	spin_lock_irqsave(&wa->rpipe_lock, flags);
 	rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
 	if (rpipe_idx < wa->rpipes)
 		set_bit(rpipe_idx, wa->rpipe_bm);
-	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
 
 	return rpipe_idx;
 }
@@ -162,9 +163,9 @@ static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+	spin_lock_irqsave(&wa->rpipe_lock, flags);
 	clear_bit(rpipe_idx, wa->rpipe_bm);
-	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
 }
 
 void rpipe_destroy(struct kref *_rpipe)
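
For context, rpipe_get_idx()/rpipe_put_idx() above are a small bitmap ID
allocator; the patch only renames the lock that guards it (rpipe_bm_lock
becomes rpipe_lock, which now also protects rpipe_delayed_list). A
self-contained sketch of that allocator pattern, with hypothetical names:

/* Sketch: spinlock-protected bitmap ID allocator (hypothetical names). */
#include <linux/bitops.h>
#include <linux/spinlock.h>

/* Claim the first free id at or after 'start'; returns 'nbits' if full. */
static unsigned id_alloc(unsigned long *bm, unsigned nbits, unsigned start,
			 spinlock_t *lock)
{
	unsigned long flags;
	unsigned id;

	spin_lock_irqsave(lock, flags);
	id = find_next_zero_bit(bm, nbits, start);
	if (id < nbits)
		set_bit(id, bm);	/* mark it in use */
	spin_unlock_irqrestore(lock, flags);
	return id;			/* caller must check id < nbits */
}

/* Return an id to the pool. */
static void id_free(unsigned long *bm, unsigned id, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	clear_bit(id, bm);
	spin_unlock_irqrestore(lock, flags);
}
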
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index fd00e1aaccf4..f1e9a386beca 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -107,6 +107,7 @@ enum wa_seg_status {
 };
 
 static void wa_xfer_delayed_run(struct wa_rpipe *);
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
 
 /*
  * Life cycle governed by 'struct urb' (the refcount of the struct is
@@ -204,6 +205,59 @@ static void wa_xfer_put(struct wa_xfer *xfer)
 }
 
 /*
+ * Try to get exclusive access to the DTO endpoint resource.  Return true
+ * if successful.
+ */
+static inline int __wa_dto_try_get(struct wahc *wa)
+{
+	return (test_and_set_bit(0, &wa->dto_in_use) == 0);
+}
+
+/* Release the DTO endpoint resource. */
+static inline void __wa_dto_put(struct wahc *wa)
+{
+	clear_bit_unlock(0, &wa->dto_in_use);
+}
+
+/* Service RPIPEs that are waiting on the DTO resource. */
+static void wa_check_for_delayed_rpipes(struct wahc *wa)
+{
+	unsigned long flags;
+	int dto_waiting = 0;
+	struct wa_rpipe *rpipe;
+
+	spin_lock_irqsave(&wa->rpipe_lock, flags);
+	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
+		rpipe = list_first_entry(&wa->rpipe_delayed_list,
+				struct wa_rpipe, list_node);
+		__wa_xfer_delayed_run(rpipe, &dto_waiting);
+		/* remove this RPIPE from the list if it is not waiting. */
+		if (!dto_waiting) {
+			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
+				__func__,
+				le16_to_cpu(rpipe->descr.wRPipeIndex));
+			list_del_init(&rpipe->list_node);
+		}
+	}
+	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/* add this RPIPE to the end of the delayed RPIPE list. */
+static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wa->rpipe_lock, flags);
+	/* add rpipe to the list if it is not already on it. */
+	if (list_empty(&rpipe->list_node)) {
+		pr_debug("%s: adding RPIPE %d to the delayed list.\n",
+			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
+		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
+	}
+	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/*
  * xfer is referenced
  *
  * xfer->lock has to be unlocked
@@ -1099,9 +1153,13 @@ error_setup_sizes:
  * rpipe->seg_lock is held!
  */
 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
-		struct wa_seg *seg)
+		struct wa_seg *seg, int *dto_done)
 {
 	int result;
+
+	/* default to done unless we encounter a multi-frame isoc segment. */
+	*dto_done = 1;
+
 	/* submit the transfer request. */
 	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
 	if (result < 0) {
@@ -1142,28 +1200,34 @@ error_seg_submit:
 }
 
 /*
- * Execute more queued request segments until the maximum concurrent allowed
+ * Execute more queued request segments until the maximum concurrent allowed.
+ * Return true if the DTO resource was acquired and released.
  *
  * The ugly unlock/lock sequence on the error path is needed as the
  * xfer->lock normally nests the seg_lock and not viceversa.
- *
  */
-static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
 {
-	int result;
+	int result, dto_acquired = 0, dto_done = 0;
 	struct device *dev = &rpipe->wa->usb_iface->dev;
 	struct wa_seg *seg;
 	struct wa_xfer *xfer;
 	unsigned long flags;
 
+	*dto_waiting = 0;
+
 	spin_lock_irqsave(&rpipe->seg_lock, flags);
 	while (atomic_read(&rpipe->segs_available) > 0
-			&& !list_empty(&rpipe->seg_list)) {
+			&& !list_empty(&rpipe->seg_list)
+			&& (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
 		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
 				list_node);
 		list_del(&seg->list_node);
 		xfer = seg->xfer;
-		result = __wa_seg_submit(rpipe, xfer, seg);
+		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
+		/* release the dto resource if this RPIPE is done with it. */
+		if (dto_done)
+			__wa_dto_put(rpipe->wa);
 		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
 			xfer, wa_xfer_id(xfer), seg->index,
 			atomic_read(&rpipe->segs_available), result);
@@ -1176,7 +1240,37 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
 			spin_lock_irqsave(&rpipe->seg_lock, flags);
 		}
 	}
+	/*
+	 * Mark this RPIPE as waiting if dto was not acquired, there are
+	 * delayed segs and no active transfers to wake us up later.
+	 */
+	if (!dto_acquired && !list_empty(&rpipe->seg_list)
+		&& (atomic_read(&rpipe->segs_available) ==
+			le16_to_cpu(rpipe->descr.wRequests)))
+		*dto_waiting = 1;
+
 	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+	return dto_done;
+}
+
+static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+{
+	int dto_waiting;
+	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
+
+	/*
+	 * If this RPIPE is waiting on the DTO resource, add it to the tail of
+	 * the waiting list.
+	 * Otherwise, if the WA DTO resource was acquired and released by
+	 * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
+	 * DTO and failed during that time.  Check the delayed list and process
+	 * any waiters.  Start searching from the next RPIPE index.
+	 */
+	if (dto_waiting)
+		wa_add_delayed_rpipe(rpipe->wa, rpipe);
+	else if (dto_done)
+		wa_check_for_delayed_rpipes(rpipe->wa);
 }
 
 /*
@@ -1188,7 +1282,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
  */
 static int __wa_xfer_submit(struct wa_xfer *xfer)
 {
-	int result;
+	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
 	struct wahc *wa = xfer->wa;
 	struct device *dev = &wa->usb_iface->dev;
 	unsigned cnt;
@@ -1207,26 +1301,56 @@ static int __wa_xfer_submit(struct wa_xfer *xfer)
 	result = 0;
 	spin_lock_irqsave(&rpipe->seg_lock, flags);
 	for (cnt = 0; cnt < xfer->segs; cnt++) {
+		int delay_seg = 1;
+
 		available = atomic_read(&rpipe->segs_available);
 		empty = list_empty(&rpipe->seg_list);
 		seg = xfer->seg[cnt];
 		dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u (%s)\n",
 			xfer, wa_xfer_id(xfer), cnt, available, empty,
 			available == 0 || !empty ? "delayed" : "submitted");
-		if (available == 0 || !empty) {
+		if (available && empty) {
+			/*
+			 * Only attempt to acquire DTO if we have a segment
+			 * to send.
+			 */
+			dto_acquired = __wa_dto_try_get(rpipe->wa);
+			if (dto_acquired) {
+				delay_seg = 0;
+				result = __wa_seg_submit(rpipe, xfer, seg,
+							&dto_done);
+				if (dto_done)
+					__wa_dto_put(rpipe->wa);
+
+				if (result < 0) {
+					__wa_xfer_abort(xfer);
+					goto error_seg_submit;
+				}
+			}
+		}
+
+		if (delay_seg) {
 			seg->status = WA_SEG_DELAYED;
 			list_add_tail(&seg->list_node, &rpipe->seg_list);
-		} else {
-			result = __wa_seg_submit(rpipe, xfer, seg);
-			if (result < 0) {
-				__wa_xfer_abort(xfer);
-				goto error_seg_submit;
-			}
 		}
 		xfer->segs_submitted++;
 	}
 error_seg_submit:
+	/*
+	 * Mark this RPIPE as waiting if dto was not acquired, there are
+	 * delayed segs and no active transfers to wake us up later.
+	 */
+	if (!dto_acquired && !list_empty(&rpipe->seg_list)
+		&& (atomic_read(&rpipe->segs_available) ==
+			le16_to_cpu(rpipe->descr.wRequests)))
+		dto_waiting = 1;
 	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+	if (dto_waiting)
+		wa_add_delayed_rpipe(rpipe->wa, rpipe);
+	else if (dto_done)
+		wa_check_for_delayed_rpipes(rpipe->wa);
+
 	return result;
 }
 
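Taken together, the wa-xfer.c changes implement a try-lock/park/hand-off
protocol for the single DTO endpoint: a submitter either acquires DTO and
sends a segment, keeps DTO across URB boundaries for a multi-frame isoc
segment (*dto_done == 0), or parks its RPIPE on rpipe_delayed_list; an RPIPE
only parks itself when it also has no segments in flight that would re-run it
later (segs_available == wRequests). Whoever releases DTO then drains the
delayed list so parked RPIPEs are never stranded. A condensed sketch of that
control flow (send_one_segment is a hypothetical stand-in for
__wa_seg_submit):

/* Sketch of the DTO hand-off added by this patch (hypothetical helper). */
static void submit_or_park(struct wahc *wa, struct wa_rpipe *rpipe)
{
	int dto_done;

	if (!__wa_dto_try_get(wa)) {
		/* DTO busy: queue this RPIPE; the current owner will
		 * service the delayed list when it releases DTO. */
		wa_add_delayed_rpipe(wa, rpipe);
		return;
	}
	send_one_segment(rpipe, &dto_done);
	if (dto_done) {
		__wa_dto_put(wa);
		/* Another RPIPE may have failed __wa_dto_try_get() while
		 * we held the bit; give any waiters a chance to run. */
		wa_check_for_delayed_rpipes(wa);
	}
	/* else: a multi-frame isoc segment keeps DTO until its last frame
	 * goes out (the release path is outside this diff). */
}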