path: root/drivers/usb/core
author     Alan Stern <stern@rowland.harvard.edu>    2010-04-02 13:27:28 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>       2010-05-20 16:21:37 -0400
commit     ff9c895f07d36193c75533bda8193bde8ca99d02 (patch)
tree       386ca8e37734c4810e59a55eaba92e4e88275d14 /drivers/usb/core
parent     0ff8d1b3c858ea7c8daa54f7577971a76d04d283 (diff)
USB: fix usbmon and DMA mapping for scatter-gather URBs
This patch (as1368) fixes a rather obscure bug in usbmon: When tracing URBs sent by the scatter-gather library, it accesses the data buffers while they are still mapped for DMA.

The solution is to move the mapping and unmapping out of the s-g library and into the usual place in hcd.c. This requires the addition of new URB flag bits to describe the kind of mapping needed, since we have to call dma_map_sg() if the HCD supports native scatter-gather operation and dma_map_page() if it doesn't. The nice thing about having the new flags is that they simplify the testing for unmapping.

The patch removes the only caller of usb_buffer_[un]map_sg(), so those functions are #if'ed out. A later patch will remove them entirely.

As a result of this change, urb->sg will be set in situations where it wasn't set previously. Hence the xhci and whci drivers are adjusted to test urb->num_sgs instead, which retains its original meaning and is nonzero only when the HCD has to handle a scatterlist.

Finally, even when a submission error occurs we don't want to hand URBs to usbmon before they are unmapped. The submission path is rearranged so that map_urb_for_dma() is called only for non-root-hub URBs and unmap_urb_for_dma() is called immediately after a submission error. This simplifies the error handling.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
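The corresponding xhci/whci adjustment is outside this diffstat (which is limited to drivers/usb/core), but the idea it relies on can be sketched in a few lines. This is an illustrative sketch only, not the actual host controller driver code:

	/*
	 * Illustrative sketch, not taken from xhci/whci: after this patch
	 * urb->sg may be set even when the HCD does no native scatter-gather,
	 * so an HCD should key off urb->num_sgs, which remains nonzero only
	 * when the HCD really has to walk a scatterlist itself.
	 */
	#include <linux/usb.h>

	static int hcd_handles_scatterlist(const struct urb *urb)
	{
		return urb->num_sgs != 0;	/* not: urb->sg != NULL */
	}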
Diffstat (limited to 'drivers/usb/core')
-rw-r--r--  drivers/usb/core/hcd.c      | 169
-rw-r--r--  drivers/usb/core/message.c  |  45
-rw-r--r--  drivers/usb/core/urb.c      |   9
-rw-r--r--  drivers/usb/core/usb.c      |   4
4 files changed, 125 insertions(+), 102 deletions(-)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 38d4700926f7..6a05e6934455 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1259,6 +1259,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
 	*dma_handle = 0;
 }
 
+static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+	enum dma_data_direction dir;
+
+	if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
+		dma_unmap_single(hcd->self.controller,
+				urb->setup_dma,
+				sizeof(struct usb_ctrlrequest),
+				DMA_TO_DEVICE);
+	else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
+		hcd_free_coherent(urb->dev->bus,
+				&urb->setup_dma,
+				(void **) &urb->setup_packet,
+				sizeof(struct usb_ctrlrequest),
+				DMA_TO_DEVICE);
+
+	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	if (urb->transfer_flags & URB_DMA_MAP_SG)
+		dma_unmap_sg(hcd->self.controller,
+				urb->sg->sg,
+				urb->num_sgs,
+				dir);
+	else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
+		dma_unmap_page(hcd->self.controller,
+				urb->transfer_dma,
+				urb->transfer_buffer_length,
+				dir);
+	else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
+		dma_unmap_single(hcd->self.controller,
+				urb->transfer_dma,
+				urb->transfer_buffer_length,
+				dir);
+	else if (urb->transfer_flags & URB_MAP_LOCAL)
+		hcd_free_coherent(urb->dev->bus,
+				&urb->transfer_dma,
+				&urb->transfer_buffer,
+				urb->transfer_buffer_length,
+				dir);
+
+	/* Make it safe to call this routine more than once */
+	urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+			URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
+			URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
+}
+
 static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 		gfp_t mem_flags)
 {
@@ -1270,8 +1315,6 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 	 * unless it uses pio or talks to another transport,
 	 * or uses the provided scatter gather list for bulk.
 	 */
-	if (is_root_hub(urb->dev))
-		return 0;
 
 	if (usb_endpoint_xfer_control(&urb->ep->desc)
 	    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
@@ -1284,6 +1327,7 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 			if (dma_mapping_error(hcd->self.controller,
 						urb->setup_dma))
 				return -EAGAIN;
+			urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
 		} else if (hcd->driver->flags & HCD_LOCAL_MEM)
 			ret = hcd_alloc_coherent(
 				urb->dev->bus, mem_flags,
@@ -1291,20 +1335,57 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 				(void **)&urb->setup_packet,
 				sizeof(struct usb_ctrlrequest),
 				DMA_TO_DEVICE);
+		if (ret)
+			return ret;
+		urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
 	}
 
 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	if (ret == 0 && urb->transfer_buffer_length != 0
+	if (urb->transfer_buffer_length != 0
 	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
 		if (hcd->self.uses_dma) {
-			urb->transfer_dma = dma_map_single (
-					hcd->self.controller,
-					urb->transfer_buffer,
-					urb->transfer_buffer_length,
-					dir);
-			if (dma_mapping_error(hcd->self.controller,
+			if (urb->num_sgs) {
+				int n = dma_map_sg(
+						hcd->self.controller,
+						urb->sg->sg,
+						urb->num_sgs,
+						dir);
+				if (n <= 0)
+					ret = -EAGAIN;
+				else
+					urb->transfer_flags |= URB_DMA_MAP_SG;
+				if (n != urb->num_sgs) {
+					urb->num_sgs = n;
+					urb->transfer_flags |=
+							URB_DMA_SG_COMBINED;
+				}
+			} else if (urb->sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *) urb->sg;
+				urb->transfer_dma = dma_map_page(
+						hcd->self.controller,
+						sg_page(sg),
+						sg->offset,
+						urb->transfer_buffer_length,
+						dir);
+				if (dma_mapping_error(hcd->self.controller,
						urb->transfer_dma))
-				return -EAGAIN;
+					ret = -EAGAIN;
+				else
+					urb->transfer_flags |= URB_DMA_MAP_PAGE;
+			} else {
+				urb->transfer_dma = dma_map_single(
+						hcd->self.controller,
+						urb->transfer_buffer,
+						urb->transfer_buffer_length,
+						dir);
+				if (dma_mapping_error(hcd->self.controller,
+						urb->transfer_dma))
+					ret = -EAGAIN;
+				else
+					urb->transfer_flags |= URB_DMA_MAP_SINGLE;
+			}
 		} else if (hcd->driver->flags & HCD_LOCAL_MEM) {
 			ret = hcd_alloc_coherent(
 				urb->dev->bus, mem_flags,
@@ -1312,55 +1393,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 				&urb->transfer_buffer,
 				urb->transfer_buffer_length,
 				dir);
-
-			if (ret && usb_endpoint_xfer_control(&urb->ep->desc)
-			    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
-				hcd_free_coherent(urb->dev->bus,
-					&urb->setup_dma,
-					(void **)&urb->setup_packet,
-					sizeof(struct usb_ctrlrequest),
-					DMA_TO_DEVICE);
+			if (ret == 0)
+				urb->transfer_flags |= URB_MAP_LOCAL;
 		}
+		if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
+				URB_SETUP_MAP_LOCAL)))
+			unmap_urb_for_dma(hcd, urb);
 	}
 	return ret;
 }
 
-static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
-	enum dma_data_direction dir;
-
-	if (is_root_hub(urb->dev))
-		return;
-
-	if (usb_endpoint_xfer_control(&urb->ep->desc)
-	    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
-		if (hcd->self.uses_dma)
-			dma_unmap_single(hcd->self.controller, urb->setup_dma,
-					sizeof(struct usb_ctrlrequest),
-					DMA_TO_DEVICE);
-		else if (hcd->driver->flags & HCD_LOCAL_MEM)
-			hcd_free_coherent(urb->dev->bus, &urb->setup_dma,
-					(void **)&urb->setup_packet,
-					sizeof(struct usb_ctrlrequest),
-					DMA_TO_DEVICE);
-	}
-
-	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	if (urb->transfer_buffer_length != 0
-	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
-		if (hcd->self.uses_dma)
-			dma_unmap_single(hcd->self.controller,
-					urb->transfer_dma,
-					urb->transfer_buffer_length,
-					dir);
-		else if (hcd->driver->flags & HCD_LOCAL_MEM)
-			hcd_free_coherent(urb->dev->bus, &urb->transfer_dma,
-					&urb->transfer_buffer,
-					urb->transfer_buffer_length,
-					dir);
-	}
-}
-
 /*-------------------------------------------------------------------------*/
 
 /* may be called in any context with a valid urb->dev usecount
@@ -1389,21 +1431,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
 	 * URBs must be submitted in process context with interrupts
 	 * enabled.
 	 */
-	status = map_urb_for_dma(hcd, urb, mem_flags);
-	if (unlikely(status)) {
-		usbmon_urb_submit_error(&hcd->self, urb, status);
-		goto error;
-	}
 
-	if (is_root_hub(urb->dev))
+	if (is_root_hub(urb->dev)) {
 		status = rh_urb_enqueue(hcd, urb);
-	else
-		status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+	} else {
+		status = map_urb_for_dma(hcd, urb, mem_flags);
+		if (likely(status == 0)) {
+			status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+			if (unlikely(status))
+				unmap_urb_for_dma(hcd, urb);
+		}
+	}
 
 	if (unlikely(status)) {
 		usbmon_urb_submit_error(&hcd->self, urb, status);
-		unmap_urb_for_dma(hcd, urb);
-error:
 		urb->hcpriv = NULL;
 		INIT_LIST_HEAD(&urb->urb_list);
 		atomic_dec(&urb->use_count);
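The new URB_* bits tested above are core-internal transfer_flags values defined in include/linux/usb.h, which is outside this diffstat. For orientation they look roughly like the following; the names match the patch, but the bit values shown here are illustrative rather than quoted from the header:

	/* Core-internal transfer_flags bits -- bit values below are illustrative */
	#define URB_DMA_MAP_SINGLE	0x00010000	/* non-SG dma_map_single() */
	#define URB_DMA_MAP_PAGE	0x00020000	/* SG mapped with dma_map_page() */
	#define URB_DMA_MAP_SG		0x00040000	/* native SG, dma_map_sg() */
	#define URB_MAP_LOCAL		0x00080000	/* HCD-local (coherent) memory */
	#define URB_SETUP_MAP_SINGLE	0x00100000	/* setup packet dma_map_single() */
	#define URB_SETUP_MAP_LOCAL	0x00200000	/* setup packet in HCD-local memory */
	#define URB_DMA_SG_COMBINED	0x00400000	/* DMA layer merged some SG entries */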
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 619c44fb8a96..79d1cdf4a635 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io)
 		kfree(io->urbs);
 		io->urbs = NULL;
 	}
-	if (io->dev->dev.dma_mask != NULL)
-		usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
-				io->sg, io->nents);
 	io->dev = NULL;
 }
 
@@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 {
 	int i;
 	int urb_flags;
-	int dma;
 	int use_sg;
 
 	if (!io || !dev || !sg
@@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 	io->pipe = pipe;
 	io->sg = sg;
 	io->nents = nents;
-
-	/* not all host controllers use DMA (like the mainstream pci ones);
-	 * they can use PIO (sl811) or be software over another transport.
-	 */
-	dma = (dev->dev.dma_mask != NULL);
-	if (dma)
-		io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
-				sg, nents);
-	else
-		io->entries = nents;
+	io->entries = nents;
 
 	/* initialize all the urbs we'll use */
-	if (io->entries <= 0)
-		return io->entries;
-
 	if (dev->bus->sg_tablesize > 0) {
 		io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
 		use_sg = true;
@@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 		goto nomem;
 
 	urb_flags = 0;
-	if (dma)
-		urb_flags |= URB_NO_TRANSFER_DMA_MAP;
 	if (usb_pipein(pipe))
 		urb_flags |= URB_SHORT_NOT_OK;
 
@@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 
 		io->urbs[0]->complete = sg_complete;
 		io->urbs[0]->context = io;
+
 		/* A length of zero means transfer the whole sg list */
 		io->urbs[0]->transfer_buffer_length = length;
 		if (length == 0) {
 			for_each_sg(sg, sg, io->entries, i) {
 				io->urbs[0]->transfer_buffer_length +=
-					sg_dma_len(sg);
+					sg->length;
 			}
 		}
 		io->urbs[0]->sg = io;
@@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 			io->urbs[i]->context = io;
 
 			/*
-			 * Some systems need to revert to PIO when DMA is temporarily
-			 * unavailable. For their sakes, both transfer_buffer and
-			 * transfer_dma are set when possible.
-			 *
-			 * Note that if IOMMU coalescing occurred, we cannot
-			 * trust sg_page anymore, so check if S/G list shrunk.
+			 * Some systems can't use DMA; they use PIO instead.
+			 * For their sakes, transfer_buffer is set whenever
+			 * possible.
 			 */
-			if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
+			if (!PageHighMem(sg_page(sg)))
 				io->urbs[i]->transfer_buffer = sg_virt(sg);
 			else
 				io->urbs[i]->transfer_buffer = NULL;
 
-			if (dma) {
-				io->urbs[i]->transfer_dma = sg_dma_address(sg);
-				len = sg_dma_len(sg);
-			} else {
-				/* hc may use _only_ transfer_buffer */
-				len = sg->length;
-			}
-
+			len = sg->length;
 			if (length) {
 				len = min_t(unsigned, len, length);
 				length -= len;
@@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 				io->entries = i + 1;
 			}
 			io->urbs[i]->transfer_buffer_length = len;
+
+			io->urbs[i]->sg = (struct usb_sg_request *) sg;
 		}
 		io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
 	}
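For reference, the calling convention of the s-g library is unchanged by this patch; only the point at which the scatterlist gets DMA-mapped moves from usb_sg_init() into the submission path in hcd.c. A minimal usage sketch (hypothetical driver code, not part of this patch):

	#include <linux/usb.h>
	#include <linux/scatterlist.h>
	#include <linux/gfp.h>

	/* Hand a scatterlist to the USB s-g library and wait for completion. */
	static int xfer_sg_list(struct usb_device *udev, unsigned int pipe,
				struct scatterlist *sg, int nents, size_t length)
	{
		struct usb_sg_request io;
		int ret;

		ret = usb_sg_init(&io, udev, pipe, 0, sg, nents, length, GFP_KERNEL);
		if (ret)
			return ret;

		usb_sg_wait(&io);	/* submits the URBs and blocks until done */
		return io.status;	/* 0 on success, otherwise a USB error code */
	}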
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 2532a0917f8c..a760e46871c5 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -333,9 +333,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 		is_out = usb_endpoint_dir_out(&ep->desc);
 	}
 
-	/* Cache the direction for later use */
-	urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
-			(is_out ? URB_DIR_OUT : URB_DIR_IN);
+	/* Clear the internal flags and cache the direction for later use */
+	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
+			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
+			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+			URB_DMA_SG_COMBINED);
+	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
 
 	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
 	    dev->state < USB_STATE_CONFIGURED)
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 097172e2ba06..8180ce533ebf 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -881,6 +881,7 @@ void usb_buffer_unmap(struct urb *urb)
 EXPORT_SYMBOL_GPL(usb_buffer_unmap);
 #endif /* 0 */
 
+#if 0
 /**
  * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
  * @dev: device to which the scatterlist will be mapped
@@ -924,6 +925,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
 			is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
+#endif
 
 /* XXX DISABLED, no users currently. If you wish to re-enable this
  * XXX please determine whether the sync is to transfer ownership of
@@ -960,6 +962,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
 EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
 #endif
 
+#if 0
 /**
  * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
  * @dev: device to which the scatterlist will be mapped
@@ -985,6 +988,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
 			is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
+#endif
 
 /* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
 #ifdef MODULE