author     Pierre Ossman <drzeus@drzeus.cx>   2008-06-28 12:28:51 -0400
committer  Pierre Ossman <drzeus@drzeus.cx>   2008-07-15 08:14:44 -0400
commit     2134a922c6e75c779983cad5d8aae832275f5a0d (patch)
tree       fb77a7a82cc62c9e788044cc7117b2cd72368c15 /drivers/mmc/host/sdhci.c
parent     93fc48c785f6266e67663b3cbbf24579b53fe5cf (diff)
sdhci: scatter-gather (ADMA) support
Add support for the scatter-gather DMA mode present on newer controllers.
As the mode requires 32-bit alignment, non-aligned chunks are handled by
using a bounce buffer. Also add some new quirks to handle controllers
that have bugs in the ADMA engine.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
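For context on what the patch constructs by hand below: each ADMA2 descriptor is eight bytes — a 16-bit attribute word (the patch uses 0x21, "tran, valid", for data entries and 0x03, "nop, end, valid", for the terminator), a 16-bit length, and a 32-bit address, all stored least-significant byte first, since the spec leaves the table's endianness unstated and the patch guesses little-endian. A minimal standalone sketch of that encoding (illustrative only; adma2_write_desc and the ADMA2_* names are not from this patch):

    #include <stdint.h>

    /* Attribute bits as used by the byte stores in this patch:
     * 0x21 = ADMA2_VALID | ADMA2_TRAN, 0x03 = ADMA2_VALID | ADMA2_END. */
    #define ADMA2_VALID 0x01
    #define ADMA2_END   0x02
    #define ADMA2_TRAN  0x20

    /* Encode one 8-byte ADMA2 descriptor, little-endian, mirroring the
     * desc[7..0] assignments in sdhci_adma_table_pre() below. */
    static void adma2_write_desc(uint8_t *desc, uint32_t addr,
                                 uint16_t len, uint8_t attr)
    {
            desc[0] = attr;                 /* attribute byte */
            desc[1] = 0x00;                 /* reserved */
            desc[2] = len & 0xff;           /* 16-bit length, LE */
            desc[3] = (len >> 8) & 0xff;
            desc[4] = addr & 0xff;          /* 32-bit address, LE */
            desc[5] = (addr >> 8) & 0xff;
            desc[6] = (addr >> 16) & 0xff;
            desc[7] = (addr >> 24) & 0xff;
    }

A "tran" descriptor transfers len bytes from addr; the terminating entry uses "nop, end, valid" with zero address and length, matching the table's final eight byte stores in the patch.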
Diffstat (limited to 'drivers/mmc/host/sdhci.c')
 -rw-r--r--  drivers/mmc/host/sdhci.c | 383 ++++++++++++++++++++++++++++++-----
 1 file changed, 347 insertions(+), 36 deletions(-)
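One detail worth seeing in isolation before the diff: the bounce-buffer path keys off how many bytes (0 to 3) separate a scatterlist address from the next 32-bit boundary. A hedged sketch of that arithmetic (align_offset is an illustrative name, not a function from the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Bytes needed to reach the next 4-byte boundary; mirrors
     * "offset = (4 - (addr & 0x3)) & 0x3" in sdhci_adma_table_pre(). */
    static unsigned int align_offset(uint32_t addr)
    {
            return (4 - (addr & 0x3)) & 0x3;
    }

    int main(void)
    {
            assert(align_offset(0x1000) == 0); /* already aligned */
            assert(align_offset(0x1001) == 3); /* 3 bytes go via bounce buffer */
            assert(align_offset(0x1002) == 2);
            assert(align_offset(0x1003) == 1);
            return 0;
    }

Those 0-3 leading bytes are what the patch routes through a 4-byte slot in host->align_buffer, so the main descriptor for each segment always starts 32-bit aligned.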
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0ab582e77ac2..b802044ea940 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -124,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host)
 		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
 		SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
-		SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;
+		SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
+		SDHCI_INT_ADMA_ERROR;
 
 	writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
 	writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
@@ -314,6 +315,196 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
 	DBG("PIO transfer complete.\n");
 }
 
+static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+	local_irq_save(*flags);
+	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+{
+	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+	local_irq_restore(*flags);
+}
+
+static void sdhci_adma_table_pre(struct sdhci_host *host,
+	struct mmc_data *data)
+{
+	int direction;
+
+	u8 *desc;
+	u8 *align;
+	dma_addr_t addr;
+	dma_addr_t align_addr;
+	int len, offset;
+
+	struct scatterlist *sg;
+	int i;
+	char *buffer;
+	unsigned long flags;
+
+	/*
+	 * The spec does not specify endianness of descriptor table.
+	 * We currently guess that it is LE.
+	 */
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	/*
+	 * The ADMA descriptor table is mapped further down as we
+	 * need to fill it with data first.
+	 */
+
+	host->align_addr = dma_map_single(mmc_dev(host->mmc),
+		host->align_buffer, 128 * 4, direction);
+	BUG_ON(host->align_addr & 0x3);
+
+	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
+		data->sg, data->sg_len, direction);
+
+	desc = host->adma_desc;
+	align = host->align_buffer;
+
+	align_addr = host->align_addr;
+
+	for_each_sg(data->sg, sg, host->sg_count, i) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+
+		/*
+		 * The SDHCI specification states that ADMA
+		 * addresses must be 32-bit aligned. If they
+		 * aren't, then we use a bounce buffer for
+		 * the (up to three) bytes that screw up the
+		 * alignment.
+		 */
+		offset = (4 - (addr & 0x3)) & 0x3;
+		if (offset) {
+			if (data->flags & MMC_DATA_WRITE) {
+				buffer = sdhci_kmap_atomic(sg, &flags);
+				memcpy(align, buffer, offset);
+				sdhci_kunmap_atomic(buffer, &flags);
+			}
+
+			desc[7] = (align_addr >> 24) & 0xff;
+			desc[6] = (align_addr >> 16) & 0xff;
+			desc[5] = (align_addr >> 8) & 0xff;
+			desc[4] = (align_addr >> 0) & 0xff;
+
+			BUG_ON(offset > 65536);
+
+			desc[3] = (offset >> 8) & 0xff;
+			desc[2] = (offset >> 0) & 0xff;
+
+			desc[1] = 0x00;
+			desc[0] = 0x21; /* tran, valid */
+
+			align += 4;
+			align_addr += 4;
+
+			desc += 8;
+
+			addr += offset;
+			len -= offset;
+		}
+
+		desc[7] = (addr >> 24) & 0xff;
+		desc[6] = (addr >> 16) & 0xff;
+		desc[5] = (addr >> 8) & 0xff;
+		desc[4] = (addr >> 0) & 0xff;
+
+		BUG_ON(len > 65536);
+
+		desc[3] = (len >> 8) & 0xff;
+		desc[2] = (len >> 0) & 0xff;
+
+		desc[1] = 0x00;
+		desc[0] = 0x21; /* tran, valid */
+
+		desc += 8;
+
+		/*
+		 * If this triggers then we have a calculation bug
+		 * somewhere. :/
+		 */
+		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
+	}
+
+	/*
+	 * Add a terminating entry.
+	 */
+	desc[7] = 0;
+	desc[6] = 0;
+	desc[5] = 0;
+	desc[4] = 0;
+
+	desc[3] = 0;
+	desc[2] = 0;
+
+	desc[1] = 0x00;
+	desc[0] = 0x03; /* nop, end, valid */
+
+	/*
+	 * Resync align buffer as we might have changed it.
+	 */
+	if (data->flags & MMC_DATA_WRITE) {
+		dma_sync_single_for_device(mmc_dev(host->mmc),
+			host->align_addr, 128 * 4, direction);
+	}
+
+	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
+		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+	BUG_ON(host->adma_addr & 0x3);
+}
+
+static void sdhci_adma_table_post(struct sdhci_host *host,
+	struct mmc_data *data)
+{
+	int direction;
+
+	struct scatterlist *sg;
+	int i, size;
+	u8 *align;
+	char *buffer;
+	unsigned long flags;
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
+		(128 * 2 + 1) * 4, DMA_TO_DEVICE);
+
+	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+		128 * 4, direction);
+
+	if (data->flags & MMC_DATA_READ) {
+		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+			data->sg_len, direction);
+
+		align = host->align_buffer;
+
+		for_each_sg(data->sg, sg, host->sg_count, i) {
+			if (sg_dma_address(sg) & 0x3) {
+				size = 4 - (sg_dma_address(sg) & 0x3);
+
+				buffer = sdhci_kmap_atomic(sg, &flags);
+				memcpy(buffer, align, size);
+				sdhci_kunmap_atomic(buffer, &flags);
+
+				align += 4;
+			}
+		}
+	}
+
+	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+		data->sg_len, direction);
+}
+
 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 {
 	u8 count;
@@ -363,6 +554,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 {
 	u8 count;
+	u8 ctrl;
 
 	WARN_ON(host->data);
 
@@ -383,35 +575,104 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 	if (host->flags & SDHCI_USE_DMA)
 		host->flags |= SDHCI_REQ_USE_DMA;
 
-	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
-		(host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
-		((data->blksz * data->blocks) & 0x3))) {
-		DBG("Reverting to PIO because of transfer size (%d)\n",
-			data->blksz * data->blocks);
-		host->flags &= ~SDHCI_REQ_USE_DMA;
+	/*
+	 * FIXME: This doesn't account for merging when mapping the
+	 * scatterlist.
+	 */
+	if (host->flags & SDHCI_REQ_USE_DMA) {
+		int broken, i;
+		struct scatterlist *sg;
+
+		broken = 0;
+		if (host->flags & SDHCI_USE_ADMA) {
+			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+				broken = 1;
+		} else {
+			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
+				broken = 1;
+		}
+
+		if (unlikely(broken)) {
+			for_each_sg(data->sg, sg, data->sg_len, i) {
+				if (sg->length & 0x3) {
+					DBG("Reverting to PIO because of "
+						"transfer size (%d)\n",
+						sg->length);
+					host->flags &= ~SDHCI_REQ_USE_DMA;
+					break;
+				}
+			}
+		}
 	}
 
 	/*
 	 * The assumption here being that alignment is the same after
 	 * translation to device address space.
 	 */
-	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
-		(host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
-		(data->sg->offset & 0x3))) {
-		DBG("Reverting to PIO because of bad alignment\n");
-		host->flags &= ~SDHCI_REQ_USE_DMA;
+	if (host->flags & SDHCI_REQ_USE_DMA) {
+		int broken, i;
+		struct scatterlist *sg;
+
+		broken = 0;
+		if (host->flags & SDHCI_USE_ADMA) {
+			/*
+			 * As we use 3 byte chunks to work around
+			 * alignment problems, we need to check this
+			 * quirk.
+			 */
+			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+				broken = 1;
+		} else {
+			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
+				broken = 1;
+		}
+
+		if (unlikely(broken)) {
+			for_each_sg(data->sg, sg, data->sg_len, i) {
+				if (sg->offset & 0x3) {
+					DBG("Reverting to PIO because of "
+						"bad alignment\n");
+					host->flags &= ~SDHCI_REQ_USE_DMA;
+					break;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Always adjust the DMA selection as some controllers
+	 * (e.g. JMicron) can't do PIO properly when the selection
+	 * is ADMA.
+	 */
+	if (host->version >= SDHCI_SPEC_200) {
+		ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+		ctrl &= ~SDHCI_CTRL_DMA_MASK;
+		if ((host->flags & SDHCI_REQ_USE_DMA) &&
+			(host->flags & SDHCI_USE_ADMA))
+			ctrl |= SDHCI_CTRL_ADMA32;
+		else
+			ctrl |= SDHCI_CTRL_SDMA;
+		writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
 	}
 
 	if (host->flags & SDHCI_REQ_USE_DMA) {
-		int count;
+		if (host->flags & SDHCI_USE_ADMA) {
+			sdhci_adma_table_pre(host, data);
+			writel(host->adma_addr,
+				host->ioaddr + SDHCI_ADMA_ADDRESS);
+		} else {
+			int count;
 
-		count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			(data->flags & MMC_DATA_READ) ?
-			DMA_FROM_DEVICE : DMA_TO_DEVICE);
-		WARN_ON(count != 1);
+			count = dma_map_sg(mmc_dev(host->mmc),
+					data->sg, data->sg_len,
+					(data->flags & MMC_DATA_READ) ?
+						DMA_FROM_DEVICE :
+						DMA_TO_DEVICE);
+			WARN_ON(count != 1);
 
-		writel(sg_dma_address(data->sg),
-			host->ioaddr + SDHCI_DMA_ADDRESS);
+			writel(sg_dma_address(data->sg),
+				host->ioaddr + SDHCI_DMA_ADDRESS);
+		}
 	} else {
 		host->cur_sg = data->sg;
 		host->num_sg = data->sg_len;
@@ -457,9 +718,13 @@ static void sdhci_finish_data(struct sdhci_host *host)
 	host->data = NULL;
 
 	if (host->flags & SDHCI_REQ_USE_DMA) {
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			(data->flags & MMC_DATA_READ) ?
-			DMA_FROM_DEVICE : DMA_TO_DEVICE);
+		if (host->flags & SDHCI_USE_ADMA)
+			sdhci_adma_table_post(host, data);
+		else {
+			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+				data->sg_len, (data->flags & MMC_DATA_READ) ?
+					DMA_FROM_DEVICE : DMA_TO_DEVICE);
+		}
 	}
 
 	/*
@@ -1008,6 +1273,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 		host->data->error = -ETIMEDOUT;
 	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
 		host->data->error = -EILSEQ;
+	else if (intmask & SDHCI_INT_ADMA_ERROR)
+		host->data->error = -EIO;
 
 	if (host->data->error)
 		sdhci_finish_data(host);
@@ -1199,7 +1466,6 @@ int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
 	unsigned int caps;
-	unsigned int version;
 	int ret;
 
 	WARN_ON(host == NULL);
@@ -1213,12 +1479,13 @@ int sdhci_add_host(struct sdhci_host *host)
 
 	sdhci_reset(host, SDHCI_RESET_ALL);
 
-	version = readw(host->ioaddr + SDHCI_HOST_VERSION);
-	version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
-	if (version > 1) {
+	host->version = readw(host->ioaddr + SDHCI_HOST_VERSION);
+	host->version = (host->version & SDHCI_SPEC_VER_MASK)
+				>> SDHCI_SPEC_VER_SHIFT;
+	if (host->version > SDHCI_SPEC_200) {
 		printk(KERN_ERR "%s: Unknown controller version (%d). "
 			"You may experience problems.\n", mmc_hostname(mmc),
-			version);
+			host->version);
 	}
 
 	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
@@ -1237,16 +1504,46 @@ int sdhci_add_host(struct sdhci_host *host)
 	}
 
 	if (host->flags & SDHCI_USE_DMA) {
+		if ((host->version >= SDHCI_SPEC_200) &&
+				(caps & SDHCI_CAN_DO_ADMA2))
+			host->flags |= SDHCI_USE_ADMA;
+	}
+
+	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
+		(host->flags & SDHCI_USE_ADMA)) {
+		DBG("Disabling ADMA as it is marked broken\n");
+		host->flags &= ~SDHCI_USE_ADMA;
+	}
+
+	if (host->flags & SDHCI_USE_DMA) {
 		if (host->ops->enable_dma) {
 			if (host->ops->enable_dma(host)) {
 				printk(KERN_WARNING "%s: No suitable DMA "
 					"available. Falling back to PIO.\n",
 					mmc_hostname(mmc));
-				host->flags &= ~SDHCI_USE_DMA;
+				host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
 			}
 		}
 	}
 
+	if (host->flags & SDHCI_USE_ADMA) {
+		/*
+		 * We need to allocate descriptors for all sg entries
+		 * (128) and potentially one alignment transfer for
+		 * each of those entries.
+		 */
+		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
+		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+		if (!host->adma_desc || !host->align_buffer) {
+			kfree(host->adma_desc);
+			kfree(host->align_buffer);
+			printk(KERN_WARNING "%s: Unable to allocate ADMA "
+				"buffers. Falling back to standard DMA.\n",
+				mmc_hostname(mmc));
+			host->flags &= ~SDHCI_USE_ADMA;
+		}
+	}
+
 	/* XXX: Hack to get MMC layer to avoid highmem */
 	if (!(host->flags & SDHCI_USE_DMA))
 		mmc_dev(host->mmc)->dma_mask = 0;
@@ -1298,13 +1595,16 @@ int sdhci_add_host(struct sdhci_host *host)
 	spin_lock_init(&host->lock);
 
 	/*
-	 * Maximum number of segments. Hardware cannot do scatter lists.
+	 * Maximum number of segments. Depends on if the hardware
+	 * can do scatter/gather or not.
 	 */
-	if (host->flags & SDHCI_USE_DMA)
+	if (host->flags & SDHCI_USE_ADMA)
+		mmc->max_hw_segs = 128;
+	else if (host->flags & SDHCI_USE_DMA)
 		mmc->max_hw_segs = 1;
-	else
-		mmc->max_hw_segs = 16;
-	mmc->max_phys_segs = 16;
+	else /* PIO */
+		mmc->max_hw_segs = 128;
+	mmc->max_phys_segs = 128;
 
 	/*
 	 * Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -1314,9 +1614,13 @@ int sdhci_add_host(struct sdhci_host *host)
 
 	/*
 	 * Maximum segment size. Could be one segment with the maximum number
-	 * of bytes.
+	 * of bytes. When doing hardware scatter/gather, each entry cannot
+	 * be larger than 64 KiB though.
 	 */
-	mmc->max_seg_size = mmc->max_req_size;
+	if (host->flags & SDHCI_USE_ADMA)
+		mmc->max_seg_size = 65536;
+	else
+		mmc->max_seg_size = mmc->max_req_size;
 
 	/*
 	 * Maximum block size. This varies from controller to controller and
@@ -1371,8 +1675,9 @@ int sdhci_add_host(struct sdhci_host *host)
 
 	mmc_add_host(mmc);
 
-	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
+	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
 		mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id,
+		(host->flags & SDHCI_USE_ADMA)?"A":"",
 		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
 
 	return 0;
@@ -1426,6 +1731,12 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
 
 	tasklet_kill(&host->card_tasklet);
 	tasklet_kill(&host->finish_tasklet);
+
+	kfree(host->adma_desc);
+	kfree(host->align_buffer);
+
+	host->adma_desc = NULL;
+	host->align_buffer = NULL;
 }
 
 EXPORT_SYMBOL_GPL(sdhci_remove_host);
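Note on the probe banner: with the "%s%s" pair added above, a controller that ends up with SDHCI_USE_ADMA set reports "using ADMA" rather than "using DMA", so a boot line such as "mmc0: SDHCI controller on PCI [0000:00:1e.0] using ADMA" would be typical (the host name and bus id here are hypothetical examples, not output taken from the patch).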