 drivers/mmc/host/sdhci-pci.c |  17
 drivers/mmc/host/sdhci.c     | 383
 drivers/mmc/host/sdhci.h     |  51
 3 files changed, 407 insertions(+), 44 deletions(-)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0716dcffd511..deb607c52c0d 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -142,6 +142,7 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
         if (chip->pdev->revision == 0) {
                 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
                           SDHCI_QUIRK_32BIT_DMA_SIZE |
+                          SDHCI_QUIRK_32BIT_ADMA_SIZE |
                           SDHCI_QUIRK_RESET_AFTER_REQUEST;
         }
 
@@ -206,6 +207,22 @@ static void jmicron_enable_mmc(struct sdhci_host *host, int on)
 
 static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
 {
+        if (slot->chip->pdev->revision == 0) {
+                u16 version;
+
+                version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
+                version = (version & SDHCI_VENDOR_VER_MASK) >>
+                        SDHCI_VENDOR_VER_SHIFT;
+
+                /*
+                 * Older versions of the chip have lots of nasty glitches
+                 * in the ADMA engine. It's best just to avoid it
+                 * completely.
+                 */
+                if (version < 0xAC)
+                        slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+        }
+
         /*
          * The secondary interface requires a bit set to get the
          * interrupts.
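
For context on the check added above: the Host Version register keeps the vendor-specific revision in its upper byte and the SDHCI spec version in its lower byte, and the JMicron workaround keys off the vendor byte. A minimal sketch of that extraction, using a hypothetical helper name that is not part of the patch:

static u8 example_vendor_version(struct sdhci_host *host)
{
        u16 ver = readw(host->ioaddr + SDHCI_HOST_VERSION);

        /* Upper byte: vendor revision; lower byte: spec version. */
        return (ver & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
}

Revisions below 0xAC are then tagged with SDHCI_QUIRK_BROKEN_ADMA, which sdhci_add_host() uses further down to veto ADMA before any descriptor buffers are allocated.
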
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0ab582e77ac2..b802044ea940 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -124,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host)
                 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
                 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
                 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
-                SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;
+                SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
+                SDHCI_INT_ADMA_ERROR;
 
         writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
         writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
@@ -314,6 +315,196 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
         DBG("PIO transfer complete.\n");
 }
 
+static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+        local_irq_save(*flags);
+        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+{
+        kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+        local_irq_restore(*flags);
+}
+
+static void sdhci_adma_table_pre(struct sdhci_host *host,
+        struct mmc_data *data)
+{
+        int direction;
+
+        u8 *desc;
+        u8 *align;
+        dma_addr_t addr;
+        dma_addr_t align_addr;
+        int len, offset;
+
+        struct scatterlist *sg;
+        int i;
+        char *buffer;
+        unsigned long flags;
+
+        /*
+         * The spec does not specify endianness of descriptor table.
+         * We currently guess that it is LE.
+         */
+
+        if (data->flags & MMC_DATA_READ)
+                direction = DMA_FROM_DEVICE;
+        else
+                direction = DMA_TO_DEVICE;
+
+        /*
+         * The ADMA descriptor table is mapped further down as we
+         * need to fill it with data first.
+         */
+
+        host->align_addr = dma_map_single(mmc_dev(host->mmc),
+                host->align_buffer, 128 * 4, direction);
+        BUG_ON(host->align_addr & 0x3);
+
+        host->sg_count = dma_map_sg(mmc_dev(host->mmc),
+                data->sg, data->sg_len, direction);
+
+        desc = host->adma_desc;
+        align = host->align_buffer;
+
+        align_addr = host->align_addr;
+
+        for_each_sg(data->sg, sg, host->sg_count, i) {
+                addr = sg_dma_address(sg);
+                len = sg_dma_len(sg);
+
+                /*
+                 * The SDHCI specification states that ADMA
+                 * addresses must be 32-bit aligned. If they
+                 * aren't, then we use a bounce buffer for
+                 * the (up to three) bytes that screw up the
+                 * alignment.
+                 */
+                offset = (4 - (addr & 0x3)) & 0x3;
+                if (offset) {
+                        if (data->flags & MMC_DATA_WRITE) {
+                                buffer = sdhci_kmap_atomic(sg, &flags);
+                                memcpy(align, buffer, offset);
+                                sdhci_kunmap_atomic(buffer, &flags);
+                        }
+
+                        desc[7] = (align_addr >> 24) & 0xff;
+                        desc[6] = (align_addr >> 16) & 0xff;
+                        desc[5] = (align_addr >> 8) & 0xff;
+                        desc[4] = (align_addr >> 0) & 0xff;
+
+                        BUG_ON(offset > 65536);
+
+                        desc[3] = (offset >> 8) & 0xff;
+                        desc[2] = (offset >> 0) & 0xff;
+
+                        desc[1] = 0x00;
+                        desc[0] = 0x21; /* tran, valid */
+
+                        align += 4;
+                        align_addr += 4;
+
+                        desc += 8;
+
+                        addr += offset;
+                        len -= offset;
+                }
+
+                desc[7] = (addr >> 24) & 0xff;
+                desc[6] = (addr >> 16) & 0xff;
+                desc[5] = (addr >> 8) & 0xff;
+                desc[4] = (addr >> 0) & 0xff;
+
+                BUG_ON(len > 65536);
+
+                desc[3] = (len >> 8) & 0xff;
+                desc[2] = (len >> 0) & 0xff;
+
+                desc[1] = 0x00;
+                desc[0] = 0x21; /* tran, valid */
+
+                desc += 8;
+
+                /*
+                 * If this triggers then we have a calculation bug
+                 * somewhere. :/
+                 */
+                WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
+        }
+
+        /*
+         * Add a terminating entry.
+         */
+        desc[7] = 0;
+        desc[6] = 0;
+        desc[5] = 0;
+        desc[4] = 0;
+
+        desc[3] = 0;
+        desc[2] = 0;
+
+        desc[1] = 0x00;
+        desc[0] = 0x03; /* nop, end, valid */
+
+        /*
+         * Resync align buffer as we might have changed it.
+         */
+        if (data->flags & MMC_DATA_WRITE) {
+                dma_sync_single_for_device(mmc_dev(host->mmc),
+                        host->align_addr, 128 * 4, direction);
+        }
+
+        host->adma_addr = dma_map_single(mmc_dev(host->mmc),
+                host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+        BUG_ON(host->adma_addr & 0x3);
+}
+
+static void sdhci_adma_table_post(struct sdhci_host *host,
+        struct mmc_data *data)
+{
+        int direction;
+
+        struct scatterlist *sg;
+        int i, size;
+        u8 *align;
+        char *buffer;
+        unsigned long flags;
+
+        if (data->flags & MMC_DATA_READ)
+                direction = DMA_FROM_DEVICE;
+        else
+                direction = DMA_TO_DEVICE;
+
+        dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
+                (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+
+        dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+                128 * 4, direction);
+
+        if (data->flags & MMC_DATA_READ) {
+                dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+                        data->sg_len, direction);
+
+                align = host->align_buffer;
+
+                for_each_sg(data->sg, sg, host->sg_count, i) {
+                        if (sg_dma_address(sg) & 0x3) {
+                                size = 4 - (sg_dma_address(sg) & 0x3);
+
+                                buffer = sdhci_kmap_atomic(sg, &flags);
+                                memcpy(buffer, align, size);
+                                sdhci_kunmap_atomic(buffer, &flags);
+
+                                align += 4;
+                        }
+                }
+        }
+
+        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                data->sg_len, direction);
+}
+
 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 {
         u8 count;
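
The byte-by-byte stores in sdhci_adma_table_pre() build 8-byte ADMA2 descriptors: a 16-bit attribute word (0x21 = "tran, valid" for data, 0x03 = "nop, end, valid" for the terminator), a 16-bit length, and a 32-bit buffer address, all stored least-significant byte first (the patch's own comment notes the endianness is a guess at little-endian). A minimal sketch of that layout, with a hypothetical helper name that is not part of the patch:

static void example_adma2_write_desc(u8 *desc, u32 addr, u16 len, u8 attr)
{
        desc[0] = attr;                 /* attribute word, low byte */
        desc[1] = 0x00;                 /* attribute word, high byte */
        desc[2] = len & 0xff;           /* 16-bit transfer length */
        desc[3] = (len >> 8) & 0xff;
        desc[4] = addr & 0xff;          /* 32-bit buffer address */
        desc[5] = (addr >> 8) & 0xff;
        desc[6] = (addr >> 16) & 0xff;
        desc[7] = (addr >> 24) & 0xff;
}

The align_buffer exists because descriptor addresses must be 32-bit aligned: for a buffer starting at 0x1001, offset = (4 - (0x1001 & 0x3)) & 0x3 = 3, so the first three bytes are bounced through a 4-byte slot of align_buffer and the main descriptor starts at the aligned address 0x1004.
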
@@ -363,6 +554,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 {
         u8 count;
+        u8 ctrl;
 
         WARN_ON(host->data);
 
@@ -383,35 +575,104 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
         if (host->flags & SDHCI_USE_DMA)
                 host->flags |= SDHCI_REQ_USE_DMA;
 
-        if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
-                (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
-                ((data->blksz * data->blocks) & 0x3))) {
-                DBG("Reverting to PIO because of transfer size (%d)\n",
-                        data->blksz * data->blocks);
-                host->flags &= ~SDHCI_REQ_USE_DMA;
+        /*
+         * FIXME: This doesn't account for merging when mapping the
+         * scatterlist.
+         */
+        if (host->flags & SDHCI_REQ_USE_DMA) {
+                int broken, i;
+                struct scatterlist *sg;
+
+                broken = 0;
+                if (host->flags & SDHCI_USE_ADMA) {
+                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+                                broken = 1;
+                } else {
+                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
+                                broken = 1;
+                }
+
+                if (unlikely(broken)) {
+                        for_each_sg(data->sg, sg, data->sg_len, i) {
+                                if (sg->length & 0x3) {
+                                        DBG("Reverting to PIO because of "
+                                                "transfer size (%d)\n",
+                                                sg->length);
+                                        host->flags &= ~SDHCI_REQ_USE_DMA;
+                                        break;
+                                }
+                        }
+                }
         }
 
         /*
          * The assumption here being that alignment is the same after
          * translation to device address space.
          */
-        if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
-                (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
-                (data->sg->offset & 0x3))) {
-                DBG("Reverting to PIO because of bad alignment\n");
-                host->flags &= ~SDHCI_REQ_USE_DMA;
+        if (host->flags & SDHCI_REQ_USE_DMA) {
+                int broken, i;
+                struct scatterlist *sg;
+
+                broken = 0;
+                if (host->flags & SDHCI_USE_ADMA) {
+                        /*
+                         * As we use 3 byte chunks to work around
+                         * alignment problems, we need to check this
+                         * quirk.
+                         */
+                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+                                broken = 1;
+                } else {
+                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
+                                broken = 1;
+                }
+
+                if (unlikely(broken)) {
+                        for_each_sg(data->sg, sg, data->sg_len, i) {
+                                if (sg->offset & 0x3) {
+                                        DBG("Reverting to PIO because of "
+                                                "bad alignment\n");
+                                        host->flags &= ~SDHCI_REQ_USE_DMA;
+                                        break;
+                                }
+                        }
+                }
+        }
+
+        /*
+         * Always adjust the DMA selection as some controllers
+         * (e.g. JMicron) can't do PIO properly when the selection
+         * is ADMA.
+         */
+        if (host->version >= SDHCI_SPEC_200) {
+                ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+                ctrl &= ~SDHCI_CTRL_DMA_MASK;
+                if ((host->flags & SDHCI_REQ_USE_DMA) &&
+                        (host->flags & SDHCI_USE_ADMA))
+                        ctrl |= SDHCI_CTRL_ADMA32;
+                else
+                        ctrl |= SDHCI_CTRL_SDMA;
+                writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
         }
 
         if (host->flags & SDHCI_REQ_USE_DMA) {
-                int count;
+                if (host->flags & SDHCI_USE_ADMA) {
+                        sdhci_adma_table_pre(host, data);
+                        writel(host->adma_addr,
+                                host->ioaddr + SDHCI_ADMA_ADDRESS);
+                } else {
+                        int count;
 
-                count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                        (data->flags & MMC_DATA_READ) ?
-                        DMA_FROM_DEVICE : DMA_TO_DEVICE);
-                WARN_ON(count != 1);
+                        count = dma_map_sg(mmc_dev(host->mmc),
+                                        data->sg, data->sg_len,
+                                        (data->flags & MMC_DATA_READ) ?
+                                                DMA_FROM_DEVICE :
+                                                DMA_TO_DEVICE);
+                        WARN_ON(count != 1);
 
                         writel(sg_dma_address(data->sg),
                                 host->ioaddr + SDHCI_DMA_ADDRESS);
+                }
         } else {
                 host->cur_sg = data->sg;
                 host->num_sg = data->sg_len;
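
The Host Control update added above selects the DMA engine per request: bits 4:3 of that register pick SDMA or ADMA, using the SDHCI_CTRL_* values introduced in sdhci.h later in this patch. A minimal sketch of the same read-modify-write as a hypothetical standalone helper (not part of the patch), where mode is one of SDHCI_CTRL_SDMA, SDHCI_CTRL_ADMA1, SDHCI_CTRL_ADMA32 or SDHCI_CTRL_ADMA64:

static void example_select_dma_mode(struct sdhci_host *host, u8 mode)
{
        u8 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);

        ctrl &= ~SDHCI_CTRL_DMA_MASK;
        ctrl |= mode & SDHCI_CTRL_DMA_MASK;
        writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
}

The patch rewrites this field on every request, even when falling back to SDMA or PIO, because (per its comment) some controllers cannot do PIO correctly while ADMA is still selected.
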
@@ -457,9 +718,13 @@ static void sdhci_finish_data(struct sdhci_host *host)
         host->data = NULL;
 
         if (host->flags & SDHCI_REQ_USE_DMA) {
-                dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                        (data->flags & MMC_DATA_READ) ?
-                        DMA_FROM_DEVICE : DMA_TO_DEVICE);
+                if (host->flags & SDHCI_USE_ADMA)
+                        sdhci_adma_table_post(host, data);
+                else {
+                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                                data->sg_len, (data->flags & MMC_DATA_READ) ?
+                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
+                }
         }
 
         /*
@@ -1008,6 +1273,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
                 host->data->error = -ETIMEDOUT;
         else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
                 host->data->error = -EILSEQ;
+        else if (intmask & SDHCI_INT_ADMA_ERROR)
+                host->data->error = -EIO;
 
         if (host->data->error)
                 sdhci_finish_data(host);
@@ -1199,7 +1466,6 @@ int sdhci_add_host(struct sdhci_host *host)
 {
         struct mmc_host *mmc;
         unsigned int caps;
-        unsigned int version;
         int ret;
 
         WARN_ON(host == NULL);
@@ -1213,12 +1479,13 @@ int sdhci_add_host(struct sdhci_host *host)
 
         sdhci_reset(host, SDHCI_RESET_ALL);
 
-        version = readw(host->ioaddr + SDHCI_HOST_VERSION);
-        version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
-        if (version > 1) {
+        host->version = readw(host->ioaddr + SDHCI_HOST_VERSION);
+        host->version = (host->version & SDHCI_SPEC_VER_MASK)
+                                >> SDHCI_SPEC_VER_SHIFT;
+        if (host->version > SDHCI_SPEC_200) {
                 printk(KERN_ERR "%s: Unknown controller version (%d). "
                         "You may experience problems.\n", mmc_hostname(mmc),
-                        version);
+                        host->version);
         }
 
         caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
@@ -1237,16 +1504,46 @@ int sdhci_add_host(struct sdhci_host *host)
         }
 
         if (host->flags & SDHCI_USE_DMA) {
+                if ((host->version >= SDHCI_SPEC_200) &&
+                                (caps & SDHCI_CAN_DO_ADMA2))
+                        host->flags |= SDHCI_USE_ADMA;
+        }
+
+        if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
+                (host->flags & SDHCI_USE_ADMA)) {
+                DBG("Disabling ADMA as it is marked broken\n");
+                host->flags &= ~SDHCI_USE_ADMA;
+        }
+
+        if (host->flags & SDHCI_USE_DMA) {
                 if (host->ops->enable_dma) {
                         if (host->ops->enable_dma(host)) {
                                 printk(KERN_WARNING "%s: No suitable DMA "
                                         "available. Falling back to PIO.\n",
                                         mmc_hostname(mmc));
-                                host->flags &= ~SDHCI_USE_DMA;
+                                host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
                         }
                 }
         }
 
+        if (host->flags & SDHCI_USE_ADMA) {
+                /*
+                 * We need to allocate descriptors for all sg entries
+                 * (128) and potentially one alignment transfer for
+                 * each of those entries.
+                 */
+                host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
+                host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+                if (!host->adma_desc || !host->align_buffer) {
+                        kfree(host->adma_desc);
+                        kfree(host->align_buffer);
+                        printk(KERN_WARNING "%s: Unable to allocate ADMA "
+                                "buffers. Falling back to standard DMA.\n",
+                                mmc_hostname(mmc));
+                        host->flags &= ~SDHCI_USE_ADMA;
+                }
+        }
+
         /* XXX: Hack to get MMC layer to avoid highmem */
         if (!(host->flags & SDHCI_USE_DMA))
                 mmc_dev(host->mmc)->dma_mask = 0;
@@ -1298,13 +1595,16 @@ int sdhci_add_host(struct sdhci_host *host)
         spin_lock_init(&host->lock);
 
         /*
-         * Maximum number of segments. Hardware cannot do scatter lists.
+         * Maximum number of segments. Depends on if the hardware
+         * can do scatter/gather or not.
          */
-        if (host->flags & SDHCI_USE_DMA)
+        if (host->flags & SDHCI_USE_ADMA)
+                mmc->max_hw_segs = 128;
+        else if (host->flags & SDHCI_USE_DMA)
                 mmc->max_hw_segs = 1;
-        else
-                mmc->max_hw_segs = 16;
-        mmc->max_phys_segs = 16;
+        else /* PIO */
+                mmc->max_hw_segs = 128;
+        mmc->max_phys_segs = 128;
 
         /*
          * Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -1314,9 +1614,13 @@ int sdhci_add_host(struct sdhci_host *host)
 
         /*
          * Maximum segment size. Could be one segment with the maximum number
-         * of bytes.
+         * of bytes. When doing hardware scatter/gather, each entry cannot
+         * be larger than 64 KiB though.
          */
-        mmc->max_seg_size = mmc->max_req_size;
+        if (host->flags & SDHCI_USE_ADMA)
+                mmc->max_seg_size = 65536;
+        else
+                mmc->max_seg_size = mmc->max_req_size;
 
         /*
          * Maximum block size. This varies from controller to controller and
@@ -1371,8 +1675,9 @@ int sdhci_add_host(struct sdhci_host *host)
 
         mmc_add_host(mmc);
 
-        printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
+        printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
                 mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id,
+                (host->flags & SDHCI_USE_ADMA)?"A":"",
                 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
 
         return 0;
@@ -1426,6 +1731,12 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
 
         tasklet_kill(&host->card_tasklet);
         tasklet_kill(&host->finish_tasklet);
+
+        kfree(host->adma_desc);
+        kfree(host->align_buffer);
+
+        host->adma_desc = NULL;
+        host->align_buffer = NULL;
 }
 
 EXPORT_SYMBOL_GPL(sdhci_remove_host);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7c302515a6a5..5bb355281765 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -60,6 +60,11 @@
 #define  SDHCI_CTRL_LED         0x01
 #define  SDHCI_CTRL_4BITBUS     0x02
 #define  SDHCI_CTRL_HISPD       0x04
+#define  SDHCI_CTRL_DMA_MASK    0x18
+#define  SDHCI_CTRL_SDMA        0x00
+#define  SDHCI_CTRL_ADMA1       0x08
+#define  SDHCI_CTRL_ADMA32      0x10
+#define  SDHCI_CTRL_ADMA64      0x18
 
 #define SDHCI_POWER_CONTROL     0x29
 #define  SDHCI_POWER_ON         0x01
@@ -105,6 +110,7 @@
 #define  SDHCI_INT_DATA_END_BIT 0x00400000
 #define  SDHCI_INT_BUS_POWER    0x00800000
 #define  SDHCI_INT_ACMD12ERR    0x01000000
+#define  SDHCI_INT_ADMA_ERROR   0x02000000
 
 #define  SDHCI_INT_NORMAL_MASK  0x00007FFF
 #define  SDHCI_INT_ERROR_MASK   0xFFFF8000
@@ -128,11 +134,14 @@
 #define  SDHCI_CLOCK_BASE_SHIFT 8
 #define  SDHCI_MAX_BLOCK_MASK   0x00030000
 #define  SDHCI_MAX_BLOCK_SHIFT  16
+#define  SDHCI_CAN_DO_ADMA2     0x00080000
+#define  SDHCI_CAN_DO_ADMA1     0x00100000
 #define  SDHCI_CAN_DO_HISPD     0x00200000
 #define  SDHCI_CAN_DO_DMA       0x00400000
 #define  SDHCI_CAN_VDD_330      0x01000000
 #define  SDHCI_CAN_VDD_300      0x02000000
 #define  SDHCI_CAN_VDD_180      0x04000000
+#define  SDHCI_CAN_64BIT        0x10000000
 
 /* 44-47 reserved for more caps */
 
@@ -140,7 +149,16 @@
 
 /* 4C-4F reserved for more max current */
 
-/* 50-FB reserved */
+#define SDHCI_SET_ACMD12_ERROR  0x50
+#define SDHCI_SET_INT_ERROR     0x52
+
+#define SDHCI_ADMA_ERROR        0x54
+
+/* 55-57 reserved */
+
+#define SDHCI_ADMA_ADDRESS      0x58
+
+/* 60-FB reserved */
 
 #define SDHCI_SLOT_INT_STATUS   0xFC
 
@@ -149,6 +167,8 @@
 #define  SDHCI_VENDOR_VER_SHIFT 8
 #define  SDHCI_SPEC_VER_MASK    0x00FF
 #define  SDHCI_SPEC_VER_SHIFT   0
+#define   SDHCI_SPEC_100        0
+#define   SDHCI_SPEC_200        1
 
 struct sdhci_ops;
 
@@ -170,16 +190,20 @@ struct sdhci_host {
 #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS       (1<<4)
 /* Controller has an unusable DMA engine */
 #define SDHCI_QUIRK_BROKEN_DMA                  (1<<5)
+/* Controller has an unusable ADMA engine */
+#define SDHCI_QUIRK_BROKEN_ADMA                 (1<<6)
 /* Controller can only DMA from 32-bit aligned addresses */
-#define SDHCI_QUIRK_32BIT_DMA_ADDR              (1<<6)
+#define SDHCI_QUIRK_32BIT_DMA_ADDR              (1<<7)
 /* Controller can only DMA chunk sizes that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_DMA_SIZE              (1<<7)
+#define SDHCI_QUIRK_32BIT_DMA_SIZE              (1<<8)
+/* Controller can only ADMA chunks that are a multiple of 32 bits */
+#define SDHCI_QUIRK_32BIT_ADMA_SIZE             (1<<9)
 /* Controller needs to be reset after each request to stay stable */
-#define SDHCI_QUIRK_RESET_AFTER_REQUEST         (1<<8)
+#define SDHCI_QUIRK_RESET_AFTER_REQUEST         (1<<10)
 /* Controller needs voltage and power writes to happen separately */
-#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER     (1<<9)
+#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER     (1<<11)
 /* Controller provides an incorrect timeout value for transfers */
-#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL          (1<<10)
+#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL          (1<<12)
 
         int                     irq;            /* Device IRQ */
         void __iomem *          ioaddr;         /* Mapped address */
@@ -197,8 +221,11 @@ struct sdhci_host {
 
         int                     flags;          /* Host attributes */
 #define SDHCI_USE_DMA           (1<<0)          /* Host is DMA capable */
-#define SDHCI_REQ_USE_DMA       (1<<1)          /* Use DMA for this req. */
-#define SDHCI_DEVICE_DEAD       (1<<2)          /* Device unresponsive */
+#define SDHCI_USE_ADMA          (1<<1)          /* Host is ADMA capable */
+#define SDHCI_REQ_USE_DMA       (1<<2)          /* Use DMA for this req. */
+#define SDHCI_DEVICE_DEAD       (1<<3)          /* Device unresponsive */
+
+        unsigned int            version;        /* SDHCI spec. version */
 
         unsigned int            max_clk;        /* Max possible freq (MHz) */
         unsigned int            timeout_clk;    /* Timeout freq (KHz) */
@@ -216,6 +243,14 @@ struct sdhci_host {
         int                     offset;         /* Offset into current sg */
         int                     remain;         /* Bytes left in current */
 
+        int                     sg_count;       /* Mapped sg entries */
+
+        u8                      *adma_desc;     /* ADMA descriptor table */
+        u8                      *align_buffer;  /* Bounce buffer */
+
+        dma_addr_t              adma_addr;      /* Mapped ADMA descr. table */
+        dma_addr_t              align_addr;     /* Mapped bounce buffer */
+
         struct tasklet_struct   card_tasklet;   /* Tasklet structures */
         struct tasklet_struct   finish_tasklet;
 
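
Taken together, the new constants gate ADMA the same way sdhci_add_host() does in this patch: on top of the existing DMA capability check, the controller must report spec version 2.00, advertise ADMA2 in the capabilities register, and not carry the broken-ADMA quirk. A hypothetical condensation of that policy, for illustration only:

static int example_can_use_adma(struct sdhci_host *host, u32 caps)
{
        if (host->version < SDHCI_SPEC_200)
                return 0;       /* 1.00 controllers: SDMA or PIO only */
        if (!(caps & SDHCI_CAN_DO_ADMA2))
                return 0;       /* ADMA2 not advertised in capabilities */
        if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA)
                return 0;       /* e.g. early JMicron revisions */
        return 1;
}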