-rw-r--r--	drivers/mmc/host/sdhci.c  | 68
-rw-r--r--	include/linux/mmc/sdhci.h |  7
2 files changed, 41 insertions, 34 deletions
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 20e8a2d0d51a..053b55df9df1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -44,14 +44,6 @@
 
 #define MAX_TUNING_LOOP 40
 
-/*
- * The ADMA2 descriptor table size is calculated as the maximum number of
- * segments (128), times 2 to allow for an alignment descriptor for each
- * segment, plus 1 for a nop end descriptor, all multipled by the 32-bit
- * descriptor size (8).
- */
-#define ADMA_SIZE	((128 * 2 + 1) * 8)
-
 static unsigned int debug_quirks = 0;
 static unsigned int debug_quirks2;
 
@@ -502,10 +494,10 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 		direction = DMA_TO_DEVICE;
 
 	host->align_addr = dma_map_single(mmc_dev(host->mmc),
-		host->align_buffer, 128 * 4, direction);
+		host->align_buffer, host->align_buffer_sz, direction);
 	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
 		goto fail;
-	BUG_ON(host->align_addr & 0x3);
+	BUG_ON(host->align_addr & host->align_mask);
 
 	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
 		data->sg, data->sg_len, direction);
@@ -528,7 +520,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 		 * the (up to three) bytes that screw up the
 		 * alignment.
 		 */
-		offset = (4 - (addr & 0x3)) & 0x3;
+		offset = (host->align_sz - (addr & host->align_mask)) &
+			 host->align_mask;
 		if (offset) {
 			if (data->flags & MMC_DATA_WRITE) {
 				buffer = sdhci_kmap_atomic(sg, &flags);
@@ -543,10 +536,10 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
 			BUG_ON(offset > 65536);
 
-			align += 4;
-			align_addr += 4;
+			align += host->align_sz;
+			align_addr += host->align_sz;
 
-			desc += 8;
+			desc += host->desc_sz;
 
 			addr += offset;
 			len -= offset;
@@ -556,13 +549,13 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
 		/* tran, valid */
 		sdhci_adma_write_desc(desc, addr, len, 0x21);
-		desc += 8;
+		desc += host->desc_sz;
 
 		/*
 		 * If this triggers then we have a calculation bug
 		 * somewhere. :/
 		 */
-		WARN_ON((desc - host->adma_table) >= ADMA_SIZE);
+		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
 	}
 
 	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
@@ -570,7 +563,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 		* Mark the last descriptor as the terminating descriptor
 		*/
 		if (desc != host->adma_table) {
-			desc -= 8;
+			desc -= host->desc_sz;
 			sdhci_adma_mark_end(desc);
 		}
 	} else {
@@ -587,14 +580,14 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 	 */
 	if (data->flags & MMC_DATA_WRITE) {
 		dma_sync_single_for_device(mmc_dev(host->mmc),
-			host->align_addr, 128 * 4, direction);
+			host->align_addr, host->align_buffer_sz, direction);
 	}
 
 	return 0;
 
 unmap_align:
 	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
-		128 * 4, direction);
+		host->align_buffer_sz, direction);
 fail:
 	return -EINVAL;
 }
@@ -617,12 +610,12 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 		direction = DMA_TO_DEVICE;
 
 	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
-		128 * 4, direction);
+		host->align_buffer_sz, direction);
 
 	/* Do a quick scan of the SG list for any unaligned mappings */
 	has_unaligned = false;
 	for_each_sg(data->sg, sg, host->sg_count, i)
-		if (sg_dma_address(sg) & 3) {
+		if (sg_dma_address(sg) & host->align_mask) {
 			has_unaligned = true;
 			break;
 		}
@@ -634,8 +627,9 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 		align = host->align_buffer;
 
 		for_each_sg(data->sg, sg, host->sg_count, i) {
-			if (sg_dma_address(sg) & 0x3) {
-				size = 4 - (sg_dma_address(sg) & 0x3);
+			if (sg_dma_address(sg) & host->align_mask) {
+				size = host->align_sz -
+				       (sg_dma_address(sg) & host->align_mask);
 
 				buffer = sdhci_kmap_atomic(sg, &flags);
 				WARN_ON(((long)buffer & (PAGE_SIZE - 1)) >
@@ -643,7 +637,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 				memcpy(buffer, align, size);
 				sdhci_kunmap_atomic(buffer, &flags);
 
-				align += 4;
+				align += host->align_sz;
 			}
 		}
 	}
@@ -2316,7 +2310,7 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
 		DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
 		    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
 
-		desc += 8;
+		desc += host->desc_sz;
 
 		if (attr & 2)
 			break;
@@ -2878,17 +2872,23 @@ int sdhci_add_host(struct sdhci_host *host)
 
 	if (host->flags & SDHCI_USE_ADMA) {
 		/*
-		 * We need to allocate descriptors for all sg entries
-		 * (128) and potentially one alignment transfer for
-		 * each of those entries.
+		 * The DMA descriptor table size is calculated as the maximum
+		 * number of segments times 2, to allow for an alignment
+		 * descriptor for each segment, plus 1 for a nop end descriptor,
+		 * all multiplied by the descriptor size.
 		 */
+		host->adma_table_sz = (128 * 2 + 1) * 8;
+		host->align_buffer_sz = 128 * 4;
+		host->desc_sz = 8;
+		host->align_sz = 4;
+		host->align_mask = 3;
 		host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
-						      ADMA_SIZE,
+						      host->adma_table_sz,
 						      &host->adma_addr,
 						      GFP_KERNEL);
-		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
 		if (!host->adma_table || !host->align_buffer) {
-			dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
+			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
 					  host->adma_table, host->adma_addr);
 			kfree(host->align_buffer);
 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
@@ -2896,11 +2896,11 @@ int sdhci_add_host(struct sdhci_host *host)
 			host->flags &= ~SDHCI_USE_ADMA;
 			host->adma_table = NULL;
 			host->align_buffer = NULL;
-		} else if (host->adma_addr & 3) {
+		} else if (host->adma_addr & host->align_mask) {
 			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
 				mmc_hostname(mmc));
 			host->flags &= ~SDHCI_USE_ADMA;
-			dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
+			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
 					  host->adma_table, host->adma_addr);
 			kfree(host->align_buffer);
 			host->adma_table = NULL;
@@ -3360,7 +3360,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
 		regulator_disable(mmc->supply.vqmmc);
 
 	if (host->adma_table)
-		dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
+		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
 				  host->adma_table, host->adma_addr);
 	kfree(host->align_buffer);
 
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 933dbbb50742..2a72e9510833 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -158,9 +158,16 @@ struct sdhci_host {
 	void *adma_table;	/* ADMA descriptor table */
 	void *align_buffer;	/* Bounce buffer */
 
+	size_t adma_table_sz;	/* ADMA descriptor table size */
+	size_t align_buffer_sz;	/* Bounce buffer size */
+
 	dma_addr_t adma_addr;	/* Mapped ADMA descr. table */
 	dma_addr_t align_addr;	/* Mapped bounce buffer */
 
+	unsigned int desc_sz;	/* ADMA descriptor size */
+	unsigned int align_sz;	/* ADMA alignment */
+	unsigned int align_mask;	/* ADMA alignment mask */
+
 	struct tasklet_struct finish_tasklet;	/* Tasklet structures */
 
 	struct timer_list timer;	/* Timer for timeouts */
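
For reference, the stand-alone sketch below (not part of the patch) works through the size arithmetic that the new host fields encode, using the 32-bit ADMA2 values assigned in sdhci_add_host() above. The SDHCI_MAX_SEGS name is introduced here only for illustration; the patch itself uses the literal 128.

/* Sketch only: mirrors the 32-bit ADMA2 values the patch assigns in
 * sdhci_add_host(); not kernel code. */
#include <stdio.h>
#include <stddef.h>

#define SDHCI_MAX_SEGS	128			/* max scatter-gather segments (assumed name) */

int main(void)
{
	unsigned int desc_sz = 8;		/* one 32-bit ADMA2 descriptor is 8 bytes */
	unsigned int align_sz = 4;		/* data addresses must be 4-byte aligned */
	unsigned int align_mask = align_sz - 1;	/* 0x3, used to test/repair alignment */

	/* one transfer descriptor per segment, one possible alignment
	 * descriptor per segment, plus one trailing nop/end descriptor */
	size_t adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * desc_sz;

	/* one align_sz-byte bounce slot per segment */
	size_t align_buffer_sz = SDHCI_MAX_SEGS * align_sz;

	printf("adma_table_sz=%zu align_buffer_sz=%zu align_mask=%#x\n",
	       adma_table_sz, align_buffer_sz, align_mask);
	return 0;	/* prints: adma_table_sz=2056 align_buffer_sz=512 align_mask=0x3 */
}

Holding these sizes in struct sdhci_host rather than in a compile-time macro lets a host use a different descriptor size and alignment later (for example larger descriptors) without changing the descriptor table handling itself.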