diff options
| author | Ingo Molnar <mingo@kernel.org> | 2013-12-17 09:27:08 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-12-17 09:27:08 -0500 |
| commit | bb799d3b980eb803ca2da4a4eefbd9308f8d988a (patch) | |
| tree | 69fbe0cd6d47b23a50f5e1d87bf7489532fae149 /drivers/dma/cppi41.c | |
| parent | 919fc6e34831d1c2b58bfb5ae261dc3facc9b269 (diff) | |
| parent | 319e2e3f63c348a9b66db4667efa73178e18b17d (diff) | |
Merge tag 'v3.13-rc4' into core/locking
Merge Linux 3.13-rc4, to refresh this rather old tree with the latest fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/dma/cppi41.c')
| -rw-r--r-- | drivers/dma/cppi41.c | 178 |
1 file changed, 106 insertions, 72 deletions
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 7c82b92f9b16..c29dacff66fa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c | |||
| @@ -141,6 +141,9 @@ struct cppi41_dd { | |||
| 141 | const struct chan_queues *queues_rx; | 141 | const struct chan_queues *queues_rx; |
| 142 | const struct chan_queues *queues_tx; | 142 | const struct chan_queues *queues_tx; |
| 143 | struct chan_queues td_queue; | 143 | struct chan_queues td_queue; |
| 144 | |||
| 145 | /* context for suspend/resume */ | ||
| 146 | unsigned int dma_tdfdq; | ||
| 144 | }; | 147 | }; |
| 145 | 148 | ||
| 146 | #define FIST_COMPLETION_QUEUE 93 | 149 | #define FIST_COMPLETION_QUEUE 93 |
| @@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val) | |||
| 263 | return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); | 266 | return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); |
| 264 | } | 267 | } |
| 265 | 268 | ||
| 269 | static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) | ||
| 270 | { | ||
| 271 | u32 desc; | ||
| 272 | |||
| 273 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); | ||
| 274 | desc &= ~0x1f; | ||
| 275 | return desc; | ||
| 276 | } | ||
| 277 | |||
| 266 | static irqreturn_t cppi41_irq(int irq, void *data) | 278 | static irqreturn_t cppi41_irq(int irq, void *data) |
| 267 | { | 279 | { |
| 268 | struct cppi41_dd *cdd = data; | 280 | struct cppi41_dd *cdd = data; |
| @@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) | |||
| 300 | q_num = __fls(val); | 312 | q_num = __fls(val); |
| 301 | val &= ~(1 << q_num); | 313 | val &= ~(1 << q_num); |
| 302 | q_num += 32 * i; | 314 | q_num += 32 * i; |
| 303 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); | 315 | desc = cppi41_pop_desc(cdd, q_num); |
| 304 | desc &= ~0x1f; | ||
| 305 | c = desc_to_chan(cdd, desc); | 316 | c = desc_to_chan(cdd, desc); |
| 306 | if (WARN_ON(!c)) { | 317 | if (WARN_ON(!c)) { |
| 307 | pr_err("%s() q %d desc %08x\n", __func__, | 318 | pr_err("%s() q %d desc %08x\n", __func__, |
| @@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, | |||
| 353 | 364 | ||
| 354 | /* lock */ | 365 | /* lock */ |
| 355 | ret = dma_cookie_status(chan, cookie, txstate); | 366 | ret = dma_cookie_status(chan, cookie, txstate); |
| 356 | if (txstate && ret == DMA_SUCCESS) | 367 | if (txstate && ret == DMA_COMPLETE) |
| 357 | txstate->residue = c->residue; | 368 | txstate->residue = c->residue; |
| 358 | /* unlock */ | 369 | /* unlock */ |
| 359 | 370 | ||
| @@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d) | |||
| 517 | d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; | 528 | d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; |
| 518 | } | 529 | } |
| 519 | 530 | ||
| 520 | static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) | ||
| 521 | { | ||
| 522 | u32 desc; | ||
| 523 | |||
| 524 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); | ||
| 525 | desc &= ~0x1f; | ||
| 526 | return desc; | ||
| 527 | } | ||
| 528 | |||
| 529 | static int cppi41_tear_down_chan(struct cppi41_channel *c) | 531 | static int cppi41_tear_down_chan(struct cppi41_channel *c) |
| 530 | { | 532 | { |
| 531 | struct cppi41_dd *cdd = c->cdd; | 533 | struct cppi41_dd *cdd = c->cdd; |
| @@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) | |||
| 561 | c->td_retry = 100; | 563 | c->td_retry = 100; |
| 562 | } | 564 | } |
| 563 | 565 | ||
| 564 | if (!c->td_seen) { | 566 | if (!c->td_seen || !c->td_desc_seen) { |
| 565 | unsigned td_comp_queue; | ||
| 566 | 567 | ||
| 567 | if (c->is_tx) | 568 | desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); |
| 568 | td_comp_queue = cdd->td_queue.complete; | 569 | if (!desc_phys) |
| 569 | else | 570 | desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); |
| 570 | td_comp_queue = c->q_comp_num; | ||
| 571 | 571 | ||
| 572 | desc_phys = cppi41_pop_desc(cdd, td_comp_queue); | 572 | if (desc_phys == c->desc_phys) { |
| 573 | if (desc_phys) { | 573 | c->td_desc_seen = 1; |
| 574 | __iormb(); | 574 | |
| 575 | } else if (desc_phys == td_desc_phys) { | ||
| 576 | u32 pd0; | ||
| 575 | 577 | ||
| 576 | if (desc_phys == td_desc_phys) { | ||
| 577 | u32 pd0; | ||
| 578 | pd0 = td->pd0; | ||
| 579 | WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); | ||
| 580 | WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); | ||
| 581 | WARN_ON((pd0 & 0x1f) != c->port_num); | ||
| 582 | } else { | ||
| 583 | WARN_ON_ONCE(1); | ||
| 584 | } | ||
| 585 | c->td_seen = 1; | ||
| 586 | } | ||
| 587 | } | ||
| 588 | if (!c->td_desc_seen) { | ||
| 589 | desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); | ||
| 590 | if (desc_phys) { | ||
| 591 | __iormb(); | 578 | __iormb(); |
| 592 | WARN_ON(c->desc_phys != desc_phys); | 579 | pd0 = td->pd0; |
| 593 | c->td_desc_seen = 1; | 580 | WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); |
| 581 | WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); | ||
| 582 | WARN_ON((pd0 & 0x1f) != c->port_num); | ||
| 583 | c->td_seen = 1; | ||
| 584 | } else if (desc_phys) { | ||
| 585 | WARN_ON_ONCE(1); | ||
| 594 | } | 586 | } |
| 595 | } | 587 | } |
| 596 | c->td_retry--; | 588 | c->td_retry--; |
| @@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) | |||
| 609 | 601 | ||
| 610 | WARN_ON(!c->td_retry); | 602 | WARN_ON(!c->td_retry); |
| 611 | if (!c->td_desc_seen) { | 603 | if (!c->td_desc_seen) { |
| 612 | desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); | 604 | desc_phys = cppi41_pop_desc(cdd, c->q_num); |
| 613 | WARN_ON(!desc_phys); | 605 | WARN_ON(!desc_phys); |
| 614 | } | 606 | } |
| 615 | 607 | ||
| @@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd) | |||
| 674 | } | 666 | } |
| 675 | } | 667 | } |
| 676 | 668 | ||
| 677 | static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) | 669 | static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) |
| 678 | { | 670 | { |
| 679 | struct cppi41_channel *cchan; | 671 | struct cppi41_channel *cchan; |
| 680 | int i; | 672 | int i; |
| 681 | int ret; | 673 | int ret; |
| 682 | u32 n_chans; | 674 | u32 n_chans; |
| 683 | 675 | ||
| 684 | ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", | 676 | ret = of_property_read_u32(dev->of_node, "#dma-channels", |
| 685 | &n_chans); | 677 | &n_chans); |
| 686 | if (ret) | 678 | if (ret) |
| 687 | return ret; | 679 | return ret; |
| @@ -719,7 +711,7 @@ err: | |||
| 719 | return -ENOMEM; | 711 | return -ENOMEM; |
| 720 | } | 712 | } |
| 721 | 713 | ||
| 722 | static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | 714 | static void purge_descs(struct device *dev, struct cppi41_dd *cdd) |
| 723 | { | 715 | { |
| 724 | unsigned int mem_decs; | 716 | unsigned int mem_decs; |
| 725 | int i; | 717 | int i; |
| @@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 731 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); | 723 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); |
| 732 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); | 724 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); |
| 733 | 725 | ||
| 734 | dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, | 726 | dma_free_coherent(dev, mem_decs, cdd->cd, |
| 735 | cdd->descs_phys); | 727 | cdd->descs_phys); |
| 736 | } | 728 | } |
| 737 | } | 729 | } |
| @@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd) | |||
| 741 | cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); | 733 | cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); |
| 742 | } | 734 | } |
| 743 | 735 | ||
| 744 | static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) | 736 | static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) |
| 745 | { | 737 | { |
| 746 | disable_sched(cdd); | 738 | disable_sched(cdd); |
| 747 | 739 | ||
| 748 | purge_descs(pdev, cdd); | 740 | purge_descs(dev, cdd); |
| 749 | 741 | ||
| 750 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); | 742 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); |
| 751 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); | 743 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); |
| 752 | dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, | 744 | dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, |
| 753 | cdd->scratch_phys); | 745 | cdd->scratch_phys); |
| 754 | } | 746 | } |
| 755 | 747 | ||
| 756 | static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | 748 | static int init_descs(struct device *dev, struct cppi41_dd *cdd) |
| 757 | { | 749 | { |
| 758 | unsigned int desc_size; | 750 | unsigned int desc_size; |
| 759 | unsigned int mem_decs; | 751 | unsigned int mem_decs; |
| @@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 777 | reg |= ilog2(ALLOC_DECS_NUM) - 5; | 769 | reg |= ilog2(ALLOC_DECS_NUM) - 5; |
| 778 | 770 | ||
| 779 | BUILD_BUG_ON(DESCS_AREAS != 1); | 771 | BUILD_BUG_ON(DESCS_AREAS != 1); |
| 780 | cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, | 772 | cdd->cd = dma_alloc_coherent(dev, mem_decs, |
| 781 | &cdd->descs_phys, GFP_KERNEL); | 773 | &cdd->descs_phys, GFP_KERNEL); |
| 782 | if (!cdd->cd) | 774 | if (!cdd->cd) |
| 783 | return -ENOMEM; | 775 | return -ENOMEM; |
| @@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd) | |||
| 813 | cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); | 805 | cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); |
| 814 | } | 806 | } |
| 815 | 807 | ||
| 816 | static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | 808 | static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) |
| 817 | { | 809 | { |
| 818 | int ret; | 810 | int ret; |
| 819 | 811 | ||
| 820 | BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); | 812 | BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); |
| 821 | cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, | 813 | cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, |
| 822 | &cdd->scratch_phys, GFP_KERNEL); | 814 | &cdd->scratch_phys, GFP_KERNEL); |
| 823 | if (!cdd->qmgr_scratch) | 815 | if (!cdd->qmgr_scratch) |
| 824 | return -ENOMEM; | 816 | return -ENOMEM; |
| @@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 827 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); | 819 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); |
| 828 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); | 820 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); |
| 829 | 821 | ||
| 830 | ret = init_descs(pdev, cdd); | 822 | ret = init_descs(dev, cdd); |
| 831 | if (ret) | 823 | if (ret) |
| 832 | goto err_td; | 824 | goto err_td; |
| 833 | 825 | ||
| @@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 835 | init_sched(cdd); | 827 | init_sched(cdd); |
| 836 | return 0; | 828 | return 0; |
| 837 | err_td: | 829 | err_td: |
| 838 | deinit_cpii41(pdev, cdd); | 830 | deinit_cppi41(dev, cdd); |
| 839 | return ret; | 831 | return ret; |
| 840 | } | 832 | } |
| 841 | 833 | ||
| @@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = { | |||
| 914 | }; | 906 | }; |
| 915 | MODULE_DEVICE_TABLE(of, cppi41_dma_ids); | 907 | MODULE_DEVICE_TABLE(of, cppi41_dma_ids); |
| 916 | 908 | ||
| 917 | static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) | 909 | static const struct cppi_glue_infos *get_glue_info(struct device *dev) |
| 918 | { | 910 | { |
| 919 | const struct of_device_id *of_id; | 911 | const struct of_device_id *of_id; |
| 920 | 912 | ||
| 921 | of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); | 913 | of_id = of_match_node(cppi41_dma_ids, dev->of_node); |
| 922 | if (!of_id) | 914 | if (!of_id) |
| 923 | return NULL; | 915 | return NULL; |
| 924 | return of_id->data; | 916 | return of_id->data; |
| @@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) | |||
| 927 | static int cppi41_dma_probe(struct platform_device *pdev) | 919 | static int cppi41_dma_probe(struct platform_device *pdev) |
| 928 | { | 920 | { |
| 929 | struct cppi41_dd *cdd; | 921 | struct cppi41_dd *cdd; |
| 922 | struct device *dev = &pdev->dev; | ||
| 930 | const struct cppi_glue_infos *glue_info; | 923 | const struct cppi_glue_infos *glue_info; |
| 931 | int irq; | 924 | int irq; |
| 932 | int ret; | 925 | int ret; |
| 933 | 926 | ||
| 934 | glue_info = get_glue_info(pdev); | 927 | glue_info = get_glue_info(dev); |
| 935 | if (!glue_info) | 928 | if (!glue_info) |
| 936 | return -EINVAL; | 929 | return -EINVAL; |
| 937 | 930 | ||
| @@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
| 946 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; | 939 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; |
| 947 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; | 940 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; |
| 948 | cdd->ddev.device_control = cppi41_dma_control; | 941 | cdd->ddev.device_control = cppi41_dma_control; |
| 949 | cdd->ddev.dev = &pdev->dev; | 942 | cdd->ddev.dev = dev; |
| 950 | INIT_LIST_HEAD(&cdd->ddev.channels); | 943 | INIT_LIST_HEAD(&cdd->ddev.channels); |
| 951 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; | 944 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; |
| 952 | 945 | ||
| 953 | cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); | 946 | cdd->usbss_mem = of_iomap(dev->of_node, 0); |
| 954 | cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); | 947 | cdd->ctrl_mem = of_iomap(dev->of_node, 1); |
| 955 | cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); | 948 | cdd->sched_mem = of_iomap(dev->of_node, 2); |
| 956 | cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); | 949 | cdd->qmgr_mem = of_iomap(dev->of_node, 3); |
| 957 | 950 | ||
| 958 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || | 951 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || |
| 959 | !cdd->qmgr_mem) { | 952 | !cdd->qmgr_mem) { |
| @@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
| 961 | goto err_remap; | 954 | goto err_remap; |
| 962 | } | 955 | } |
| 963 | 956 | ||
| 964 | pm_runtime_enable(&pdev->dev); | 957 | pm_runtime_enable(dev); |
| 965 | ret = pm_runtime_get_sync(&pdev->dev); | 958 | ret = pm_runtime_get_sync(dev); |
| 966 | if (ret) | 959 | if (ret < 0) |
| 967 | goto err_get_sync; | 960 | goto err_get_sync; |
| 968 | 961 | ||
| 969 | cdd->queues_rx = glue_info->queues_rx; | 962 | cdd->queues_rx = glue_info->queues_rx; |
| 970 | cdd->queues_tx = glue_info->queues_tx; | 963 | cdd->queues_tx = glue_info->queues_tx; |
| 971 | cdd->td_queue = glue_info->td_queue; | 964 | cdd->td_queue = glue_info->td_queue; |
| 972 | 965 | ||
| 973 | ret = init_cppi41(pdev, cdd); | 966 | ret = init_cppi41(dev, cdd); |
| 974 | if (ret) | 967 | if (ret) |
| 975 | goto err_init_cppi; | 968 | goto err_init_cppi; |
| 976 | 969 | ||
| 977 | ret = cppi41_add_chans(pdev, cdd); | 970 | ret = cppi41_add_chans(dev, cdd); |
| 978 | if (ret) | 971 | if (ret) |
| 979 | goto err_chans; | 972 | goto err_chans; |
| 980 | 973 | ||
| 981 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | 974 | irq = irq_of_parse_and_map(dev->of_node, 0); |
| 982 | if (!irq) | 975 | if (!irq) |
| 983 | goto err_irq; | 976 | goto err_irq; |
| 984 | 977 | ||
| 985 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); | 978 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); |
| 986 | 979 | ||
| 987 | ret = request_irq(irq, glue_info->isr, IRQF_SHARED, | 980 | ret = request_irq(irq, glue_info->isr, IRQF_SHARED, |
| 988 | dev_name(&pdev->dev), cdd); | 981 | dev_name(dev), cdd); |
| 989 | if (ret) | 982 | if (ret) |
| 990 | goto err_irq; | 983 | goto err_irq; |
| 991 | cdd->irq = irq; | 984 | cdd->irq = irq; |
| @@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
| 994 | if (ret) | 987 | if (ret) |
| 995 | goto err_dma_reg; | 988 | goto err_dma_reg; |
| 996 | 989 | ||
| 997 | ret = of_dma_controller_register(pdev->dev.of_node, | 990 | ret = of_dma_controller_register(dev->of_node, |
| 998 | cppi41_dma_xlate, &cpp41_dma_info); | 991 | cppi41_dma_xlate, &cpp41_dma_info); |
| 999 | if (ret) | 992 | if (ret) |
| 1000 | goto err_of; | 993 | goto err_of; |
| @@ -1009,11 +1002,11 @@ err_irq: | |||
| 1009 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1002 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
| 1010 | cleanup_chans(cdd); | 1003 | cleanup_chans(cdd); |
| 1011 | err_chans: | 1004 | err_chans: |
| 1012 | deinit_cpii41(pdev, cdd); | 1005 | deinit_cppi41(dev, cdd); |
| 1013 | err_init_cppi: | 1006 | err_init_cppi: |
| 1014 | pm_runtime_put(&pdev->dev); | 1007 | pm_runtime_put(dev); |
| 1015 | err_get_sync: | 1008 | err_get_sync: |
| 1016 | pm_runtime_disable(&pdev->dev); | 1009 | pm_runtime_disable(dev); |
| 1017 | iounmap(cdd->usbss_mem); | 1010 | iounmap(cdd->usbss_mem); |
| 1018 | iounmap(cdd->ctrl_mem); | 1011 | iounmap(cdd->ctrl_mem); |
| 1019 | iounmap(cdd->sched_mem); | 1012 | iounmap(cdd->sched_mem); |
| @@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
| 1033 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1026 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
| 1034 | free_irq(cdd->irq, cdd); | 1027 | free_irq(cdd->irq, cdd); |
| 1035 | cleanup_chans(cdd); | 1028 | cleanup_chans(cdd); |
| 1036 | deinit_cpii41(pdev, cdd); | 1029 | deinit_cppi41(&pdev->dev, cdd); |
| 1037 | iounmap(cdd->usbss_mem); | 1030 | iounmap(cdd->usbss_mem); |
| 1038 | iounmap(cdd->ctrl_mem); | 1031 | iounmap(cdd->ctrl_mem); |
| 1039 | iounmap(cdd->sched_mem); | 1032 | iounmap(cdd->sched_mem); |
| @@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
| 1044 | return 0; | 1037 | return 0; |
| 1045 | } | 1038 | } |
| 1046 | 1039 | ||
| 1040 | #ifdef CONFIG_PM_SLEEP | ||
| 1041 | static int cppi41_suspend(struct device *dev) | ||
| 1042 | { | ||
| 1043 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | ||
| 1044 | |||
| 1045 | cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); | ||
| 1046 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | ||
| 1047 | disable_sched(cdd); | ||
| 1048 | |||
| 1049 | return 0; | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | static int cppi41_resume(struct device *dev) | ||
| 1053 | { | ||
| 1054 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | ||
| 1055 | struct cppi41_channel *c; | ||
| 1056 | int i; | ||
| 1057 | |||
| 1058 | for (i = 0; i < DESCS_AREAS; i++) | ||
| 1059 | cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); | ||
| 1060 | |||
| 1061 | list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) | ||
| 1062 | if (!c->is_tx) | ||
| 1063 | cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); | ||
| 1064 | |||
| 1065 | init_sched(cdd); | ||
| 1066 | |||
| 1067 | cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); | ||
| 1068 | cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); | ||
| 1069 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); | ||
| 1070 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); | ||
| 1071 | |||
| 1072 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); | ||
| 1073 | |||
| 1074 | return 0; | ||
| 1075 | } | ||
| 1076 | #endif | ||
| 1077 | |||
| 1078 | static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); | ||
| 1079 | |||
| 1047 | static struct platform_driver cpp41_dma_driver = { | 1080 | static struct platform_driver cpp41_dma_driver = { |
| 1048 | .probe = cppi41_dma_probe, | 1081 | .probe = cppi41_dma_probe, |
| 1049 | .remove = cppi41_dma_remove, | 1082 | .remove = cppi41_dma_remove, |
| 1050 | .driver = { | 1083 | .driver = { |
| 1051 | .name = "cppi41-dma-engine", | 1084 | .name = "cppi41-dma-engine", |
| 1052 | .owner = THIS_MODULE, | 1085 | .owner = THIS_MODULE, |
| 1086 | .pm = &cppi41_pm_ops, | ||
| 1053 | .of_match_table = of_match_ptr(cppi41_dma_ids), | 1087 | .of_match_table = of_match_ptr(cppi41_dma_ids), |
| 1054 | }, | 1088 | }, |
| 1055 | }; | 1089 | }; |
