aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/ste_dma40.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-07-02 17:33:21 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-07-02 17:33:21 -0400
commit0bf6a210a43f7118d858806200127e421649fc4e (patch)
tree9a17d88ebd1b9bc693fba7f39c12123dec96e930 /drivers/dma/ste_dma40.c
parentee1a8d402e7e204d57fb108aa40003b6d1633036 (diff)
parent5c913a9a9772f4b434aaea7328836419287b5d1c (diff)
Merge tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver specific changes from Arnd Bergmann: "These changes are all driver specific and cross over between arm-soc contents and some other subsystem, in these cases cpufreq, crypto, dma, pinctrl, mailbox and usb, and the subsystem owners agreed to have these changes merged through arm-soc. As we proceed to untangle the dependencies between platform code and driver code, the amount of changes in this category is fortunately shrinking, for 3.11 we have 16 branches here and 101 non-merge changesets, the majority of which are for the stedma40 dma engine driver used in the ux500 platform. Cleaning up that code touches multiple subsystems, but gets rid of the dependency in the end. The mailbox code moved out from mach-omap2 to drivers/mailbox is an intermediate step and is still omap specific at the moment. Patches exist to generalize the subsystem and add other drivers with the same API, but those did not make it for 3.11." * tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (101 commits) crypto: ux500: use dmaengine_submit API crypto: ux500: use dmaengine_prep_slave_sg API crypto: ux500: use dmaengine_device_control API crypto: ux500/crypt: add missing __iomem qualifiers crypto: ux500/hash: add missing static qualifiers crypto: ux500/hash: use readl on iomem addresses dmaengine: ste_dma40: Declare memcpy config as static ARM: ux500: Remove mop500_snowball_ethernet_clock_enable() ARM: ux500: Correct the EN_3v3 regulator's on/off GPIO ARM: ux500: Provide a AB8500 GPIO Device Tree node gpio: rcar: fix gpio_rcar_of_table gpio-rcar: Remove #ifdef CONFIG_OF around OF-specific sections gpio-rcar: Reference core gpio documentation in the DT bindings clk: exynos5250: Add enum entries for divider clock of i2s1 and i2s2 ARM: dts: Update Samsung I2S documentation ARM: dts: add clock provider information for i2s controllers in Exynos5250 ARM: dts: add Exynos audio subsystem clock controller node clk: samsung: register audio subsystem 
clocks using common clock framework ARM: dts: use #include for all device trees for Samsung pinctrl: s3c24xx: use correct header for chained_irq functions ...
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--drivers/dma/ste_dma40.c533
1 file changed, 310 insertions, 223 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 71bf4ec300ea..5ab5880d5c90 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -17,6 +17,8 @@
17#include <linux/pm.h> 17#include <linux/pm.h>
18#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/of.h>
21#include <linux/of_dma.h>
20#include <linux/amba/bus.h> 22#include <linux/amba/bus.h>
21#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
22#include <linux/platform_data/dma-ste-dma40.h> 24#include <linux/platform_data/dma-ste-dma40.h>
@@ -45,15 +47,63 @@
45#define D40_LCLA_LINK_PER_EVENT_GRP 128 47#define D40_LCLA_LINK_PER_EVENT_GRP 128
46#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP 48#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
47 49
50/* Max number of logical channels per physical channel */
51#define D40_MAX_LOG_CHAN_PER_PHY 32
52
48/* Attempts before giving up to trying to get pages that are aligned */ 53/* Attempts before giving up to trying to get pages that are aligned */
49#define MAX_LCLA_ALLOC_ATTEMPTS 256 54#define MAX_LCLA_ALLOC_ATTEMPTS 256
50 55
51/* Bit markings for allocation map */ 56/* Bit markings for allocation map */
52#define D40_ALLOC_FREE (1 << 31) 57#define D40_ALLOC_FREE BIT(31)
53#define D40_ALLOC_PHY (1 << 30) 58#define D40_ALLOC_PHY BIT(30)
54#define D40_ALLOC_LOG_FREE 0 59#define D40_ALLOC_LOG_FREE 0
55 60
56#define MAX(a, b) (((a) < (b)) ? (b) : (a)) 61#define D40_MEMCPY_MAX_CHANS 8
62
63/* Reserved event lines for memcpy only. */
64#define DB8500_DMA_MEMCPY_EV_0 51
65#define DB8500_DMA_MEMCPY_EV_1 56
66#define DB8500_DMA_MEMCPY_EV_2 57
67#define DB8500_DMA_MEMCPY_EV_3 58
68#define DB8500_DMA_MEMCPY_EV_4 59
69#define DB8500_DMA_MEMCPY_EV_5 60
70
71static int dma40_memcpy_channels[] = {
72 DB8500_DMA_MEMCPY_EV_0,
73 DB8500_DMA_MEMCPY_EV_1,
74 DB8500_DMA_MEMCPY_EV_2,
75 DB8500_DMA_MEMCPY_EV_3,
76 DB8500_DMA_MEMCPY_EV_4,
77 DB8500_DMA_MEMCPY_EV_5,
78};
79
80/* Default configuration for physical memcpy */
81static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
82 .mode = STEDMA40_MODE_PHYSICAL,
83 .dir = DMA_MEM_TO_MEM,
84
85 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
86 .src_info.psize = STEDMA40_PSIZE_PHY_1,
87 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
88
89 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
90 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
91 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
92};
93
94/* Default configuration for logical memcpy */
95static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
96 .mode = STEDMA40_MODE_LOGICAL,
97 .dir = DMA_MEM_TO_MEM,
98
99 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
100 .src_info.psize = STEDMA40_PSIZE_LOG_1,
101 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
102
103 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
104 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
105 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
106};
57 107
58/** 108/**
59 * enum 40_command - The different commands and/or statuses. 109 * enum 40_command - The different commands and/or statuses.
@@ -171,6 +221,9 @@ static u32 d40_backup_regs_chan[] = {
171 D40_CHAN_REG_SDLNK, 221 D40_CHAN_REG_SDLNK,
172}; 222};
173 223
224#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
225 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
226
174/** 227/**
175 * struct d40_interrupt_lookup - lookup table for interrupt handler 228 * struct d40_interrupt_lookup - lookup table for interrupt handler
176 * 229 *
@@ -471,6 +524,8 @@ struct d40_gen_dmac {
471 * @phy_start: Physical memory start of the DMA registers. 524 * @phy_start: Physical memory start of the DMA registers.
472 * @phy_size: Size of the DMA register map. 525 * @phy_size: Size of the DMA register map.
473 * @irq: The IRQ number. 526 * @irq: The IRQ number.
527 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
528 * transfers).
474 * @num_phy_chans: The number of physical channels. Read from HW. This 529 * @num_phy_chans: The number of physical channels. Read from HW. This
475 * is the number of available channels for this driver, not counting "Secure 530 * is the number of available channels for this driver, not counting "Secure
476 * mode" allocated physical channels. 531 * mode" allocated physical channels.
@@ -514,6 +569,7 @@ struct d40_base {
514 phys_addr_t phy_start; 569 phys_addr_t phy_start;
515 resource_size_t phy_size; 570 resource_size_t phy_size;
516 int irq; 571 int irq;
572 int num_memcpy_chans;
517 int num_phy_chans; 573 int num_phy_chans;
518 int num_log_chans; 574 int num_log_chans;
519 struct device_dma_parameters dma_parms; 575 struct device_dma_parameters dma_parms;
@@ -534,7 +590,7 @@ struct d40_base {
534 resource_size_t lcpa_size; 590 resource_size_t lcpa_size;
535 struct kmem_cache *desc_slab; 591 struct kmem_cache *desc_slab;
536 u32 reg_val_backup[BACKUP_REGS_SZ]; 592 u32 reg_val_backup[BACKUP_REGS_SZ];
537 u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)]; 593 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
538 u32 *reg_val_backup_chan; 594 u32 *reg_val_backup_chan;
539 u16 gcc_pwr_off_mask; 595 u16 gcc_pwr_off_mask;
540 bool initialized; 596 bool initialized;
@@ -792,7 +848,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
792 * that uses linked lists. 848 * that uses linked lists.
793 */ 849 */
794 if (!(chan->phy_chan->use_soft_lli && 850 if (!(chan->phy_chan->use_soft_lli &&
795 chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)) 851 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
796 curr_lcla = d40_lcla_alloc_one(chan, desc); 852 curr_lcla = d40_lcla_alloc_one(chan, desc);
797 853
798 first_lcla = curr_lcla; 854 first_lcla = curr_lcla;
@@ -954,20 +1010,21 @@ static int d40_psize_2_burst_size(bool is_log, int psize)
954 1010
955/* 1011/*
956 * The dma only supports transmitting packages up to 1012 * The dma only supports transmitting packages up to
957 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of 1013 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes.
958 * dma elements required to send the entire sg list 1014 *
1015 * Calculate the total number of dma elements required to send the entire sg list.
959 */ 1016 */
960static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) 1017static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
961{ 1018{
962 int dmalen; 1019 int dmalen;
963 u32 max_w = max(data_width1, data_width2); 1020 u32 max_w = max(data_width1, data_width2);
964 u32 min_w = min(data_width1, data_width2); 1021 u32 min_w = min(data_width1, data_width2);
965 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); 1022 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
966 1023
967 if (seg_max > STEDMA40_MAX_SEG_SIZE) 1024 if (seg_max > STEDMA40_MAX_SEG_SIZE)
968 seg_max -= (1 << max_w); 1025 seg_max -= max_w;
969 1026
970 if (!IS_ALIGNED(size, 1 << max_w)) 1027 if (!IS_ALIGNED(size, max_w))
971 return -EINVAL; 1028 return -EINVAL;
972 1029
973 if (size <= seg_max) 1030 if (size <= seg_max)
@@ -1257,21 +1314,17 @@ static void __d40_config_set_event(struct d40_chan *d40c,
1257static void d40_config_set_event(struct d40_chan *d40c, 1314static void d40_config_set_event(struct d40_chan *d40c,
1258 enum d40_events event_type) 1315 enum d40_events event_type)
1259{ 1316{
1260 /* Enable event line connected to device (or memcpy) */ 1317 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1261 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1262 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
1263 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1264 1318
1319 /* Enable event line connected to device (or memcpy) */
1320 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1321 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1265 __d40_config_set_event(d40c, event_type, event, 1322 __d40_config_set_event(d40c, event_type, event,
1266 D40_CHAN_REG_SSLNK); 1323 D40_CHAN_REG_SSLNK);
1267 }
1268
1269 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
1270 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1271 1324
1325 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1272 __d40_config_set_event(d40c, event_type, event, 1326 __d40_config_set_event(d40c, event_type, event,
1273 D40_CHAN_REG_SDLNK); 1327 D40_CHAN_REG_SDLNK);
1274 }
1275} 1328}
1276 1329
1277static u32 d40_chan_has_events(struct d40_chan *d40c) 1330static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1417,7 +1470,7 @@ static u32 d40_residue(struct d40_chan *d40c)
1417 >> D40_SREG_ELEM_PHY_ECNT_POS; 1470 >> D40_SREG_ELEM_PHY_ECNT_POS;
1418 } 1471 }
1419 1472
1420 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 1473 return num_elt * d40c->dma_cfg.dst_info.data_width;
1421} 1474}
1422 1475
1423static bool d40_tx_is_linked(struct d40_chan *d40c) 1476static bool d40_tx_is_linked(struct d40_chan *d40c)
@@ -1693,7 +1746,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
1693 } 1746 }
1694 1747
1695 /* ACK interrupt */ 1748 /* ACK interrupt */
1696 writel(1 << idx, base->virtbase + il[row].clr); 1749 writel(BIT(idx), base->virtbase + il[row].clr);
1697 1750
1698 spin_lock(&d40c->lock); 1751 spin_lock(&d40c->lock);
1699 1752
@@ -1715,8 +1768,6 @@ static int d40_validate_conf(struct d40_chan *d40c,
1715 struct stedma40_chan_cfg *conf) 1768 struct stedma40_chan_cfg *conf)
1716{ 1769{
1717 int res = 0; 1770 int res = 0;
1718 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1719 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1720 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1771 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1721 1772
1722 if (!conf->dir) { 1773 if (!conf->dir) {
@@ -1724,48 +1775,14 @@ static int d40_validate_conf(struct d40_chan *d40c,
1724 res = -EINVAL; 1775 res = -EINVAL;
1725 } 1776 }
1726 1777
1727 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && 1778 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1728 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && 1779 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1729 d40c->runtime_addr == 0) { 1780 (conf->dev_type < 0)) {
1730 1781 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1731 chan_err(d40c, "Invalid TX channel address (%d)\n",
1732 conf->dst_dev_type);
1733 res = -EINVAL;
1734 }
1735
1736 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1737 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1738 d40c->runtime_addr == 0) {
1739 chan_err(d40c, "Invalid RX channel address (%d)\n",
1740 conf->src_dev_type);
1741 res = -EINVAL;
1742 }
1743
1744 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1745 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1746 chan_err(d40c, "Invalid dst\n");
1747 res = -EINVAL; 1782 res = -EINVAL;
1748 } 1783 }
1749 1784
1750 if (conf->dir == STEDMA40_PERIPH_TO_MEM && 1785 if (conf->dir == DMA_DEV_TO_DEV) {
1751 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1752 chan_err(d40c, "Invalid src\n");
1753 res = -EINVAL;
1754 }
1755
1756 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1757 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1758 chan_err(d40c, "No event line\n");
1759 res = -EINVAL;
1760 }
1761
1762 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1763 (src_event_group != dst_event_group)) {
1764 chan_err(d40c, "Invalid event group\n");
1765 res = -EINVAL;
1766 }
1767
1768 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1769 /* 1786 /*
1770 * DMAC HW supports it. Will be added to this driver, 1787 * DMAC HW supports it. Will be added to this driver,
1771 * in case any dma client requires it. 1788 * in case any dma client requires it.
@@ -1775,9 +1792,9 @@ static int d40_validate_conf(struct d40_chan *d40c,
1775 } 1792 }
1776 1793
1777 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * 1794 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1778 (1 << conf->src_info.data_width) != 1795 conf->src_info.data_width !=
1779 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * 1796 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1780 (1 << conf->dst_info.data_width)) { 1797 conf->dst_info.data_width) {
1781 /* 1798 /*
1782 * The DMAC hardware only supports 1799 * The DMAC hardware only supports
1783 * src (burst x width) == dst (burst x width) 1800 * src (burst x width) == dst (burst x width)
@@ -1819,8 +1836,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1819 if (phy->allocated_src == D40_ALLOC_FREE) 1836 if (phy->allocated_src == D40_ALLOC_FREE)
1820 phy->allocated_src = D40_ALLOC_LOG_FREE; 1837 phy->allocated_src = D40_ALLOC_LOG_FREE;
1821 1838
1822 if (!(phy->allocated_src & (1 << log_event_line))) { 1839 if (!(phy->allocated_src & BIT(log_event_line))) {
1823 phy->allocated_src |= 1 << log_event_line; 1840 phy->allocated_src |= BIT(log_event_line);
1824 goto found; 1841 goto found;
1825 } else 1842 } else
1826 goto not_found; 1843 goto not_found;
@@ -1831,8 +1848,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1831 if (phy->allocated_dst == D40_ALLOC_FREE) 1848 if (phy->allocated_dst == D40_ALLOC_FREE)
1832 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1849 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1833 1850
1834 if (!(phy->allocated_dst & (1 << log_event_line))) { 1851 if (!(phy->allocated_dst & BIT(log_event_line))) {
1835 phy->allocated_dst |= 1 << log_event_line; 1852 phy->allocated_dst |= BIT(log_event_line);
1836 goto found; 1853 goto found;
1837 } else 1854 } else
1838 goto not_found; 1855 goto not_found;
@@ -1862,11 +1879,11 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1862 1879
1863 /* Logical channel */ 1880 /* Logical channel */
1864 if (is_src) { 1881 if (is_src) {
1865 phy->allocated_src &= ~(1 << log_event_line); 1882 phy->allocated_src &= ~BIT(log_event_line);
1866 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1883 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1867 phy->allocated_src = D40_ALLOC_FREE; 1884 phy->allocated_src = D40_ALLOC_FREE;
1868 } else { 1885 } else {
1869 phy->allocated_dst &= ~(1 << log_event_line); 1886 phy->allocated_dst &= ~BIT(log_event_line);
1870 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1887 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1871 phy->allocated_dst = D40_ALLOC_FREE; 1888 phy->allocated_dst = D40_ALLOC_FREE;
1872 } 1889 }
@@ -1882,7 +1899,7 @@ out:
1882 1899
1883static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) 1900static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1884{ 1901{
1885 int dev_type; 1902 int dev_type = d40c->dma_cfg.dev_type;
1886 int event_group; 1903 int event_group;
1887 int event_line; 1904 int event_line;
1888 struct d40_phy_res *phys; 1905 struct d40_phy_res *phys;
@@ -1896,14 +1913,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1896 phys = d40c->base->phy_res; 1913 phys = d40c->base->phy_res;
1897 num_phy_chans = d40c->base->num_phy_chans; 1914 num_phy_chans = d40c->base->num_phy_chans;
1898 1915
1899 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1916 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1900 dev_type = d40c->dma_cfg.src_dev_type;
1901 log_num = 2 * dev_type; 1917 log_num = 2 * dev_type;
1902 is_src = true; 1918 is_src = true;
1903 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1919 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1904 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1920 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1905 /* dst event lines are used for logical memcpy */ 1921 /* dst event lines are used for logical memcpy */
1906 dev_type = d40c->dma_cfg.dst_dev_type;
1907 log_num = 2 * dev_type + 1; 1922 log_num = 2 * dev_type + 1;
1908 is_src = false; 1923 is_src = false;
1909 } else 1924 } else
@@ -1913,7 +1928,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1913 event_line = D40_TYPE_TO_EVENT(dev_type); 1928 event_line = D40_TYPE_TO_EVENT(dev_type);
1914 1929
1915 if (!is_log) { 1930 if (!is_log) {
1916 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1931 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1917 /* Find physical half channel */ 1932 /* Find physical half channel */
1918 if (d40c->dma_cfg.use_fixed_channel) { 1933 if (d40c->dma_cfg.use_fixed_channel) {
1919 i = d40c->dma_cfg.phy_channel; 1934 i = d40c->dma_cfg.phy_channel;
@@ -2014,14 +2029,23 @@ static int d40_config_memcpy(struct d40_chan *d40c)
2014 dma_cap_mask_t cap = d40c->chan.device->cap_mask; 2029 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
2015 2030
2016 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 2031 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
2017 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; 2032 d40c->dma_cfg = dma40_memcpy_conf_log;
2018 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; 2033 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
2019 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> 2034
2020 memcpy[d40c->chan.chan_id]; 2035 d40_log_cfg(&d40c->dma_cfg,
2036 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2021 2037
2022 } else if (dma_has_cap(DMA_MEMCPY, cap) && 2038 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
2023 dma_has_cap(DMA_SLAVE, cap)) { 2039 dma_has_cap(DMA_SLAVE, cap)) {
2024 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 2040 d40c->dma_cfg = dma40_memcpy_conf_phy;
2041
2042 /* Generate interrupt at end of transfer or relink. */
2043 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
2044
2045 /* Generate interrupt on error. */
2046 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2047 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2048
2025 } else { 2049 } else {
2026 chan_err(d40c, "No memcpy\n"); 2050 chan_err(d40c, "No memcpy\n");
2027 return -EINVAL; 2051 return -EINVAL;
@@ -2034,7 +2058,7 @@ static int d40_free_dma(struct d40_chan *d40c)
2034{ 2058{
2035 2059
2036 int res = 0; 2060 int res = 0;
2037 u32 event; 2061 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2038 struct d40_phy_res *phy = d40c->phy_chan; 2062 struct d40_phy_res *phy = d40c->phy_chan;
2039 bool is_src; 2063 bool is_src;
2040 2064
@@ -2052,14 +2076,12 @@ static int d40_free_dma(struct d40_chan *d40c)
2052 return -EINVAL; 2076 return -EINVAL;
2053 } 2077 }
2054 2078
2055 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 2079 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2056 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 2080 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2057 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
2058 is_src = false; 2081 is_src = false;
2059 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 2082 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2060 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
2061 is_src = true; 2083 is_src = true;
2062 } else { 2084 else {
2063 chan_err(d40c, "Unknown direction\n"); 2085 chan_err(d40c, "Unknown direction\n");
2064 return -EINVAL; 2086 return -EINVAL;
2065 } 2087 }
@@ -2100,7 +2122,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
2100 unsigned long flags; 2122 unsigned long flags;
2101 void __iomem *active_reg; 2123 void __iomem *active_reg;
2102 u32 status; 2124 u32 status;
2103 u32 event; 2125 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2104 2126
2105 spin_lock_irqsave(&d40c->lock, flags); 2127 spin_lock_irqsave(&d40c->lock, flags);
2106 2128
@@ -2119,12 +2141,10 @@ static bool d40_is_paused(struct d40_chan *d40c)
2119 goto _exit; 2141 goto _exit;
2120 } 2142 }
2121 2143
2122 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 2144 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2123 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 2145 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2124 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
2125 status = readl(chanbase + D40_CHAN_REG_SDLNK); 2146 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2126 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 2147 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2127 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
2128 status = readl(chanbase + D40_CHAN_REG_SSLNK); 2148 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2129 } else { 2149 } else {
2130 chan_err(d40c, "Unknown direction\n"); 2150 chan_err(d40c, "Unknown direction\n");
@@ -2255,24 +2275,6 @@ err:
2255 return NULL; 2275 return NULL;
2256} 2276}
2257 2277
2258static dma_addr_t
2259d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
2260{
2261 struct stedma40_platform_data *plat = chan->base->plat_data;
2262 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2263 dma_addr_t addr = 0;
2264
2265 if (chan->runtime_addr)
2266 return chan->runtime_addr;
2267
2268 if (direction == DMA_DEV_TO_MEM)
2269 addr = plat->dev_rx[cfg->src_dev_type];
2270 else if (direction == DMA_MEM_TO_DEV)
2271 addr = plat->dev_tx[cfg->dst_dev_type];
2272
2273 return addr;
2274}
2275
2276static struct dma_async_tx_descriptor * 2278static struct dma_async_tx_descriptor *
2277d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 2279d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2278 struct scatterlist *sg_dst, unsigned int sg_len, 2280 struct scatterlist *sg_dst, unsigned int sg_len,
@@ -2299,14 +2301,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2299 if (sg_next(&sg_src[sg_len - 1]) == sg_src) 2301 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2300 desc->cyclic = true; 2302 desc->cyclic = true;
2301 2303
2302 if (direction != DMA_TRANS_NONE) { 2304 if (direction == DMA_DEV_TO_MEM)
2303 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); 2305 src_dev_addr = chan->runtime_addr;
2304 2306 else if (direction == DMA_MEM_TO_DEV)
2305 if (direction == DMA_DEV_TO_MEM) 2307 dst_dev_addr = chan->runtime_addr;
2306 src_dev_addr = dev_addr;
2307 else if (direction == DMA_MEM_TO_DEV)
2308 dst_dev_addr = dev_addr;
2309 }
2310 2308
2311 if (chan_is_logical(chan)) 2309 if (chan_is_logical(chan))
2312 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, 2310 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
@@ -2366,7 +2364,7 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2366 u32 rtreg; 2364 u32 rtreg;
2367 u32 event = D40_TYPE_TO_EVENT(dev_type); 2365 u32 event = D40_TYPE_TO_EVENT(dev_type);
2368 u32 group = D40_TYPE_TO_GROUP(dev_type); 2366 u32 group = D40_TYPE_TO_GROUP(dev_type);
2369 u32 bit = 1 << event; 2367 u32 bit = BIT(event);
2370 u32 prioreg; 2368 u32 prioreg;
2371 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; 2369 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2372 2370
@@ -2397,13 +2395,57 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
2397 if (d40c->base->rev < 3) 2395 if (d40c->base->rev < 3)
2398 return; 2396 return;
2399 2397
2400 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 2398 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2401 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 2399 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2402 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); 2400 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2403 2401
2404 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || 2402 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2405 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 2403 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2406 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); 2404 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2405}
2406
2407#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2408#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2409#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2410#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2411
2412static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2413 struct of_dma *ofdma)
2414{
2415 struct stedma40_chan_cfg cfg;
2416 dma_cap_mask_t cap;
2417 u32 flags;
2418
2419 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2420
2421 dma_cap_zero(cap);
2422 dma_cap_set(DMA_SLAVE, cap);
2423
2424 cfg.dev_type = dma_spec->args[0];
2425 flags = dma_spec->args[2];
2426
2427 switch (D40_DT_FLAGS_MODE(flags)) {
2428 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2429 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2430 }
2431
2432 switch (D40_DT_FLAGS_DIR(flags)) {
2433 case 0:
2434 cfg.dir = DMA_MEM_TO_DEV;
2435 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2436 break;
2437 case 1:
2438 cfg.dir = DMA_DEV_TO_MEM;
2439 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2440 break;
2441 }
2442
2443 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2444 cfg.phy_channel = dma_spec->args[1];
2445 cfg.use_fixed_channel = true;
2446 }
2447
2448 return dma_request_channel(cap, stedma40_filter, &cfg);
2407} 2449}
2408 2450
2409/* DMA ENGINE functions */ 2451/* DMA ENGINE functions */
@@ -2435,23 +2477,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2435 } 2477 }
2436 2478
2437 pm_runtime_get_sync(d40c->base->dev); 2479 pm_runtime_get_sync(d40c->base->dev);
2438 /* Fill in basic CFG register values */
2439 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2440 &d40c->dst_def_cfg, chan_is_logical(d40c));
2441 2480
2442 d40_set_prio_realtime(d40c); 2481 d40_set_prio_realtime(d40c);
2443 2482
2444 if (chan_is_logical(d40c)) { 2483 if (chan_is_logical(d40c)) {
2445 d40_log_cfg(&d40c->dma_cfg, 2484 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2446 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2447
2448 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2449 d40c->lcpa = d40c->base->lcpa_base + 2485 d40c->lcpa = d40c->base->lcpa_base +
2450 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; 2486 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2451 else 2487 else
2452 d40c->lcpa = d40c->base->lcpa_base + 2488 d40c->lcpa = d40c->base->lcpa_base +
2453 d40c->dma_cfg.dst_dev_type * 2489 d40c->dma_cfg.dev_type *
2454 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 2490 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2491
2492 /* Unmask the Global Interrupt Mask. */
2493 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2494 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2455 } 2495 }
2456 2496
2457 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", 2497 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2641,33 +2681,10 @@ static void d40_terminate_all(struct dma_chan *chan)
2641static int 2681static int
2642dma40_config_to_halfchannel(struct d40_chan *d40c, 2682dma40_config_to_halfchannel(struct d40_chan *d40c,
2643 struct stedma40_half_channel_info *info, 2683 struct stedma40_half_channel_info *info,
2644 enum dma_slave_buswidth width,
2645 u32 maxburst) 2684 u32 maxburst)
2646{ 2685{
2647 enum stedma40_periph_data_width addr_width;
2648 int psize; 2686 int psize;
2649 2687
2650 switch (width) {
2651 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2652 addr_width = STEDMA40_BYTE_WIDTH;
2653 break;
2654 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2655 addr_width = STEDMA40_HALFWORD_WIDTH;
2656 break;
2657 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2658 addr_width = STEDMA40_WORD_WIDTH;
2659 break;
2660 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2661 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2662 break;
2663 default:
2664 dev_err(d40c->base->dev,
2665 "illegal peripheral address width "
2666 "requested (%d)\n",
2667 width);
2668 return -EINVAL;
2669 }
2670
2671 if (chan_is_logical(d40c)) { 2688 if (chan_is_logical(d40c)) {
2672 if (maxburst >= 16) 2689 if (maxburst >= 16)
2673 psize = STEDMA40_PSIZE_LOG_16; 2690 psize = STEDMA40_PSIZE_LOG_16;
@@ -2688,7 +2705,6 @@ dma40_config_to_halfchannel(struct d40_chan *d40c,
2688 psize = STEDMA40_PSIZE_PHY_1; 2705 psize = STEDMA40_PSIZE_PHY_1;
2689 } 2706 }
2690 2707
2691 info->data_width = addr_width;
2692 info->psize = psize; 2708 info->psize = psize;
2693 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2709 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2694 2710
@@ -2712,21 +2728,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2712 dst_maxburst = config->dst_maxburst; 2728 dst_maxburst = config->dst_maxburst;
2713 2729
2714 if (config->direction == DMA_DEV_TO_MEM) { 2730 if (config->direction == DMA_DEV_TO_MEM) {
2715 dma_addr_t dev_addr_rx =
2716 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2717
2718 config_addr = config->src_addr; 2731 config_addr = config->src_addr;
2719 if (dev_addr_rx) 2732
2720 dev_dbg(d40c->base->dev, 2733 if (cfg->dir != DMA_DEV_TO_MEM)
2721 "channel has a pre-wired RX address %08x "
2722 "overriding with %08x\n",
2723 dev_addr_rx, config_addr);
2724 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2725 dev_dbg(d40c->base->dev, 2734 dev_dbg(d40c->base->dev,
2726 "channel was not configured for peripheral " 2735 "channel was not configured for peripheral "
2727 "to memory transfer (%d) overriding\n", 2736 "to memory transfer (%d) overriding\n",
2728 cfg->dir); 2737 cfg->dir);
2729 cfg->dir = STEDMA40_PERIPH_TO_MEM; 2738 cfg->dir = DMA_DEV_TO_MEM;
2730 2739
2731 /* Configure the memory side */ 2740 /* Configure the memory side */
2732 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2741 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2735,21 +2744,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2735 dst_maxburst = src_maxburst; 2744 dst_maxburst = src_maxburst;
2736 2745
2737 } else if (config->direction == DMA_MEM_TO_DEV) { 2746 } else if (config->direction == DMA_MEM_TO_DEV) {
2738 dma_addr_t dev_addr_tx =
2739 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2740
2741 config_addr = config->dst_addr; 2747 config_addr = config->dst_addr;
2742 if (dev_addr_tx) 2748
2743 dev_dbg(d40c->base->dev, 2749 if (cfg->dir != DMA_MEM_TO_DEV)
2744 "channel has a pre-wired TX address %08x "
2745 "overriding with %08x\n",
2746 dev_addr_tx, config_addr);
2747 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2748 dev_dbg(d40c->base->dev, 2750 dev_dbg(d40c->base->dev,
2749 "channel was not configured for memory " 2751 "channel was not configured for memory "
2750 "to peripheral transfer (%d) overriding\n", 2752 "to peripheral transfer (%d) overriding\n",
2751 cfg->dir); 2753 cfg->dir);
2752 cfg->dir = STEDMA40_MEM_TO_PERIPH; 2754 cfg->dir = DMA_MEM_TO_DEV;
2753 2755
2754 /* Configure the memory side */ 2756 /* Configure the memory side */
2755 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2757 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2763,6 +2765,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2763 return -EINVAL; 2765 return -EINVAL;
2764 } 2766 }
2765 2767
2768 if (config_addr <= 0) {
2769 dev_err(d40c->base->dev, "no address supplied\n");
2770 return -EINVAL;
2771 }
2772
2766 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { 2773 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2767 dev_err(d40c->base->dev, 2774 dev_err(d40c->base->dev,
2768 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", 2775 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
@@ -2781,14 +2788,24 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2781 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; 2788 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2782 } 2789 }
2783 2790
2791 /* Only valid widths are; 1, 2, 4 and 8. */
2792 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2793 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2794 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2795 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2796 ((src_addr_width > 1) && (src_addr_width & 1)) ||
2797 ((dst_addr_width > 1) && (dst_addr_width & 1)))
2798 return -EINVAL;
2799
2800 cfg->src_info.data_width = src_addr_width;
2801 cfg->dst_info.data_width = dst_addr_width;
2802
2784 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, 2803 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2785 src_addr_width,
2786 src_maxburst); 2804 src_maxburst);
2787 if (ret) 2805 if (ret)
2788 return ret; 2806 return ret;
2789 2807
2790 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, 2808 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2791 dst_addr_width,
2792 dst_maxburst); 2809 dst_maxburst);
2793 if (ret) 2810 if (ret)
2794 return ret; 2811 return ret;
@@ -2797,8 +2814,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2797 if (chan_is_logical(d40c)) 2814 if (chan_is_logical(d40c))
2798 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2815 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2799 else 2816 else
2800 d40_phy_cfg(cfg, &d40c->src_def_cfg, 2817 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2801 &d40c->dst_def_cfg, false);
2802 2818
2803 /* These settings will take precedence later */ 2819 /* These settings will take precedence later */
2804 d40c->runtime_addr = config_addr; 2820 d40c->runtime_addr = config_addr;
@@ -2929,7 +2945,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2929 } 2945 }
2930 2946
2931 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2947 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2932 base->num_log_chans, base->plat_data->memcpy_len); 2948 base->num_log_chans, base->num_memcpy_chans);
2933 2949
2934 dma_cap_zero(base->dma_memcpy.cap_mask); 2950 dma_cap_zero(base->dma_memcpy.cap_mask);
2935 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2951 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
@@ -3123,13 +3139,14 @@ static int __init d40_phy_res_init(struct d40_base *base)
3123 3139
3124static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 3140static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3125{ 3141{
3126 struct stedma40_platform_data *plat_data; 3142 struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
3127 struct clk *clk = NULL; 3143 struct clk *clk = NULL;
3128 void __iomem *virtbase = NULL; 3144 void __iomem *virtbase = NULL;
3129 struct resource *res = NULL; 3145 struct resource *res = NULL;
3130 struct d40_base *base = NULL; 3146 struct d40_base *base = NULL;
3131 int num_log_chans = 0; 3147 int num_log_chans = 0;
3132 int num_phy_chans; 3148 int num_phy_chans;
3149 int num_memcpy_chans;
3133 int clk_ret = -EINVAL; 3150 int clk_ret = -EINVAL;
3134 int i; 3151 int i;
3135 u32 pid; 3152 u32 pid;
@@ -3189,8 +3206,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3189 * DB8540v1 has revision 4 3206 * DB8540v1 has revision 4
3190 */ 3207 */
3191 rev = AMBA_REV_BITS(pid); 3208 rev = AMBA_REV_BITS(pid);
3192 3209 if (rev < 2) {
3193 plat_data = pdev->dev.platform_data; 3210 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
3211 goto failure;
3212 }
3194 3213
3195 /* The number of physical channels on this HW */ 3214 /* The number of physical channels on this HW */
3196 if (plat_data->num_of_phy_chans) 3215 if (plat_data->num_of_phy_chans)
@@ -3198,26 +3217,20 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3198 else 3217 else
3199 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 3218 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3200 3219
3201 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", 3220 /* The number of channels used for memcpy */
3202 rev, res->start, num_phy_chans); 3221 if (plat_data->num_of_memcpy_chans)
3203 3222 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3204 if (rev < 2) { 3223 else
3205 d40_err(&pdev->dev, "hardware revision: %d is not supported", 3224 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3206 rev);
3207 goto failure;
3208 }
3209 3225
3210 /* Count the number of logical channels in use */ 3226 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3211 for (i = 0; i < plat_data->dev_len; i++)
3212 if (plat_data->dev_rx[i] != 0)
3213 num_log_chans++;
3214 3227
3215 for (i = 0; i < plat_data->dev_len; i++) 3228 dev_info(&pdev->dev,
3216 if (plat_data->dev_tx[i] != 0) 3229 "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
3217 num_log_chans++; 3230 rev, res->start, num_phy_chans, num_log_chans);
3218 3231
3219 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 3232 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3220 (num_phy_chans + num_log_chans + plat_data->memcpy_len) * 3233 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3221 sizeof(struct d40_chan), GFP_KERNEL); 3234 sizeof(struct d40_chan), GFP_KERNEL);
3222 3235
3223 if (base == NULL) { 3236 if (base == NULL) {
@@ -3227,6 +3240,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3227 3240
3228 base->rev = rev; 3241 base->rev = rev;
3229 base->clk = clk; 3242 base->clk = clk;
3243 base->num_memcpy_chans = num_memcpy_chans;
3230 base->num_phy_chans = num_phy_chans; 3244 base->num_phy_chans = num_phy_chans;
3231 base->num_log_chans = num_log_chans; 3245 base->num_log_chans = num_log_chans;
3232 base->phy_start = res->start; 3246 base->phy_start = res->start;
@@ -3278,17 +3292,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3278 if (!base->lookup_phy_chans) 3292 if (!base->lookup_phy_chans)
3279 goto failure; 3293 goto failure;
3280 3294
3281 if (num_log_chans + plat_data->memcpy_len) { 3295 base->lookup_log_chans = kzalloc(num_log_chans *
3282 /* 3296 sizeof(struct d40_chan *),
3283 * The max number of logical channels are event lines for all 3297 GFP_KERNEL);
3284 * src devices and dst devices 3298 if (!base->lookup_log_chans)
3285 */ 3299 goto failure;
3286 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
3287 sizeof(struct d40_chan *),
3288 GFP_KERNEL);
3289 if (!base->lookup_log_chans)
3290 goto failure;
3291 }
3292 3300
3293 base->reg_val_backup_chan = kmalloc(base->num_phy_chans * 3301 base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
3294 sizeof(d40_backup_regs_chan), 3302 sizeof(d40_backup_regs_chan),
@@ -3472,17 +3480,82 @@ failure:
3472 return ret; 3480 return ret;
3473} 3481}
3474 3482
3483static int __init d40_of_probe(struct platform_device *pdev,
3484 struct device_node *np)
3485{
3486 struct stedma40_platform_data *pdata;
3487 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3488 const const __be32 *list;
3489
3490 pdata = devm_kzalloc(&pdev->dev,
3491 sizeof(struct stedma40_platform_data),
3492 GFP_KERNEL);
3493 if (!pdata)
3494 return -ENOMEM;
3495
3496 /* If absent this value will be obtained from h/w. */
3497 of_property_read_u32(np, "dma-channels", &num_phy);
3498 if (num_phy > 0)
3499 pdata->num_of_phy_chans = num_phy;
3500
3501 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3502 num_memcpy /= sizeof(*list);
3503
3504 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3505 d40_err(&pdev->dev,
3506 "Invalid number of memcpy channels specified (%d)\n",
3507 num_memcpy);
3508 return -EINVAL;
3509 }
3510 pdata->num_of_memcpy_chans = num_memcpy;
3511
3512 of_property_read_u32_array(np, "memcpy-channels",
3513 dma40_memcpy_channels,
3514 num_memcpy);
3515
3516 list = of_get_property(np, "disabled-channels", &num_disabled);
3517 num_disabled /= sizeof(*list);
3518
3519 if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
3520 d40_err(&pdev->dev,
3521 "Invalid number of disabled channels specified (%d)\n",
3522 num_disabled);
3523 return -EINVAL;
3524 }
3525
3526 of_property_read_u32_array(np, "disabled-channels",
3527 pdata->disabled_channels,
3528 num_disabled);
3529 pdata->disabled_channels[num_disabled] = -1;
3530
3531 pdev->dev.platform_data = pdata;
3532
3533 return 0;
3534}
3535
3475static int __init d40_probe(struct platform_device *pdev) 3536static int __init d40_probe(struct platform_device *pdev)
3476{ 3537{
3477 int err; 3538 struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
3539 struct device_node *np = pdev->dev.of_node;
3478 int ret = -ENOENT; 3540 int ret = -ENOENT;
3479 struct d40_base *base; 3541 struct d40_base *base = NULL;
3480 struct resource *res = NULL; 3542 struct resource *res = NULL;
3481 int num_reserved_chans; 3543 int num_reserved_chans;
3482 u32 val; 3544 u32 val;
3483 3545
3484 base = d40_hw_detect_init(pdev); 3546 if (!plat_data) {
3547 if (np) {
3548 if(d40_of_probe(pdev, np)) {
3549 ret = -ENOMEM;
3550 goto failure;
3551 }
3552 } else {
3553 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3554 goto failure;
3555 }
3556 }
3485 3557
3558 base = d40_hw_detect_init(pdev);
3486 if (!base) 3559 if (!base)
3487 goto failure; 3560 goto failure;
3488 3561
@@ -3575,6 +3648,7 @@ static int __init d40_probe(struct platform_device *pdev)
3575 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); 3648 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3576 if (IS_ERR(base->lcpa_regulator)) { 3649 if (IS_ERR(base->lcpa_regulator)) {
3577 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); 3650 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3651 ret = PTR_ERR(base->lcpa_regulator);
3578 base->lcpa_regulator = NULL; 3652 base->lcpa_regulator = NULL;
3579 goto failure; 3653 goto failure;
3580 } 3654 }
@@ -3590,19 +3664,26 @@ static int __init d40_probe(struct platform_device *pdev)
3590 } 3664 }
3591 3665
3592 base->initialized = true; 3666 base->initialized = true;
3593 err = d40_dmaengine_init(base, num_reserved_chans); 3667 ret = d40_dmaengine_init(base, num_reserved_chans);
3594 if (err) 3668 if (ret)
3595 goto failure; 3669 goto failure;
3596 3670
3597 base->dev->dma_parms = &base->dma_parms; 3671 base->dev->dma_parms = &base->dma_parms;
3598 err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); 3672 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3599 if (err) { 3673 if (ret) {
3600 d40_err(&pdev->dev, "Failed to set dma max seg size\n"); 3674 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3601 goto failure; 3675 goto failure;
3602 } 3676 }
3603 3677
3604 d40_hw_init(base); 3678 d40_hw_init(base);
3605 3679
3680 if (np) {
3681 ret = of_dma_controller_register(np, d40_xlate, NULL);
3682 if (ret)
3683 dev_err(&pdev->dev,
3684 "could not register of_dma_controller\n");
3685 }
3686
3606 dev_info(base->dev, "initialized\n"); 3687 dev_info(base->dev, "initialized\n");
3607 return 0; 3688 return 0;
3608 3689
@@ -3656,11 +3737,17 @@ failure:
3656 return ret; 3737 return ret;
3657} 3738}
3658 3739
3740static const struct of_device_id d40_match[] = {
3741 { .compatible = "stericsson,dma40", },
3742 {}
3743};
3744
3659static struct platform_driver d40_driver = { 3745static struct platform_driver d40_driver = {
3660 .driver = { 3746 .driver = {
3661 .owner = THIS_MODULE, 3747 .owner = THIS_MODULE,
3662 .name = D40_NAME, 3748 .name = D40_NAME,
3663 .pm = DMA40_PM_OPS, 3749 .pm = DMA40_PM_OPS,
3750 .of_match_table = d40_match,
3664 }, 3751 },
3665}; 3752};
3666 3753