diff options
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r-- | drivers/dma/ste_dma40.c | 366 |
1 files changed, 211 insertions, 155 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 71bf4ec300ea..7f23d45166c3 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <linux/pm.h> | 17 | #include <linux/pm.h> |
18 | #include <linux/pm_runtime.h> | 18 | #include <linux/pm_runtime.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/of.h> | ||
21 | #include <linux/of_dma.h> | ||
20 | #include <linux/amba/bus.h> | 22 | #include <linux/amba/bus.h> |
21 | #include <linux/regulator/consumer.h> | 23 | #include <linux/regulator/consumer.h> |
22 | #include <linux/platform_data/dma-ste-dma40.h> | 24 | #include <linux/platform_data/dma-ste-dma40.h> |
@@ -45,6 +47,9 @@ | |||
45 | #define D40_LCLA_LINK_PER_EVENT_GRP 128 | 47 | #define D40_LCLA_LINK_PER_EVENT_GRP 128 |
46 | #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP | 48 | #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP |
47 | 49 | ||
50 | /* Max number of logical channels per physical channel */ | ||
51 | #define D40_MAX_LOG_CHAN_PER_PHY 32 | ||
52 | |||
48 | /* Attempts before giving up to trying to get pages that are aligned */ | 53 | /* Attempts before giving up to trying to get pages that are aligned */ |
49 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 | 54 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 |
50 | 55 | ||
@@ -53,7 +58,50 @@ | |||
53 | #define D40_ALLOC_PHY (1 << 30) | 58 | #define D40_ALLOC_PHY (1 << 30) |
54 | #define D40_ALLOC_LOG_FREE 0 | 59 | #define D40_ALLOC_LOG_FREE 0 |
55 | 60 | ||
56 | #define MAX(a, b) (((a) < (b)) ? (b) : (a)) | 61 | /* Reserved event lines for memcpy only. */ |
62 | #define DB8500_DMA_MEMCPY_EV_0 51 | ||
63 | #define DB8500_DMA_MEMCPY_EV_1 56 | ||
64 | #define DB8500_DMA_MEMCPY_EV_2 57 | ||
65 | #define DB8500_DMA_MEMCPY_EV_3 58 | ||
66 | #define DB8500_DMA_MEMCPY_EV_4 59 | ||
67 | #define DB8500_DMA_MEMCPY_EV_5 60 | ||
68 | |||
69 | static int dma40_memcpy_channels[] = { | ||
70 | DB8500_DMA_MEMCPY_EV_0, | ||
71 | DB8500_DMA_MEMCPY_EV_1, | ||
72 | DB8500_DMA_MEMCPY_EV_2, | ||
73 | DB8500_DMA_MEMCPY_EV_3, | ||
74 | DB8500_DMA_MEMCPY_EV_4, | ||
75 | DB8500_DMA_MEMCPY_EV_5, | ||
76 | }; | ||
77 | |||
78 | /* Default configuration for physical memcpy */ | ||
79 | struct stedma40_chan_cfg dma40_memcpy_conf_phy = { | ||
80 | .mode = STEDMA40_MODE_PHYSICAL, | ||
81 | .dir = STEDMA40_MEM_TO_MEM, | ||
82 | |||
83 | .src_info.data_width = STEDMA40_BYTE_WIDTH, | ||
84 | .src_info.psize = STEDMA40_PSIZE_PHY_1, | ||
85 | .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, | ||
86 | |||
87 | .dst_info.data_width = STEDMA40_BYTE_WIDTH, | ||
88 | .dst_info.psize = STEDMA40_PSIZE_PHY_1, | ||
89 | .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, | ||
90 | }; | ||
91 | |||
92 | /* Default configuration for logical memcpy */ | ||
93 | struct stedma40_chan_cfg dma40_memcpy_conf_log = { | ||
94 | .mode = STEDMA40_MODE_LOGICAL, | ||
95 | .dir = STEDMA40_MEM_TO_MEM, | ||
96 | |||
97 | .src_info.data_width = STEDMA40_BYTE_WIDTH, | ||
98 | .src_info.psize = STEDMA40_PSIZE_LOG_1, | ||
99 | .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, | ||
100 | |||
101 | .dst_info.data_width = STEDMA40_BYTE_WIDTH, | ||
102 | .dst_info.psize = STEDMA40_PSIZE_LOG_1, | ||
103 | .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, | ||
104 | }; | ||
57 | 105 | ||
58 | /** | 106 | /** |
59 | * enum 40_command - The different commands and/or statuses. | 107 | * enum 40_command - The different commands and/or statuses. |
@@ -171,6 +219,9 @@ static u32 d40_backup_regs_chan[] = { | |||
171 | D40_CHAN_REG_SDLNK, | 219 | D40_CHAN_REG_SDLNK, |
172 | }; | 220 | }; |
173 | 221 | ||
222 | #define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \ | ||
223 | BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B) | ||
224 | |||
174 | /** | 225 | /** |
175 | * struct d40_interrupt_lookup - lookup table for interrupt handler | 226 | * struct d40_interrupt_lookup - lookup table for interrupt handler |
176 | * | 227 | * |
@@ -534,7 +585,7 @@ struct d40_base { | |||
534 | resource_size_t lcpa_size; | 585 | resource_size_t lcpa_size; |
535 | struct kmem_cache *desc_slab; | 586 | struct kmem_cache *desc_slab; |
536 | u32 reg_val_backup[BACKUP_REGS_SZ]; | 587 | u32 reg_val_backup[BACKUP_REGS_SZ]; |
537 | u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)]; | 588 | u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; |
538 | u32 *reg_val_backup_chan; | 589 | u32 *reg_val_backup_chan; |
539 | u16 gcc_pwr_off_mask; | 590 | u16 gcc_pwr_off_mask; |
540 | bool initialized; | 591 | bool initialized; |
@@ -1257,21 +1308,17 @@ static void __d40_config_set_event(struct d40_chan *d40c, | |||
1257 | static void d40_config_set_event(struct d40_chan *d40c, | 1308 | static void d40_config_set_event(struct d40_chan *d40c, |
1258 | enum d40_events event_type) | 1309 | enum d40_events event_type) |
1259 | { | 1310 | { |
1311 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); | ||
1312 | |||
1260 | /* Enable event line connected to device (or memcpy) */ | 1313 | /* Enable event line connected to device (or memcpy) */ |
1261 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 1314 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || |
1262 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 1315 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) |
1263 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | ||
1264 | |||
1265 | __d40_config_set_event(d40c, event_type, event, | 1316 | __d40_config_set_event(d40c, event_type, event, |
1266 | D40_CHAN_REG_SSLNK); | 1317 | D40_CHAN_REG_SSLNK); |
1267 | } | ||
1268 | |||
1269 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | ||
1270 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | ||
1271 | 1318 | ||
1319 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) | ||
1272 | __d40_config_set_event(d40c, event_type, event, | 1320 | __d40_config_set_event(d40c, event_type, event, |
1273 | D40_CHAN_REG_SDLNK); | 1321 | D40_CHAN_REG_SDLNK); |
1274 | } | ||
1275 | } | 1322 | } |
1276 | 1323 | ||
1277 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 1324 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
@@ -1715,8 +1762,6 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1715 | struct stedma40_chan_cfg *conf) | 1762 | struct stedma40_chan_cfg *conf) |
1716 | { | 1763 | { |
1717 | int res = 0; | 1764 | int res = 0; |
1718 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); | ||
1719 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); | ||
1720 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; | 1765 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
1721 | 1766 | ||
1722 | if (!conf->dir) { | 1767 | if (!conf->dir) { |
@@ -1724,44 +1769,10 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1724 | res = -EINVAL; | 1769 | res = -EINVAL; |
1725 | } | 1770 | } |
1726 | 1771 | ||
1727 | if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && | 1772 | if ((is_log && conf->dev_type > d40c->base->num_log_chans) || |
1728 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && | 1773 | (!is_log && conf->dev_type > d40c->base->num_phy_chans) || |
1729 | d40c->runtime_addr == 0) { | 1774 | (conf->dev_type < 0)) { |
1730 | 1775 | chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); | |
1731 | chan_err(d40c, "Invalid TX channel address (%d)\n", | ||
1732 | conf->dst_dev_type); | ||
1733 | res = -EINVAL; | ||
1734 | } | ||
1735 | |||
1736 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && | ||
1737 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && | ||
1738 | d40c->runtime_addr == 0) { | ||
1739 | chan_err(d40c, "Invalid RX channel address (%d)\n", | ||
1740 | conf->src_dev_type); | ||
1741 | res = -EINVAL; | ||
1742 | } | ||
1743 | |||
1744 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && | ||
1745 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { | ||
1746 | chan_err(d40c, "Invalid dst\n"); | ||
1747 | res = -EINVAL; | ||
1748 | } | ||
1749 | |||
1750 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && | ||
1751 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { | ||
1752 | chan_err(d40c, "Invalid src\n"); | ||
1753 | res = -EINVAL; | ||
1754 | } | ||
1755 | |||
1756 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && | ||
1757 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { | ||
1758 | chan_err(d40c, "No event line\n"); | ||
1759 | res = -EINVAL; | ||
1760 | } | ||
1761 | |||
1762 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && | ||
1763 | (src_event_group != dst_event_group)) { | ||
1764 | chan_err(d40c, "Invalid event group\n"); | ||
1765 | res = -EINVAL; | 1776 | res = -EINVAL; |
1766 | } | 1777 | } |
1767 | 1778 | ||
@@ -1882,7 +1893,7 @@ out: | |||
1882 | 1893 | ||
1883 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | 1894 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) |
1884 | { | 1895 | { |
1885 | int dev_type; | 1896 | int dev_type = d40c->dma_cfg.dev_type; |
1886 | int event_group; | 1897 | int event_group; |
1887 | int event_line; | 1898 | int event_line; |
1888 | struct d40_phy_res *phys; | 1899 | struct d40_phy_res *phys; |
@@ -1897,13 +1908,11 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | |||
1897 | num_phy_chans = d40c->base->num_phy_chans; | 1908 | num_phy_chans = d40c->base->num_phy_chans; |
1898 | 1909 | ||
1899 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1910 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1900 | dev_type = d40c->dma_cfg.src_dev_type; | ||
1901 | log_num = 2 * dev_type; | 1911 | log_num = 2 * dev_type; |
1902 | is_src = true; | 1912 | is_src = true; |
1903 | } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1913 | } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1904 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1914 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1905 | /* dst event lines are used for logical memcpy */ | 1915 | /* dst event lines are used for logical memcpy */ |
1906 | dev_type = d40c->dma_cfg.dst_dev_type; | ||
1907 | log_num = 2 * dev_type + 1; | 1916 | log_num = 2 * dev_type + 1; |
1908 | is_src = false; | 1917 | is_src = false; |
1909 | } else | 1918 | } else |
@@ -2014,14 +2023,23 @@ static int d40_config_memcpy(struct d40_chan *d40c) | |||
2014 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; | 2023 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; |
2015 | 2024 | ||
2016 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { | 2025 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { |
2017 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; | 2026 | d40c->dma_cfg = dma40_memcpy_conf_log; |
2018 | d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; | 2027 | d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; |
2019 | d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> | 2028 | |
2020 | memcpy[d40c->chan.chan_id]; | 2029 | d40_log_cfg(&d40c->dma_cfg, |
2030 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | ||
2021 | 2031 | ||
2022 | } else if (dma_has_cap(DMA_MEMCPY, cap) && | 2032 | } else if (dma_has_cap(DMA_MEMCPY, cap) && |
2023 | dma_has_cap(DMA_SLAVE, cap)) { | 2033 | dma_has_cap(DMA_SLAVE, cap)) { |
2024 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | 2034 | d40c->dma_cfg = dma40_memcpy_conf_phy; |
2035 | |||
2036 | /* Generate interrupt at end of transfer or relink. */ | ||
2037 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); | ||
2038 | |||
2039 | /* Generate interrupt on error. */ | ||
2040 | d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); | ||
2041 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); | ||
2042 | |||
2025 | } else { | 2043 | } else { |
2026 | chan_err(d40c, "No memcpy\n"); | 2044 | chan_err(d40c, "No memcpy\n"); |
2027 | return -EINVAL; | 2045 | return -EINVAL; |
@@ -2034,7 +2052,7 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
2034 | { | 2052 | { |
2035 | 2053 | ||
2036 | int res = 0; | 2054 | int res = 0; |
2037 | u32 event; | 2055 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); |
2038 | struct d40_phy_res *phy = d40c->phy_chan; | 2056 | struct d40_phy_res *phy = d40c->phy_chan; |
2039 | bool is_src; | 2057 | bool is_src; |
2040 | 2058 | ||
@@ -2053,13 +2071,11 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
2053 | } | 2071 | } |
2054 | 2072 | ||
2055 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 2073 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
2056 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 2074 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) |
2057 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | ||
2058 | is_src = false; | 2075 | is_src = false; |
2059 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 2076 | else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
2060 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | ||
2061 | is_src = true; | 2077 | is_src = true; |
2062 | } else { | 2078 | else { |
2063 | chan_err(d40c, "Unknown direction\n"); | 2079 | chan_err(d40c, "Unknown direction\n"); |
2064 | return -EINVAL; | 2080 | return -EINVAL; |
2065 | } | 2081 | } |
@@ -2100,7 +2116,7 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
2100 | unsigned long flags; | 2116 | unsigned long flags; |
2101 | void __iomem *active_reg; | 2117 | void __iomem *active_reg; |
2102 | u32 status; | 2118 | u32 status; |
2103 | u32 event; | 2119 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); |
2104 | 2120 | ||
2105 | spin_lock_irqsave(&d40c->lock, flags); | 2121 | spin_lock_irqsave(&d40c->lock, flags); |
2106 | 2122 | ||
@@ -2121,10 +2137,8 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
2121 | 2137 | ||
2122 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 2138 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
2123 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 2139 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
2124 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | ||
2125 | status = readl(chanbase + D40_CHAN_REG_SDLNK); | 2140 | status = readl(chanbase + D40_CHAN_REG_SDLNK); |
2126 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 2141 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
2127 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | ||
2128 | status = readl(chanbase + D40_CHAN_REG_SSLNK); | 2142 | status = readl(chanbase + D40_CHAN_REG_SSLNK); |
2129 | } else { | 2143 | } else { |
2130 | chan_err(d40c, "Unknown direction\n"); | 2144 | chan_err(d40c, "Unknown direction\n"); |
@@ -2255,24 +2269,6 @@ err: | |||
2255 | return NULL; | 2269 | return NULL; |
2256 | } | 2270 | } |
2257 | 2271 | ||
2258 | static dma_addr_t | ||
2259 | d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) | ||
2260 | { | ||
2261 | struct stedma40_platform_data *plat = chan->base->plat_data; | ||
2262 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | ||
2263 | dma_addr_t addr = 0; | ||
2264 | |||
2265 | if (chan->runtime_addr) | ||
2266 | return chan->runtime_addr; | ||
2267 | |||
2268 | if (direction == DMA_DEV_TO_MEM) | ||
2269 | addr = plat->dev_rx[cfg->src_dev_type]; | ||
2270 | else if (direction == DMA_MEM_TO_DEV) | ||
2271 | addr = plat->dev_tx[cfg->dst_dev_type]; | ||
2272 | |||
2273 | return addr; | ||
2274 | } | ||
2275 | |||
2276 | static struct dma_async_tx_descriptor * | 2272 | static struct dma_async_tx_descriptor * |
2277 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | 2273 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
2278 | struct scatterlist *sg_dst, unsigned int sg_len, | 2274 | struct scatterlist *sg_dst, unsigned int sg_len, |
@@ -2299,14 +2295,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
2299 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) | 2295 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
2300 | desc->cyclic = true; | 2296 | desc->cyclic = true; |
2301 | 2297 | ||
2302 | if (direction != DMA_TRANS_NONE) { | 2298 | if (direction == DMA_DEV_TO_MEM) |
2303 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | 2299 | src_dev_addr = chan->runtime_addr; |
2304 | 2300 | else if (direction == DMA_MEM_TO_DEV) | |
2305 | if (direction == DMA_DEV_TO_MEM) | 2301 | dst_dev_addr = chan->runtime_addr; |
2306 | src_dev_addr = dev_addr; | ||
2307 | else if (direction == DMA_MEM_TO_DEV) | ||
2308 | dst_dev_addr = dev_addr; | ||
2309 | } | ||
2310 | 2302 | ||
2311 | if (chan_is_logical(chan)) | 2303 | if (chan_is_logical(chan)) |
2312 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, | 2304 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, |
@@ -2399,11 +2391,55 @@ static void d40_set_prio_realtime(struct d40_chan *d40c) | |||
2399 | 2391 | ||
2400 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 2392 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || |
2401 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | 2393 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) |
2402 | __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); | 2394 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); |
2403 | 2395 | ||
2404 | if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || | 2396 | if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || |
2405 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | 2397 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) |
2406 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); | 2398 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); |
2399 | } | ||
2400 | |||
2401 | #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) | ||
2402 | #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) | ||
2403 | #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) | ||
2404 | #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) | ||
2405 | |||
2406 | static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, | ||
2407 | struct of_dma *ofdma) | ||
2408 | { | ||
2409 | struct stedma40_chan_cfg cfg; | ||
2410 | dma_cap_mask_t cap; | ||
2411 | u32 flags; | ||
2412 | |||
2413 | memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); | ||
2414 | |||
2415 | dma_cap_zero(cap); | ||
2416 | dma_cap_set(DMA_SLAVE, cap); | ||
2417 | |||
2418 | cfg.dev_type = dma_spec->args[0]; | ||
2419 | flags = dma_spec->args[2]; | ||
2420 | |||
2421 | switch (D40_DT_FLAGS_MODE(flags)) { | ||
2422 | case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; | ||
2423 | case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; | ||
2424 | } | ||
2425 | |||
2426 | switch (D40_DT_FLAGS_DIR(flags)) { | ||
2427 | case 0: | ||
2428 | cfg.dir = STEDMA40_MEM_TO_PERIPH; | ||
2429 | cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); | ||
2430 | break; | ||
2431 | case 1: | ||
2432 | cfg.dir = STEDMA40_PERIPH_TO_MEM; | ||
2433 | cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); | ||
2434 | break; | ||
2435 | } | ||
2436 | |||
2437 | if (D40_DT_FLAGS_FIXED_CHAN(flags)) { | ||
2438 | cfg.phy_channel = dma_spec->args[1]; | ||
2439 | cfg.use_fixed_channel = true; | ||
2440 | } | ||
2441 | |||
2442 | return dma_request_channel(cap, stedma40_filter, &cfg); | ||
2407 | } | 2443 | } |
2408 | 2444 | ||
2409 | /* DMA ENGINE functions */ | 2445 | /* DMA ENGINE functions */ |
@@ -2435,23 +2471,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2435 | } | 2471 | } |
2436 | 2472 | ||
2437 | pm_runtime_get_sync(d40c->base->dev); | 2473 | pm_runtime_get_sync(d40c->base->dev); |
2438 | /* Fill in basic CFG register values */ | ||
2439 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | ||
2440 | &d40c->dst_def_cfg, chan_is_logical(d40c)); | ||
2441 | 2474 | ||
2442 | d40_set_prio_realtime(d40c); | 2475 | d40_set_prio_realtime(d40c); |
2443 | 2476 | ||
2444 | if (chan_is_logical(d40c)) { | 2477 | if (chan_is_logical(d40c)) { |
2445 | d40_log_cfg(&d40c->dma_cfg, | ||
2446 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | ||
2447 | |||
2448 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 2478 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
2449 | d40c->lcpa = d40c->base->lcpa_base + | 2479 | d40c->lcpa = d40c->base->lcpa_base + |
2450 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; | 2480 | d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; |
2451 | else | 2481 | else |
2452 | d40c->lcpa = d40c->base->lcpa_base + | 2482 | d40c->lcpa = d40c->base->lcpa_base + |
2453 | d40c->dma_cfg.dst_dev_type * | 2483 | d40c->dma_cfg.dev_type * |
2454 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | 2484 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
2485 | |||
2486 | /* Unmask the Global Interrupt Mask. */ | ||
2487 | d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); | ||
2488 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); | ||
2455 | } | 2489 | } |
2456 | 2490 | ||
2457 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", | 2491 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", |
@@ -2712,15 +2746,8 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2712 | dst_maxburst = config->dst_maxburst; | 2746 | dst_maxburst = config->dst_maxburst; |
2713 | 2747 | ||
2714 | if (config->direction == DMA_DEV_TO_MEM) { | 2748 | if (config->direction == DMA_DEV_TO_MEM) { |
2715 | dma_addr_t dev_addr_rx = | ||
2716 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; | ||
2717 | |||
2718 | config_addr = config->src_addr; | 2749 | config_addr = config->src_addr; |
2719 | if (dev_addr_rx) | 2750 | |
2720 | dev_dbg(d40c->base->dev, | ||
2721 | "channel has a pre-wired RX address %08x " | ||
2722 | "overriding with %08x\n", | ||
2723 | dev_addr_rx, config_addr); | ||
2724 | if (cfg->dir != STEDMA40_PERIPH_TO_MEM) | 2751 | if (cfg->dir != STEDMA40_PERIPH_TO_MEM) |
2725 | dev_dbg(d40c->base->dev, | 2752 | dev_dbg(d40c->base->dev, |
2726 | "channel was not configured for peripheral " | 2753 | "channel was not configured for peripheral " |
@@ -2735,15 +2762,8 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2735 | dst_maxburst = src_maxburst; | 2762 | dst_maxburst = src_maxburst; |
2736 | 2763 | ||
2737 | } else if (config->direction == DMA_MEM_TO_DEV) { | 2764 | } else if (config->direction == DMA_MEM_TO_DEV) { |
2738 | dma_addr_t dev_addr_tx = | ||
2739 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; | ||
2740 | |||
2741 | config_addr = config->dst_addr; | 2765 | config_addr = config->dst_addr; |
2742 | if (dev_addr_tx) | 2766 | |
2743 | dev_dbg(d40c->base->dev, | ||
2744 | "channel has a pre-wired TX address %08x " | ||
2745 | "overriding with %08x\n", | ||
2746 | dev_addr_tx, config_addr); | ||
2747 | if (cfg->dir != STEDMA40_MEM_TO_PERIPH) | 2767 | if (cfg->dir != STEDMA40_MEM_TO_PERIPH) |
2748 | dev_dbg(d40c->base->dev, | 2768 | dev_dbg(d40c->base->dev, |
2749 | "channel was not configured for memory " | 2769 | "channel was not configured for memory " |
@@ -2763,6 +2783,11 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2763 | return -EINVAL; | 2783 | return -EINVAL; |
2764 | } | 2784 | } |
2765 | 2785 | ||
2786 | if (config_addr <= 0) { | ||
2787 | dev_err(d40c->base->dev, "no address supplied\n"); | ||
2788 | return -EINVAL; | ||
2789 | } | ||
2790 | |||
2766 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { | 2791 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
2767 | dev_err(d40c->base->dev, | 2792 | dev_err(d40c->base->dev, |
2768 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", | 2793 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", |
@@ -2797,8 +2822,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2797 | if (chan_is_logical(d40c)) | 2822 | if (chan_is_logical(d40c)) |
2798 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 2823 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
2799 | else | 2824 | else |
2800 | d40_phy_cfg(cfg, &d40c->src_def_cfg, | 2825 | d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); |
2801 | &d40c->dst_def_cfg, false); | ||
2802 | 2826 | ||
2803 | /* These settings will take precedence later */ | 2827 | /* These settings will take precedence later */ |
2804 | d40c->runtime_addr = config_addr; | 2828 | d40c->runtime_addr = config_addr; |
@@ -2929,7 +2953,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2929 | } | 2953 | } |
2930 | 2954 | ||
2931 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, | 2955 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, |
2932 | base->num_log_chans, base->plat_data->memcpy_len); | 2956 | base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels)); |
2933 | 2957 | ||
2934 | dma_cap_zero(base->dma_memcpy.cap_mask); | 2958 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2935 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 2959 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
@@ -3123,7 +3147,7 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
3123 | 3147 | ||
3124 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 3148 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) |
3125 | { | 3149 | { |
3126 | struct stedma40_platform_data *plat_data; | 3150 | struct stedma40_platform_data *plat_data = pdev->dev.platform_data; |
3127 | struct clk *clk = NULL; | 3151 | struct clk *clk = NULL; |
3128 | void __iomem *virtbase = NULL; | 3152 | void __iomem *virtbase = NULL; |
3129 | struct resource *res = NULL; | 3153 | struct resource *res = NULL; |
@@ -3189,8 +3213,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3189 | * DB8540v1 has revision 4 | 3213 | * DB8540v1 has revision 4 |
3190 | */ | 3214 | */ |
3191 | rev = AMBA_REV_BITS(pid); | 3215 | rev = AMBA_REV_BITS(pid); |
3192 | 3216 | if (rev < 2) { | |
3193 | plat_data = pdev->dev.platform_data; | 3217 | d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); |
3218 | goto failure; | ||
3219 | } | ||
3194 | 3220 | ||
3195 | /* The number of physical channels on this HW */ | 3221 | /* The number of physical channels on this HW */ |
3196 | if (plat_data->num_of_phy_chans) | 3222 | if (plat_data->num_of_phy_chans) |
@@ -3198,26 +3224,14 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3198 | else | 3224 | else |
3199 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 3225 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
3200 | 3226 | ||
3201 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", | 3227 | num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; |
3202 | rev, res->start, num_phy_chans); | ||
3203 | |||
3204 | if (rev < 2) { | ||
3205 | d40_err(&pdev->dev, "hardware revision: %d is not supported", | ||
3206 | rev); | ||
3207 | goto failure; | ||
3208 | } | ||
3209 | 3228 | ||
3210 | /* Count the number of logical channels in use */ | 3229 | dev_info(&pdev->dev, |
3211 | for (i = 0; i < plat_data->dev_len; i++) | 3230 | "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n", |
3212 | if (plat_data->dev_rx[i] != 0) | 3231 | rev, res->start, num_phy_chans, num_log_chans); |
3213 | num_log_chans++; | ||
3214 | |||
3215 | for (i = 0; i < plat_data->dev_len; i++) | ||
3216 | if (plat_data->dev_tx[i] != 0) | ||
3217 | num_log_chans++; | ||
3218 | 3232 | ||
3219 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + | 3233 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + |
3220 | (num_phy_chans + num_log_chans + plat_data->memcpy_len) * | 3234 | (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) * |
3221 | sizeof(struct d40_chan), GFP_KERNEL); | 3235 | sizeof(struct d40_chan), GFP_KERNEL); |
3222 | 3236 | ||
3223 | if (base == NULL) { | 3237 | if (base == NULL) { |
@@ -3278,17 +3292,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3278 | if (!base->lookup_phy_chans) | 3292 | if (!base->lookup_phy_chans) |
3279 | goto failure; | 3293 | goto failure; |
3280 | 3294 | ||
3281 | if (num_log_chans + plat_data->memcpy_len) { | 3295 | base->lookup_log_chans = kzalloc(num_log_chans * |
3282 | /* | 3296 | sizeof(struct d40_chan *), |
3283 | * The max number of logical channels are event lines for all | 3297 | GFP_KERNEL); |
3284 | * src devices and dst devices | 3298 | if (!base->lookup_log_chans) |
3285 | */ | 3299 | goto failure; |
3286 | base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * | ||
3287 | sizeof(struct d40_chan *), | ||
3288 | GFP_KERNEL); | ||
3289 | if (!base->lookup_log_chans) | ||
3290 | goto failure; | ||
3291 | } | ||
3292 | 3300 | ||
3293 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * | 3301 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * |
3294 | sizeof(d40_backup_regs_chan), | 3302 | sizeof(d40_backup_regs_chan), |
@@ -3472,17 +3480,52 @@ failure: | |||
3472 | return ret; | 3480 | return ret; |
3473 | } | 3481 | } |
3474 | 3482 | ||
3483 | static int __init d40_of_probe(struct platform_device *pdev, | ||
3484 | struct device_node *np) | ||
3485 | { | ||
3486 | struct stedma40_platform_data *pdata; | ||
3487 | |||
3488 | /* | ||
3489 | * FIXME: Fill in this routine as more support is added. | ||
3490 | * First platform enabled (u8500) doesn't need any extra | ||
3491 | * properties to run, so this is fairly sparse currently. | ||
3492 | */ | ||
3493 | |||
3494 | pdata = devm_kzalloc(&pdev->dev, | ||
3495 | sizeof(struct stedma40_platform_data), | ||
3496 | GFP_KERNEL); | ||
3497 | if (!pdata) | ||
3498 | return -ENOMEM; | ||
3499 | |||
3500 | pdev->dev.platform_data = pdata; | ||
3501 | |||
3502 | return 0; | ||
3503 | } | ||
3504 | |||
3475 | static int __init d40_probe(struct platform_device *pdev) | 3505 | static int __init d40_probe(struct platform_device *pdev) |
3476 | { | 3506 | { |
3507 | struct stedma40_platform_data *plat_data = pdev->dev.platform_data; | ||
3508 | struct device_node *np = pdev->dev.of_node; | ||
3477 | int err; | 3509 | int err; |
3478 | int ret = -ENOENT; | 3510 | int ret = -ENOENT; |
3479 | struct d40_base *base; | 3511 | struct d40_base *base = NULL; |
3480 | struct resource *res = NULL; | 3512 | struct resource *res = NULL; |
3481 | int num_reserved_chans; | 3513 | int num_reserved_chans; |
3482 | u32 val; | 3514 | u32 val; |
3483 | 3515 | ||
3484 | base = d40_hw_detect_init(pdev); | 3516 | if (!plat_data) { |
3517 | if (np) { | ||
3518 | if(d40_of_probe(pdev, np)) { | ||
3519 | ret = -ENOMEM; | ||
3520 | goto failure; | ||
3521 | } | ||
3522 | } else { | ||
3523 | d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); | ||
3524 | goto failure; | ||
3525 | } | ||
3526 | } | ||
3485 | 3527 | ||
3528 | base = d40_hw_detect_init(pdev); | ||
3486 | if (!base) | 3529 | if (!base) |
3487 | goto failure; | 3530 | goto failure; |
3488 | 3531 | ||
@@ -3603,6 +3646,13 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3603 | 3646 | ||
3604 | d40_hw_init(base); | 3647 | d40_hw_init(base); |
3605 | 3648 | ||
3649 | if (np) { | ||
3650 | err = of_dma_controller_register(np, d40_xlate, NULL); | ||
3651 | if (err && err != -ENODEV) | ||
3652 | dev_err(&pdev->dev, | ||
3653 | "could not register of_dma_controller\n"); | ||
3654 | } | ||
3655 | |||
3606 | dev_info(base->dev, "initialized\n"); | 3656 | dev_info(base->dev, "initialized\n"); |
3607 | return 0; | 3657 | return 0; |
3608 | 3658 | ||
@@ -3656,11 +3706,17 @@ failure: | |||
3656 | return ret; | 3706 | return ret; |
3657 | } | 3707 | } |
3658 | 3708 | ||
3709 | static const struct of_device_id d40_match[] = { | ||
3710 | { .compatible = "stericsson,dma40", }, | ||
3711 | {} | ||
3712 | }; | ||
3713 | |||
3659 | static struct platform_driver d40_driver = { | 3714 | static struct platform_driver d40_driver = { |
3660 | .driver = { | 3715 | .driver = { |
3661 | .owner = THIS_MODULE, | 3716 | .owner = THIS_MODULE, |
3662 | .name = D40_NAME, | 3717 | .name = D40_NAME, |
3663 | .pm = DMA40_PM_OPS, | 3718 | .pm = DMA40_PM_OPS, |
3719 | .of_match_table = d40_match, | ||
3664 | }, | 3720 | }, |
3665 | }; | 3721 | }; |
3666 | 3722 | ||