-rw-r--r--	drivers/dma/ste_dma40.c | 201
1 file changed, 74 insertions(+), 127 deletions(-)
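
For orientation before the diff body: this patch adds two error-logging helpers, d40_err() and chan_err(), then converts every open-coded dev_err() call in the driver to use them. A minimal sketch of the transformation, lifted from the hunks that follow (chan2dev() is the driver's existing helper that maps a struct d40_chan to its struct device):

	#define d40_err(dev, format, arg...) \
		dev_err(dev, "[%s] " format, __func__, ## arg)

	#define chan_err(d40c, format, arg...) \
		d40_err(chan2dev(d40c), format, ## arg)

	/* Before: every call site repeated the "[%s]" prefix and __func__. */
	dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", __func__);

	/* After: the helper supplies the prefix and __func__ automatically. */
	chan_err(d40c, "Invalid direction.\n");

The macros use the GNU named-variadic extension (arg... together with ## arg), so calls with no trailing arguments are legal: the ## operator swallows the preceding comma when arg is empty.
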
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 0073988cdaf6..ab3ca6af7dbd 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -322,6 +322,12 @@ static void __iomem *chan_base(struct d40_chan *chan)
 		chan->phy_chan->num * D40_DREG_PCDELTA;
 }
 
+#define d40_err(dev, format, arg...) \
+	dev_err(dev, "[%s] " format, __func__, ## arg)
+
+#define chan_err(d40c, format, arg...) \
+	d40_err(chan2dev(d40c), format, ## arg)
+
 static int d40_pool_lli_alloc(struct d40_desc *d40d,
 			      int lli_len, bool is_log)
 {
@@ -673,9 +679,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
 	}
 
 	if (i == D40_SUSPEND_MAX_IT) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
-			__func__, d40c->phy_chan->num, d40c->log_num,
+		chan_err(d40c,
+			"unable to suspend the chl %d (log: %d) status %x\n",
+			d40c->phy_chan->num, d40c->log_num,
 			status);
 		dump_stack();
 		ret = -EBUSY;
@@ -1143,9 +1149,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
 			if (!il[row].is_error)
 				dma_tc_handle(d40c);
 			else
-				dev_err(base->dev,
-					"[%s] IRQ chan: %ld offset %d idx %d\n",
-					__func__, chan, il[row].offset, idx);
+				d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
+					chan, il[row].offset, idx);
 
 			spin_unlock(&d40c->lock);
 		}
@@ -1164,8 +1169,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
 	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
 
 	if (!conf->dir) {
-		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
-			__func__);
+		chan_err(d40c, "Invalid direction.\n");
 		res = -EINVAL;
 	}
 
@@ -1173,46 +1177,40 @@ static int d40_validate_conf(struct d40_chan *d40c,
 	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
 	    d40c->runtime_addr == 0) {
 
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Invalid TX channel address (%d)\n",
-			__func__, conf->dst_dev_type);
+		chan_err(d40c, "Invalid TX channel address (%d)\n",
+			 conf->dst_dev_type);
 		res = -EINVAL;
 	}
 
 	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
 	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
 	    d40c->runtime_addr == 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Invalid RX channel address (%d)\n",
-			__func__, conf->src_dev_type);
+		chan_err(d40c, "Invalid RX channel address (%d)\n",
+			conf->src_dev_type);
 		res = -EINVAL;
 	}
 
 	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
 	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
-		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
-			__func__);
+		chan_err(d40c, "Invalid dst\n");
 		res = -EINVAL;
 	}
 
 	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
 	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
-		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
-			__func__);
+		chan_err(d40c, "Invalid src\n");
 		res = -EINVAL;
 	}
 
 	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
 	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] No event line\n", __func__);
+		chan_err(d40c, "No event line\n");
 		res = -EINVAL;
 	}
 
 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
 	    (src_event_group != dst_event_group)) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Invalid event group\n", __func__);
+		chan_err(d40c, "Invalid event group\n");
 		res = -EINVAL;
 	}
 
@@ -1221,9 +1219,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
 		 * DMAC HW supports it. Will be added to this driver,
 		 * in case any dma client requires it.
 		 */
-		dev_err(&d40c->chan.dev->device,
-			"[%s] periph to periph not supported\n",
-			__func__);
+		chan_err(d40c, "periph to periph not supported\n");
 		res = -EINVAL;
 	}
 
@@ -1236,9 +1232,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
 		 * src (burst x width) == dst (burst x width)
 		 */
 
-		dev_err(&d40c->chan.dev->device,
-			"[%s] src (burst x width) != dst (burst x width)\n",
-			__func__);
+		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
 		res = -EINVAL;
 	}
 
@@ -1441,8 +1435,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
 		   dma_has_cap(DMA_SLAVE, cap)) {
 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
 	} else {
-		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
-			__func__);
+		chan_err(d40c, "No memcpy\n");
 		return -EINVAL;
 	}
 
@@ -1473,15 +1466,13 @@ static int d40_free_dma(struct d40_chan *d40c)
 	}
 
 	if (phy == NULL) {
-		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
-			__func__);
+		chan_err(d40c, "phy == null\n");
 		return -EINVAL;
 	}
 
 	if (phy->allocated_src == D40_ALLOC_FREE &&
 	    phy->allocated_dst == D40_ALLOC_FREE) {
-		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
-			__func__);
+		chan_err(d40c, "channel already free\n");
 		return -EINVAL;
 	}
 
@@ -1493,15 +1484,13 @@ static int d40_free_dma(struct d40_chan *d40c)
 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 		is_src = true;
 	} else {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unknown direction\n", __func__);
+		chan_err(d40c, "Unknown direction\n");
 		return -EINVAL;
 	}
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res) {
-		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
-			__func__);
+		chan_err(d40c, "suspend failed\n");
 		return res;
 	}
 
@@ -1521,9 +1510,8 @@ static int d40_free_dma(struct d40_chan *d40c)
 			res = d40_channel_execute_command(d40c,
 							  D40_DMA_RUN);
 			if (res) {
-				dev_err(&d40c->chan.dev->device,
-					"[%s] Executing RUN command\n",
-					__func__);
+				chan_err(d40c,
+					 "Executing RUN command\n");
 				return res;
 			}
 		}
@@ -1536,8 +1524,7 @@ static int d40_free_dma(struct d40_chan *d40c)
 	/* Release physical channel */
 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to stop channel\n", __func__);
+		chan_err(d40c, "Failed to stop channel\n");
 		return res;
 	}
 	d40c->phy_chan = NULL;
@@ -1581,8 +1568,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 		status = readl(chanbase + D40_CHAN_REG_SSLNK);
 	} else {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unknown direction\n", __func__);
+		chan_err(d40c, "Unknown direction\n");
 		goto _exit;
 	}
 
@@ -1625,8 +1611,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 	unsigned long flags;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unallocated channel.\n", __func__);
+		chan_err(d40c, "Unallocated channel.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1640,8 +1625,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					d40c->dma_cfg.src_info.data_width,
 					d40c->dma_cfg.dst_info.data_width);
 	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
+		chan_err(d40c, "Unaligned size\n");
 		goto err;
 	}
 
@@ -1651,8 +1635,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 	if (chan_is_logical(d40c)) {
 
 		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
+			chan_err(d40c, "Out of memory\n");
 			goto err;
 		}
 
@@ -1671,8 +1654,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					 d40c->dma_cfg.src_info.data_width);
 	} else {
 		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
+			chan_err(d40c, "Out of memory\n");
 			goto err;
 		}
 
@@ -1758,9 +1740,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	if (!d40c->configured) {
 		err = d40_config_memcpy(d40c);
 		if (err) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Failed to configure memcpy channel\n",
-				__func__);
+			chan_err(d40c, "Failed to configure memcpy channel\n");
 			goto fail;
 		}
 	}
@@ -1768,8 +1748,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 
 	err = d40_allocate_channel(d40c);
 	if (err) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to allocate channel\n", __func__);
+		chan_err(d40c, "Failed to allocate channel\n");
 		goto fail;
 	}
 
@@ -1810,8 +1789,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
 	unsigned long flags;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Cannot free unallocated channel\n", __func__);
+		chan_err(d40c, "Cannot free unallocated channel\n");
 		return;
 	}
 
@@ -1821,8 +1799,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
 	err = d40_free_dma(d40c);
 
 	if (err)
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to free channel\n", __func__);
+		chan_err(d40c, "Failed to free channel\n");
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
@@ -1838,8 +1815,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	unsigned long flags;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Channel is not allocated.\n", __func__);
+		chan_err(d40c, "Channel is not allocated.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1847,8 +1823,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	d40d = d40_desc_get(d40c);
 
 	if (d40d == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Descriptor is NULL\n", __func__);
+		chan_err(d40c, "Descriptor is NULL\n");
 		goto err;
 	}
 
@@ -1857,8 +1832,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 				       d40c->dma_cfg.src_info.data_width,
 				       d40c->dma_cfg.dst_info.data_width);
 	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
+		chan_err(d40c, "Unaligned size\n");
 		goto err;
 	}
 
@@ -1870,8 +1844,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	if (chan_is_logical(d40c)) {
 
 		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
+			chan_err(d40c, "Out of memory\n");
 			goto err;
 		}
 		d40d->lli_current = 0;
@@ -1897,8 +1870,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	} else {
 
 		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
+			chan_err(d40c, "Out of memory\n");
 			goto err;
 		}
 
@@ -1966,14 +1938,12 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 				       d40c->dma_cfg.src_info.data_width,
 				       d40c->dma_cfg.dst_info.data_width);
 	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
+		chan_err(d40c, "Unaligned size\n");
 		return -EINVAL;
 	}
 
 	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Out of memory\n", __func__);
+		chan_err(d40c, "Out of memory\n");
 		return -ENOMEM;
 	}
 
@@ -2022,14 +1992,12 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 				       d40c->dma_cfg.src_info.data_width,
 				       d40c->dma_cfg.dst_info.data_width);
 	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
+		chan_err(d40c, "Unaligned size\n");
 		return -EINVAL;
 	}
 
 	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Out of memory\n", __func__);
+		chan_err(d40c, "Out of memory\n");
 		return -ENOMEM;
 	}
 
@@ -2092,8 +2060,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 	int err;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Cannot prepare unallocated channel\n", __func__);
+		chan_err(d40c, "Cannot prepare unallocated channel\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -2110,9 +2077,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
 					    direction, dma_flags);
 	if (err) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to prepare %s slave sg job: %d\n",
-			__func__,
+		chan_err(d40c, "Failed to prepare %s slave sg job: %d\n",
 			chan_is_logical(d40c) ? "log" : "phy", err);
 		goto err;
 	}
@@ -2143,9 +2108,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 	int ret;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Cannot read status of unallocated channel\n",
-			__func__);
+		chan_err(d40c, "Cannot read status of unallocated channel\n");
 		return -EINVAL;
 	}
 
@@ -2169,8 +2132,7 @@ static void d40_issue_pending(struct dma_chan *chan)
 	unsigned long flags;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Channel is not allocated!\n", __func__);
+		chan_err(d40c, "Channel is not allocated!\n");
 		return;
 	}
 
@@ -2321,8 +2283,7 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Channel is not allocated!\n", __func__);
+		chan_err(d40c, "Channel is not allocated!\n");
 		return -EINVAL;
 	}
 
@@ -2404,9 +2365,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	err = dma_async_device_register(&base->dma_slave);
 
 	if (err) {
-		dev_err(base->dev,
-			"[%s] Failed to register slave channels\n",
-			__func__);
+		d40_err(base->dev, "Failed to register slave channels\n");
 		goto failure1;
 	}
 
@@ -2435,9 +2394,8 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	err = dma_async_device_register(&base->dma_memcpy);
 
 	if (err) {
-		dev_err(base->dev,
-			"[%s] Failed to regsiter memcpy only channels\n",
-			__func__);
+		d40_err(base->dev,
+			"Failed to regsiter memcpy only channels\n");
 		goto failure2;
 	}
 
@@ -2462,9 +2420,8 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	err = dma_async_device_register(&base->dma_both);
 
 	if (err) {
-		dev_err(base->dev,
-			"[%s] Failed to register logical and physical capable channels\n",
-			__func__);
+		d40_err(base->dev,
+			"Failed to register logical and physical capable channels\n");
 		goto failure3;
 	}
 	return 0;
@@ -2566,8 +2523,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	clk = clk_get(&pdev->dev, NULL);
 
 	if (IS_ERR(clk)) {
-		dev_err(&pdev->dev, "[%s] No matching clock found\n",
-			__func__);
+		d40_err(&pdev->dev, "No matching clock found\n");
 		goto failure;
 	}
 
@@ -2590,9 +2546,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
 		if (dma_id_regs[i].val !=
 		    readl(virtbase + dma_id_regs[i].reg)) {
-			dev_err(&pdev->dev,
-				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
-				__func__,
+			d40_err(&pdev->dev,
+				"Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
 				dma_id_regs[i].val,
 				dma_id_regs[i].reg,
 				readl(virtbase + dma_id_regs[i].reg));
@@ -2605,9 +2560,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 
 	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
 	    D40_HW_DESIGNER) {
-		dev_err(&pdev->dev,
-			"[%s] Unknown designer! Got %x wanted %x\n",
-			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
+			val & D40_DREG_PERIPHID2_DESIGNER_MASK,
 			D40_HW_DESIGNER);
 		goto failure;
 	}
@@ -2637,7 +2591,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		       sizeof(struct d40_chan), GFP_KERNEL);
 
 	if (base == NULL) {
-		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+		d40_err(&pdev->dev, "Out of memory\n");
 		goto failure;
 	}
 
@@ -2809,9 +2763,8 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 					   base->lcla_pool.pages);
 		if (!page_list[i]) {
 
-			dev_err(base->dev,
-				"[%s] Failed to allocate %d pages.\n",
-				__func__, base->lcla_pool.pages);
+			d40_err(base->dev, "Failed to allocate %d pages.\n",
+				base->lcla_pool.pages);
 
 			for (j = 0; j < i; j++)
 				free_pages(page_list[j], base->lcla_pool.pages);
@@ -2881,9 +2834,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
 	if (!res) {
 		ret = -ENOENT;
-		dev_err(&pdev->dev,
-			"[%s] No \"lcpa\" memory resource\n",
-			__func__);
+		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
 		goto failure;
 	}
 	base->lcpa_size = resource_size(res);
@@ -2892,9 +2843,9 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (request_mem_region(res->start, resource_size(res),
 			       D40_NAME " I/O lcpa") == NULL) {
 		ret = -EBUSY;
-		dev_err(&pdev->dev,
-			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
-			__func__, res->start, res->end);
+		d40_err(&pdev->dev,
+			"Failed to request LCPA region 0x%x-0x%x\n",
+			res->start, res->end);
 		goto failure;
 	}
 
@@ -2910,16 +2861,13 @@ static int __init d40_probe(struct platform_device *pdev)
 	base->lcpa_base = ioremap(res->start, resource_size(res));
 	if (!base->lcpa_base) {
 		ret = -ENOMEM;
-		dev_err(&pdev->dev,
-			"[%s] Failed to ioremap LCPA region\n",
-			__func__);
+		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
 		goto failure;
 	}
 
 	ret = d40_lcla_allocate(base);
 	if (ret) {
-		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
-			__func__);
+		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
 		goto failure;
 	}
 
@@ -2928,9 +2876,8 @@ static int __init d40_probe(struct platform_device *pdev)
 	base->irq = platform_get_irq(pdev, 0);
 
 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
-
 	if (ret) {
-		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+		d40_err(&pdev->dev, "No IRQ defined\n");
 		goto failure;
 	}
 
@@ -2973,7 +2920,7 @@ failure:
 		kfree(base);
 	}
 
-	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+	d40_err(&pdev->dev, "probe failed\n");
 	return ret;
 }
 
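For readers unfamiliar with the arg.../## arg pattern the patch relies on, here is a minimal standalone illustration of the same macro shape outside the kernel; printf() stands in for dev_err() and the function names are made up for the demo:

	#include <stdio.h>

	/* Same shape as d40_err(): prefix each message with the caller's name. */
	#define log_err(format, arg...) \
		printf("[%s] " format, __func__, ## arg)

	static void demo(void)
	{
		/* No varargs: ## deletes the trailing comma, so this compiles. */
		log_err("No IRQ defined\n");

		/* With varargs, like the suspend-failure message in the patch. */
		log_err("unable to suspend the chl %d (log: %d) status %x\n",
			4, 2, 0x2au);
	}

	int main(void)
	{
		demo();
		return 0;
	}

Built with gcc (the named-variadic form is a GNU extension), demo() prints "[demo] No IRQ defined" followed by the formatted suspend message, which is the same output shape the kernel helpers produce through dev_err().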