Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--  drivers/dma/ste_dma40.c | 323
1 file changed, 201 insertions, 122 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4bfa8d..2ed1ac3513f3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
+#include <linux/regulator/consumer.h>
 
 #include <plat/ste_dma40.h>
 
@@ -69,6 +70,22 @@ enum d40_command {
 };
 
 /*
+ * enum d40_events - The different Event Enables for the event lines.
+ *
+ * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
+ * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
+ * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
+ * @D40_ROUND_EVENTLINE: Status check for event line.
+ */
+
+enum d40_events {
+	D40_DEACTIVATE_EVENTLINE = 0,
+	D40_ACTIVATE_EVENTLINE = 1,
+	D40_SUSPEND_REQ_EVENTLINE = 2,
+	D40_ROUND_EVENTLINE = 3
+};
+
+/*
  * These are the registers that has to be saved and later restored
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
 }
 #endif
 
-static int d40_channel_execute_command(struct d40_chan *d40c,
-				       enum d40_command command)
+static int __d40_execute_command_phy(struct d40_chan *d40c,
+				     enum d40_command command)
 {
 	u32 status;
 	int i;
@@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
 	unsigned long flags;
 	u32 wmask;
 
+	if (command == D40_DMA_STOP) {
+		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
+		if (ret)
+			return ret;
+	}
+
 	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
 
 	if (d40c->phy_chan->num % 2 == 0)
@@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c)
 	}
 
 	d40c->pending_tx = 0;
-	d40c->busy = false;
 }
 
-static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
-				   u32 event, int reg)
+static void __d40_config_set_event(struct d40_chan *d40c,
+				   enum d40_events event_type, u32 event,
+				   int reg)
 {
 	void __iomem *addr = chan_base(d40c) + reg;
 	int tries;
+	u32 status;
+
+	switch (event_type) {
+
+	case D40_DEACTIVATE_EVENTLINE:
 
-	if (!enable) {
 		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
 		       | ~D40_EVENTLINE_MASK(event), addr);
-		return;
-	}
+		break;
+
+	case D40_SUSPEND_REQ_EVENTLINE:
+		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+			  D40_EVENTLINE_POS(event);
+
+		if (status == D40_DEACTIVATE_EVENTLINE ||
+		    status == D40_SUSPEND_REQ_EVENTLINE)
+			break;
 
+		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
+
+		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
+
+			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+				  D40_EVENTLINE_POS(event);
+
+			cpu_relax();
+			/*
+			 * Reduce the number of bus accesses while
+			 * waiting for the DMA to suspend.
+			 */
+			udelay(3);
+
+			if (status == D40_DEACTIVATE_EVENTLINE)
+				break;
+		}
+
+		if (tries == D40_SUSPEND_MAX_IT) {
+			chan_err(d40c,
+				"unable to stop the event_line chl %d (log: %d)"
+				"status %x\n", d40c->phy_chan->num,
+				d40c->log_num, status);
+		}
+		break;
+
+	case D40_ACTIVATE_EVENTLINE:
 	/*
 	 * The hardware sometimes doesn't register the enable when src and dst
 	 * event lines are active on the same logical channel. Retry to ensure
 	 * it does. Usually only one retry is sufficient.
 	 */
-	tries = 100;
-	while (--tries) {
-		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
-		       | ~D40_EVENTLINE_MASK(event), addr);
+		tries = 100;
+		while (--tries) {
+			writel((D40_ACTIVATE_EVENTLINE <<
+				D40_EVENTLINE_POS(event)) |
+				~D40_EVENTLINE_MASK(event), addr);
 
-		if (readl(addr) & D40_EVENTLINE_MASK(event))
-			break;
-	}
+			if (readl(addr) & D40_EVENTLINE_MASK(event))
+				break;
+		}
 
-	if (tries != 99)
-		dev_dbg(chan2dev(d40c),
-			"[%s] workaround enable S%cLNK (%d tries)\n",
-			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
-			100 - tries);
+		if (tries != 99)
+			dev_dbg(chan2dev(d40c),
+				"[%s] workaround enable S%cLNK (%d tries)\n",
+				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+				100 - tries);
 
-	WARN_ON(!tries);
-}
+		WARN_ON(!tries);
+		break;
 
-static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
-{
-	unsigned long flags;
+	case D40_ROUND_EVENTLINE:
+		BUG();
+		break;
 
-	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+	}
+}
 
+static void d40_config_set_event(struct d40_chan *d40c,
+				 enum d40_events event_type)
+{
 	/* Enable event line connected to device (or memcpy) */
 	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SSLNK);
 	}
 
 	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SDLNK);
 	}
-
-	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
 }
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
 	return val;
 }
 
+static int
+__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 active_status;
+	void __iomem *active_reg;
+
+	if (d40c->phy_chan->num % 2 == 0)
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+	else
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+
+	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+	switch (command) {
+	case D40_DMA_STOP:
+	case D40_DMA_SUSPEND_REQ:
+
+		active_status = (readl(active_reg) &
+				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+				 D40_CHAN_POS(d40c->phy_chan->num);
+
+		if (active_status == D40_DMA_RUN)
+			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
+		else
+			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
+
+		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
+			ret = __d40_execute_command_phy(d40c, command);
+
+		break;
+
+	case D40_DMA_RUN:
+
+		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
+		ret = __d40_execute_command_phy(d40c, command);
+		break;
+
+	case D40_DMA_SUSPENDED:
+		BUG();
+		break;
+	}
+
+	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+	return ret;
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+				       enum d40_command command)
+{
+	if (chan_is_logical(d40c))
+		return __d40_execute_command_log(d40c, command);
+	else
+		return __d40_execute_command_phy(d40c, command);
+}
+
 static u32 d40_get_prmo(struct d40_chan *d40c)
 {
 	static const unsigned int phy_map[] = {
@@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c)
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
-	if (res == 0) {
-		if (chan_is_logical(d40c)) {
-			d40_config_set_event(d40c, false);
-			/* Resume the other logical channels if any */
-			if (d40_chan_has_events(d40c))
-				res = d40_channel_execute_command(d40c,
-								  D40_DMA_RUN);
-		}
-	}
+
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c)
 
 	spin_lock_irqsave(&d40c->lock, flags);
 	pm_runtime_get_sync(d40c->base->dev);
-	if (d40c->base->rev == 0)
-		if (chan_is_logical(d40c)) {
-			res = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			goto no_suspend;
-		}
 
 	/* If bytes left to transfer or linked tx resume job */
-	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-
-		if (chan_is_logical(d40c))
-			d40_config_set_event(d40c, true);
-
+	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
-	}
 
-no_suspend:
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
 
-static int d40_terminate_all(struct d40_chan *chan)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	ret = d40_pause(chan);
-	if (!ret && chan_is_physical(chan))
-		ret = d40_channel_execute_command(chan, D40_DMA_STOP);
-
-	spin_lock_irqsave(&chan->lock, flags);
-	d40_term_all(chan);
-	spin_unlock_irqrestore(&chan->lock, flags);
-
-	return ret;
-}
-
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct d40_chan *d40c = container_of(tx->chan,
@@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static int d40_start(struct d40_chan *d40c)
 {
-	if (d40c->base->rev == 0) {
-		int err;
-
-		if (chan_is_logical(d40c)) {
-			err = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			if (err)
-				return err;
-		}
-	}
-
-	if (chan_is_logical(d40c))
-		d40_config_set_event(d40c, true);
-
 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
 }
 
@@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	d40d = d40_first_queued(d40c);
 
 	if (d40d != NULL) {
-		if (!d40c->busy)
+		if (!d40c->busy) {
 			d40c->busy = true;
-
-		pm_runtime_get_sync(d40c->base->dev);
+			pm_runtime_get_sync(d40c->base->dev);
+		}
 
 		/* Remove from queue */
 		d40_desc_remove(d40d);
@@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data)
 
 	return;
 
- err:
-	/* Rescue manoeuvre if receiving double interrupts */
+err:
+	/* Rescue manouver if receiving double interrupts */
 	if (d40c->pending_tx > 0)
 		d40c->pending_tx--;
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c)
 	return 0;
 }
 
-
 static int d40_free_dma(struct d40_chan *d40c)
 {
 
@@ -1806,43 +1878,18 @@ static int d40_free_dma(struct d40_chan *d40c)
 	}
 
 	pm_runtime_get_sync(d40c->base->dev);
-	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
-		chan_err(d40c, "suspend failed\n");
+		chan_err(d40c, "stop failed\n");
 		goto out;
 	}
 
-	if (chan_is_logical(d40c)) {
-		/* Release logical channel, deactivate the event line */
+	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
 
-		d40_config_set_event(d40c, false);
+	if (chan_is_logical(d40c))
 		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
-
-		/*
-		 * Check if there are more logical allocation
-		 * on this phy channel.
-		 */
-		if (!d40_alloc_mask_free(phy, is_src, event)) {
-			/* Resume the other logical channels if any */
-			if (d40_chan_has_events(d40c)) {
-				res = d40_channel_execute_command(d40c,
-								  D40_DMA_RUN);
-				if (res)
-					chan_err(d40c,
-						"Executing RUN command\n");
-			}
-			goto out;
-		}
-	} else {
-		(void) d40_alloc_mask_free(phy, is_src, 0);
-	}
-
-	/* Release physical channel */
-	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
-	if (res) {
-		chan_err(d40c, "Failed to stop channel\n");
-		goto out;
-	}
+	else
+		d40c->base->lookup_phy_chans[phy->num] = NULL;
 
 	if (d40c->busy) {
 		pm_runtime_mark_last_busy(d40c->base->dev);
@@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c)
 	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
-	d40c->base->lookup_phy_chans[phy->num] = NULL;
 out:
 
 	pm_runtime_mark_last_busy(d40c->base->dev);
@@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
 		desc->cyclic = true;
 
-	if (direction != DMA_NONE) {
+	if (direction != DMA_TRANS_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
 		if (direction == DMA_DEV_TO_MEM)
@@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
+static void d40_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+	int ret;
+
+	spin_lock_irqsave(&d40c->lock, flags);
+
+	pm_runtime_get_sync(d40c->base->dev);
+	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
+	if (ret)
+		chan_err(d40c, "Failed to stop channel\n");
+
+	d40_term_all(d40c);
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
+	if (d40c->busy) {
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
+	}
+	d40c->busy = false;
+
+	spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
 static int
 dma40_config_to_halfchannel(struct d40_chan *d40c,
 			    struct stedma40_half_channel_info *info,
@@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		return d40_terminate_all(d40c);
+		d40_terminate_all(chan);
+		return 0;
 	case DMA_PAUSE:
 		return d40_pause(d40c);
 	case DMA_RESUME:
@@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
 		 rev, res->start);
 
+	if (rev < 2) {
+		d40_err(&pdev->dev, "hardware revision: %d is not supported",
+			rev);
+		goto failure;
+	}
+
 	plat_data = pdev->dev.platform_data;
 
 	/* Count the number of logical channels in use */
@@ -2998,6 +3076,7 @@ failure:
 
 	if (base) {
 		kfree(base->lcla_pool.alloc_map);
+		kfree(base->reg_val_backup_chan);
 		kfree(base->lookup_log_chans);
 		kfree(base->lookup_phy_chans);
 		kfree(base->phy_res);