author		G, Manjunath Kondaiah <manjugk@ti.com>	2010-12-20 21:27:18 -0500
committer	Tony Lindgren <tony@atomide.com>	2010-12-20 21:38:29 -0500
commit		d3c9be2f42223f256d06b2b69ed26afdcb02f64a (patch)
tree		79c5c7cb971e24527773af86572cafcdab6d37a6 /arch/arm/plat-omap/dma.c
parent		a4c537c7f60704691efc5f833b3d440252275c3b (diff)
OMAP: DMA: Introduce errata handling feature
Implement errata handling that uses flags instead of cpu_is_* and
cpu_class_* checks in the code.
The errata flags are initialized once at init time; at runtime the
errata variable is tested (via the IS_DMA_ERRATA macro) to decide
whether the corresponding workaround must be applied.
Reused errata handling patch from: Peter Ujfalusi
<peter.ujfalusi@nokia.com>
https://patchwork.kernel.org/patch/231191/
Changes to above patch:
1. All existing errata workarounds are converted to use this feature.
2. A detailed description is added for each erratum.
3. Fixed a bug in the SET_DMA_ERRATA macro.
4. Bit shifting in the macro definitions is replaced with the BIT() macro.
Signed-off-by: G, Manjunath Kondaiah <manjugk@ti.com>
Tested-by: Kevin Hilman <khilman@deeprootsystems.com>
Acked-by: Kevin Hilman <khilman@deeprootsystems.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
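
The errata mechanism described above reduces to a single u32 bitmask plus two
helper macros. Their real definitions live in the companion plat/dma.h change,
which is outside this diffstat, so the sketch below only illustrates the
assumed interface; the bit positions are illustrative, and the ID list is taken
from the workarounds converted in this patch.

	/*
	 * Sketch of the assumed errata-flag interface (the real definitions
	 * belong to the plat/dma.h part of this series, not shown here).
	 */
	#include <linux/bitops.h>		/* BIT() */

	/* One bit per erratum; the list mirrors the workarounds converted below. */
	#define DMA_ERRATA_IFRAME_BUFFERING	BIT(0)
	#define DMA_ERRATA_PARALLEL_CHANNELS	BIT(1)
	#define DMA_ERRATA_i378			BIT(2)
	#define DMA_ERRATA_i541			BIT(3)
	#define DMA_ERRATA_i88			BIT(4)
	#define DMA_ERRATA_3_3			BIT(5)
	#define DMA_ROMCODE_BUG			BIT(6)

	/* 'errata' is the static u32 added to dma.c by this patch. */
	#define SET_DMA_ERRATA(id)		(errata |= (id))
	#define IS_DMA_ERRATA(id)		(errata & (id))

configure_dma_errata() runs once from omap_init_dma() and sets the relevant
bits with SET_DMA_ERRATA(); the runtime paths then test IS_DMA_ERRATA()
instead of repeating cpu_is_*()/omap_type() checks.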
Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r--	arch/arm/plat-omap/dma.c	152
1 files changed, 102 insertions, 50 deletions
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 49a7cd4763f9..6f51bf37ec02 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -144,6 +144,7 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
 #define OMAP_FUNC_MUX_ARM_BASE	(0xfffe1000 + 0xec)
 
 static int enable_1510_mode;
+static u32 errata;
 
 static struct omap_dma_global_context_registers {
 	u32 dma_irqenable_l0;
@@ -1088,31 +1089,17 @@ void omap_start_dma(int lch)
 
 			cur_lch = next_lch;
 		} while (next_lch != -1);
-	} else if (cpu_is_omap242x() ||
-			(cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) {
-
-		/* Errata: Need to write lch even if not using chaining */
+	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
 		dma_write(lch, CLNK_CTRL, lch);
-	}
 
 	omap_enable_channel_irq(lch);
 
 	l = dma_read(CCR, lch);
 
-	/*
-	 * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
-	 * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
-	 * bursting is enabled. This might result in data gets stalled in
-	 * FIFO at the end of the block.
-	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
-	 * guarantee no data will stay in the DMA FIFO in case inter frame
-	 * buffering occurs.
-	 */
-	if (cpu_is_omap2420() ||
-		(cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
+	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
 		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
-
 	l |= OMAP_DMA_CCR_EN;
+
 	dma_write(l, CCR, lch);
 
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -1128,8 +1115,8 @@ void omap_stop_dma(int lch)
 	dma_write(0, CICR, lch);
 
 	l = dma_read(CCR, lch);
-	/* OMAP3 Errata i541: sDMA FIFO draining does not finish */
-	if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
+	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
+			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
 		int i = 0;
 		u32 sys_cf;
 
@@ -1229,11 +1216,7 @@ dma_addr_t omap_get_dma_src_pos(int lch)
 	else
 		offset = dma_read(CSAC, lch);
 
-	/*
-	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
-	 * read before the DMA controller finished disabling the channel.
-	 */
-	if (!cpu_is_omap15xx() && offset == 0)
+	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
 		offset = dma_read(CSAC, lch);
 
 	if (cpu_class_is_omap1())
@@ -1814,7 +1797,7 @@ int omap_stop_dma_chain_transfers(int chain_id)
 {
 	int *channels;
 	u32 l, i;
-	u32 sys_cf;
+	u32 sys_cf = 0;
 
 	/* Check for input params */
 	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
@@ -1829,15 +1812,13 @@ int omap_stop_dma_chain_transfers(int chain_id)
 	}
 	channels = dma_linked_lch[chain_id].linked_dmach_q;
 
-	/*
-	 * DMA Errata:
-	 * Special programming model needed to disable DMA before end of block
-	 */
-	sys_cf = dma_read(OCP_SYSCONFIG, 0);
-	l = sys_cf;
-	/* Middle mode reg set no Standby */
-	l &= ~((1 << 12)|(1 << 13));
-	dma_write(l, OCP_SYSCONFIG, 0);
+	if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
+		sys_cf = dma_read(OCP_SYSCONFIG, 0);
+		l = sys_cf;
+		/* Middle mode reg set no Standby */
+		l &= ~((1 << 12)|(1 << 13));
+		dma_write(l, OCP_SYSCONFIG, 0);
+	}
 
 	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
 
@@ -1856,8 +1837,8 @@ int omap_stop_dma_chain_transfers(int chain_id)
 	/* Reset the Queue pointers */
 	OMAP_DMA_CHAIN_QINIT(chain_id);
 
-	/* Errata - put in the old value */
-	dma_write(sys_cf, OCP_SYSCONFIG, 0);
+	if (IS_DMA_ERRATA(DMA_ERRATA_i88))
+		dma_write(sys_cf, OCP_SYSCONFIG, 0);
 
 	return 0;
 }
@@ -2063,12 +2044,7 @@ static int omap2_dma_handle_ch(int ch)
 	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
 		printk(KERN_INFO "DMA transaction error with device %d\n",
 		       dma_chan[ch].dev_id);
-		if (cpu_class_is_omap2()) {
-			/*
-			 * Errata: sDMA Channel is not disabled
-			 * after a transaction error. So we explicitely
-			 * disable the channel
-			 */
+		if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
 			u32 ccr;
 
 			ccr = dma_read(CCR, ch);
@@ -2168,13 +2144,7 @@ void omap_dma_global_context_restore(void)
 	dma_write(omap_dma_global_context.dma_irqenable_l0,
 		IRQENABLE_L0, 0);
 
-	/*
-	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
-	 * after secure sram context save and restore. Hence we need to
-	 * manually clear those IRQs to avoid spurious interrupts. This
-	 * affects only secure devices.
-	 */
-	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
 		dma_write(0x3 , IRQSTATUS_L0, 0);
 
 	for (ch = 0; ch < dma_chan_count; ch++)
@@ -2182,6 +2152,87 @@ void omap_dma_global_context_restore(void)
 		omap_clear_dma(ch);
 }
 
+static void configure_dma_errata(void)
+{
+
+	/*
+	 * Errata applicable for OMAP2430ES1.0 and all omap2420
+	 *
+	 * I.
+	 * Erratum ID: Not Available
+	 * Inter Frame DMA buffering issue DMA will wrongly
+	 * buffer elements if packing and bursting is enabled. This might
+	 * result in data gets stalled in FIFO at the end of the block.
+	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
+	 * guarantee no data will stay in the DMA FIFO in case inter frame
+	 * buffering occurs
+	 *
+	 * II.
+	 * Erratum ID: Not Available
+	 * DMA may hang when several channels are used in parallel
+	 * In the following configuration, DMA channel hanging can occur:
+	 * a. Channel i, hardware synchronized, is enabled
+	 * b. Another channel (Channel x), software synchronized, is enabled.
+	 * c. Channel i is disabled before end of transfer
+	 * d. Channel i is reenabled.
+	 * e. Steps 1 to 4 are repeated a certain number of times.
+	 * f. A third channel (Channel y), software synchronized, is enabled.
+	 * Channel x and Channel y may hang immediately after step 'f'.
+	 * Workaround:
+	 * For any channel used - make sure NextLCH_ID is set to the value j.
+	 */
+	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
+				(omap_type() == OMAP2430_REV_ES1_0))) {
+		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
+		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
+	}
+
+	/*
+	 * Erratum ID: i378: OMAP2plus: sDMA Channel is not disabled
+	 * after a transaction error.
+	 * Workaround: SW should explicitely disable the channel.
+	 */
+	if (cpu_class_is_omap2())
+		SET_DMA_ERRATA(DMA_ERRATA_i378);
+
+	/*
+	 * Erratum ID: i541: sDMA FIFO draining does not finish
+	 * If sDMA channel is disabled on the fly, sDMA enters standby even
+	 * through FIFO Drain is still in progress
+	 * Workaround: Put sDMA in NoStandby more before a logical channel is
+	 * disabled, then put it back to SmartStandby right after the channel
+	 * finishes FIFO draining.
+	 */
+	if (cpu_is_omap34xx())
+		SET_DMA_ERRATA(DMA_ERRATA_i541);
+
+	/*
+	 * Erratum ID: i88 : Special programming model needed to disable DMA
+	 * before end of block.
+	 * Workaround: software must ensure that the DMA is configured in No
+	 * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
+	 */
+	if (cpu_is_omap34xx() && (omap_type() == OMAP3430_REV_ES1_0))
+		SET_DMA_ERRATA(DMA_ERRATA_i88);
+
+	/*
+	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
+	 * read before the DMA controller finished disabling the channel.
+	 */
+	if (!cpu_is_omap15xx())
+		SET_DMA_ERRATA(DMA_ERRATA_3_3);
+
+	/*
+	 * Erratum ID: Not Available
+	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
+	 * after secure sram context save and restore.
+	 * Work around: Hence we need to manually clear those IRQs to avoid
+	 * spurious interrupts. This affects only secure devices.
+	 */
+	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+		SET_DMA_ERRATA(DMA_ROMCODE_BUG);
+}
+
 /*----------------------------------------------------------------------------*/
 
 static int __init omap_init_dma(void)
@@ -2342,6 +2393,7 @@ static int __init omap_init_dma(void)
 			dma_chan[1].dev_id = 1;
 		}
 	}
+	configure_dma_errata();
 
 	return 0;
 