author		Vinod Koul <vkoul@kernel.org>	2019-03-12 02:35:47 -0400
committer	Vinod Koul <vkoul@kernel.org>	2019-03-12 02:35:47 -0400
commit		feb59d77a46d5df93864471ca926b6701479091b (patch)
tree		db58124e49685323e9ccd52d50019cdaf06061b8 /drivers
parent		42cb6e07c5a6260988f7a06c638ecb9fdf6890f4 (diff)
parent		c2be36ac2141b97f9a35ab560381317566bee357 (diff)
Merge branch 'topic/xilinx' into for-linus
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/xilinx/xilinx_dma.c	170
1 file changed, 100 insertions(+), 70 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..c8acd34f1f70 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -86,6 +86,7 @@
 #define XILINX_DMA_DMASR_DMA_DEC_ERR	BIT(6)
 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR	BIT(5)
 #define XILINX_DMA_DMASR_DMA_INT_ERR	BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK	BIT(3)
 #define XILINX_DMA_DMASR_IDLE		BIT(1)
 #define XILINX_DMA_DMASR_HALTED		BIT(0)
 #define XILINX_DMA_DMASR_DELAY_MASK	GENMASK(31, 24)
@@ -161,7 +162,9 @@
 #define XILINX_DMA_REG_BTT		0x28
 
 /* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
@@ -412,7 +415,6 @@ struct xilinx_dma_config {
  * @dev: Device Structure
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
  * @mcdma: Specifies whether Multi-Channel is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -425,13 +427,13 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
-	bool has_sg;
 	bool mcdma;
 	u32 flush_on_fsync;
 	bool ext_addr;
@@ -444,6 +446,7 @@ struct xilinx_dma_device {
 	struct clk *rxs_clk;
 	u32 nr_channels;
 	u32 chan_id;
+	u32 max_buffer_len;
 };
 
 /* Macros */
@@ -960,6 +963,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 }
 
 /**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+				    int size, int done)
+{
+	size_t copy;
+
+	copy = min_t(size_t, size - done,
+		     chan->xdev->max_buffer_len);
+
+	if ((copy + done < size) &&
+	    chan->xdev->common.copy_align) {
+		/*
+		 * If this is not the last descriptor, make sure
+		 * the next one will be properly aligned
+		 */
+		copy = rounddown(copy,
+				 (1 << chan->xdev->common.copy_align));
+	}
+	return copy;
+}
+
+/**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
  * @cookie: Transaction identifier
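The new xilinx_dma_calc_copysize() helper above caps each chunk at the device's max_buffer_len and, for every chunk except the last, rounds the size down so that the next chunk starts on a copy_align boundary. A minimal userspace sketch of the same logic, assuming nothing from the driver (demo_chan, demo_calc_copysize and the example sizes are illustrative stand-ins, with plain C in place of min_t() and rounddown()):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the two device fields the helper reads. */
struct demo_chan {
	size_t max_buffer_len;		/* e.g. GENMASK(22, 0) = 0x7fffff */
	unsigned int copy_align;	/* log2 of required alignment, 0 = none */
};

static size_t demo_calc_copysize(const struct demo_chan *chan,
				 size_t size, size_t done)
{
	size_t copy = size - done;

	/* Cap at the hardware's length-field limit */
	if (copy > chan->max_buffer_len)
		copy = chan->max_buffer_len;

	/* Not the last chunk: round down so the next chunk starts aligned */
	if (copy + done < size && chan->copy_align)
		copy &= ~(((size_t)1 << chan->copy_align) - 1);

	return copy;
}

int main(void)
{
	struct demo_chan chan = { .max_buffer_len = 0x7fffff, .copy_align = 6 };
	size_t total = 0x1000000, done = 0;	/* 16 MiB, 64-byte alignment */

	while (done < total) {
		size_t copy = demo_calc_copysize(&chan, total, done);

		printf("chunk at 0x%zx: 0x%zx bytes\n", done, copy);
		done += copy;
	}
	return 0;
}

With a 23-bit length field and 64-byte alignment, the 16 MiB transfer splits into two aligned 0x7fffc0-byte chunks plus a short 0x80-byte tail.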
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		list_for_each_entry(segment, &desc->segments, node) {
 			hw = &segment->hw;
 			residue += (hw->control - hw->status) &
-				   XILINX_DMA_MAX_TRANS_LEN;
+				   chan->xdev->max_buffer_len;
 		}
 	}
 	spin_unlock_irqrestore(&chan->lock, flags);
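The residue arithmetic above relies on the low bits of hw->control holding the programmed byte count and the low bits of hw->status holding the bytes actually transferred, as in the AXI DMA scatter-gather descriptor layout; masking with max_buffer_len strips the flag bits above the length field. A standalone example with invented register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_buffer_len = 0x7fffff;		/* 23-bit length field */
	uint32_t control = 0x80000000u | 0x1000;	/* flag bits + 4096 bytes queued */
	uint32_t status  = 0x80000000u | 0x0400;	/* flag bits + 1024 bytes done */

	/* The mask cancels the matching flag bits in control and status */
	uint32_t residue = (control - status) & max_buffer_len;

	printf("residue = %u bytes\n", (unsigned int)residue);	/* prints 3072 */
	return 0;
}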
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
-	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_desc = list_last_entry(&chan->pending_list,
 				    struct xilinx_dma_tx_descriptor, node);
 
-	tail_segment = list_last_entry(&tail_desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
@@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-				tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+				XILINX_VDMA_REG_START_ADDRESS_64(i++),
+				segment->hw.buf_addr,
+				segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
 				XILINX_VDMA_REG_START_ADDRESS(i++),
 				segment->hw.buf_addr);
 
 		last = segment;
 	}
 
 	if (!last)
 		return;
 
 	/* HW expects these parameters to be same for one transaction */
 	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
 	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
 			last->hw.stride);
 	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
 
 	chan->desc_submitcount++;
 	chan->desc_pendingcount--;
 	list_del(&desc->node);
 	list_add_tail(&desc->node, &chan->active_list);
 	if (chan->desc_submitcount == chan->num_frms)
 		chan->desc_submitcount = 0;
-	}
 
 	chan->idle = false;
 }
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-				hw->control & XILINX_DMA_MAX_TRANS_LEN);
+				hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-				hw->control & XILINX_DMA_MAX_TRANS_LEN);
+				hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;
 
-	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+	if (!len || len > chan->xdev->max_buffer_len)
 		return NULL;
 
 	desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+							sg_used);
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, period_len - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, period_len,
+							sg_used);
 			hw = &segment->hw;
 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
 					  period_len * i);
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
-	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
 	chan->ext_addr = xdev->ext_addr;
 	/* This variable ensures that descriptors are not
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->stop_transfer = xilinx_dma_stop_transfer;
 	}
 
+	/* check if SG is enabled (only for AXIDMA and CDMA) */
+	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		    XILINX_DMA_DMASR_SG_MASK)
+			chan->has_sg = true;
+		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+			chan->has_sg ? "enabled" : "disabled");
+	}
+
 	/* Initialize the tasklet */
 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
 		     (unsigned long)chan);
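Instead of trusting a device-tree flag, the probe code above now reads the capability back from the IP itself: DMASR bit 3 (SGIncld) reports whether the Scatter-Gather engine was included when the AXI DMA/CDMA core was synthesized. A trivial sketch of the test, with the status word passed in directly instead of read through dma_ctrl_read() (demo_sg_enabled is illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_DMASR_SG_MASK	(1U << 3)	/* mirrors XILINX_DMA_DMASR_SG_MASK */

/* True when the hardware was built with the Scatter-Gather engine */
static bool demo_sg_enabled(uint32_t dmasr)
{
	return (dmasr & DEMO_DMASR_SG_MASK) != 0;
}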
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
-	u32 num_frames, addr_width;
+	u32 num_frames, addr_width, len_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(xdev->regs);
 
 	/* Retrieve the DMA engine properties from the device tree */
-	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+		if (!of_property_read_u32(node, "xlnx,sg-length-width",
+					  &len_width)) {
+			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+				dev_warn(xdev->dev,
+					 "invalid xlnx,sg-length-width property value. Using default width\n");
+			} else {
+				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+				xdev->max_buffer_len =
+					GENMASK(len_width - 1, 0);
+			}
+		}
+	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
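The xlnx,sg-length-width handling shown above boils down to building a mask of the len_width low bits: 8 is the smallest width accepted, 23 the classic AXI DMA limit, and 26 the maximum the newer IP supports. A standalone illustration of the mapping (GENMASK32 is a userspace rendering of the kernel's GENMASK macro, assumed here only for the demo):

#include <stdio.h>

/* Userspace rendering of the kernel's GENMASK(h, l) for 32-bit values */
#define GENMASK32(h, l)	((~0U << (l)) & (~0U >> (31 - (h))))

int main(void)
{
	unsigned int widths[] = { 8, 23, 26 };	/* MIN, default MAX, V2 MAX */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("xlnx,sg-length-width = %2u -> max_buffer_len = 0x%08x\n",
		       widths[i], GENMASK32(widths[i] - 1, 0));
	return 0;
}

This prints 0x000000ff, 0x007fffff, and 0x03ffffff: the same values the driver stores in xdev->max_buffer_len and later uses to clamp transfer lengths and mask the BTT register writes.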
