author     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>   2013-07-03 18:05:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2013-07-03 19:07:42 -0400
commit     48a9db462d99494583dad829969616ac90a8df4e (patch)
tree       d908a6172274a9d4d5c14752aa648086bc4721cd /drivers/dma/mv_xor.c
parent     dcf6d294830d46b0e6901477fb4bf455281d90c8 (diff)
drivers/dma: remove unused support for MEMSET operations
There have never been any real users of MEMSET operations since they
were introduced in January 2007 by commit 7405f74badf4 ("dmaengine:
refactor dmaengine around dma_async_tx_descriptor"). Therefore remove
support for them for now; it can always be brought back when needed.
[sebastian.hesselbarth@gmail.com: fix drivers/dma/mv_xor]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Acked-by: Dan Williams <djbw@fb.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Olof Johansson <olof@lixom.net>
Cc: Kevin Hilman <khilman@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
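
For context, the hook being dropped from this driver is device_prep_dma_memset(), visible in the diff below. A kernel client would have driven it roughly as in the following sketch. The channel acquisition, the 'dest' and 'len' values, and the error handling are illustrative assumptions, not code from any in-tree user; the whole point of the patch is that no such user ever existed.

	/* Hypothetical client of the DMA_MEMSET capability removed below. */
	struct dma_chan *chan;			/* e.g. from dma_request_channel() */
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	if (!dma_has_cap(DMA_MEMSET, chan->device->cap_mask))
		return -ENODEV;

	/* Ask the driver to prepare a hardware fill of 'len' bytes at 'dest'. */
	tx = chan->device->device_prep_dma_memset(chan, dest, 0, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);		/* kick the engine */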
Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--	drivers/dma/mv_xor.c	85
1 file changed, 4 insertions(+), 81 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d64ae14f2706..200f1a3c9a44 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -89,11 +89,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
 	hw_desc->phy_next_desc = 0;
 }
 
-static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
-{
-	desc->value = val;
-}
-
 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
 				  dma_addr_t addr)
 {
@@ -128,22 +123,6 @@ static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
 }
 
-static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
-{
-	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
-}
-
-static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
-{
-	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
-}
-
-static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
-{
-	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
-	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
-}
-
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 {
 	u32 val = __raw_readl(XOR_INTR_MASK(chan));
@@ -186,8 +165,6 @@ static int mv_can_chain(struct mv_xor_desc_slot *desc)
 
 	if (chain_old_tail->type != desc->type)
 		return 0;
-	if (desc->type == DMA_MEMSET)
-		return 0;
 
 	return 1;
 }
@@ -205,9 +182,6 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 	case DMA_MEMCPY:
 		op_mode = XOR_OPERATION_MODE_MEMCPY;
 		break;
-	case DMA_MEMSET:
-		op_mode = XOR_OPERATION_MODE_MEMSET;
-		break;
 	default:
 		dev_err(mv_chan_to_devp(chan),
 			"error: unsupported operation %d\n",
@@ -274,18 +248,9 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 	if (sw_desc->type != mv_chan->current_type)
 		mv_set_mode(mv_chan, sw_desc->type);
 
-	if (sw_desc->type == DMA_MEMSET) {
-		/* for memset requests we need to program the engine, no
-		 * descriptors used.
-		 */
-		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
-		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
-		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
-		mv_chan_set_value(mv_chan, sw_desc->value);
-	} else {
-		/* set the hardware chain */
-		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
-	}
+	/* set the hardware chain */
+	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+
 	mv_chan->pending += sw_desc->slot_cnt;
 	mv_xor_issue_pending(&mv_chan->dmachan);
 }
@@ -688,43 +653,6 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 }
 
 static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
-{
-	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x len: %u flags: %ld\n",
-		__func__, dest, len, flags);
-	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
-		return NULL;
-
-	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_memset_slot_count(len);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
-	if (sw_desc) {
-		sw_desc->type = DMA_MEMSET;
-		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
-		mv_desc_set_block_fill_val(grp_start, value);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-	}
-	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s sw_desc %p async_tx %p \n",
-		__func__, sw_desc, &sw_desc->async_tx);
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len, unsigned long flags)
 {
@@ -1137,8 +1065,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1187,9 +1113,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
-		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
@@ -1298,8 +1223,6 @@ static int mv_xor_probe(struct platform_device *pdev)
 		dma_cap_set(DMA_MEMCPY, cap_mask);
 	if (of_property_read_bool(np, "dmacap,xor"))
 		dma_cap_set(DMA_XOR, cap_mask);
-	if (of_property_read_bool(np, "dmacap,memset"))
-		dma_cap_set(DMA_MEMSET, cap_mask);
 	if (of_property_read_bool(np, "dmacap,interrupt"))
 		dma_cap_set(DMA_INTERRUPT, cap_mask);
 
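
For reference, the MEMSET path removed above was the only case in which mv_xor_start_new_chain() programmed the engine's registers directly instead of handing it a descriptor chain. Condensed from the deleted code, with the register each helper wrote taken from the deleted helper bodies:

	/* The deleted non-descriptor path, condensed from the hunks above. */
	struct mv_xor_desc *hw_desc = sw_desc->hw_desc;

	mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); /* XOR_DEST_POINTER */
	mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);       /* XOR_BLOCK_SIZE */
	mv_chan_set_value(mv_chan, sw_desc->value);                /* XOR_INIT_VALUE_LOW/HIGH */

With that special case gone, every operation type flows through mv_chan_set_next_descriptor(), which is what lets these three helpers and the DMA_MEMSET branch be deleted wholesale.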