path: root/drivers/dma/mv_xor.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-14 17:54:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-14 17:54:26 -0500
commit	c2714334b944abbeaaadda8cddde619eff0292a1 (patch)
tree	b45be97a313f58aa62933040230d51aa3a8592b4 /drivers/dma/mv_xor.c
parent	0beb58783f2168354e2b5297af45fc7db70adf12 (diff)
parent	5e5d8999a316d596f2012fe1cf4c59e0de693dab (diff)
Merge tag 'mvebu' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC updates for Marvell mvebu/kirkwood from Olof Johansson:
 "This is a branch with updates for Marvell's mvebu/kirkwood platforms.
  They came in late-ish, and were heavily interdependent such that it
  didn't make sense to split them up across the cross-platform topic
  branches. So here they are (for the second release in a row) in a
  branch on their own."

* tag 'mvebu' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (88 commits)
  arm: l2x0: add aurora related properties to OF binding
  arm: mvebu: add Aurora L2 Cache Controller to the DT
  arm: mvebu: add L2 cache support
  dma: mv_xor: fix error handling path
  dma: mv_xor: fix error checking of irq_of_parse_and_map()
  dma: mv_xor: use request_irq() instead of devm_request_irq()
  dma: mv_xor: clear the window override control registers
  arm: mvebu: fix address decoding armada_cfg_base() function
  ARM: mvebu: update defconfig with I2C and RTC support
  ARM: mvebu: Add SATA support for OpenBlocks AX3-4
  ARM: mvebu: Add support for the RTC in OpenBlocks AX3-4
  ARM: mvebu: Add support for I2C on OpenBlocks AX3-4
  ARM: mvebu: Add support for I2C controllers in Armada 370/XP
  arm: mvebu: Add hardware I/O Coherency support
  arm: plat-orion: Add coherency attribute when setup mbus target
  arm: dma mapping: Export a dma ops function arm_dma_set_mask
  arm: mvebu: Add SMP support for Armada XP
  arm: mm: Add support for PJ4B cpu and init routines
  arm: mvebu: Add IPI support via doorbells
  arm: mvebu: Add initial support for power managmement service unit
  ...
Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--	drivers/dma/mv_xor.c	429
1 file changed, 244 insertions(+), 185 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d12ad00da4cb..ac71f555dd72 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,9 @@
 #include <linux/platform_device.h>
 #include <linux/memory.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
 #include <linux/platform_data/dma-mv_xor.h>
 
 #include "dmaengine.h"
@@ -34,14 +37,14 @@
 static void mv_xor_issue_pending(struct dma_chan *chan);
 
 #define to_mv_xor_chan(chan)		\
-	container_of(chan, struct mv_xor_chan, common)
-
-#define to_mv_xor_device(dev)		\
-	container_of(dev, struct mv_xor_device, common)
+	container_of(chan, struct mv_xor_chan, dmachan)
 
 #define to_mv_xor_slot(tx)		\
 	container_of(tx, struct mv_xor_desc_slot, async_tx)
 
+#define mv_chan_to_devp(chan)		\
+	((chan)->dmadev.dev)
+
 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -166,7 +169,7 @@ static int mv_is_err_intr(u32 intr_cause)
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
 	u32 val = ~(1 << (chan->idx * 16));
-	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
+	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
 	__raw_writel(val, XOR_INTR_CAUSE(chan));
 }
 
@@ -206,9 +209,9 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 		op_mode = XOR_OPERATION_MODE_MEMSET;
 		break;
 	default:
-		dev_printk(KERN_ERR, chan->device->common.dev,
-			   "error: unsupported operation %d.\n",
-			   type);
+		dev_err(mv_chan_to_devp(chan),
+			"error: unsupported operation %d.\n",
+			type);
 		BUG();
 		return;
 	}
@@ -223,7 +226,7 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
 {
 	u32 activation;
 
-	dev_dbg(chan->device->common.dev, " activate chan.\n");
+	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
 	activation = __raw_readl(XOR_ACTIVATION(chan));
 	activation |= 0x1;
 	__raw_writel(activation, XOR_ACTIVATION(chan));
@@ -251,7 +254,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt)
 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
 			      struct mv_xor_desc_slot *slot)
 {
-	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
 		__func__, __LINE__, slot);
 
 	slot->slots_per_op = 0;
@@ -266,7 +269,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 				   struct mv_xor_desc_slot *sw_desc)
 {
-	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
 		__func__, __LINE__, sw_desc);
 	if (sw_desc->type != mv_chan->current_type)
 		mv_set_mode(mv_chan, sw_desc->type);
@@ -284,7 +287,7 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
 	}
 	mv_chan->pending += sw_desc->slot_cnt;
-	mv_xor_issue_pending(&mv_chan->common);
+	mv_xor_issue_pending(&mv_chan->dmachan);
 }
 
 static dma_cookie_t
@@ -308,8 +311,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 	 */
 	if (desc->group_head && desc->unmap_len) {
 		struct mv_xor_desc_slot *unmap = desc->group_head;
-		struct device *dev =
-			&mv_chan->device->pdev->dev;
+		struct device *dev = mv_chan_to_devp(mv_chan);
 		u32 len = unmap->unmap_len;
 		enum dma_ctrl_flags flags = desc->async_tx.flags;
 		u32 src_cnt;
@@ -353,7 +355,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
 {
 	struct mv_xor_desc_slot *iter, *_iter;
 
-	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 				 completed_node) {
 
@@ -369,7 +371,7 @@ static int
 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
 	struct mv_xor_chan *mv_chan)
 {
-	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
 		__func__, __LINE__, desc, desc->async_tx.flags);
 	list_del(&desc->chain_node);
 	/* the client is allowed to attach dependent operations
@@ -393,8 +395,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
 	int seen_current = 0;
 
-	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
-	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
 	mv_xor_clean_completed_slots(mv_chan);
 
 	/* free completed slots from the chain starting with
@@ -438,7 +440,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	}
 
 	if (cookie > 0)
-		mv_chan->common.completed_cookie = cookie;
+		mv_chan->dmachan.completed_cookie = cookie;
 }
 
 static void
@@ -547,7 +549,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	dma_cookie_t cookie;
 	int new_hw_chain = 1;
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p: async_tx %p\n",
 		__func__, sw_desc, &sw_desc->async_tx);
 
@@ -570,7 +572,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (!mv_can_chain(grp_start))
 		goto submit_done;
 
-	dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
 		old_chain_tail->async_tx.phys);
 
 	/* fix up the hardware chain */
@@ -604,9 +606,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 	int idx;
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 	struct mv_xor_desc_slot *slot = NULL;
-	struct mv_xor_platform_data *plat_data =
-		mv_chan->device->pdev->dev.platform_data;
-	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
 
 	/* Allocate descriptor slots */
 	idx = mv_chan->slots_allocated;
@@ -617,7 +617,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 				" %d descriptor slots", idx);
 			break;
 		}
-		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
+		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
 		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
 
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
@@ -625,7 +625,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
 		INIT_LIST_HEAD(&slot->tx_list);
-		hw_desc = (char *) mv_chan->device->dma_desc_pool;
+		hw_desc = (char *) mv_chan->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
 		slot->idx = idx++;
@@ -641,7 +641,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 					struct mv_xor_desc_slot,
 					slot_node);
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"allocated %d descriptor slots last_used: %p\n",
 		mv_chan->slots_allocated, mv_chan->last_used);
 
@@ -656,7 +656,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	struct mv_xor_desc_slot *sw_desc, *grp_start;
 	int slot_cnt;
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s dest: %x src %x len: %u flags: %ld\n",
 		__func__, dest, src, len, flags);
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -680,7 +680,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	}
 	spin_unlock_bh(&mv_chan->lock);
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p\n",
 		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
 
@@ -695,7 +695,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 	struct mv_xor_desc_slot *sw_desc, *grp_start;
 	int slot_cnt;
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s dest: %x len: %u flags: %ld\n",
 		__func__, dest, len, flags);
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -718,7 +718,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 		sw_desc->unmap_len = len;
 	}
 	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p \n",
 		__func__, sw_desc, &sw_desc->async_tx);
 	return sw_desc ? &sw_desc->async_tx : NULL;
@@ -737,7 +737,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 
 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
 		__func__, src_cnt, len, dest, flags);
 
@@ -758,7 +758,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
 	}
 	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p \n",
 		__func__, sw_desc, &sw_desc->async_tx);
 	return sw_desc ? &sw_desc->async_tx : NULL;
@@ -791,12 +791,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
 	}
 	mv_chan->last_used = NULL;
 
-	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
 		__func__, mv_chan->slots_allocated);
 	spin_unlock_bh(&mv_chan->lock);
 
 	if (in_use_descs)
-		dev_err(mv_chan->device->common.dev,
+		dev_err(mv_chan_to_devp(mv_chan),
 			"freeing %d in use descriptors!\n", in_use_descs);
 }
 
@@ -828,42 +828,42 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
 	u32 val;
 
 	val = __raw_readl(XOR_CONFIG(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		"config 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_ACTIVATION(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		"activation 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_INTR_CAUSE(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		"intr cause 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_INTR_MASK(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		"intr mask 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_ERROR_CAUSE(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		"error cause 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_ERROR_ADDR(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		"error addr 0x%08x.\n", val);
 }
 
 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
 					 u32 intr_cause)
 {
 	if (intr_cause & (1 << 4)) {
-		dev_dbg(chan->device->common.dev,
+		dev_dbg(mv_chan_to_devp(chan),
 			"ignore this error\n");
 		return;
 	}
 
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		"error on chan %d. intr cause 0x%08x.\n",
 		chan->idx, intr_cause);
 
 	mv_dump_xor_regs(chan);
 	BUG();
@@ -874,7 +874,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
 	struct mv_xor_chan *chan = data;
 	u32 intr_cause = mv_chan_get_intr_cause(chan);
 
-	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
 
 	if (mv_is_err_intr(intr_cause))
 		mv_xor_err_interrupt_handler(chan, intr_cause);
@@ -901,7 +901,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
  */
 #define MV_XOR_TEST_SIZE 2000
 
-static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
+static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
 	int i;
 	void *src, *dest;
@@ -910,7 +910,6 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
 	int err = 0;
-	struct mv_xor_chan *mv_chan;
 
 	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -926,10 +925,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;
 
-	/* Start copy, using first DMA channel */
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
-				device_node);
+	dma_chan = &mv_chan->dmachan;
 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
@@ -950,18 +946,17 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
 
 	if (mv_xor_status(dma_chan, cookie, NULL) !=
 	    DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test copy timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 
-	mv_chan = to_mv_xor_chan(dma_chan);
-	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
 	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
@@ -976,7 +971,7 @@ out:
 
 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
 static int
-mv_xor_xor_self_test(struct mv_xor_device *device)
+mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 {
 	int i, src_idx;
 	struct page *dest;
@@ -989,7 +984,6 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
-	struct mv_xor_chan *mv_chan;
 
 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1022,9 +1016,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 
 	memset(page_address(dest), 0, PAGE_SIZE);
 
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
-				device_node);
+	dma_chan = &mv_chan->dmachan;
 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
@@ -1048,22 +1040,21 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 
 	if (mv_xor_status(dma_chan, cookie, NULL) !=
 	    DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test xor timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 
-	mv_chan = to_mv_xor_chan(dma_chan);
-	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 				PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 		u32 *ptr = page_address(dest);
 		if (ptr[i] != cmp_word) {
-			dev_printk(KERN_ERR, dma_chan->device->dev,
+			dev_err(dma_chan->device->dev,
 				"Self-test xor failed compare, disabling."
 				" index %d, data %x, expected %x\n", i,
 				ptr[i], cmp_word);
 			err = -ENODEV;
 			goto free_resources;
 		}
@@ -1079,62 +1070,66 @@ out:
 	return err;
 }
 
-static int __devexit mv_xor_remove(struct platform_device *dev)
+/* This driver does not implement any of the optional DMA operations. */
+static int
+mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	       unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 {
-	struct mv_xor_device *device = platform_get_drvdata(dev);
 	struct dma_chan *chan, *_chan;
-	struct mv_xor_chan *mv_chan;
-	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+	struct device *dev = mv_chan->dmadev.dev;
 
-	dma_async_device_unregister(&device->common);
+	dma_async_device_unregister(&mv_chan->dmadev);
 
-	dma_free_coherent(&dev->dev, plat_data->pool_size,
-			device->dma_desc_pool_virt, device->dma_desc_pool);
+	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
 
-	list_for_each_entry_safe(chan, _chan, &device->common.channels,
+	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
 				 device_node) {
-		mv_chan = to_mv_xor_chan(chan);
 		list_del(&chan->device_node);
 	}
 
+	free_irq(mv_chan->irq, mv_chan);
+
 	return 0;
 }
 
-static int mv_xor_probe(struct platform_device *pdev)
+static struct mv_xor_chan *
+mv_xor_channel_add(struct mv_xor_device *xordev,
+		   struct platform_device *pdev,
+		   int idx, dma_cap_mask_t cap_mask, int irq)
 {
 	int ret = 0;
-	int irq;
-	struct mv_xor_device *adev;
 	struct mv_xor_chan *mv_chan;
 	struct dma_device *dma_dev;
-	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
 
+	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+	if (!mv_chan) {
+		ret = -ENOMEM;
+		goto err_free_dma;
+	}
 
-	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
-	if (!adev)
-		return -ENOMEM;
+	mv_chan->idx = idx;
+	mv_chan->irq = irq;
 
-	dma_dev = &adev->common;
+	dma_dev = &mv_chan->dmadev;
 
 	/* allocate coherent memory for hardware descriptors
 	 * note: writecombine gives slightly better performance, but
 	 * requires that we explicitly flush the writes
 	 */
-	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
-							  plat_data->pool_size,
-							  &adev->dma_desc_pool,
-							  GFP_KERNEL);
-	if (!adev->dma_desc_pool_virt)
-		return -ENOMEM;
-
-	adev->id = plat_data->hw_id;
+	mv_chan->dma_desc_pool_virt =
+		dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
+				       &mv_chan->dma_desc_pool, GFP_KERNEL);
+	if (!mv_chan->dma_desc_pool_virt)
+		return ERR_PTR(-ENOMEM);
 
 	/* discover transaction capabilites from the platform data */
-	dma_dev->cap_mask = plat_data->cap_mask;
-	adev->pdev = pdev;
-	platform_set_drvdata(pdev, adev);
-
-	adev->shared = platform_get_drvdata(plat_data->shared);
+	dma_dev->cap_mask = cap_mask;
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
@@ -1143,6 +1138,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
 	dma_dev->device_tx_status = mv_xor_status;
 	dma_dev->device_issue_pending = mv_xor_issue_pending;
+	dma_dev->device_control = mv_xor_control;
 	dma_dev->dev = &pdev->dev;
 
 	/* set prep routines based on capability */
@@ -1155,15 +1151,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
 	}
 
-	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
-	if (!mv_chan) {
-		ret = -ENOMEM;
-		goto err_free_dma;
-	}
-	mv_chan->device = adev;
-	mv_chan->idx = plat_data->hw_id;
-	mv_chan->mmr_base = adev->shared->xor_base;
-
+	mv_chan->mmr_base = xordev->xor_base;
 	if (!mv_chan->mmr_base) {
 		ret = -ENOMEM;
 		goto err_free_dma;
@@ -1174,14 +1162,8 @@ static int mv_xor_probe(struct platform_device *pdev)
 	/* clear errors before enabling interrupts */
 	mv_xor_device_clear_err_status(mv_chan);
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		ret = irq;
-		goto err_free_dma;
-	}
-	ret = devm_request_irq(&pdev->dev, irq,
-			       mv_xor_interrupt_handler,
-			       0, dev_name(&pdev->dev), mv_chan);
+	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
+			  0, dev_name(&pdev->dev), mv_chan);
 	if (ret)
 		goto err_free_dma;
 
@@ -1193,26 +1175,26 @@ static int mv_xor_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&mv_chan->chain);
 	INIT_LIST_HEAD(&mv_chan->completed_slots);
 	INIT_LIST_HEAD(&mv_chan->all_slots);
-	mv_chan->common.device = dma_dev;
-	dma_cookie_init(&mv_chan->common);
+	mv_chan->dmachan.device = dma_dev;
+	dma_cookie_init(&mv_chan->dmachan);
 
-	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
 
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
-		ret = mv_xor_memcpy_self_test(adev);
+		ret = mv_xor_memcpy_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
 		if (ret)
-			goto err_free_dma;
+			goto err_free_irq;
 	}
 
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
-		ret = mv_xor_xor_self_test(adev);
+		ret = mv_xor_xor_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
-			goto err_free_dma;
+			goto err_free_irq;
 	}
 
-	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+	dev_info(&pdev->dev, "Marvell XOR: "
 		"( %s%s%s%s)\n",
 		dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
@@ -1220,20 +1202,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 		dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
-	goto out;
+	return mv_chan;
 
+err_free_irq:
+	free_irq(mv_chan->irq, mv_chan);
 err_free_dma:
-	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
-			adev->dma_desc_pool_virt, adev->dma_desc_pool);
-out:
-	return ret;
+	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+	return ERR_PTR(ret);
 }
 
 static void
-mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
 			 const struct mbus_dram_target_info *dram)
 {
-	void __iomem *base = msp->xor_base;
+	void __iomem *base = xordev->xor_base;
 	u32 win_enable = 0;
 	int i;
 
@@ -1258,99 +1241,176 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 
 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
 }
 
-static struct platform_driver mv_xor_driver = {
-	.probe		= mv_xor_probe,
-	.remove		= mv_xor_remove,
-	.driver		= {
-		.owner	= THIS_MODULE,
-		.name	= MV_XOR_NAME,
-	},
-};
-
-static int mv_xor_shared_probe(struct platform_device *pdev)
+static int mv_xor_probe(struct platform_device *pdev)
 {
 	const struct mbus_dram_target_info *dram;
-	struct mv_xor_shared_private *msp;
+	struct mv_xor_device *xordev;
+	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
 	struct resource *res;
+	int i, ret;
 
-	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+	dev_notice(&pdev->dev, "Marvell XOR driver\n");
 
-	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
-	if (!msp)
+	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
+	if (!xordev)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
 				     resource_size(res));
-	if (!msp->xor_base)
+	if (!xordev->xor_base)
 		return -EBUSY;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
 					  resource_size(res));
-	if (!msp->xor_high_base)
+	if (!xordev->xor_high_base)
 		return -EBUSY;
 
-	platform_set_drvdata(pdev, msp);
+	platform_set_drvdata(pdev, xordev);
 
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
 	 */
 	dram = mv_mbus_dram_info();
 	if (dram)
-		mv_xor_conf_mbus_windows(msp, dram);
+		mv_xor_conf_mbus_windows(xordev, dram);
 
 	/* Not all platforms can gate the clock, so it is not
 	 * an error if the clock does not exists.
 	 */
-	msp->clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(msp->clk))
-		clk_prepare_enable(msp->clk);
+	xordev->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(xordev->clk))
+		clk_prepare_enable(xordev->clk);
+
+	if (pdev->dev.of_node) {
+		struct device_node *np;
+		int i = 0;
+
+		for_each_child_of_node(pdev->dev.of_node, np) {
+			dma_cap_mask_t cap_mask;
+			int irq;
+
+			dma_cap_zero(cap_mask);
+			if (of_property_read_bool(np, "dmacap,memcpy"))
+				dma_cap_set(DMA_MEMCPY, cap_mask);
+			if (of_property_read_bool(np, "dmacap,xor"))
+				dma_cap_set(DMA_XOR, cap_mask);
+			if (of_property_read_bool(np, "dmacap,memset"))
+				dma_cap_set(DMA_MEMSET, cap_mask);
+			if (of_property_read_bool(np, "dmacap,interrupt"))
+				dma_cap_set(DMA_INTERRUPT, cap_mask);
+
+			irq = irq_of_parse_and_map(np, 0);
+			if (!irq) {
+				ret = -ENODEV;
+				goto err_channel_add;
+			}
+
+			xordev->channels[i] =
+				mv_xor_channel_add(xordev, pdev, i,
+						   cap_mask, irq);
+			if (IS_ERR(xordev->channels[i])) {
+				ret = PTR_ERR(xordev->channels[i]);
+				xordev->channels[i] = NULL;
+				irq_dispose_mapping(irq);
+				goto err_channel_add;
+			}
+
+			i++;
+		}
+	} else if (pdata && pdata->channels) {
+		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+			struct mv_xor_channel_data *cd;
+			int irq;
+
+			cd = &pdata->channels[i];
+			if (!cd) {
+				ret = -ENODEV;
+				goto err_channel_add;
+			}
+
+			irq = platform_get_irq(pdev, i);
+			if (irq < 0) {
+				ret = irq;
+				goto err_channel_add;
+			}
+
+			xordev->channels[i] =
+				mv_xor_channel_add(xordev, pdev, i,
+						   cd->cap_mask, irq);
+			if (IS_ERR(xordev->channels[i])) {
+				ret = PTR_ERR(xordev->channels[i]);
+				goto err_channel_add;
+			}
+		}
+	}
 
 	return 0;
+
+err_channel_add:
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
+		if (xordev->channels[i]) {
+			if (pdev->dev.of_node)
+				irq_dispose_mapping(xordev->channels[i]->irq);
+			mv_xor_channel_remove(xordev->channels[i]);
+		}
+
+	clk_disable_unprepare(xordev->clk);
+	clk_put(xordev->clk);
+	return ret;
 }
 
-static int mv_xor_shared_remove(struct platform_device *pdev)
+static int mv_xor_remove(struct platform_device *pdev)
 {
-	struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		if (xordev->channels[i])
+			mv_xor_channel_remove(xordev->channels[i]);
+	}
 
-	if (!IS_ERR(msp->clk)) {
-		clk_disable_unprepare(msp->clk);
-		clk_put(msp->clk);
+	if (!IS_ERR(xordev->clk)) {
+		clk_disable_unprepare(xordev->clk);
+		clk_put(xordev->clk);
 	}
 
 	return 0;
 }
 
-static struct platform_driver mv_xor_shared_driver = {
-	.probe		= mv_xor_shared_probe,
-	.remove		= mv_xor_shared_remove,
+#ifdef CONFIG_OF
+static struct of_device_id mv_xor_dt_ids[] = {
+	{ .compatible = "marvell,orion-xor", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_driver = {
+	.probe		= mv_xor_probe,
+	.remove		= mv_xor_remove,
 	.driver = {
 		.owner	= THIS_MODULE,
-		.name	= MV_XOR_SHARED_NAME,
+		.name	= MV_XOR_NAME,
+		.of_match_table = of_match_ptr(mv_xor_dt_ids),
 	},
 };
 
 
 static int __init mv_xor_init(void)
 {
-	int rc;
-
-	rc = platform_driver_register(&mv_xor_shared_driver);
-	if (!rc) {
-		rc = platform_driver_register(&mv_xor_driver);
-		if (rc)
-			platform_driver_unregister(&mv_xor_shared_driver);
-	}
-	return rc;
+	return platform_driver_register(&mv_xor_driver);
 }
 module_init(mv_xor_init);
 
@@ -1359,7 +1419,6 @@ module_init(mv_xor_init);
 static void __exit mv_xor_exit(void)
 {
 	platform_driver_unregister(&mv_xor_driver);
-	platform_driver_unregister(&mv_xor_shared_driver);
 	return;
 }
 
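For reference, the device-tree path added above expects a "marvell,orion-xor" node with one child node per XOR channel, turning each "dmacap,*" boolean property into a DMA capability bit and taking the channel interrupt from the child. The fragment below is only a hypothetical sketch assembled from the properties this diff parses; the unit address, register offsets, sizes and interrupt numbers are invented for illustration and are not taken from this commit.

	xor@d0060800 {
		compatible = "marvell,orion-xor";
		/* two register windows, matching the two MEM resources the
		 * driver ioremaps into xor_base and xor_high_base
		 */
		reg = <0xd0060800 0x100
		       0xd0060a00 0x100>;
		status = "okay";

		/* one child node per channel; irq_of_parse_and_map() reads
		 * the interrupt, the dmacap,* flags build the cap_mask
		 */
		xor00 {
			interrupts = <51>;
			dmacap,memcpy;
			dmacap,xor;
		};
		xor01 {
			interrupts = <52>;
			dmacap,memcpy;
			dmacap,xor;
			dmacap,memset;
		};
	};

Each child that parses successfully becomes one call to mv_xor_channel_add(), which is why the probe error path walks xordev->channels[] and disposes the mapped interrupts on failure.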