author    Thomas Petazzoni <thomas.petazzoni@free-electrons.com>  2012-11-15 09:17:05 -0500
committer Thomas Petazzoni <thomas.petazzoni@free-electrons.com>  2012-11-20 09:58:59 -0500
commit    1ef48a262b0d50add7d293a37b8c8bad4bec30a1 (patch)
tree      651c17044708d7e3ef91987dec72010241a797c9 /drivers/dma/mv_xor.c
parent    275cc0c8bd3c0a6b826d46a4d1a8135897387ca9 (diff)
dma: mv_xor: merge mv_xor_device and mv_xor_chan
Even though the DMA engine infrastructure supports multiple channels
per device, the mv_xor driver registers one DMA engine device for each
channel, because the mv_xor channels inside the same XOR engine have
different capabilities, and the DMA engine infrastructure only allows
capabilities to be expressed at the DMA engine device level.

The mv_xor driver has therefore been registering one DMA engine device
and one DMA engine channel for each XOR channel since its introduction
in the kernel. However, it kept two separate internal structures,
mv_xor_device and mv_xor_chan, which didn't make much sense since
there was a 1:1 mapping between them.

This patch gets rid of this duplication and merges everything into the
mv_xor_chan structure.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--  drivers/dma/mv_xor.c | 64
1 file changed, 28 insertions(+), 36 deletions(-)
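For orientation, here is a minimal sketch of the shape of the merged
per-channel structure after this patch. The field names are the ones
visible in the diff below; the real struct mv_xor_chan in the driver
carries additional state (locks, descriptor lists, and so on) that is
omitted here:

	/* Illustrative sketch only -- not the full driver structure.
	 * The struct dma_device formerly held by mv_xor_device is now
	 * embedded directly in the per-channel structure, so a single
	 * allocation describes both the DMA engine device and its one
	 * channel.
	 */
	struct mv_xor_chan {
		unsigned int           idx;                 /* hardware channel id */
		void __iomem          *mmr_base;            /* channel registers */
		size_t                 pool_size;           /* descriptor pool size */
		void                  *dma_desc_pool_virt;  /* CPU view of the pool */
		dma_addr_t             dma_desc_pool;       /* DMA view of the pool */
		struct dma_device      dmadev;              /* embedded DMA engine device */
		struct mv_xor_private *shared;              /* state shared by the engine */
		/* ... locks, descriptor lists, etc. ... */
	};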
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 75a80510dba9..3b81ee1b555d 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -40,7 +40,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan);
 	container_of(tx, struct mv_xor_desc_slot, async_tx)
 
 #define mv_chan_to_devp(chan) \
-	((chan)->device->dmadev.dev)
+	((chan)->dmadev.dev)
 
 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 {
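This macro change is the pattern the whole patch repeats: with struct
dma_device embedded directly in struct mv_xor_chan, every
mv_chan->device->... access loses one level of indirection. The inverse
lookup relies on the usual container_of() idiom; a sketch follows (the
name of the embedded struct dma_chan member is an assumption for
illustration, not quoted from this patch):

	/* Sketch: recover the driver's per-channel state from the
	 * generic struct dma_chan handed in by the DMA engine core.
	 * The member name "dmachan" is illustrative.
	 */
	static inline struct mv_xor_chan *to_mv_xor_chan(struct dma_chan *chan)
	{
		return container_of(chan, struct mv_xor_chan, dmachan);
	}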
@@ -603,7 +603,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 	int idx;
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 	struct mv_xor_desc_slot *slot = NULL;
-	int num_descs_in_pool = mv_chan->device->pool_size/MV_XOR_SLOT_SIZE;
+	int num_descs_in_pool = mv_chan->pool_size/MV_XOR_SLOT_SIZE;
 
 	/* Allocate descriptor slots */
 	idx = mv_chan->slots_allocated;
@@ -614,7 +614,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 			       " %d descriptor slots", idx);
 			break;
 		}
-		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
+		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
 		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
 
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
@@ -622,7 +622,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
 		INIT_LIST_HEAD(&slot->tx_list);
-		hw_desc = (char *) mv_chan->device->dma_desc_pool;
+		hw_desc = (char *) mv_chan->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
 		slot->idx = idx++;
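The three hunks above only drop the ->device indirection; the
slot-carving logic itself is unchanged. Conceptually, the coherent pool
is treated as an array of MV_XOR_SLOT_SIZE-byte hardware descriptors,
and slot idx gets the CPU and bus addresses at the same offset,
equivalent to:

	/* Equivalent arithmetic to the code above: slot idx lives at
	 * offset idx * MV_XOR_SLOT_SIZE in both address spaces.
	 */
	char *virt = (char *) mv_chan->dma_desc_pool_virt;

	slot->hw_desc       = (void *) &virt[idx * MV_XOR_SLOT_SIZE];
	slot->async_tx.phys = mv_chan->dma_desc_pool + idx * MV_XOR_SLOT_SIZE;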
@@ -1067,58 +1067,58 @@ out:
 	return err;
 }
 
-static int mv_xor_channel_remove(struct mv_xor_device *device)
+static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 {
 	struct dma_chan *chan, *_chan;
-	struct mv_xor_chan *mv_chan;
-	struct device *dev = device->dmadev.dev;
+	struct device *dev = mv_chan->dmadev.dev;
 
-	dma_async_device_unregister(&device->dmadev);
+	dma_async_device_unregister(&mv_chan->dmadev);
 
-	dma_free_coherent(dev, device->pool_size,
-		device->dma_desc_pool_virt, device->dma_desc_pool);
+	dma_free_coherent(dev, mv_chan->pool_size,
+		mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
 
-	list_for_each_entry_safe(chan, _chan, &device->dmadev.channels,
+	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
 				 device_node) {
-		mv_chan = to_mv_xor_chan(chan);
 		list_del(&chan->device_node);
 	}
 
 	return 0;
 }
 
-static struct mv_xor_device *
+static struct mv_xor_chan *
 mv_xor_channel_add(struct mv_xor_private *msp,
 		   struct platform_device *pdev,
 		   int hw_id, dma_cap_mask_t cap_mask,
 		   size_t pool_size, int irq)
 {
 	int ret = 0;
-	struct mv_xor_device *adev;
 	struct mv_xor_chan *mv_chan;
 	struct dma_device *dma_dev;
 
-	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
-	if (!adev)
-		return ERR_PTR(-ENOMEM);
+	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+	if (!mv_chan) {
+		ret = -ENOMEM;
+		goto err_free_dma;
+	}
+
+	mv_chan->idx = hw_id;
 
-	dma_dev = &adev->dmadev;
+	dma_dev = &mv_chan->dmadev;
 
 	/* allocate coherent memory for hardware descriptors
 	 * note: writecombine gives slightly better performance, but
 	 * requires that we explicitly flush the writes
 	 */
-	adev->pool_size = pool_size;
-	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
-							  adev->pool_size,
-							  &adev->dma_desc_pool,
-							  GFP_KERNEL);
-	if (!adev->dma_desc_pool_virt)
+	mv_chan->pool_size = pool_size;
+	mv_chan->dma_desc_pool_virt =
+		dma_alloc_writecombine(&pdev->dev, mv_chan->pool_size,
+				       &mv_chan->dma_desc_pool, GFP_KERNEL);
+	if (!mv_chan->dma_desc_pool_virt)
 		return ERR_PTR(-ENOMEM);
 
 	/* discover transaction capabilites from the platform data */
 	dma_dev->cap_mask = cap_mask;
-	adev->shared = msp;
+	mv_chan->shared = msp;
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
@@ -1139,15 +1139,7 @@ mv_xor_channel_add(struct mv_xor_private *msp,
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
 	}
 
-	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
-	if (!mv_chan) {
-		ret = -ENOMEM;
-		goto err_free_dma;
-	}
-	mv_chan->device = adev;
-	mv_chan->idx = hw_id;
-	mv_chan->mmr_base = adev->shared->xor_base;
-
+	mv_chan->mmr_base = msp->xor_base;
 	if (!mv_chan->mmr_base) {
 		ret = -ENOMEM;
 		goto err_free_dma;
@@ -1199,11 +1191,11 @@ mv_xor_channel_add(struct mv_xor_private *msp,
 	       dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
-	return adev;
+	return mv_chan;
 
 err_free_dma:
 	dma_free_coherent(&pdev->dev, pool_size,
-			adev->dma_desc_pool_virt, adev->dma_desc_pool);
+			mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
 	return ERR_PTR(ret);
 }
 
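After the merge, callers of mv_xor_channel_add() deal only in struct
mv_xor_chan. A hedged sketch of the expected probe-path usage follows;
the surrounding shape and the channels[] field on mv_xor_private are
assumptions for illustration, not part of this patch:

	/* Sketch of a caller: one mv_xor_chan registered per hardware
	 * XOR channel; the return value is now the channel itself
	 * rather than a separate mv_xor_device.
	 */
	struct mv_xor_chan *chan;

	chan = mv_xor_channel_add(msp, pdev, hw_id, cap_mask, pool_size, irq);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	msp->channels[hw_id] = chan;	/* field name assumed */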