aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/mv_xor.c
diff options
context:
space:
mode:
authorThomas Petazzoni <thomas.petazzoni@free-electrons.com>2012-10-29 11:45:46 -0400
committerThomas Petazzoni <thomas.petazzoni@free-electrons.com>2012-11-20 09:58:55 -0500
commita6b4a9d2c1063ffc52ca94b6c1b24f9b6d5b79c5 (patch)
treed35077f7e4a7d14b623372f3b8d2134d4984270b /drivers/dma/mv_xor.c
parent09f2b7864ce37483f4c4ecb30b0eed599f475035 (diff)
dma: mv_xor: split initialization/cleanup of XOR channels
Instead of doing the initialization/cleanup of the XOR channels directly in the ->probe() and ->remove() hooks, we create separate utility functions mv_xor_channel_add() and mv_xor_channel_remove(). This will allow to easily introduce in a future patch a different way of registering XOR channels: instead of having one platform_device per channel, we'll trigger the registration of all XOR channels of a given XOR engine directly from the XOR engine ->probe() function. Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--drivers/dma/mv_xor.c75
1 file changed, 48 insertions, 27 deletions
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 704277259a5b..f7b99193e884 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1077,19 +1077,18 @@ out:
1077 return err; 1077 return err;
1078} 1078}
1079 1079
1080static int __devexit mv_xor_remove(struct platform_device *dev) 1080static int mv_xor_channel_remove(struct mv_xor_device *device)
1081{ 1081{
1082 struct mv_xor_device *device = platform_get_drvdata(dev);
1083 struct dma_chan *chan, *_chan; 1082 struct dma_chan *chan, *_chan;
1084 struct mv_xor_chan *mv_chan; 1083 struct mv_xor_chan *mv_chan;
1085 1084
1086 dma_async_device_unregister(&device->common); 1085 dma_async_device_unregister(&device->common);
1087 1086
1088 dma_free_coherent(&dev->dev, device->pool_size, 1087 dma_free_coherent(&device->pdev->dev, device->pool_size,
1089 device->dma_desc_pool_virt, device->dma_desc_pool); 1088 device->dma_desc_pool_virt, device->dma_desc_pool);
1090 1089
1091 list_for_each_entry_safe(chan, _chan, &device->common.channels, 1090 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1092 device_node) { 1091 device_node) {
1093 mv_chan = to_mv_xor_chan(chan); 1092 mv_chan = to_mv_xor_chan(chan);
1094 list_del(&chan->device_node); 1093 list_del(&chan->device_node);
1095 } 1094 }
@@ -1097,19 +1096,20 @@ static int __devexit mv_xor_remove(struct platform_device *dev)
1097 return 0; 1096 return 0;
1098} 1097}
1099 1098
1100static int __devinit mv_xor_probe(struct platform_device *pdev) 1099static struct mv_xor_device *
1100mv_xor_channel_add(struct mv_xor_shared_private *msp,
1101 struct platform_device *pdev,
1102 int hw_id, dma_cap_mask_t cap_mask,
1103 size_t pool_size, int irq)
1101{ 1104{
1102 int ret = 0; 1105 int ret = 0;
1103 int irq;
1104 struct mv_xor_device *adev; 1106 struct mv_xor_device *adev;
1105 struct mv_xor_chan *mv_chan; 1107 struct mv_xor_chan *mv_chan;
1106 struct dma_device *dma_dev; 1108 struct dma_device *dma_dev;
1107 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1108
1109 1109
1110 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); 1110 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1111 if (!adev) 1111 if (!adev)
1112 return -ENOMEM; 1112 return ERR_PTR(-ENOMEM);
1113 1113
1114 dma_dev = &adev->common; 1114 dma_dev = &adev->common;
1115 1115
@@ -1117,22 +1117,20 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1117 * note: writecombine gives slightly better performance, but 1117 * note: writecombine gives slightly better performance, but
1118 * requires that we explicitly flush the writes 1118 * requires that we explicitly flush the writes
1119 */ 1119 */
1120 adev->pool_size = plat_data->pool_size; 1120 adev->pool_size = pool_size;
1121 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, 1121 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1122 adev->pool_size, 1122 adev->pool_size,
1123 &adev->dma_desc_pool, 1123 &adev->dma_desc_pool,
1124 GFP_KERNEL); 1124 GFP_KERNEL);
1125 if (!adev->dma_desc_pool_virt) 1125 if (!adev->dma_desc_pool_virt)
1126 return -ENOMEM; 1126 return ERR_PTR(-ENOMEM);
1127 1127
1128 adev->id = plat_data->hw_id; 1128 adev->id = hw_id;
1129 1129
1130 /* discover transaction capabilites from the platform data */ 1130 /* discover transaction capabilites from the platform data */
1131 dma_dev->cap_mask = plat_data->cap_mask; 1131 dma_dev->cap_mask = cap_mask;
1132 adev->pdev = pdev; 1132 adev->pdev = pdev;
1133 platform_set_drvdata(pdev, adev); 1133 adev->shared = msp;
1134
1135 adev->shared = platform_get_drvdata(plat_data->shared);
1136 1134
1137 INIT_LIST_HEAD(&dma_dev->channels); 1135 INIT_LIST_HEAD(&dma_dev->channels);
1138 1136
@@ -1159,7 +1157,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1159 goto err_free_dma; 1157 goto err_free_dma;
1160 } 1158 }
1161 mv_chan->device = adev; 1159 mv_chan->device = adev;
1162 mv_chan->idx = plat_data->hw_id; 1160 mv_chan->idx = hw_id;
1163 mv_chan->mmr_base = adev->shared->xor_base; 1161 mv_chan->mmr_base = adev->shared->xor_base;
1164 1162
1165 if (!mv_chan->mmr_base) { 1163 if (!mv_chan->mmr_base) {
@@ -1172,11 +1170,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1172 /* clear errors before enabling interrupts */ 1170 /* clear errors before enabling interrupts */
1173 mv_xor_device_clear_err_status(mv_chan); 1171 mv_xor_device_clear_err_status(mv_chan);
1174 1172
1175 irq = platform_get_irq(pdev, 0);
1176 if (irq < 0) {
1177 ret = irq;
1178 goto err_free_dma;
1179 }
1180 ret = devm_request_irq(&pdev->dev, irq, 1173 ret = devm_request_irq(&pdev->dev, irq,
1181 mv_xor_interrupt_handler, 1174 mv_xor_interrupt_handler,
1182 0, dev_name(&pdev->dev), mv_chan); 1175 0, dev_name(&pdev->dev), mv_chan);
@@ -1218,13 +1211,41 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1218 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); 1211 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1219 1212
1220 dma_async_device_register(dma_dev); 1213 dma_async_device_register(dma_dev);
1221 goto out; 1214 return adev;
1222 1215
1223 err_free_dma: 1216 err_free_dma:
1224 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, 1217 dma_free_coherent(&adev->pdev->dev, pool_size,
1225 adev->dma_desc_pool_virt, adev->dma_desc_pool); 1218 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1226 out: 1219 return ERR_PTR(ret);
1227 return ret; 1220}
1221
1222static int __devexit mv_xor_remove(struct platform_device *pdev)
1223{
1224 struct mv_xor_device *device = platform_get_drvdata(pdev);
1225 return mv_xor_channel_remove(device);
1226}
1227
1228static int __devinit mv_xor_probe(struct platform_device *pdev)
1229{
1230 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1231 struct mv_xor_shared_private *msp =
1232 platform_get_drvdata(plat_data->shared);
1233 struct mv_xor_device *mv_xor_device;
1234 int irq;
1235
1236 irq = platform_get_irq(pdev, 0);
1237 if (irq < 0)
1238 return irq;
1239
1240 mv_xor_device = mv_xor_channel_add(msp, pdev, plat_data->hw_id,
1241 plat_data->cap_mask,
1242 plat_data->pool_size, irq);
1243 if (IS_ERR(mv_xor_device))
1244 return PTR_ERR(mv_xor_device);
1245
1246 platform_set_drvdata(pdev, mv_xor_device);
1247
1248 return 0;
1228} 1249}
1229 1250
1230static void 1251static void