author	Laura Abbott <labbott@redhat.com>	2017-04-18 14:27:06 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-04-18 14:43:14 -0400
commit	62b3a094cb9e4a3c5a5be4a20b72e0ced3af0e31
tree	1def6fdf42e4debb28d2bdc06664771df1cd54cb
parent	204f672255c228ef7a66c29ae48123778da938a1
staging: android: ion: Stop butchering the DMA address
Now that we have proper caching, stop setting the DMA address manually.
It should be set after properly calling dma_map.

Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
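For context, a minimal sketch of the pattern the commit message points at (illustrative only; the function name and the use of attachment->priv are assumptions, not code from this patch): sg_dma_address()/sg_dma_len() become valid only after dma_map_sg() has mapped the scatterlist on behalf of a specific device.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Illustrative sketch: map a buffer's sg_table for DMA on behalf of
 * the attached device. dma_map_sg() performs any IOMMU programming
 * and cache maintenance, then fills in sg_dma_address()/sg_dma_len(),
 * which the loop removed below used to fake with sg_phys().
 */
static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attachment,
					    enum dma_data_direction direction)
{
	struct sg_table *table = attachment->priv; /* assumed per-attachment table */

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, direction))
		return ERR_PTR(-ENOMEM);

	return table;
}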
 drivers/staging/android/ion/ion.c | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 3d979ef543f6..65638f509f6c 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -81,8 +81,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 {
 	struct ion_buffer *buffer;
 	struct sg_table *table;
-	struct scatterlist *sg;
-	int i, ret;
+	int ret;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer)
@@ -119,20 +118,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	INIT_LIST_HEAD(&buffer->vmas);
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
-	/*
-	 * this will set up dma addresses for the sglist -- it is not
-	 * technically correct as per the dma api -- a specific
-	 * device isn't really taking ownership here. However, in practice on
-	 * our systems the only dma_address space is physical addresses.
-	 * Additionally, we can't afford the overhead of invalidating every
-	 * allocation via dma_map_sg. The implicit contract here is that
-	 * memory coming from the heaps is ready for dma, ie if it has a
-	 * cached mapping that mapping has been invalidated
-	 */
-	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-	}
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
 	mutex_unlock(&dev->buffer_lock);
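The deleted loop is the anti-pattern worth noting: assigning sg_phys(sg) to sg_dma_address(sg) only happens to work on systems where DMA addresses equal CPU physical addresses, as the removed comment itself admits, and it performs no cache maintenance and bypasses any IOMMU or dma-ranges translation. With the assignment deferred to a real dma_map_sg() call (see the sketch above), the DMA API owns both the address translation and the coherency work.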