Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
 drivers/gpu/drm/msm/msm_gem.c | 47 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8b78554cfde3..8cf6362e64bf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
 	return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
+ * API.  Really the GPU cache is out of scope here (it is handled on the
+ * cmdstream), and all we need to do is invalidate newly allocated pages
+ * before mapping them to the CPU as uncached/writecombine.
+ *
+ * On top of this we have the added headache that, depending on the
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss) or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_map_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			sync_for_device(msm_obj);
	}
 
	return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents,
-					DMA_BIDIRECTIONAL);
+				sync_for_cpu(msm_obj);
 
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
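
For reference, the branch taken by both new helpers comes down to whether the device has per-device DMA ops installed: get_dma_ops() returns the iommu's dma_map_ops when the display's iommu is attached at that device (the mdss case), and NULL when the device falls back to dma-direct (the mdp sub-node case). The sketch below is not part of the patch; msm_report_sync_path() is a hypothetical debugging helper that only illustrates this dispatch by logging which cache-maintenance path sync_for_device()/sync_for_cpu() would take for a given struct device, assuming a kernel of roughly this vintage where get_dma_ops() is declared in <linux/dma-mapping.h>.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical helper (not in this patch): report which cache-maintenance
 * path the sync_for_device()/sync_for_cpu() helpers would take for @dev.
 */
static void msm_report_sync_path(struct device *dev)
{
	if (get_dma_ops(dev)) {
		/* per-device dma_map_ops installed, e.g. iommu wired to this device (mdss) */
		dev_dbg(dev, "cache sync via dma_sync_sg_for_device()/dma_sync_sg_for_cpu()\n");
	} else {
		/* no per-device ops: dma-direct (mdp sub-node), so dma_map_sg()/dma_unmap_sg() is used instead */
		dev_dbg(dev, "cache sync via dma_map_sg()/dma_unmap_sg()\n");
	}
}

Keeping the get_dma_ops() check in one place lets the same call sites work whichever device the display's iommu happens to be wired to, at the cost of peeking at a dma-mapping internal; the patch's own comment flags this as a cautionary tale rather than a pattern to copy.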