about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/tegra
diff options
context:
space:
mode:
authorThierry Reding <treding@nvidia.com>2014-10-16 08:18:50 -0400
committerThierry Reding <treding@nvidia.com>2014-11-13 10:14:46 -0500
commitc28d4a317fef0401be180b34f48d193ff2a6787b (patch)
tree244348246e8ea877f82b1c6e797d399ee5f7bf96 /drivers/gpu/drm/tegra
parent7e3bc3a98fd1df5839cdc5cbce4dfdb9e4c03655 (diff)
drm/tegra: gem: Extract tegra_bo_alloc_object()
This function implements the common buffer object allocation used for both allocation and import paths. Signed-off-by: Thierry Reding <treding@nvidia.com>
Diffstat (limited to 'drivers/gpu/drm/tegra')
-rw-r--r--drivers/gpu/drm/tegra/gem.c77
1 file changed, 38 insertions, 39 deletions
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ce023fa3e8ae..d86ded791935 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -91,6 +91,36 @@ static const struct host1x_bo_ops tegra_bo_ops = {
91 .kunmap = tegra_bo_kunmap, 91 .kunmap = tegra_bo_kunmap,
92}; 92};
93 93
94static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
95 size_t size)
96{
97 struct tegra_bo *bo;
98 int err;
99
100 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
101 if (!bo)
102 return ERR_PTR(-ENOMEM);
103
104 host1x_bo_init(&bo->base, &tegra_bo_ops);
105 size = round_up(size, PAGE_SIZE);
106
107 err = drm_gem_object_init(drm, &bo->gem, size);
108 if (err < 0)
109 goto free;
110
111 err = drm_gem_create_mmap_offset(&bo->gem);
112 if (err < 0)
113 goto release;
114
115 return bo;
116
117release:
118 drm_gem_object_release(&bo->gem);
119free:
120 kfree(bo);
121 return ERR_PTR(err);
122}
123
94static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo) 124static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
95{ 125{
96 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); 126 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
@@ -102,12 +132,9 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
102 struct tegra_bo *bo; 132 struct tegra_bo *bo;
103 int err; 133 int err;
104 134
105 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 135 bo = tegra_bo_alloc_object(drm, size);
106 if (!bo) 136 if (IS_ERR(bo))
107 return ERR_PTR(-ENOMEM); 137 return bo;
108
109 host1x_bo_init(&bo->base, &tegra_bo_ops);
110 size = round_up(size, PAGE_SIZE);
111 138
112 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, 139 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
113 GFP_KERNEL | __GFP_NOWARN); 140 GFP_KERNEL | __GFP_NOWARN);
@@ -118,14 +145,6 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
118 goto err_dma; 145 goto err_dma;
119 } 146 }
120 147
121 err = drm_gem_object_init(drm, &bo->gem, size);
122 if (err)
123 goto err_init;
124
125 err = drm_gem_create_mmap_offset(&bo->gem);
126 if (err)
127 goto err_mmap;
128
129 if (flags & DRM_TEGRA_GEM_CREATE_TILED) 148 if (flags & DRM_TEGRA_GEM_CREATE_TILED)
130 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; 149 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
131 150
@@ -134,10 +153,6 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
134 153
135 return bo; 154 return bo;
136 155
137err_mmap:
138 drm_gem_object_release(&bo->gem);
139err_init:
140 tegra_bo_destroy(drm, bo);
141err_dma: 156err_dma:
142 kfree(bo); 157 kfree(bo);
143 158
@@ -175,28 +190,16 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
175{ 190{
176 struct dma_buf_attachment *attach; 191 struct dma_buf_attachment *attach;
177 struct tegra_bo *bo; 192 struct tegra_bo *bo;
178 ssize_t size;
179 int err; 193 int err;
180 194
181 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 195 bo = tegra_bo_alloc_object(drm, buf->size);
182 if (!bo) 196 if (IS_ERR(bo))
183 return ERR_PTR(-ENOMEM); 197 return bo;
184
185 host1x_bo_init(&bo->base, &tegra_bo_ops);
186 size = round_up(buf->size, PAGE_SIZE);
187
188 err = drm_gem_object_init(drm, &bo->gem, size);
189 if (err < 0)
190 goto free;
191
192 err = drm_gem_create_mmap_offset(&bo->gem);
193 if (err < 0)
194 goto release;
195 198
196 attach = dma_buf_attach(buf, drm->dev); 199 attach = dma_buf_attach(buf, drm->dev);
197 if (IS_ERR(attach)) { 200 if (IS_ERR(attach)) {
198 err = PTR_ERR(attach); 201 err = PTR_ERR(attach);
199 goto free_mmap; 202 goto free;
200 } 203 }
201 204
202 get_dma_buf(buf); 205 get_dma_buf(buf);
@@ -228,13 +231,9 @@ detach:
228 231
229 dma_buf_detach(buf, attach); 232 dma_buf_detach(buf, attach);
230 dma_buf_put(buf); 233 dma_buf_put(buf);
231free_mmap:
232 drm_gem_free_mmap_offset(&bo->gem);
233release:
234 drm_gem_object_release(&bo->gem);
235free: 234free:
235 drm_gem_object_release(&bo->gem);
236 kfree(bo); 236 kfree(bo);
237
238 return ERR_PTR(err); 237 return ERR_PTR(err);
239} 238}
240 239