about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRob Clark <rob@ti.com>2012-01-18 19:33:02 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2012-02-08 17:14:12 -0500
commitc5b1247bd1c3ab6722acfa95213be9a16bfb664c (patch)
tree3ef9b02ee38d547cf00a7400186deee87e117214
parenta9e8d70c1ac6c9ccf6852c91e082e28249564e6e (diff)
staging: drm/omap: fix locking issue
The create/free mmap offset code must be synchronized. Yet only some callers of omap_gem_mmap_offset() held struct_mutex. Leading to various crashes around drm_mm_insert_helper_range(). (In the free-object path, which is currently the only place we drm_gem_free_mmap_offset(), struct_mutex is already held.) Signed-off-by: Rob Clark <rob@ti.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--drivers/staging/omapdrm/omap_gem.c18
1 files changed, 14 insertions, 4 deletions
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index ae1ad357f7f2..b7d6f886c5cf 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -116,6 +116,9 @@ struct omap_gem_object {
116 } *sync; 116 } *sync;
117}; 117};
118 118
119static int get_pages(struct drm_gem_object *obj, struct page ***pages);
120static uint64_t mmap_offset(struct drm_gem_object *obj);
121
119/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are 122/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
120 * not necessarily pinned in TILER all the time, and (b) when they are 123 * not necessarily pinned in TILER all the time, and (b) when they are
121 * they are not necessarily page aligned, we reserve one or more small 124 * they are not necessarily page aligned, we reserve one or more small
@@ -149,7 +152,7 @@ static void evict_entry(struct drm_gem_object *obj,
149{ 152{
150 if (obj->dev->dev_mapping) { 153 if (obj->dev->dev_mapping) {
151 size_t size = PAGE_SIZE * usergart[fmt].height; 154 size_t size = PAGE_SIZE * usergart[fmt].height;
152 loff_t off = omap_gem_mmap_offset(obj) + 155 loff_t off = mmap_offset(obj) +
153 (entry->obj_pgoff << PAGE_SHIFT); 156 (entry->obj_pgoff << PAGE_SHIFT);
154 unmap_mapping_range(obj->dev->dev_mapping, off, size, 1); 157 unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
155 } 158 }
@@ -189,8 +192,6 @@ static inline bool is_shmem(struct drm_gem_object *obj)
189 return obj->filp != NULL; 192 return obj->filp != NULL;
190} 193}
191 194
192static int get_pages(struct drm_gem_object *obj, struct page ***pages);
193
194static DEFINE_SPINLOCK(sync_lock); 195static DEFINE_SPINLOCK(sync_lock);
195 196
196/** ensure backing pages are allocated */ 197/** ensure backing pages are allocated */
@@ -251,7 +252,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
251} 252}
252 253
253/** get mmap offset */ 254/** get mmap offset */
254uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) 255static uint64_t mmap_offset(struct drm_gem_object *obj)
255{ 256{
256 if (!obj->map_list.map) { 257 if (!obj->map_list.map) {
257 /* Make it mmapable */ 258 /* Make it mmapable */
@@ -267,6 +268,15 @@ uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
267 return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT; 268 return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
268} 269}
269 270
271uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
272{
273 uint64_t offset;
274 mutex_lock(&obj->dev->struct_mutex);
275 offset = mmap_offset(obj);
276 mutex_unlock(&obj->dev->struct_mutex);
277 return offset;
278}
279
270/** get mmap size */ 280/** get mmap size */
271size_t omap_gem_mmap_size(struct drm_gem_object *obj) 281size_t omap_gem_mmap_size(struct drm_gem_object *obj)
272{ 282{