author    Eric Anholt <eric@anholt.net>    2009-03-10 14:44:52 -0400
committer Eric Anholt <eric@anholt.net>    2009-03-27 17:47:21 -0400
commit    eb01459fbbccb4ca0b879cbfc97e33ac6eabf975
tree      7bc739c6729eb13c7be8bd389af39fb195e5fcf4 /drivers/gpu
parent    40123c1f8dd920dcff7a42cde5b351d7d0b0422e
drm/i915: Fix lock order reversal in shmem pread path.
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
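[Editor's note: the reversal being fixed is between dev->struct_mutex and the faulting task's mm->mmap_sem. The old pread path called vfs_read() on the user buffer while holding struct_mutex, so a page fault on that buffer could take mmap_sem under struct_mutex; the GTT fault handler runs with mmap_sem already held and then takes struct_mutex, giving the opposite order. The patch resolves this with a two-step pattern: a fast path that copies under the lock using a primitive that fails on a fault instead of servicing it, and a slow path that pins the user pages (taking mmap_sem) before the lock is acquired. Below is a minimal sketch of that pattern, with made-up helper names (copy_out_fast/copy_out_slow), a single page, and the assumption that the destination range does not cross a page boundary; it is illustrative only, not the driver's actual code:

    #include <linux/mm.h>
    #include <linux/mutex.h>
    #include <linux/pagemap.h>
    #include <linux/highmem.h>
    #include <linux/sched.h>
    #include <linux/uaccess.h>

    /* Fast path: never sleeps to service a fault while "lock" is held.
     * __copy_to_user_inatomic() returns the number of uncopied bytes, so
     * a nonzero result means "fall back to the slow path". */
    static int copy_out_fast(struct mutex *lock, struct page *src,
                             char __user *dst, int len)
    {
            char *vaddr;
            int unwritten;

            mutex_lock(lock);
            vaddr = kmap_atomic(src, KM_USER0);
            unwritten = __copy_to_user_inatomic(dst, vaddr, len);
            kunmap_atomic(vaddr, KM_USER0);
            mutex_unlock(lock);

            return unwritten ? -EFAULT : 0;
    }

    /* Slow path: take mmap_sem and pin the destination page *before*
     * taking "lock", so the order is always mmap_sem -> lock. */
    static int copy_out_slow(struct mutex *lock, struct page *src,
                             unsigned long user_addr, int len)
    {
            struct page *dst;
            char *src_vaddr, *dst_vaddr;
            int pinned;

            down_read(&current->mm->mmap_sem);
            pinned = get_user_pages(current, current->mm, user_addr,
                                    1, 1 /* write */, 0, &dst, NULL);
            up_read(&current->mm->mmap_sem);
            if (pinned < 1)
                    return -EFAULT;

            mutex_lock(lock);
            /* Both pages are pinned now, so this copy cannot fault. */
            dst_vaddr = kmap(dst);
            src_vaddr = kmap(src);
            memcpy(dst_vaddr + (user_addr & ~PAGE_MASK), src_vaddr, len);
            kunmap(src);
            kunmap(dst);
            mutex_unlock(lock);

            SetPageDirty(dst);
            page_cache_release(dst);
            return 0;
    }
]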
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  221
1 file changed, 195 insertions(+), 26 deletions(-)
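[For orientation while reading the hunks below: i915_gem_pread_ioctl() no longer calls vfs_read() under dev->struct_mutex; it tries the new fast path and falls back to the slow one only when the atomic copy fails, as the final hunk shows:

    ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
    if (ret != 0)
            ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
]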
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bdc7326052df..010af908bdb6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -137,6 +137,24 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 }
 
 static inline int
+fast_shmem_read(struct page **pages,
+               loff_t page_base, int page_offset,
+               char __user *data,
+               int length)
+{
+       char __iomem *vaddr;
+       int ret;
+
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+               return -ENOMEM;
+       ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+       kunmap_atomic(vaddr, KM_USER0);
+
+       return ret;
+}
+
+static inline int
 slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
@@ -164,6 +182,179 @@ slow_shmem_copy(struct page *dst_page,
 }
 
 /**
+ * This is the fast shmem pread path, which attempts to copy_to_user directly
+ * from the backing pages of the object to the user's address space. On a
+ * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
+ */
+static int
+i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pread *args,
+                          struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       ssize_t remain;
+       loff_t offset, page_base;
+       char __user *user_data;
+       int page_offset, page_length;
+       int ret;
+
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within the object
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               page_base = (offset & ~(PAGE_SIZE-1));
+               page_offset = offset & (PAGE_SIZE-1);
+               page_length = remain;
+               if ((page_offset + remain) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - page_offset;
+
+               ret = fast_shmem_read(obj_priv->pages,
+                                     page_base, page_offset,
+                                     user_data, page_length);
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * This is the fallback shmem pread path, which pins the user pages with
+ * get_user_pages() before taking the struct mutex, so we can copy out of
+ * the object's backing pages while holding the mutex without taking
+ * page faults.
+ */
+static int
+i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pread *args,
+                          struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct mm_struct *mm = current->mm;
+       struct page **user_pages;
+       ssize_t remain;
+       loff_t offset, pinned_pages, i;
+       loff_t first_data_page, last_data_page, num_pages;
+       int shmem_page_index, shmem_page_offset;
+       int data_page_index, data_page_offset;
+       int page_length;
+       int ret;
+       uint64_t data_ptr = args->data_ptr;
+
+       remain = args->size;
+
+       /* Pin the user pages containing the data. We can't fault while
+        * holding the struct mutex, yet we want to hold it while
+        * dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+       if (user_pages == NULL)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto fail_put_user_pages;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * shmem_page_index = page number within shmem file
+                * shmem_page_offset = offset within page in shmem file
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset within data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               shmem_page_index = offset / PAGE_SIZE;
+               shmem_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+
+               page_length = remain;
+               if ((shmem_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - shmem_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+
+               ret = slow_shmem_copy(user_pages[data_page_index],
+                                     data_page_offset,
+                                     obj_priv->pages[shmem_page_index],
+                                     shmem_page_offset,
+                                     page_length);
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               data_ptr += page_length;
+               offset += page_length;
+       }
+
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+       for (i = 0; i < pinned_pages; i++) {
+               SetPageDirty(user_pages[i]);
+               page_cache_release(user_pages[i]);
+       }
+       kfree(user_pages);
+
+       return ret;
+}
+
+/**
  * Reads data from the object referenced by handle.
  *
  * On error, the contents of *data are undefined.
@@ -175,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       ssize_t read;
-       loff_t offset;
        int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -194,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       mutex_lock(&dev->struct_mutex);
-
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-
-       offset = args->offset;
-
-       read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-                       args->size, &offset);
-       if (read != args->size) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               if (read < 0)
-                       return read;
-               else
-                       return -EINVAL;
-       }
-
+       ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+       if (ret != 0)
+               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+
        drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
 
-       return 0;
+       return ret;
 }
 
 /* This is the fast write path which cannot handle