Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	367
1 file changed, 161 insertions(+), 206 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6370a761d137..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
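
The three functions added above replace the driver's old fixed-slot phys-object table (deleted at the bottom of this diff) with a single per-object drm_dma_handle_t. A minimal caller sketch, assuming a shmem-backed BO that hardware must see as physically contiguous memory; the function name and the PAGE_SIZE alignment are illustrative, not part of the patch:

/* Hypothetical caller, presumably under dev->struct_mutex: snapshot the
 * object's shmem pages into a contiguous write-combined buffer and use
 * its bus address.
 */
static int example_pin_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Allocates via drm_pci_alloc() and copies the pages in;
	 * returns -EBUSY if an existing handle is misaligned. */
	ret = i915_gem_object_attach_phys(obj, PAGE_SIZE /* assumed align */);
	if (ret)
		return ret;

	/* Hardware would now be pointed at obj->phys_handle->busaddr. */
	return 0;
}
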
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -2790,7 +2908,7 @@ int i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
-		ret = i915_switch_context(ring, NULL, ring->default_context);
+		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;
 
@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}
 
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
 						  DRM_MM_SEARCH_DEFAULT,
 						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
 			goto search_free;
 
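
With gtt_max split into a start/end pair, callers can now bias the drm_mm search away from the bottom of the address space. A sketch of a hypothetical call site (the guard-page motivation and the 4096-byte bias are illustrative; PIN_OFFSET_BIAS and PIN_OFFSET_MASK are the flags this patch consumes):

static int example_pin_above_low_page(struct drm_i915_gem_object *obj,
				      struct i915_address_space *vm)
{
	/* The page-aligned bias rides in the upper bits of the flags
	 * word, which is why the flags parameter widens from unsigned
	 * to uint64_t. */
	uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | 4096;

	/* i915_gem_object_bind_to_vm() then derives:
	 *   start = flags & PIN_OFFSET_MASK;     -> 4096
	 *   end   = dev_priv->gtt.mappable_end;  (PIN_MAPPABLE set)
	 * and only searches [start, end) for a hole. */
	return i915_gem_object_pin(obj, vm, 0 /* no alignment */, flags);
}
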
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct i915_vma *vma;
 	int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 		return -EBUSY;
 
-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
 		WARN(vma->pin_count,
 		     "bo is already pinned with incorrect alignment:"
 		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 		     " obj->map_and_fenceable=%d\n",
 		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
 		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
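
i915_vma_misplaced() gathers the "is the existing binding still good enough?" checks that were previously inlined in i915_gem_object_pin(), and adds the new minimum-offset test. Roughly, the repin path behaves like this sketch (error paths and VMA creation elided; it assumes the static helper were visible to the caller):

static int example_repin(struct i915_vma *vma, uint32_t alignment,
			 uint64_t flags)
{
	if (i915_vma_misplaced(vma, alignment, flags)) {
		/* The binding violates the requested alignment,
		 * mappability, or minimum offset: drop it so it can be
		 * rebound with the new constraints. */
		int ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}
	return 0;
}
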
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;
 
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
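
For out-of-file callers (the deleted comment names cursor and overlay registers as the users), the visible change is the attach signature; a before/after sketch, with the slot id taken from the removed table-based API and the surrounding code illustrative:

	/* before: slot-based API, removed above */
	ret = i915_gem_attach_phys_object(dev, obj,
					  I915_GEM_PHYS_CURSOR_0, align);

	/* after: per-object handle */
	ret = i915_gem_object_attach_phys(obj, align);

Detach no longer needs an explicit call at all: i915_gem_free_object() now invokes i915_gem_object_detach_phys() itself.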