Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 366 ++++++++++++++++-----------------------
 1 file changed, 160 insertions(+), 206 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ea09d1a0fbd9..f36126383d26 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -47,11 +47,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj);
 
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);
-
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
@@ -214,6 +209,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
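Note: the attach/detach pair added above replaces the old pool-based phys objects (deleted at the bottom of this diff) with per-object ownership. A minimal caller sketch, assuming it runs under struct_mutex like the existing cursor code; the function name below is hypothetical, and the expected real consumer is the cursor/overlay setup path in intel_display.c, outside this file:

static int example_setup_phys_cursor(struct drm_i915_gem_object *obj,
				     int align)
{
	int ret;

	/* Snapshot the shmem backing pages into a freshly allocated,
	 * write-combined, physically contiguous buffer stored in
	 * obj->phys_handle. */
	ret = i915_gem_object_attach_phys(obj, align);
	if (ret)
		return ret;

	/* From here on, pwrite ioctls against this object are routed to
	 * i915_gem_phys_pwrite(), and the contents are written back to
	 * shmem on free while the object is still WILLNEED. */
	return 0;
}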
@@ -930,8 +1047,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
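For context, the hunk above is the kernel side of a plain pwrite ioctl. A hedged userspace-side sketch in libdrm style, assuming fd is an open i915 DRM fd, handle names a GEM object the kernel has attached with i915_gem_object_attach_phys(), and cursor_data is an arbitrary user buffer:

/* Userspace sketch: this pwrite now lands in i915_gem_phys_pwrite()
 * and copies straight into the contiguous phys buffer. */
struct drm_i915_gem_pwrite pwrite = {
	.handle   = handle,
	.offset   = 0,
	.size     = sizeof(cursor_data),
	.data_ptr = (uintptr_t)cursor_data,
};

if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
	perror("pwrite");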
@@ -3257,12 +3374,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
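Note: widening flags to uint64_t lets callers pack a minimum-offset requirement into the page-aligned upper bits next to the PIN_* bit flags. A sketch of the encoding, assuming the PIN_OFFSET_BIAS and PIN_OFFSET_MASK definitions that accompany PIN_MAPPABLE in i915_drv.h:

/* Request a binding no lower than 256 KiB into the address space.
 * The bias must be page aligned so it cannot collide with the low
 * PIN_* flag bits; PIN_OFFSET_MASK extracts it again on the other
 * side, as in the hunk above. */
uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | (256 * 1024);

ret = i915_gem_object_pin(obj, vm, alignment, flags);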
@@ -3291,11 +3410,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}
 
@@ -3312,12 +3431,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
 						  DRM_MM_SEARCH_DEFAULT,
 						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3892,11 +4014,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -3913,15 +4054,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 		return -EBUSY;
 
-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
 		WARN(vma->pin_count,
 		     "bo is already pinned with incorrect alignment:"
 		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 		     " obj->map_and_fenceable=%d\n",
 		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
 		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
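Note: with the bias folded into i915_vma_misplaced(), an existing binding below a newly requested bias is handled exactly like a misaligned one: unbind, then rebind within the [start, end) window. A worked example with hypothetical numbers:

/* Suppose vma->node.start == 0x1000 and the caller passes
 * PIN_OFFSET_BIAS | 0x20000.  Then flags & PIN_OFFSET_MASK == 0x20000,
 * so vma->node.start < 0x20000 holds, i915_vma_misplaced() returns
 * true, and i915_gem_object_pin() unbinds and rebinds the VMA at or
 * above 0x20000. */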
@@ -4281,9 +4420,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;
 
@@ -4301,6 +4437,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4792,190 +4930,6 @@ i915_gem_load(struct drm_device *dev)
 	register_oom_notifier(&dev_priv->mm.oom_notifier);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
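Note: the block deleted above retires the fixed-slot pool (I915_GEM_PHYS_CURSOR_0 through I915_MAX_PHYS_OBJECT, indexed via dev_priv->mm.phys_objs) in favour of the per-object phys_handle introduced earlier in this diff. A hedged migration sketch for a legacy caller; the real call sites live in intel_display.c and intel_overlay.c, outside this file, and the id arithmetic below is illustrative only:

/* Before: bind the object to a global pool slot keyed by id. */
ret = i915_gem_attach_phys_object(dev, obj,
				  I915_GEM_PHYS_CURSOR_0 + pipe, align);

/* After: the object allocates and owns its contiguous buffer. */
ret = i915_gem_object_attach_phys(obj, align);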