author     Dave Airlie <airlied@linux.ie>   2008-12-30 05:31:46 -0500
committer  Dave Airlie <airlied@linux.ie>   2009-01-16 03:45:06 -0500
commit     71acb5eb8d95b371f4cdd88a47f3c83c870d1c8f (patch)
tree       36c370582e69cd61f2f088c31808fc02c6c86f1a
parent     e285f3cd2c376d2336f9a383241a98266363c7d4 (diff)
drm/i915: add support for physical memory objects
This is an initial patch to add support for objects which need physically
contiguous main RAM: cursors and overlay registers on older chipsets. These
objects are bound at cursor bind time, much like pinning, and we copy the
data to/from the backing-store object into the real one on attach/detach.

Notes: possibly over the top in the attach/detach operations; no overlay
support yet.

Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h       |  23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c       | 189
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  |  32
4 files changed, 233 insertions, 13 deletions
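
As the commit message describes, on chipsets whose cursor hardware needs physically contiguous memory the cursor bo is attached to a phys object (its backing pages are copied into a buffer obtained with drm_pci_alloc()) and the hardware is programmed with that buffer's bus address; otherwise the bo is pinned into the GTT as before. Below is a minimal sketch of that consumer-side choice using the helpers this patch adds; the wrapper name cursor_bind_sketch and its addr_out parameter are illustrative only, everything else (helpers, fields, constants) comes from the patch itself.

/*
 * Sketch only: mirrors the intel_crtc_cursor_set() change in this patch.
 * "Physical" cursors get a contiguous buffer and are addressed by bus
 * address; others are pinned into the GTT and addressed by GTT offset.
 */
static int cursor_bind_sketch(struct drm_device *dev,
                              struct drm_gem_object *bo,
                              int pipe, uint32_t *addr_out)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = bo->driver_private;
        int ret;

        if (dev_priv->cursor_needs_physical) {
                /* copies the bo's backing pages into the phys object */
                ret = i915_gem_attach_phys_object(dev, bo,
                                pipe == 0 ? I915_GEM_PHYS_CURSOR_0 :
                                            I915_GEM_PHYS_CURSOR_1);
                if (ret)
                        return ret;
                *addr_out = obj_priv->phys_obj->handle->busaddr;
        } else {
                ret = i915_gem_object_pin(bo, PAGE_SIZE);
                if (ret)
                        return ret;
                *addr_out = obj_priv->gtt_offset;
        }
        return 0;
}

The reverse path is symmetric: when a cursor bo is replaced, the old one is either detached (i915_gem_detach_phys_object() copies the contiguous buffer back into the backing pages) or unpinned, as in the intel_display.c hunk at the end of the patch.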
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 868f574363ad..bbadf1c04142 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1160,6 +1160,8 @@ int i915_driver_unload(struct drm_device *dev)
         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                 intel_modeset_cleanup(dev);
 
+                i915_gem_free_all_phys_object(dev);
+
                 mutex_lock(&dev->struct_mutex);
                 i915_gem_cleanup_ringbuffer(dev);
                 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 563de18063fd..e13518252007 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,6 +72,18 @@ enum pipe {
 #define WATCH_INACTIVE  0
 #define WATCH_PWRITE    0
 
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+        int id;
+        struct page **page_list;
+        drm_dma_handle_t *handle;
+        struct drm_gem_object *cur_obj;
+};
+
 typedef struct _drm_i915_ring_buffer {
         int tail_mask;
         unsigned long Size;
@@ -358,6 +370,9 @@ typedef struct drm_i915_private {
         uint32_t bit_6_swizzle_x;
         /** Bit 6 swizzling required for Y tiling */
         uint32_t bit_6_swizzle_y;
+
+        /* storage for physical objects */
+        struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
         } mm;
 } drm_i915_private_t;
 
@@ -436,6 +451,9 @@ struct drm_i915_gem_object {
         /** User space pin count and filp owning the pin */
         uint32_t user_pin_count;
         struct drm_file *pin_filp;
+
+        /** for phy allocated objects */
+        struct drm_i915_gem_phys_object *phys_obj;
 };
 
 /**
@@ -598,6 +616,11 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                       int write);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+                                struct drm_gem_object *obj, int id);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+                                 struct drm_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1384d6686555..96316fd47233 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,9 @@ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+                                struct drm_i915_gem_pwrite *args,
+                                struct drm_file *file_priv);
 
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                      unsigned long end)
@@ -386,8 +389,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
          * pread/pwrite currently are reading and writing from the CPU
          * perspective, requiring manual detiling by the client.
          */
-        if (obj_priv->tiling_mode == I915_TILING_NONE &&
-            dev->gtt_total != 0)
+        if (obj_priv->phys_obj)
+                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+                 dev->gtt_total != 0)
                 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
         else
                 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
@@ -2858,6 +2863,9 @@ void i915_gem_free_object(struct drm_gem_object *obj)
         while (obj_priv->pin_count > 0)
                 i915_gem_object_unpin(obj);
 
+        if (obj_priv->phys_obj)
+                i915_gem_detach_phys_object(dev, obj);
+
         i915_gem_object_unbind(obj);
 
         list = &obj->map_list;
@@ -3293,3 +3301,180 @@ i915_gem_load(struct drm_device *dev)
 
         i915_gem_detect_bit_6_swizzle(dev);
 }
+
+/*
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
+int i915_gem_init_phys_object(struct drm_device *dev,
+                              int id, int size)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_phys_object *phys_obj;
+        int ret;
+
+        if (dev_priv->mm.phys_objs[id - 1] || !size)
+                return 0;
+
+        phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+        if (!phys_obj)
+                return -ENOMEM;
+
+        phys_obj->id = id;
+
+        phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+        if (!phys_obj->handle) {
+                ret = -ENOMEM;
+                goto kfree_obj;
+        }
+#ifdef CONFIG_X86
+        set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+
+        dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+        return 0;
+kfree_obj:
+        drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+        return ret;
+}
+
+void i915_gem_free_phys_object(struct drm_device *dev, int id)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_phys_object *phys_obj;
+
+        if (!dev_priv->mm.phys_objs[id - 1])
+                return;
+
+        phys_obj = dev_priv->mm.phys_objs[id - 1];
+        if (phys_obj->cur_obj) {
+                i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
+        }
+
+#ifdef CONFIG_X86
+        set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+        drm_pci_free(dev, phys_obj->handle);
+        kfree(phys_obj);
+        dev_priv->mm.phys_objs[id - 1] = NULL;
+}
+
+void i915_gem_free_all_phys_object(struct drm_device *dev)
+{
+        int i;
+
+        for (i = 0; i < I915_MAX_PHYS_OBJECT; i++)
+                i915_gem_free_phys_object(dev, i);
+}
+
+void i915_gem_detach_phys_object(struct drm_device *dev,
+                                 struct drm_gem_object *obj)
+{
+        struct drm_i915_gem_object *obj_priv;
+        int i;
+        int ret;
+        int page_count;
+
+        obj_priv = obj->driver_private;
+        if (!obj_priv->phys_obj)
+                return;
+
+        ret = i915_gem_object_get_page_list(obj);
+        if (ret)
+                goto out;
+
+        page_count = obj->size / PAGE_SIZE;
+
+        for (i = 0; i < page_count; i++) {
+                char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+                memcpy(dst, src, PAGE_SIZE);
+                kunmap_atomic(dst, KM_USER0);
+        }
+        drm_clflush_pages(obj_priv->page_list, page_count);
+        drm_agp_chipset_flush(dev);
+out:
+        obj_priv->phys_obj->cur_obj = NULL;
+        obj_priv->phys_obj = NULL;
+}
+
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+                            struct drm_gem_object *obj, int id)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_object *obj_priv;
+        int ret = 0;
+        int page_count;
+        int i;
+
+        if (id > I915_MAX_PHYS_OBJECT)
+                return -EINVAL;
+
+        obj_priv = obj->driver_private;
+
+        if (obj_priv->phys_obj) {
+                if (obj_priv->phys_obj->id == id)
+                        return 0;
+                i915_gem_detach_phys_object(dev, obj);
+        }
+
+
+        /* create a new object */
+        if (!dev_priv->mm.phys_objs[id - 1]) {
+                ret = i915_gem_init_phys_object(dev, id,
+                                                obj->size);
+                if (ret) {
+                        DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size);
+                        goto out;
+                }
+        }
+
+        /* bind to the object */
+        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
+        obj_priv->phys_obj->cur_obj = obj;
+
+        ret = i915_gem_object_get_page_list(obj);
+        if (ret) {
+                DRM_ERROR("failed to get page list\n");
+                goto out;
+        }
+
+        page_count = obj->size / PAGE_SIZE;
+
+        for (i = 0; i < page_count; i++) {
+                char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+                memcpy(dst, src, PAGE_SIZE);
+                kunmap_atomic(src, KM_USER0);
+        }
+
+        return 0;
+out:
+        return ret;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+                     struct drm_i915_gem_pwrite *args,
+                     struct drm_file *file_priv)
+{
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        void *obj_addr;
+        int ret;
+        char __user *user_data;
+
+        user_data = (char __user *) (uintptr_t) args->data_ptr;
+        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+
+        DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
+        ret = copy_from_user(obj_addr, user_data, args->size);
+        if (ret)
+                return -EFAULT;
+
+        drm_agp_chipset_flush(dev);
+        return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4372acff5a01..114a7a1a8740 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1020,17 +1020,23 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                 return -ENOMEM;
         }
 
-        if (dev_priv->cursor_needs_physical) {
-                addr = dev->agp->base + obj_priv->gtt_offset;
-        } else {
+        /* we only need to pin inside GTT if cursor is non-phy */
+        if (!dev_priv->cursor_needs_physical) {
+                ret = i915_gem_object_pin(bo, PAGE_SIZE);
+                if (ret) {
+                        DRM_ERROR("failed to pin cursor bo\n");
+                        drm_gem_object_unreference(bo);
+                        return ret;
+                }
                 addr = obj_priv->gtt_offset;
-        }
-
-        ret = i915_gem_object_pin(bo, PAGE_SIZE);
-        if (ret) {
-                DRM_ERROR("failed to pin cursor bo\n");
-                drm_gem_object_unreference(bo);
-                return ret;
+        } else {
+                ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+                if (ret) {
+                        DRM_ERROR("failed to attach phys object\n");
+                        drm_gem_object_unreference(bo);
+                        return ret;
+                }
+                addr = obj_priv->phys_obj->handle->busaddr;
         }
 
         temp = 0;
@@ -1043,7 +1049,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
         I915_WRITE(base, addr);
 
         if (intel_crtc->cursor_bo) {
-                i915_gem_object_unpin(intel_crtc->cursor_bo);
+                if (dev_priv->cursor_needs_physical) {
+                        if (intel_crtc->cursor_bo != bo)
+                                i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+                } else
+                        i915_gem_object_unpin(intel_crtc->cursor_bo);
                 drm_gem_object_unreference(intel_crtc->cursor_bo);
         }
 