author    Jesse Barnes <jbarnes@virtuousgeek.org>    2008-11-12 13:03:55 -0500
committer Dave Airlie <airlied@linux.ie>             2008-12-29 02:47:23 -0500
commit    de151cf67ce52ed2d88083daa5e60c7858947329 (patch)
tree      860c46d95061b261a7cab24a6ab57b68a0146f3a /drivers
parent    a2c0a97b784f837300f7b0869c82ab712c600952 (diff)
drm/i915: add GEM GTT mapping support
Use the new core GEM object mapping code to allow GTT mapping of GEM
objects on i915. The fault handler will make sure a fence register is
allocated too, if the object in question is tiled.

Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c          1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          7
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        488
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c   1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         20
6 files changed, 548 insertions(+), 4 deletions(-)
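For orientation before the diff itself: userspace consumes this interface in two steps, first asking the kernel for the object's fake mmap offset, then passing that offset to mmap(2) on the DRM file descriptor. A minimal, hedged sketch follows; it assumes the DRM_IOCTL_I915_GEM_MMAP_GTT number and struct drm_i915_gem_mmap_gtt layout from the i915_drm.h header of this era, and elides error reporting.

/* Hedged userspace sketch: map a GEM object through the GTT aperture.
 * 'fd' is an open DRM fd, 'handle' a valid GEM handle for this client.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>	/* DRM_IOCTL_I915_GEM_MMAP_GTT (assumed) */

static void *map_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* Ends up in i915_gem_mmap_gtt_ioctl() below, which allocates
	 * the fake offset on first use. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) < 0)
		return MAP_FAILED;

	/* The mmap routes through drm_gem_mmap(); the first touch then
	 * faults into i915_gem_fault(), which binds the object into the
	 * GTT and inserts the PTE. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}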
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index dacdf3c577cb..1b81b6a6d81b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -991,6 +991,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c91648320a8b..e0d996ed9026 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -81,6 +81,10 @@ static int i915_resume(struct drm_device *dev)
 	return 0;
 }
 
+static struct vm_operations_struct i915_gem_vm_ops = {
+	.fault = i915_gem_fault,
+};
+
 static struct drm_driver driver = {
 	/* don't use mtrr's here, the Xserver or user space app should
 	 * deal with them for intel hardware.
@@ -113,13 +117,14 @@ static struct drm_driver driver = {
 	.proc_cleanup = i915_gem_proc_cleanup,
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
+	.gem_vm_ops = &i915_gem_vm_ops,
 	.ioctls = i915_ioctls,
 	.fops = {
 		 .owner = THIS_MODULE,
 		 .open = drm_open,
 		 .release = drm_release,
 		 .ioctl = drm_ioctl,
-		 .mmap = drm_mmap,
+		 .mmap = drm_gem_mmap,
 		 .poll = drm_poll,
 		 .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
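The two hooks above are all the driver has to provide: .mmap is switched to the shared drm_gem_mmap() entry point, and gem_vm_ops supplies the fault handler the core should install on the VMA. A simplified, non-authoritative sketch of what the core does with them (reference counting, size checks and locking omitted):

/* Sketch of drm_gem_mmap() (simplified, not the verbatim DRM core):
 * resolve the fake offset back to a GEM object, then point the VMA at
 * the driver's fault handler.
 */
int drm_gem_mmap_sketch(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_hash_item *hash;
	struct drm_map *map;

	/* The fake offset was hashed in by i915_gem_create_mmap_offset() */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash))
		return -EINVAL;
	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP;
	vma->vm_ops = dev->driver->gem_vm_ops;	/* i915_gem_vm_ops */
	vma->vm_private_data = map->handle;	/* the GEM object */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return 0;
}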
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ba096f9a7641..85a072e80637 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,11 @@ struct drm_i915_master_private {
 	drm_local_map_t *sarea;
 	struct _drm_i915_sarea *sarea_priv;
 };
+#define I915_FENCE_REG_NONE -1
+
+struct drm_i915_fence_reg {
+	struct drm_gem_object *obj;
+};
 
 typedef struct drm_i915_private {
 	struct drm_device *dev;
@@ -149,6 +154,10 @@ typedef struct drm_i915_private {
 
 	struct intel_opregion opregion;
 
+	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+	int fence_reg_start; /* 3 if userland hasn't ioctl'd us yet */
+	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
 	/* Register state */
 	u8 saveLBB;
 	u32 saveDSPACNTR;
@@ -367,6 +376,21 @@ struct drm_i915_gem_object {
 	 * This is the same as gtt_space->start
 	 */
 	uint32_t gtt_offset;
+	/**
+	 * Required alignment for the object
+	 */
+	uint32_t gtt_alignment;
+	/**
+	 * Fake offset for use by mmap(2)
+	 */
+	uint64_t mmap_offset;
+
+	/**
+	 * Fence register bits (if any) for this object.  Will be set
+	 * as needed when mapped into the GTT.
+	 * Protected by dev->struct_mutex.
+	 */
+	int fence_reg;
 
 	/** Boolean whether this object has a valid gtt offset. */
 	int gtt_bound;
@@ -379,6 +403,7 @@ struct drm_i915_gem_object {
 
 	/** Current tiling mode for the object. */
 	uint32_t tiling_mode;
+	uint32_t stride;
 
 	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
 	uint32_t agp_type;
@@ -493,6 +518,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -529,6 +556,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -584,6 +612,13 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
 #define I915_READ8(reg)		readb(dev_priv->regs + (reg))
 #define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
+#ifdef writeq
+#define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
+#else
+#define I915_WRITE64(reg, val)	(writel(val, dev_priv->regs + (reg)), \
+				 writel(upper_32_bits(val), dev_priv->regs + \
+					(reg) + 4))
+#endif
 
 #define I915_VERBOSE 0
 
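The I915_WRITE64 fallback matters because the 965 fence registers programmed below are 64 bits wide, but 32-bit kernels may lack writeq. A hedged sketch of what the non-writeq branch expands to; the two MMIO writes are not atomic, which is tolerable here because fence updates are serialized under dev->struct_mutex:

/* Sketch of the non-writeq branch of I915_WRITE64 above: the low dword
 * goes to 'reg', the high dword to 'reg + 4'.
 */
static inline void i915_write64_split(void __iomem *regs, u32 reg, u64 val)
{
	writel((u32)val, regs + reg);			/* low 32 bits */
	writel(upper_32_bits(val), regs + reg + 4);	/* high 32 bits */
}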
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24fe8c10b4b2..0ac977112f72 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -51,6 +51,11 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+				       unsigned alignment);
+static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev);
 
 static void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -529,6 +534,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vma: VMA in question
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace.  The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room.  So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	pgoff_t page_offset;
+	unsigned long pfn;
+	int ret = 0;
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	/* Now bind it into the GTT if needed */
+	mutex_lock(&dev->struct_mutex);
+	if (!obj_priv->gtt_space) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return VM_FAULT_SIGBUS;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	/* Need a new fence register? */
+	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+	    obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_get_fence_reg(obj);
+
+	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+		page_offset;
+
+	/* Finally, remap it using the new GTT offset */
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	switch (ret) {
+	case -ENOMEM:
+	case -EAGAIN:
+		return VM_FAULT_OOM;
+	case -EFAULT:
+	case -EBUSY:
+		DRM_ERROR("can't insert pfn?? fault or busy...\n");
+		return VM_FAULT_SIGBUS;
+	default:
+		return VM_FAULT_NOPAGE;
+	}
+}
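A worked example of the offset arithmetic above, with hypothetical addresses; vmf->pgoff cannot be used directly because it carries the fake mmap offset rather than a page index into the object:

/* Hypothetical numbers only:
 *   vma->vm_start        = 0xb7400000	(base of the user mapping)
 *   vmf->virtual_address = 0xb7403000	(faulting address)
 *   page_offset          = (0xb7403000 - 0xb7400000) >> PAGE_SHIFT = 3
 *
 *   dev->agp->base       = 0xd0000000	(aperture base)
 *   obj_priv->gtt_offset = 0x00100000	(object's GTT offset)
 *   pfn = ((0xd0000000 + 0x00100000) >> PAGE_SHIFT) + 3
 * so page 3 of the object is mapped through the aperture.
 */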
+
+/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
+	int ret = 0;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = drm_calloc(1, sizeof(struct drm_map_list),
+			       DRM_MEM_DRIVER);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+						    obj->size / PAGE_SIZE, 0, 0);
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+		DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
+		goto out_free_mm;
+	}
+
+	/* By now we should be all set; any drm_mmap request on the offset
+	 * below will get to our mmap & fault handler */
+	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+
+	return ret;
+}
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping if needed.
+ */
+static uint32_t
+i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int start, i;
+
+	/*
+	 * Minimum alignment is 4k (GTT page size), but might be greater
+	 * if a fence register is needed for the object.
+	 */
+	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	if (IS_I9XX(dev))
+		start = 1024*1024;
+	else
+		start = 512*1024;
+
+	for (i = start; i < obj->size; i <<= 1)
+		;
+
+	return i;
+}
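Worked examples for the rounding loop above, with hypothetical object sizes; pre-965 fences cover a power-of-two region, so a tiled object must be aligned to the smallest such region that contains it:

/* Hypothetical sizes:
 *   9xx, start = 1 MB:   obj->size = 3 MB   -> 1M, 2M, 4M -> returns 4 MB
 *   830, start = 512 KB: obj->size = 300 KB -> loop body never runs
 *                                           -> returns 512 KB
 * On 965, or for untiled objects, the 4 KB GTT page size suffices.
 */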
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj_priv = obj->driver_private;
+
+	if (!obj_priv->mmap_offset) {
+		ret = i915_gem_create_mmap_offset(obj);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
+	args->offset = obj_priv->mmap_offset;
+
+	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
+
+	/* Make sure the alignment is correct for fence regs etc */
+	if (obj_priv->agp_mem &&
+	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * Pull it into the GTT so that we have a page list (makes the
+	 * initial fault faster and any subsequent flushing possible).
+	 */
+	if (!obj_priv->agp_mem) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
@@ -726,6 +977,7 @@ i915_gem_retire_request(struct drm_device *dev,
 	 */
 	if (obj_priv->last_rendering_seqno != request->seqno)
 		return;
+
 #if WATCH_LRU
 	DRM_INFO("%s: retire %d moves to inactive list %p\n",
 		 __func__, request->seqno, obj);
@@ -956,6 +1208,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	loff_t offset;
 	int ret = 0;
 
 #if WATCH_BUF
@@ -991,6 +1244,13 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	BUG_ON(obj_priv->active);
 
+	/* blow away mappings if mapped through GTT */
+	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
+	unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);
+
 	i915_gem_object_free_page_list(obj);
 
 	if (obj_priv->gtt_space) {
@@ -1149,6 +1409,203 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
 	return 0;
 }
 
+static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint64_t val;
+
+	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+		    0xfffff000) << 32;
+	val |= obj_priv->gtt_offset & 0xfffff000;
+	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+	val |= I965_FENCE_REG_VALID;
+
+	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+}
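A worked example of the 965 encoding above, for a hypothetical X-tiled object at GTT offset 0x00100000, 1 MB in size, with a 4096-byte stride:

/* Hypothetical object:
 *   end   = (0x00100000 + 0x00100000 - 4096) & 0xfffff000 = 0x001ff000
 *   pitch = (4096 / 128) - 1 = 31 -> 31 << I965_FENCE_PITCH_SHIFT = 0x7c
 *   val   = (0x001ff000ULL << 32) | 0x00100000 | 0x7c | I965_FENCE_REG_VALID
 *         = 0x001ff0000010007d
 * i.e. the upper dword bounds the fenced region, the lower dword holds
 * the start address, pitch and valid bit.
 */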
+
+static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+		return;
+	}
+
+	if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
+						       IS_I945GM(dev) ||
+						       IS_G33(dev)))
+		pitch_val = (obj_priv->stride / 128) - 1;
+	else
+		pitch_val = (obj_priv->stride / 512) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I915_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+		return;
+	}
+
+	pitch_val = (obj_priv->stride / 128) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I830_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+/**
+ * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ *
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ */
+static void
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_fence_reg *reg = NULL;
+	int i, ret;
+
+	switch (obj_priv->tiling_mode) {
+	case I915_TILING_NONE:
+		WARN(1, "allocating a fence for non-tiled object?\n");
+		break;
+	case I915_TILING_X:
+		WARN(obj_priv->stride & (512 - 1),
+		     "object is X tiled but has non-512B pitch\n");
+		break;
+	case I915_TILING_Y:
+		WARN(obj_priv->stride & (128 - 1),
+		     "object is Y tiled but has non-128B pitch\n");
+		break;
+	}
+
+	/* First try to find a free reg */
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			break;
+	}
+
+	/* None available, try to steal one or wait for a user to finish */
+	if (i == dev_priv->num_fence_regs) {
+		struct drm_i915_gem_object *old_obj_priv = NULL;
+		loff_t offset;
+
+try_again:
+		/* Could try to use LRU here instead... */
+		for (i = dev_priv->fence_reg_start;
+		     i < dev_priv->num_fence_regs; i++) {
+			reg = &dev_priv->fence_regs[i];
+			old_obj_priv = reg->obj->driver_private;
+			if (!old_obj_priv->pin_count)
+				break;
+		}
+
+		/*
+		 * Now things get ugly... we have to wait for one of the
+		 * objects to finish before trying again.
+		 */
+		if (i == dev_priv->num_fence_regs) {
+			ret = i915_gem_object_wait_rendering(reg->obj);
+			if (ret) {
+				WARN(ret, "wait_rendering failed: %d\n", ret);
+				return;
+			}
+			goto try_again;
+		}
+
+		/*
+		 * Zap this virtual mapping so we can set up a fence again
+		 * for this object next time we need it.
+		 */
+		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
+		unmap_mapping_range(dev->dev_mapping, offset,
+				    reg->obj->size, 1);
+		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+	}
+
+	obj_priv->fence_reg = i;
+	reg->obj = obj;
+
+	if (IS_I965G(dev))
+		i965_write_fence_reg(reg);
+	else if (IS_I9XX(dev))
+		i915_write_fence_reg(reg);
+	else
+		i830_write_fence_reg(reg);
+}
+
+/**
+ * i915_gem_clear_fence_reg - clear out fence register info
+ * @obj: object to clear
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+static void
+i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (IS_I965G(dev))
+		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+	else
+		I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+
+	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
+}
+
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -2351,12 +2808,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 
 	obj->driver_private = obj_priv;
 	obj_priv->obj = obj;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+
 	return 0;
 }
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2364,6 +2827,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	map = list->map;
+	if (map) {
+		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
@@ -2432,8 +2909,7 @@ i915_gem_idle(struct drm_device *dev)
 	 */
 	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
 		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
-					I915_GEM_DOMAIN_GTT));
+	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
 
 	if (seqno == 0) {
 		mutex_unlock(&dev->struct_mutex);
@@ -2758,5 +3234,13 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
+	/* Old X drivers will take 0-2 for front, back, depth buffers */
+	dev_priv->fence_reg_start = 3;
+
+	if (IS_I965G(dev))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
 	i915_gem_detect_bit_6_swizzle(dev);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a8cb69469c64..241f39b7f460 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -208,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		}
 	}
 	obj_priv->tiling_mode = args->tiling_mode;
+	obj_priv->stride = args->stride;
 
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d24aaeb8a45..47e6bafeb743 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -175,9 +175,26 @@
 #define DISPLAY_PLANE_B			(1<<20)
 
 /*
- * Instruction and interrupt control regs
+ * Fence registers
  */
+#define FENCE_REG_830_0			0x2000
+#define I830_FENCE_START_MASK		0x07f80000
+#define I830_FENCE_TILING_Y_SHIFT	12
+#define I830_FENCE_SIZE_BITS(size)	((get_order(size >> 19) - 1) << 8)
+#define I830_FENCE_PITCH_SHIFT		4
+#define I830_FENCE_REG_VALID		(1<<0)
+
+#define I915_FENCE_START_MASK		0x0ff00000
+#define I915_FENCE_SIZE_BITS(size)	((get_order(size >> 20) - 1) << 8)
 
+#define FENCE_REG_965_0			0x03000
+#define I965_FENCE_PITCH_SHIFT		2
+#define I965_FENCE_TILING_Y_SHIFT	1
+#define I965_FENCE_REG_VALID		(1<<0)
+
+/*
+ * Instruction and interrupt control regs
+ */
 #define PRB0_TAIL	0x02030
 #define PRB0_HEAD	0x02034
 #define PRB0_START	0x02038
@@ -245,6 +262,7 @@
 #define CM0_RC_OP_FLUSH_DISABLE (1<<0)
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
 
+
 /*
  * Framebuffer compression (915+ only)
  */
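Tying the register map back to the allocator: the driver programs up to 8 fences from FENCE_REG_830_0 on pre-965 parts (4 bytes apart) and 16 from FENCE_REG_965_0 on 965 (8 bytes apart), matching num_fence_regs in i915_gem_load(). A small helper sketch, not part of the patch, using the definitions above:

/* Illustrative only: MMIO offset of fence register N. */
static inline u32 fence_reg_offset(int is_965, int regnum)
{
	return is_965 ? FENCE_REG_965_0 + regnum * 8
		      : FENCE_REG_830_0 + regnum * 4;
}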