author    Jesse Barnes <jbarnes@virtuousgeek.org>    2008-11-12 13:03:55 -0500
committer Dave Airlie <airlied@linux.ie>             2008-12-29 02:47:23 -0500
commit    de151cf67ce52ed2d88083daa5e60c7858947329 (patch)
tree      860c46d95061b261a7cab24a6ab57b68a0146f3a /drivers/gpu/drm/i915/i915_gem.c
parent    a2c0a97b784f837300f7b0869c82ab712c600952 (diff)
drm/i915: add GEM GTT mapping support
Use the new core GEM object mapping code to allow GTT mapping of GEM
objects on i915. The fault handler will make sure a fence register is
allocated too, if the object in question is tiled.
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
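
For reference, the userspace side of this path is two steps: fetch the fake mmap offset with the new ioctl, then mmap(2) that offset on the DRM fd. A minimal sketch (not part of this patch), assuming the DRM_IOCTL_I915_GEM_MMAP_GTT request and struct drm_i915_gem_mmap_gtt (handle in, offset out) added by the companion i915_drm.h change:

/* Hypothetical userspace sketch of the GTT-mapping flow this patch enables.
 * Assumes libdrm-style headers export DRM_IOCTL_I915_GEM_MMAP_GTT and
 * struct drm_i915_gem_mmap_gtt; error handling trimmed to the essentials.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_bo_through_gtt(int drm_fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_gtt arg;
        void *ptr;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;

        /* Step 1: ask the kernel for the fake mmap offset; this lands in
         * i915_gem_mmap_gtt_ioctl() below. */
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return NULL;

        /* Step 2: mmap the DRM fd at that offset.  The first touch of each
         * page faults into i915_gem_fault(), which binds the object into the
         * GTT (allocating a fence register if it is tiled) and inserts the
         * aperture PTE. */
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   drm_fd, arg.offset);
        return ptr == MAP_FAILED ? NULL : ptr;
}

Note that population is lazy: nothing is bound or fenced until the first access faults into the handler.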
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--    drivers/gpu/drm/i915/i915_gem.c    492
1 files changed, 490 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24fe8c10b4b2..0ac977112f72 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -51,6 +51,11 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+                                       unsigned alignment);
+static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev);
 
 static void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -529,6 +534,256 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vma: VMA in question
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace.  The fault handler takes care of binding the object into
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process's address space.
+ *
+ * Note that handling the fault may involve evicting existing objects
+ * from the GTT and/or fence registers to make room.  So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	pgoff_t page_offset;
+	unsigned long pfn;
+	int ret = 0;
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	/* Now bind it into the GTT if needed */
+	mutex_lock(&dev->struct_mutex);
+	if (!obj_priv->gtt_space) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return VM_FAULT_SIGBUS;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	/* Need a new fence register? */
+	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+	    obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_get_fence_reg(obj);
+
+	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+		page_offset;
+
+	/* Finally, remap it using the new GTT offset */
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	switch (ret) {
+	case -ENOMEM:
+	case -EAGAIN:
+		return VM_FAULT_OOM;
+	case -EFAULT:
+	case -EBUSY:
+		DRM_ERROR("can't insert pfn?? fault or busy...\n");
+		return VM_FAULT_SIGBUS;
+	default:
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
+	int ret = 0;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = drm_calloc(1, sizeof(struct drm_map_list),
+			       DRM_MEM_DRIVER);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+						    obj->size / PAGE_SIZE, 0, 0);
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+	if (ret) {
+		DRM_ERROR("failed to add to map hash\n");
+		goto out_free_mm;
+	}
+
+	/* By now we should be all set; any drm_mmap request on the offset
+	 * below will get to our mmap & fault handler */
+	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+
+	return ret;
+}
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping if needed.
+ */
+static uint32_t
+i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int start, i;
+
+	/*
+	 * Minimum alignment is 4k (GTT page size), but might be greater
+	 * if a fence register is needed for the object.
+	 */
+	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	if (IS_I9XX(dev))
+		start = 1024*1024;
+	else
+		start = 512*1024;
+
+	for (i = start; i < obj->size; i <<= 1)
+		;
+
+	return i;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj_priv = obj->driver_private;
+
+	if (!obj_priv->mmap_offset) {
+		ret = i915_gem_create_mmap_offset(obj);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
+	args->offset = obj_priv->mmap_offset;
+
+	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
+
+	/* Make sure the alignment is correct for fence regs etc */
+	if (obj_priv->agp_mem &&
+	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * Pull it into the GTT so that we have a page list (makes the
+	 * initial fault faster and any subsequent flushing possible).
+	 */
+	if (!obj_priv->agp_mem) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
@@ -726,6 +981,7 @@ i915_gem_retire_request(struct drm_device *dev,
 	 */
 	if (obj_priv->last_rendering_seqno != request->seqno)
 		return;
+
 #if WATCH_LRU
 	DRM_INFO("%s: retire %d moves to inactive list %p\n",
 		 __func__, request->seqno, obj);
@@ -956,6 +1212,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	loff_t offset;
 	int ret = 0;
 
 #if WATCH_BUF
@@ -991,6 +1248,13 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	BUG_ON(obj_priv->active);
 
+	/* blow away mappings if mapped through GTT */
+	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
+	unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);
+
 	i915_gem_object_free_page_list(obj);
 
 	if (obj_priv->gtt_space) {
@@ -1149,6 +1413,203 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
 	return 0;
 }
 
+static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint64_t val;
+
+	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+			 0xfffff000) << 32;
+	val |= obj_priv->gtt_offset & 0xfffff000;
+	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+	val |= I965_FENCE_REG_VALID;
+
+	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+}
+
+static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
+		return;
+	}
+
+	if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
+						       IS_I945GM(dev) ||
+						       IS_G33(dev)))
+		pitch_val = (obj_priv->stride / 128) - 1;
+	else
+		pitch_val = (obj_priv->stride / 512) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I915_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
+		return;
+	}
+
+	pitch_val = (obj_priv->stride / 128) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I830_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+
+}
+
+/**
+ * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ *
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ */
+static void
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_fence_reg *reg = NULL;
+	int i, ret;
+
+	switch (obj_priv->tiling_mode) {
+	case I915_TILING_NONE:
+		WARN(1, "allocating a fence for non-tiled object?\n");
+		break;
+	case I915_TILING_X:
+		WARN(obj_priv->stride & (512 - 1),
+		     "object is X tiled but has non-512B pitch\n");
+		break;
+	case I915_TILING_Y:
+		WARN(obj_priv->stride & (128 - 1),
+		     "object is Y tiled but has non-128B pitch\n");
+		break;
+	}
+
+	/* First try to find a free reg */
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			break;
+	}
+
+	/* None available, try to steal one or wait for a user to finish */
+	if (i == dev_priv->num_fence_regs) {
+		struct drm_i915_gem_object *old_obj_priv = NULL;
+		loff_t offset;
+
+try_again:
+		/* Could try to use LRU here instead... */
+		for (i = dev_priv->fence_reg_start;
+		     i < dev_priv->num_fence_regs; i++) {
+			reg = &dev_priv->fence_regs[i];
+			old_obj_priv = reg->obj->driver_private;
+			if (!old_obj_priv->pin_count)
+				break;
+		}
+
+		/*
+		 * Now things get ugly... we have to wait for one of the
+		 * objects to finish before trying again.
+		 */
+		if (i == dev_priv->num_fence_regs) {
+			ret = i915_gem_object_wait_rendering(reg->obj);
+			if (ret) {
+				WARN(ret, "wait_rendering failed: %d\n", ret);
+				return;
+			}
+			goto try_again;
+		}
+
+		/*
+		 * Zap this virtual mapping so we can set up a fence again
+		 * for this object next time we need it.
+		 */
+		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
+		unmap_mapping_range(dev->dev_mapping, offset,
+				    reg->obj->size, 1);
+		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+	}
+
+	obj_priv->fence_reg = i;
+	reg->obj = obj;
+
+	if (IS_I965G(dev))
+		i965_write_fence_reg(reg);
+	else if (IS_I9XX(dev))
+		i915_write_fence_reg(reg);
+	else
+		i830_write_fence_reg(reg);
+}
+
+/**
+ * i915_gem_clear_fence_reg - clear out fence register info
+ * @obj: object to clear
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+static void
+i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (IS_I965G(dev))
+		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+	else
+		I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+
+	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
+}
+
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -2351,12 +2812,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 
 	obj->driver_private = obj_priv;
 	obj_priv->obj = obj;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+
 	return 0;
 }
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2364,6 +2831,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	map = list->map;
+	if (map) {
+		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
@@ -2432,8 +2913,7 @@ i915_gem_idle(struct drm_device *dev)
 	 */
 	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
 		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
-					I915_GEM_DOMAIN_GTT));
+	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
 
 	if (seqno == 0) {
 		mutex_unlock(&dev->struct_mutex);
@@ -2758,5 +3238,13 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
+	/* Old X drivers will take 0-2 for front, back, depth buffers */
+	dev_priv->fence_reg_start = 3;
+
+	if (IS_I965G(dev))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
 	i915_gem_detect_bit_6_swizzle(dev);
 }
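
A worked example of the alignment rule added in i915_gem_get_gtt_alignment() above: when a fence register may be needed, a tiled object must be bound at a multiple of the smallest fence size that covers it. A standalone sketch of that computation (hypothetical helper, not part of the patch; the untiled/965 4KB case is handled separately by the early return):

#include <stdint.h>

/* Mirrors the loop in i915_gem_get_gtt_alignment(): round the object size
 * up to the next power of two, no smaller than the platform's minimum
 * fence granule (1MB on i9xx, 512KB on i8xx).  e.g. a 3MB tiled object on
 * i915 walks 1MB -> 2MB -> 4MB, so it must be bound at a 4MB-aligned GTT
 * offset; a 300KB object returns the 1MB floor unchanged.
 */
static uint32_t fence_alignment(uint32_t size, int is_i9xx)
{
        uint32_t i, start = is_i9xx ? 1024 * 1024 : 512 * 1024;

        for (i = start; i < size; i <<= 1)
                ;
        return i;
}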