aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2012-11-14 11:14:03 -0500
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-01-20 07:11:13 -0500
commit4b5aed62121eddfc47fd8f2739ca6b802b97390e (patch)
tree070be43496b476a545dc9a190046d134b4e4aa43
parente5c653777986b40e2986d2c918847fddbcba3a34 (diff)
drm/i915: move dev_priv->mm out of line
That one is really big, since it contains tons of comments explaining how things work. Which is nice ;-) Reviewed-by: Damien Lespiau <damien.lespiau@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h202
1 files changed, 102 insertions, 100 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3189034deeb8..ea3226852ea8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -665,6 +665,107 @@ struct intel_l3_parity {
665 struct work_struct error_work; 665 struct work_struct error_work;
666}; 666};
667 667
668struct i915_gem_mm {
669 /** Bridge to intel-gtt-ko */
670 struct intel_gtt *gtt;
671 /** Memory allocator for GTT stolen memory */
672 struct drm_mm stolen;
673 /** Memory allocator for GTT */
674 struct drm_mm gtt_space;
675 /** List of all objects in gtt_space. Used to restore gtt
676 * mappings on resume */
677 struct list_head bound_list;
678 /**
679 * List of objects which are not bound to the GTT (thus
680 * are idle and not used by the GPU) but still have
681 * (presumably uncached) pages still attached.
682 */
683 struct list_head unbound_list;
684
685 /** Usable portion of the GTT for GEM */
686 unsigned long stolen_base; /* limited to low memory (32-bit) */
687
688 int gtt_mtrr;
689
690 /** PPGTT used for aliasing the PPGTT with the GTT */
691 struct i915_hw_ppgtt *aliasing_ppgtt;
692
693 struct shrinker inactive_shrinker;
694 bool shrinker_no_lock_stealing;
695
696 /**
697 * List of objects currently involved in rendering.
698 *
699 * Includes buffers having the contents of their GPU caches
700 * flushed, not necessarily primitives. last_rendering_seqno
701 * represents when the rendering involved will be completed.
702 *
703 * A reference is held on the buffer while on this list.
704 */
705 struct list_head active_list;
706
707 /**
708 * LRU list of objects which are not in the ringbuffer and
709 * are ready to unbind, but are still in the GTT.
710 *
711 * last_rendering_seqno is 0 while an object is in this list.
712 *
713 * A reference is not held on the buffer while on this list,
714 * as merely being GTT-bound shouldn't prevent its being
715 * freed, and we'll pull it off the list in the free path.
716 */
717 struct list_head inactive_list;
718
719 /** LRU list of objects with fence regs on them. */
720 struct list_head fence_list;
721
722 /**
723 * We leave the user IRQ off as much as possible,
724 * but this means that requests will finish and never
725 * be retired once the system goes idle. Set a timer to
726 * fire periodically while the ring is running. When it
727 * fires, go retire requests.
728 */
729 struct delayed_work retire_work;
730
731 /**
732 * Are we in a non-interruptible section of code like
733 * modesetting?
734 */
735 bool interruptible;
736
737 /**
738 * Flag if the X Server, and thus DRM, is not currently in
739 * control of the device.
740 *
741 * This is set between LeaveVT and EnterVT. It needs to be
742 * replaced with a semaphore. It also needs to be
743 * transitioned away from for kernel modesetting.
744 */
745 int suspended;
746
747 /**
748 * Flag if the hardware appears to be wedged.
749 *
750 * This is set when attempts to idle the device timeout.
751 * It prevents command submission from occurring and makes
752 * every pending request fail
753 */
754 atomic_t wedged;
755
756 /** Bit 6 swizzling required for X tiling */
757 uint32_t bit_6_swizzle_x;
758 /** Bit 6 swizzling required for Y tiling */
759 uint32_t bit_6_swizzle_y;
760
761 /* storage for physical objects */
762 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
763
764 /* accounting, useful for userland debugging */
765 size_t object_memory;
766 u32 object_count;
767};
768
668typedef struct drm_i915_private { 769typedef struct drm_i915_private {
669 struct drm_device *dev; 770 struct drm_device *dev;
670 struct kmem_cache *slab; 771 struct kmem_cache *slab;
@@ -806,106 +907,7 @@ typedef struct drm_i915_private {
806 907
807 struct i915_gtt gtt; 908 struct i915_gtt gtt;
808 909
809 struct { 910 struct i915_gem_mm mm;
810 /** Bridge to intel-gtt-ko */
811 struct intel_gtt *gtt;
812 /** Memory allocator for GTT stolen memory */
813 struct drm_mm stolen;
814 /** Memory allocator for GTT */
815 struct drm_mm gtt_space;
816 /** List of all objects in gtt_space. Used to restore gtt
817 * mappings on resume */
818 struct list_head bound_list;
819 /**
820 * List of objects which are not bound to the GTT (thus
821 * are idle and not used by the GPU) but still have
822 * (presumably uncached) pages still attached.
823 */
824 struct list_head unbound_list;
825
826 /** Usable portion of the GTT for GEM */
827 unsigned long stolen_base; /* limited to low memory (32-bit) */
828
829 int gtt_mtrr;
830
831 /** PPGTT used for aliasing the PPGTT with the GTT */
832 struct i915_hw_ppgtt *aliasing_ppgtt;
833
834 struct shrinker inactive_shrinker;
835 bool shrinker_no_lock_stealing;
836
837 /**
838 * List of objects currently involved in rendering.
839 *
840 * Includes buffers having the contents of their GPU caches
841 * flushed, not necessarily primitives. last_rendering_seqno
842 * represents when the rendering involved will be completed.
843 *
844 * A reference is held on the buffer while on this list.
845 */
846 struct list_head active_list;
847
848 /**
849 * LRU list of objects which are not in the ringbuffer and
850 * are ready to unbind, but are still in the GTT.
851 *
852 * last_rendering_seqno is 0 while an object is in this list.
853 *
854 * A reference is not held on the buffer while on this list,
855 * as merely being GTT-bound shouldn't prevent its being
856 * freed, and we'll pull it off the list in the free path.
857 */
858 struct list_head inactive_list;
859
860 /** LRU list of objects with fence regs on them. */
861 struct list_head fence_list;
862
863 /**
864 * We leave the user IRQ off as much as possible,
865 * but this means that requests will finish and never
866 * be retired once the system goes idle. Set a timer to
867 * fire periodically while the ring is running. When it
868 * fires, go retire requests.
869 */
870 struct delayed_work retire_work;
871
872 /**
873 * Are we in a non-interruptible section of code like
874 * modesetting?
875 */
876 bool interruptible;
877
878 /**
879 * Flag if the X Server, and thus DRM, is not currently in
880 * control of the device.
881 *
882 * This is set between LeaveVT and EnterVT. It needs to be
883 * replaced with a semaphore. It also needs to be
884 * transitioned away from for kernel modesetting.
885 */
886 int suspended;
887
888 /**
889 * Flag if the hardware appears to be wedged.
890 *
891 * This is set when attempts to idle the device timeout.
892 * It prevents command submission from occurring and makes
893 * every pending request fail
894 */
895 atomic_t wedged;
896
897 /** Bit 6 swizzling required for X tiling */
898 uint32_t bit_6_swizzle_x;
899 /** Bit 6 swizzling required for Y tiling */
900 uint32_t bit_6_swizzle_y;
901
902 /* storage for physical objects */
903 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
904
905 /* accounting, useful for userland debugging */
906 size_t object_memory;
907 u32 object_count;
908 } mm;
909 911
910 /* Kernel Modesetting */ 912 /* Kernel Modesetting */
911 913