Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 453
1 file changed, 304 insertions(+), 149 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12ab3bdea54d..c338b4443fd9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_
 
+#include <uapi/drm/i915_drm.h>
+
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -40,6 +42,7 @@
 #include <linux/backlight.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/pm_qos.h>
 
 /* General customization:
  */
@@ -83,7 +86,12 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')
 
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define I915_GEM_GPU_DOMAINS \
+        (I915_GEM_DOMAIN_RENDER | \
+         I915_GEM_DOMAIN_SAMPLER | \
+         I915_GEM_DOMAIN_COMMAND | \
+         I915_GEM_DOMAIN_INSTRUCTION | \
+         I915_GEM_DOMAIN_VERTEX)
 
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
@@ -101,6 +109,19 @@ struct intel_pch_pll {
 };
 #define I915_NUM_PLLS 2
 
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+        uint32_t tu;
+        uint32_t gmch_m;
+        uint32_t gmch_n;
+        uint32_t link_m;
+        uint32_t link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+                            int pixel_clock, int link_clock,
+                            struct intel_link_m_n *m_n);
+
 struct intel_ddi_plls {
         int spll_refcount;
         int wrpll1_refcount;
@@ -279,6 +300,7 @@ struct drm_i915_display_funcs {
                           struct drm_i915_gem_object *obj);
         int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                             int x, int y);
+        void (*hpd_irq_setup)(struct drm_device *dev);
         /* clock updates for mode set */
         /* cursor updates */
         /* render clock increase/decrease */
@@ -318,6 +340,7 @@ struct drm_i915_gt_funcs {
         DEV_INFO_FLAG(has_llc)
 
 struct intel_device_info {
+        u32 display_mmio_offset;
         u8 gen;
         u8 is_mobile:1;
         u8 is_i85x:1;
@@ -345,6 +368,49 @@ struct intel_device_info {
         u8 has_llc:1;
 };
 
+enum i915_cache_level {
+        I915_CACHE_NONE = 0,
+        I915_CACHE_LLC,
+        I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+        unsigned long start;            /* Start offset of used GTT */
+        size_t total;                   /* Total size GTT can map */
+        size_t stolen_size;             /* Total size of stolen memory */
+
+        unsigned long mappable_end;     /* End offset that we can CPU map */
+        struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
+        phys_addr_t mappable_base;      /* PA of our GMADR */
+
+        /** "Graphics Stolen Memory" holds the global PTEs */
+        void __iomem *gsm;
+
+        bool do_idle_maps;
+        dma_addr_t scratch_page_dma;
+        struct page *scratch_page;
+
+        /* global gtt ops */
+        int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+                         size_t *stolen);
+        void (*gtt_remove)(struct drm_device *dev);
+        void (*gtt_clear_range)(struct drm_device *dev,
+                                unsigned int first_entry,
+                                unsigned int num_entries);
+        void (*gtt_insert_entries)(struct drm_device *dev,
+                                   struct sg_table *st,
+                                   unsigned int pg_start,
+                                   enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+
 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
@@ -354,6 +420,16 @@ struct i915_hw_ppgtt {
         uint32_t pd_offset;
         dma_addr_t *pt_dma_addr;
         dma_addr_t scratch_page_dma_addr;
+
+        /* pte functions, mirroring the interface of the global gtt. */
+        void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+                            unsigned int first_entry,
+                            unsigned int num_entries);
+        void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+                               struct sg_table *st,
+                               unsigned int pg_start,
+                               enum i915_cache_level cache_level);
+        void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };
 
 
@@ -580,6 +656,9 @@ struct intel_gen6_power_mgmt {
         struct mutex hw_lock;
 };
 
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;
+
 struct intel_ilk_power_mgmt {
         u8 cur_delay;
         u8 min_delay;
@@ -620,8 +699,156 @@ struct intel_l3_parity {
         struct work_struct error_work;
 };
 
+struct i915_gem_mm {
+        /** Memory allocator for GTT stolen memory */
+        struct drm_mm stolen;
+        /** Memory allocator for GTT */
+        struct drm_mm gtt_space;
+        /** List of all objects in gtt_space. Used to restore gtt
+         *  mappings on resume */
+        struct list_head bound_list;
+        /**
+         * List of objects which are not bound to the GTT (thus
+         * are idle and not used by the GPU) but still have
+         * (presumably uncached) pages still attached.
+         */
+        struct list_head unbound_list;
+
+        /** Usable portion of the GTT for GEM */
+        unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+        int gtt_mtrr;
+
+        /** PPGTT used for aliasing the PPGTT with the GTT */
+        struct i915_hw_ppgtt *aliasing_ppgtt;
+
+        struct shrinker inactive_shrinker;
+        bool shrinker_no_lock_stealing;
+
+        /**
+         * List of objects currently involved in rendering.
+         *
+         * Includes buffers having the contents of their GPU caches
+         * flushed, not necessarily primitives. last_rendering_seqno
+         * represents when the rendering involved will be completed.
+         *
+         * A reference is held on the buffer while on this list.
+         */
+        struct list_head active_list;
+
+        /**
+         * LRU list of objects which are not in the ringbuffer and
+         * are ready to unbind, but are still in the GTT.
+         *
+         * last_rendering_seqno is 0 while an object is in this list.
+         *
+         * A reference is not held on the buffer while on this list,
+         * as merely being GTT-bound shouldn't prevent its being
+         * freed, and we'll pull it off the list in the free path.
+         */
+        struct list_head inactive_list;
+
+        /** LRU list of objects with fence regs on them. */
+        struct list_head fence_list;
+
+        /**
+         * We leave the user IRQ off as much as possible,
+         * but this means that requests will finish and never
+         * be retired once the system goes idle. Set a timer to
+         * fire periodically while the ring is running. When it
+         * fires, go retire requests.
+         */
+        struct delayed_work retire_work;
+
+        /**
+         * Are we in a non-interruptible section of code like
+         * modesetting?
+         */
+        bool interruptible;
+
+        /**
+         * Flag if the X Server, and thus DRM, is not currently in
+         * control of the device.
+         *
+         * This is set between LeaveVT and EnterVT. It needs to be
+         * replaced with a semaphore. It also needs to be
+         * transitioned away from for kernel modesetting.
+         */
+        int suspended;
+
+        /** Bit 6 swizzling required for X tiling */
+        uint32_t bit_6_swizzle_x;
+        /** Bit 6 swizzling required for Y tiling */
+        uint32_t bit_6_swizzle_y;
+
+        /* storage for physical objects */
+        struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+        /* accounting, useful for userland debugging */
+        size_t object_memory;
+        u32 object_count;
+};
+
+struct i915_gpu_error {
+        /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+        struct timer_list hangcheck_timer;
+        int hangcheck_count;
+        uint32_t last_acthd[I915_NUM_RINGS];
+        uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+        /* For reset and error_state handling. */
+        spinlock_t lock;
+        /* Protected by the above dev->gpu_error.lock. */
+        struct drm_i915_error_state *first_error;
+        struct work_struct work;
+
+        unsigned long last_reset;
+
+        /**
+         * State variable and reset counter controlling the reset flow
+         *
+         * Upper bits are for the reset counter. This counter is used by the
+         * wait_seqno code to race-free noticed that a reset event happened and
+         * that it needs to restart the entire ioctl (since most likely the
+         * seqno it waited for won't ever signal anytime soon).
+         *
+         * This is important for lock-free wait paths, where no contended lock
+         * naturally enforces the correct ordering between the bail-out of the
+         * waiter and the gpu reset work code.
+         *
+         * Lowest bit controls the reset state machine: Set means a reset is in
+         * progress. This state will (presuming we don't have any bugs) decay
+         * into either unset (successful reset) or the special WEDGED value (hw
+         * terminally sour). All waiters on the reset_queue will be woken when
+         * that happens.
+         */
+        atomic_t reset_counter;
+
+        /**
+         * Special values/flags for reset_counter
+         *
+         * Note that the code relies on
+         * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+         * being true.
+         */
+#define I915_RESET_IN_PROGRESS_FLAG 1
+#define I915_WEDGED 0xffffffff
+
+        /**
+         * Waitqueue to signal when the reset has completed. Used by clients
+         * that wait for dev_priv->mm.wedged to settle.
+         */
+        wait_queue_head_t reset_queue;
+
+        /* For gpu hang simulation. */
+        unsigned int stop_rings;
+};
+
 typedef struct drm_i915_private {
         struct drm_device *dev;
+        struct kmem_cache *slab;
 
         const struct intel_device_info *info;
 
@@ -636,10 +863,11 @@
         /** forcewake_count is protected by gt_lock */
         unsigned forcewake_count;
         /** gt_lock is also taken in irq contexts. */
-        struct spinlock gt_lock;
+        spinlock_t gt_lock;
 
         struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
+
         /** gmbus_mutex protects against concurrent usage of the single hw gmbus
          * controller on different i2c buses. */
         struct mutex gmbus_mutex;
@@ -649,9 +877,11 @@
          */
         uint32_t gpio_mmio_base;
 
+        wait_queue_head_t gmbus_wait_queue;
+
         struct pci_dev *bridge_dev;
         struct intel_ring_buffer ring[I915_NUM_RINGS];
-        uint32_t next_seqno;
+        uint32_t last_seqno, next_seqno;
 
         drm_dma_handle_t *status_page_dmah;
         struct resource mch_res;
@@ -661,31 +891,24 @@
         /* protects the irq masks */
         spinlock_t irq_lock;
 
+        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+        struct pm_qos_request pm_qos;
+
         /* DPIO indirect register protection */
-        spinlock_t dpio_lock;
+        struct mutex dpio_lock;
 
         /** Cached value of IMR to avoid reads in updating the bitfield */
         u32 pipestat[2];
         u32 irq_mask;
         u32 gt_irq_mask;
-        u32 pch_irq_mask;
 
         u32 hotplug_supported_mask;
         struct work_struct hotplug_work;
+        bool enable_hotplug_processing;
 
         int num_pipe;
         int num_pch_pll;
 
-        /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-        struct timer_list hangcheck_timer;
-        int hangcheck_count;
-        uint32_t last_acthd[I915_NUM_RINGS];
-        uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-        unsigned int stop_rings;
-
         unsigned long cfb_size;
         unsigned int cfb_fb;
         enum plane cfb_plane;
@@ -713,7 +936,6 @@
         unsigned int display_clock_mode:1;
         int lvds_ssc_freq;
         unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-        unsigned int lvds_val; /* used for checking LVDS channel mode */
         struct {
                 int rate;
                 int lanes;
@@ -734,11 +956,6 @@
 
         unsigned int fsb_freq, mem_freq, is_ddr3;
 
-        spinlock_t error_lock;
-        /* Protected by dev->error_lock. */
-        struct drm_i915_error_state *first_error;
-        struct work_struct error_work;
-        struct completion error_completion;
         struct workqueue_struct *wq;
 
         /* Display functions */
@@ -753,112 +970,9 @@
         /* Register state */
         bool modeset_on_lid;
 
-        struct {
-                /** Bridge to intel-gtt-ko */
-                struct intel_gtt *gtt;
-                /** Memory allocator for GTT stolen memory */
-                struct drm_mm stolen;
-                /** Memory allocator for GTT */
-                struct drm_mm gtt_space;
-                /** List of all objects in gtt_space. Used to restore gtt
-                 * mappings on resume */
-                struct list_head bound_list;
-                /**
-                 * List of objects which are not bound to the GTT (thus
-                 * are idle and not used by the GPU) but still have
-                 * (presumably uncached) pages still attached.
-                 */
-                struct list_head unbound_list;
-
-                /** Usable portion of the GTT for GEM */
-                unsigned long gtt_start;
-                unsigned long gtt_mappable_end;
-                unsigned long gtt_end;
-
-                struct io_mapping *gtt_mapping;
-                phys_addr_t gtt_base_addr;
-                int gtt_mtrr;
-
-                /** PPGTT used for aliasing the PPGTT with the GTT */
-                struct i915_hw_ppgtt *aliasing_ppgtt;
-
-                struct shrinker inactive_shrinker;
-                bool shrinker_no_lock_stealing;
-
-                /**
-                 * List of objects currently involved in rendering.
-                 *
-                 * Includes buffers having the contents of their GPU caches
-                 * flushed, not necessarily primitives. last_rendering_seqno
-                 * represents when the rendering involved will be completed.
-                 *
-                 * A reference is held on the buffer while on this list.
-                 */
-                struct list_head active_list;
-
-                /**
-                 * LRU list of objects which are not in the ringbuffer and
-                 * are ready to unbind, but are still in the GTT.
-                 *
-                 * last_rendering_seqno is 0 while an object is in this list.
-                 *
-                 * A reference is not held on the buffer while on this list,
-                 * as merely being GTT-bound shouldn't prevent its being
-                 * freed, and we'll pull it off the list in the free path.
-                 */
-                struct list_head inactive_list;
-
-                /** LRU list of objects with fence regs on them. */
-                struct list_head fence_list;
-
-                /**
-                 * We leave the user IRQ off as much as possible,
-                 * but this means that requests will finish and never
-                 * be retired once the system goes idle. Set a timer to
-                 * fire periodically while the ring is running. When it
-                 * fires, go retire requests.
-                 */
-                struct delayed_work retire_work;
-
-                /**
-                 * Are we in a non-interruptible section of code like
-                 * modesetting?
-                 */
-                bool interruptible;
-
-                /**
-                 * Flag if the X Server, and thus DRM, is not currently in
-                 * control of the device.
-                 *
-                 * This is set between LeaveVT and EnterVT. It needs to be
-                 * replaced with a semaphore. It also needs to be
-                 * transitioned away from for kernel modesetting.
-                 */
-                int suspended;
-
-                /**
-                 * Flag if the hardware appears to be wedged.
-                 *
-                 * This is set when attempts to idle the device timeout.
-                 * It prevents command submission from occurring and makes
-                 * every pending request fail
-                 */
-                atomic_t wedged;
-
-                /** Bit 6 swizzling required for X tiling */
-                uint32_t bit_6_swizzle_x;
-                /** Bit 6 swizzling required for Y tiling */
-                uint32_t bit_6_swizzle_y;
-
-                /* storage for physical objects */
-                struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
-                /* accounting, useful for userland debugging */
-                size_t gtt_total;
-                size_t mappable_gtt_total;
-                size_t object_memory;
-                u32 object_count;
-        } mm;
+        struct i915_gtt gtt;
+
+        struct i915_gem_mm mm;
 
         /* Kernel Modesetting */
 
@@ -900,7 +1014,7 @@
         struct drm_mm_node *compressed_fb;
         struct drm_mm_node *compressed_llb;
 
-        unsigned long last_gpu_reset;
+        struct i915_gpu_error gpu_error;
 
         /* list of fbdev register on this device */
         struct intel_fbdev *fbdev;
@@ -940,11 +1054,7 @@ enum hdmi_force_audio {
         HDMI_AUDIO_ON,          /* force turn on HDMI audio */
 };
 
-enum i915_cache_level {
-        I915_CACHE_NONE = 0,
-        I915_CACHE_LLC,
-        I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
 
 struct drm_i915_gem_object_ops {
         /* Interface between the GEM object and its backing storage.
@@ -971,6 +1081,8 @@ struct drm_i915_gem_object {
 
         /** Current space allocated to this object in the GTT, if any. */
         struct drm_mm_node *gtt_space;
+        /** Stolen memory for this object, instead of being backed by shmem. */
+        struct drm_mm_node *stolen;
         struct list_head gtt_list;
 
         /** This object's place on the active/inactive lists */
@@ -1141,7 +1253,7 @@ struct drm_i915_gem_request {
 
 struct drm_i915_file_private {
         struct {
-                struct spinlock lock;
+                spinlock_t lock;
                 struct list_head request_list;
         } mm;
         struct idr context_idr;
@@ -1227,6 +1339,8 @@ struct drm_i915_file_private {
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
+#define HAS_DDI(dev)            (IS_HASWELL(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK        0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
@@ -1323,6 +1437,7 @@ void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
 extern void intel_gt_reset(struct drm_device *dev);
 
@@ -1391,18 +1506,22 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                           const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                   size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                      uint32_t alignment,
                                      bool map_and_fenceable,
                                      bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
@@ -1454,8 +1573,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
         return (int32_t)(seq1 - seq2) >= 0;
 }
 
-extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
-
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
@@ -1481,8 +1600,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
-                                      bool interruptible);
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
+                                      bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+        return unlikely(atomic_read(&error->reset_counter)
+                        & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+        return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
 
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1523,9 +1652,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-                                    uint32_t size,
-                                    int tiling_mode);
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+                           int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                     enum i915_cache_level cache_level);
@@ -1548,7 +1678,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file);
 
 /* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                             struct drm_i915_gem_object *obj,
@@ -1562,12 +1691,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                               enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
-                              unsigned long start,
-                              unsigned long mappable_end,
-                              unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+                               unsigned long mappable_end, unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
         if (INTEL_INFO(dev)->gen < 6)
@@ -1585,9 +1712,22 @@ int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
+inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+                obj->tiling_mode != I915_TILING_NONE;
+}
+
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
@@ -1613,9 +1753,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
 
 /* i915_sysfs.c */
 void i915_setup_sysfs(struct drm_device *dev_priv);
@@ -1672,6 +1812,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                          bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1744,5 +1885,19 @@ __i915_write(64, q)
 #define POSTING_READ(reg)       (void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)     (void)I915_READ16_NOTRACE(reg)
 
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+        if (HAS_PCH_SPLIT(dev))
+                return CPU_VGACNTRL;
+        else if (IS_VALLEYVIEW(dev))
+                return VLV_VGACNTRL;
+        else
+                return VGACNTRL;
+}
 
 #endif
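
Editor's note on the reset flow: the reset_counter comment added in the i915_gpu_error hunk encodes two things in one atomic_t, with the low bit (I915_RESET_IN_PROGRESS_FLAG) marking a reset in flight, the upper bits counting completed resets, and I915_WEDGED as the terminal value. A minimal sketch of how a lock-free waiter is meant to consume that encoding, using only the helpers this patch declares; wait_request_sketch() and __wait_seqno_sketch() are hypothetical names, and the real wait path lives in i915_gem.c:

/*
 * Illustrative sketch only, not part of the patch: sample the counter
 * before sleeping, without holding struct_mutex, and bail out with
 * -EAGAIN if a reset ran (or started) in the meantime, as described in
 * the reset_counter comment above.
 */
static int wait_request_sketch(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	unsigned reset_counter = atomic_read(&error->reset_counter);
	int ret;

	if (i915_terminally_wedged(error))
		return -EIO;		/* hardware is gone for good */
	if (i915_reset_in_progress(error))
		return -EAGAIN;		/* caller restarts the ioctl after the reset */

	ret = __wait_seqno_sketch(dev_priv, seqno);	/* hypothetical sleep/poll helper */

	/* A reset happened while we slept; our seqno may never signal. */
	if (reset_counter != atomic_read(&error->reset_counter))
		return -EAGAIN;

	return ret;
}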
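Editor's note on intel_link_m_n: the new struct carries the data and link M/N ratios programmed for DP and FDI links during modeset. A rough usage sketch under stated assumptions; the mode, bpp, and lane figures below are illustrative, and the actual reduction of the ratios into gmch_m/gmch_n and link_m/link_n is done by intel_link_compute_m_n() in intel_display.c:

/*
 * Illustrative sketch only: filling intel_link_m_n for a DP mode.
 * Conventionally the data ratio tracks pixel_clock*bpp versus
 * link_clock*nlanes*8 and the link ratio tracks pixel_clock versus
 * link_clock; consult intel_display.c for the authoritative math.
 */
static void example_link_m_n(void)
{
	struct intel_link_m_n m_n;

	/* 1920x1080@60 (148500 kHz pixel clock), 24 bpp, 4 lanes,
	 * 2.7 GHz link (270000 kHz symbol clock). */
	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);

	/* Modeset code then programs m_n.gmch_m/gmch_n (data M/N) and
	 * m_n.link_m/link_n (link M/N) into the pipe or FDI registers. */
}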