path: root/drivers/gpu/drm/i915/i915_drv.h
Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  245
1 file changed, 146 insertions(+), 99 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5fabc6c31fec..377c21f531e4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,8 @@
 #include <linux/i2c-algo-bit.h>
 #include <drm/intel-gtt.h>
 #include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
 
 /* General customization:
  */
@@ -63,10 +65,30 @@ enum plane {
 };
 #define plane_name(p) ((p) + 'A')
 
+enum port {
+	PORT_A = 0,
+	PORT_B,
+	PORT_C,
+	PORT_D,
+	PORT_E,
+	I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
+struct intel_pch_pll {
+	int refcount; /* count of number of CRTCs sharing this PLL */
+	int active; /* count of number of active CRTCs (i.e. DPMS on) */
+	bool on; /* is the PLL actually active? Disabled during modeset */
+	int pll_reg;
+	int fp0_reg;
+	int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
 /* Interface history:
  *
  * 1.1: Original.
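
Note: the new port enum and port_name() mirror the existing pipe/plane helpers above. A minimal usage sketch (the loop body is illustrative only and not part of this patch; real callers would key off VBT/strap data):

	enum port port;

	for (port = PORT_A; port < I915_MAX_PORTS; port++)
		DRM_DEBUG_KMS("probing port %c\n", port_name(port));
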
@@ -111,11 +133,11 @@ struct opregion_asle;
 struct drm_i915_private;
 
 struct intel_opregion {
-	struct opregion_header *header;
-	struct opregion_acpi *acpi;
-	struct opregion_swsci *swsci;
-	struct opregion_asle *asle;
-	void *vbt;
+	struct opregion_header __iomem *header;
+	struct opregion_acpi __iomem *acpi;
+	struct opregion_swsci __iomem *swsci;
+	struct opregion_asle __iomem *asle;
+	void __iomem *vbt;
 	u32 __iomem *lid_state;
 };
 #define OPREGION_SIZE (8*1024)
@@ -135,7 +157,6 @@ struct drm_i915_master_private {
 struct drm_i915_fence_reg {
 	struct list_head lru_list;
 	struct drm_i915_gem_object *obj;
-	uint32_t setup_seqno;
 	int pin_count;
 };
 
@@ -151,8 +172,11 @@ struct sdvo_device_mapping {
 struct intel_display_error_state;
 
 struct drm_i915_error_state {
+	struct kref ref;
 	u32 eir;
 	u32 pgtbl_er;
+	u32 ier;
+	bool waiting[I915_NUM_RINGS];
 	u32 pipestat[I915_MAX_PIPES];
 	u32 tail[I915_NUM_RINGS];
 	u32 head[I915_NUM_RINGS];
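
Note: with the kref, an error state can now be referenced beyond dev_priv->first_error. A minimal sketch of the expected take/drop pattern, assuming the i915_error_state_free() release callback declared later in this patch and a dev_priv in scope:

	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	if (error)
		kref_get(&error->ref);	/* hold the state while it is dumped */
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	/* ... print the captured state ... */

	if (error)
		kref_put(&error->ref, i915_error_state_free);
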
@@ -218,11 +242,15 @@ struct drm_i915_display_funcs {
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
+	void (*sanitize_pm)(struct drm_device *dev);
+	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+				 struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
+	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
 			  struct drm_crtc *crtc);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -255,6 +283,9 @@ struct intel_device_info {
 	u8 is_broadwater:1;
 	u8 is_crestline:1;
 	u8 is_ivybridge:1;
+	u8 is_valleyview:1;
+	u8 has_pch_split:1;
+	u8 is_haswell:1;
 	u8 has_fbc:1;
 	u8 has_pipe_cxsr:1;
 	u8 has_hotplug:1;
@@ -291,10 +322,12 @@ enum no_fbc_reason {
 enum intel_pch {
 	PCH_IBX,	/* Ibexpeak PCH */
 	PCH_CPT,	/* Cougarpoint PCH */
+	PCH_LPT,	/* Lynxpoint PCH */
 };
 
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
 
 struct intel_fbdev;
 struct intel_fbc_work;
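
Note: like the existing quirk bits, QUIRK_INVERT_BRIGHTNESS would be tested against the driver's quirks mask; a minimal sketch (level/max are hypothetical locals, for illustration only):

	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
		level = max - level;	/* panel wires brightness control backwards */
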
@@ -302,7 +335,6 @@ struct intel_fbc_work;
 struct intel_gmbus {
 	struct i2c_adapter adapter;
 	bool force_bit;
-	bool has_gpio;
 	u32 reg0;
 	u32 gpio_reg;
 	struct i2c_algo_bit_data bit_algo;
@@ -314,7 +346,6 @@ typedef struct drm_i915_private {
 
 	const struct intel_device_info *info;
 
-	int has_gem;
 	int relative_constants_mode;
 
 	void __iomem *regs;
@@ -326,19 +357,23 @@ typedef struct drm_i915_private {
 	/** gt_lock is also taken in irq contexts. */
 	struct spinlock gt_lock;
 
-	struct intel_gmbus *gmbus;
+	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
 	 * controller on different i2c buses. */
 	struct mutex gmbus_mutex;
 
+	/**
+	 * Base address of the gmbus and gpio block.
+	 */
+	uint32_t gpio_mmio_base;
+
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer ring[I915_NUM_RINGS];
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
 	uint32_t counter;
-	drm_local_map_t hws_map;
 	struct drm_i915_gem_object *pwrctx;
 	struct drm_i915_gem_object *renderctx;
 
@@ -354,6 +389,10 @@ typedef struct drm_i915_private {
 
 	/* protects the irq masks */
 	spinlock_t irq_lock;
+
+	/* DPIO indirect register protection */
+	spinlock_t dpio_lock;
+
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 pipestat[2];
 	u32 irq_mask;
@@ -363,22 +402,20 @@ typedef struct drm_i915_private {
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
 
-	int tex_lru_log_granularity;
-	int allow_batchbuffer;
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
-	int vblank_pipe;
 	int num_pipe;
+	int num_pch_pll;
 
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
-	uint32_t last_acthd;
-	uint32_t last_acthd_bsd;
-	uint32_t last_acthd_blt;
+	uint32_t last_acthd[I915_NUM_RINGS];
 	uint32_t last_instdone;
 	uint32_t last_instdone1;
 
+	unsigned int stop_rings;
+
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
 	enum plane cfb_plane;
@@ -405,6 +442,8 @@ typedef struct drm_i915_private {
 	unsigned int lvds_use_ssc:1;
 	unsigned int display_clock_mode:1;
 	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	unsigned int lvds_val; /* used for checking LVDS channel mode */
 	struct {
 		int rate;
 		int lanes;
@@ -428,6 +467,7 @@ typedef struct drm_i915_private {
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
 	spinlock_t error_lock;
+	/* Protected by dev->error_lock. */
 	struct drm_i915_error_state *first_error;
 	struct work_struct error_work;
 	struct completion error_completion;
@@ -652,24 +692,10 @@ typedef struct drm_i915_private {
 	 */
 	struct list_head inactive_list;
 
-	/**
-	 * LRU list of objects which are not in the ringbuffer but
-	 * are still pinned in the GTT.
-	 */
-	struct list_head pinned_list;
-
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
 
-	/**
-	 * List of objects currently pending being freed.
-	 *
-	 * These objects are no longer in use, but due to a signal
-	 * we were prevented from freeing them at the appointed time.
-	 */
-	struct list_head deferred_free_list;
-
 	/**
 	 * We leave the user IRQ off as much as possible,
 	 * but this means that requests will finish and never
 	 * be retired once the system goes idle. Set a timer to
@@ -717,6 +743,16 @@ typedef struct drm_i915_private {
 		size_t object_memory;
 		u32 object_count;
 	} mm;
+
+	/* Old dri1 support infrastructure, beware the dragons ya fools entering
+	 * here! */
+	struct {
+		unsigned allow_batchbuffer : 1;
+		u32 __iomem *gfx_hws_cpu_addr;
+	} dri1;
+
+	/* Kernel Modesetting */
+
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
@@ -726,7 +762,8 @@ typedef struct drm_i915_private {
 	struct drm_crtc *plane_to_crtc_mapping[3];
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;
-	bool flip_pending_is_done;
+
+	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -781,6 +818,11 @@ typedef struct drm_i915_private {
 	struct drm_property *force_audio_property;
 } drm_i915_private_t;
 
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
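
Note: a minimal usage sketch for the new iterator; intel_ring_initialized() is assumed to be provided by intel_ringbuffer.h, and dev_priv is assumed in scope:

	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		DRM_DEBUG_DRIVER("ring %d (%s) is initialised\n", i, ring->name);
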
@@ -844,7 +886,14 @@ struct drm_i915_gem_object {
 	 * Current tiling mode for the object.
 	 */
 	unsigned int tiling_mode:2;
-	unsigned int tiling_changed:1;
+	/**
+	 * Whether the tiling parameters for the currently associated fence
+	 * register have changed. Note that for the purposes of tracking
+	 * tiling changes we also treat the unfenced register, the register
+	 * slot that the object occupies whilst it executes a fenced
+	 * command (such as BLT on gen2/3), as a "fence".
+	 */
+	unsigned int fence_dirty:1;
 
 	/** How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -881,6 +930,7 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:2;
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
+	unsigned int has_global_gtt_mapping:1;
 
 	struct page **pages;
 
@@ -890,6 +940,8 @@ struct drm_i915_gem_object {
 	struct scatterlist *sg_list;
 	int num_sg;
 
+	/* prime dma-buf support */
+	struct sg_table *sg_table;
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -904,13 +956,12 @@ struct drm_i915_gem_object {
 	 */
 	uint32_t gtt_offset;
 
-	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
 	struct intel_ring_buffer *ring;
 
+	/** Breadcrumb of last rendering to the buffer. */
+	uint32_t last_rendering_seqno;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;
-	struct intel_ring_buffer *last_fenced_ring;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
@@ -918,13 +969,6 @@ struct drm_i915_gem_object {
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
-
-	/**
-	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
-	 * flags which individual pages are valid.
-	 */
-	uint8_t *page_cpu_valid;
-
 	/** User space pin count and filp owning the pin */
 	uint32_t user_pin_count;
 	struct drm_file *pin_filp;
@@ -1001,6 +1045,8 @@ struct drm_i915_file_private {
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
+#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)	(INTEL_INFO(dev)->is_mobile)
 
 /*
@@ -1044,10 +1090,11 @@ struct drm_i915_file_private {
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
@@ -1081,6 +1128,7 @@ extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
 extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
 extern int i915_enable_rc6 __read_mostly;
@@ -1094,6 +1142,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
 				/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
@@ -1104,12 +1153,14 @@ extern void i915_driver_preclose(struct drm_device *dev,
 extern void i915_driver_postclose(struct drm_device *dev,
 				  struct drm_file *file_priv);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
+#endif
 extern int i915_emit_box(struct drm_device *dev,
 			 struct drm_clip_rect *box,
 			 int DR1, int DR4);
-extern int i915_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1119,19 +1170,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
 
 extern void intel_irq_init(struct drm_device *dev);
 
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
+void i915_error_state_free(struct kref *error_ref);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1205,8 +1247,12 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 				    struct intel_ring_buffer *ring,
 				    u32 seqno);
@@ -1229,17 +1275,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 
 u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
 
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-					   struct intel_ring_buffer *pipelined);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
-static inline void
+static inline bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-	}
+		return true;
+	} else
+		return false;
 }
 
 static inline void
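
Note: i915_gem_object_pin_fence() now reports whether a fence register was actually pinned, so callers should balance only successful pins. A minimal sketch (assumes the matching unpin helper defined just below this one):

	bool fenced = i915_gem_object_pin_fence(obj);

	/* ... emit the fenced command ... */

	if (fenced)
		i915_gem_object_unpin_fence(obj);
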
@@ -1260,27 +1307,25 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 				    uint32_t read_domains,
 				    uint32_t write_domain);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-void i915_gem_do_init(struct drm_device *dev,
-		      unsigned long start,
-		      unsigned long mappable_end,
-		      unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno,
-				   bool do_retire);
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
@@ -1301,6 +1346,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
 
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags);
+
+
 /* i915_gem_gtt.c */
 int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
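
Note: these prototypes are the driver-side hooks for dma-buf (PRIME) sharing; the actual wiring lives in i915_drv.c, not in this header. A plausible sketch against the drm_driver hooks of this era (the exact wiring is an assumption, not part of this patch):

	static struct drm_driver driver = {
		/* ... DRIVER_PRIME must also be set in .driver_features ... */
		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
		.gem_prime_export = i915_gem_prime_export,
		.gem_prime_import = i915_gem_prime_import,
		/* ... */
	};
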
@@ -1311,18 +1363,24 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			       struct drm_i915_gem_object *obj);
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 				enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+			      unsigned long start,
+			      unsigned long mappable_end,
+			      unsigned long end);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
 					  unsigned alignment, bool mappable);
-int __must_check i915_gem_evict_everything(struct drm_device *dev,
-					   bool purgeable_only);
-int __must_check i915_gem_evict_inactive(struct drm_device *dev,
-					 bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1354,9 +1412,20 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
 /* intel_i2c.c */
 extern int intel_setup_gmbus(struct drm_device *dev);
 extern void intel_teardown_gmbus(struct drm_device *dev);
+extern inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+		struct drm_i915_private *dev_priv, unsigned port);
 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
 extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
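
Note: callers are expected to validate a port before asking for its adapter. A minimal sketch (the GMBUS_PORT_* values come from i915_reg.h; the error handling is illustrative):

	struct i2c_adapter *adapter;

	if (!intel_gmbus_is_port_valid(port))
		return -EINVAL;

	adapter = intel_gmbus_get_adapter(dev_priv, port);
	/* hand the adapter to i2c_transfer() as before */
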
@@ -1391,6 +1460,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 #endif /* CONFIG_ACPI */
 
 /* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1403,12 +1473,17 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
 
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
 
+extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1420,28 +1495,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
 					    struct intel_display_error_state *error);
 #endif
 
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
-	intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
-	intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
-		LOCK_TEST_WITH_RETURN(dev, file);			\
-} while (0)
-
 /* On SNB platform, before reading ring registers forcewake bit
  * must be set to prevent GT core from power down and stale values being
  * returned.
@@ -1450,12 +1503,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-	(((dev_priv)->info->gen >= 6) && \
-	 ((reg) < 0x40000) && \
-	 ((reg) != FORCEWAKE))
-
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 