Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
 drivers/gpu/drm/i915/i915_drv.h | 729 ++++++++++++++++++++++++++-------------
 1 file changed, 491 insertions(+), 238 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index df77e20e3c3d..0905cd915589 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,7 +58,8 @@ enum pipe {
 	PIPE_A = 0,
 	PIPE_B,
 	PIPE_C,
-	I915_MAX_PIPES
+	_PIPE_EDP,
+	I915_MAX_PIPES = _PIPE_EDP
 };
 #define pipe_name(p) ((p) + 'A')
 
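The new _PIPE_EDP placeholder keeps I915_MAX_PIPES equal to the number of real hardware pipes while still giving eDP its own slot. A minimal standalone sketch (user-space, not driver code) of how the sentinel interacts with pipe_name():

#include <stdio.h>

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, _PIPE_EDP, I915_MAX_PIPES = _PIPE_EDP };
#define pipe_name(p) ((p) + 'A')

int main(void)
{
	int p;

	/* I915_MAX_PIPES aliases _PIPE_EDP (== 3), so loops over real
	 * pipes visit A..C and never the eDP placeholder. */
	for (p = 0; p < I915_MAX_PIPES; p++)
		printf("pipe %c\n", pipe_name(p));
	return 0;
}
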
@@ -66,7 +67,8 @@ enum transcoder {
 	TRANSCODER_A = 0,
 	TRANSCODER_B,
 	TRANSCODER_C,
-	TRANSCODER_EDP = 0xF,
+	TRANSCODER_EDP,
+	I915_MAX_TRANSCODERS
 };
 #define transcoder_name(t) ((t) + 'A')
 
@@ -77,7 +79,7 @@ enum plane {
 };
 #define plane_name(p) ((p) + 'A')
 
-#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
 
 enum port {
 	PORT_A = 0,
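The reworked sprite_name() derives a letter from per-pipe sprite counts. A standalone sketch of the arithmetic (the num_sprites values here are illustrative); note the formula only yields unique names when every pipe reports the same sprite count:

#include <stdio.h>

/* Stand-in for INTEL_INFO(dev)->num_sprites[]; two sprites per pipe here. */
static const int num_sprites[3] = { 2, 2, 2 };

#define sprite_name(p, s) ((p) * num_sprites[(p)] + (s) + 'A')

int main(void)
{
	int p, s;

	/* pipe 0 -> A,B; pipe 1 -> C,D; pipe 2 -> E,F */
	for (p = 0; p < 3; p++)
		for (s = 0; s < num_sprites[p]; s++)
			printf("pipe %d sprite %d -> %c\n", p, s, sprite_name(p, s));
	return 0;
}
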
@@ -112,6 +114,17 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_PORT_DDI_A_2_LANES,
+	POWER_DOMAIN_PORT_DDI_A_4_LANES,
+	POWER_DOMAIN_PORT_DDI_B_2_LANES,
+	POWER_DOMAIN_PORT_DDI_B_4_LANES,
+	POWER_DOMAIN_PORT_DDI_C_2_LANES,
+	POWER_DOMAIN_PORT_DDI_C_4_LANES,
+	POWER_DOMAIN_PORT_DDI_D_2_LANES,
+	POWER_DOMAIN_PORT_DDI_D_4_LANES,
+	POWER_DOMAIN_PORT_DSI,
+	POWER_DOMAIN_PORT_CRT,
+	POWER_DOMAIN_PORT_OTHER,
 	POWER_DOMAIN_VGA,
 	POWER_DOMAIN_AUDIO,
 	POWER_DOMAIN_INIT,
@@ -119,8 +132,6 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_NUM,
 };
 
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
 	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
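Power domain sets are handled as bitmasks over this enum, which is why the enum order matters and why helpers like the removed POWER_DOMAIN_MASK were plain bit arithmetic. A standalone sketch of the pattern (the domain names are local to this example):

#include <stdio.h>

enum example_power_domain { DOM_PIPE_A, DOM_PIPE_B, DOM_TRANSCODER_EDP, DOM_NUM };
#define BIT(n) (1UL << (n))

int main(void)
{
	/* A domain set is a bitmask with one bit per enum value, so the
	 * full mask is simply BIT(DOM_NUM) - 1. */
	unsigned long always_on = BIT(DOM_PIPE_A) | BIT(DOM_TRANSCODER_EDP);

	printf("full mask: %#lx\n", BIT(DOM_NUM) - 1);
	printf("pipe A always on: %d\n", !!(always_on & BIT(DOM_PIPE_A)));
	printf("pipe B always on: %d\n", !!(always_on & BIT(DOM_PIPE_B)));
	return 0;
}
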
@@ -128,14 +139,6 @@ enum intel_display_power_domain {
 	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
 	 (tran) + POWER_DOMAIN_TRANSCODER_A)
 
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
-	BIT(POWER_DOMAIN_PIPE_A) | \
-	BIT(POWER_DOMAIN_TRANSCODER_EDP))
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
-	BIT(POWER_DOMAIN_PIPE_A) | \
-	BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
-	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-
 enum hpd_pin {
 	HPD_NONE = 0,
 	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
@@ -157,11 +160,16 @@ enum hpd_pin {
 	 I915_GEM_DOMAIN_VERTEX)
 
 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
 		if ((intel_encoder)->base.crtc == (__crtc))
 
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+		if ((intel_connector)->base.encoder == (__encoder))
+
 struct drm_i915_private;
 
 enum intel_dpll_id {
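The new for_each_connector_on_encoder() follows the same idiom as for_each_encoder_on_crtc(): a list_for_each_entry() loop whose body is guarded by an if filter, so the macro can take a trailing statement like a plain for. A standalone stand-in (arrays instead of kernel lists) showing the shape of the idiom:

#include <stdio.h>

struct connector { int encoder_id; };

/* Loop-plus-if: the body only runs for elements matching the filter. */
#define for_each_connector_on_encoder(conns, n, c, enc_id) \
	for ((c) = (conns); (c) < (conns) + (n); (c)++) \
		if ((c)->encoder_id == (enc_id))

int main(void)
{
	struct connector conns[] = { { 1 }, { 2 }, { 1 } };
	struct connector *c;

	for_each_connector_on_encoder(conns, 3, c, 1)
		printf("connector %d drives encoder 1\n", (int)(c - conns));
	return 0;
}
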
@@ -295,53 +303,87 @@ struct intel_display_error_state;
 
 struct drm_i915_error_state {
 	struct kref ref;
+	struct timeval time;
+
+	char error_msg[128];
+	u32 reset_count;
+	u32 suspend_count;
+
+	/* Generic register state */
 	u32 eir;
 	u32 pgtbl_er;
 	u32 ier;
 	u32 ccid;
 	u32 derrmr;
 	u32 forcewake;
-	bool waiting[I915_NUM_RINGS];
-	u32 pipestat[I915_MAX_PIPES];
-	u32 tail[I915_NUM_RINGS];
-	u32 head[I915_NUM_RINGS];
-	u32 ctl[I915_NUM_RINGS];
-	u32 ipeir[I915_NUM_RINGS];
-	u32 ipehr[I915_NUM_RINGS];
-	u32 instdone[I915_NUM_RINGS];
-	u32 acthd[I915_NUM_RINGS];
-	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
-	/* our own tracking of ring head and tail */
-	u32 cpu_ring_head[I915_NUM_RINGS];
-	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
 	u32 err_int; /* gen7 */
-	u32 bbstate[I915_NUM_RINGS];
-	u32 instpm[I915_NUM_RINGS];
-	u32 instps[I915_NUM_RINGS];
-	u32 extra_instdone[I915_NUM_INSTDONE_REG];
-	u32 seqno[I915_NUM_RINGS];
-	u64 bbaddr[I915_NUM_RINGS];
-	u32 fault_reg[I915_NUM_RINGS];
 	u32 done_reg;
-	u32 faddr[I915_NUM_RINGS];
+	u32 gac_eco;
+	u32 gam_ecochk;
+	u32 gab_ctl;
+	u32 gfx_mode;
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
+	u32 pipestat[I915_MAX_PIPES];
 	u64 fence[I915_MAX_NUM_FENCES];
-	struct timeval time;
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+
 	struct drm_i915_error_ring {
 		bool valid;
+		/* Software tracked state */
+		bool waiting;
+		int hangcheck_score;
+		enum intel_ring_hangcheck_action hangcheck_action;
+		int num_requests;
+
+		/* our own tracking of ring head and tail */
+		u32 cpu_ring_head;
+		u32 cpu_ring_tail;
+
+		u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+		/* Register state */
+		u32 tail;
+		u32 head;
+		u32 ctl;
+		u32 hws;
+		u32 ipeir;
+		u32 ipehr;
+		u32 instdone;
+		u32 bbstate;
+		u32 instpm;
+		u32 instps;
+		u32 seqno;
+		u64 bbaddr;
+		u64 acthd;
+		u32 fault_reg;
+		u32 faddr;
+		u32 rc_psmi; /* sleep state */
+		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
 		struct drm_i915_error_object {
 			int page_count;
 			u32 gtt_offset;
 			u32 *pages[0];
-		} *ringbuffer, *batchbuffer, *ctx;
+		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
 		struct drm_i915_error_request {
 			long jiffies;
 			u32 seqno;
 			u32 tail;
 		} *requests;
-		int num_requests;
+
+		struct {
+			u32 gfx_mode;
+			union {
+				u64 pdp[4];
+				u32 pp_dir_base;
+			};
+		} vm_info;
+
+		pid_t pid;
+		char comm[TASK_COMM_LEN];
 	} ring[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
 		u32 size;
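The per-ring capture state moves from parallel arrays indexed by ring to a single drm_i915_error_ring entry per ring, so everything belonging to one engine is captured and printed together. A compilable sketch of the two layouts (field names trimmed to a representative subset):

/* Compiles standalone; NUM_RINGS and the fields are trimmed stand-ins. */
#define NUM_RINGS 3

struct before {			/* old layout: one array per register */
	unsigned int head[NUM_RINGS];
	unsigned int tail[NUM_RINGS];
	unsigned int seqno[NUM_RINGS];
};

struct after {			/* new layout: all state for one ring together */
	struct ring_error {
		unsigned int head, tail, seqno;
	} ring[NUM_RINGS];
};
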
@@ -358,15 +400,13 @@ struct drm_i915_error_state {
 		s32 ring:4;
 		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
+
 	u32 *active_bo_count, *pinned_bo_count;
-	struct intel_overlay_error_state *overlay;
-	struct intel_display_error_state *display;
-	int hangcheck_score[I915_NUM_RINGS];
-	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_connector;
 struct intel_crtc_config;
+struct intel_plane_config;
 struct intel_crtc;
 struct intel_limit;
 struct dpll;
@@ -405,6 +445,8 @@ struct drm_i915_display_funcs {
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
 				struct intel_crtc_config *);
+	void (*get_plane_config)(struct intel_crtc *,
+				 struct intel_plane_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
@@ -420,8 +462,9 @@ struct drm_i915_display_funcs {
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj,
 			  uint32_t flags);
-	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-			    int x, int y);
+	int (*update_primary_plane)(struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    int x, int y);
 	void (*hpd_irq_setup)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
@@ -469,7 +512,7 @@ struct intel_uncore {
 	unsigned fw_rendercount;
 	unsigned fw_mediacount;
 
-	struct delayed_work force_wake_work;
+	struct timer_list force_wake_timer;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -504,9 +547,16 @@ struct intel_uncore {
 struct intel_device_info {
 	u32 display_mmio_offset;
 	u8 num_pipes:3;
+	u8 num_sprites[I915_MAX_PIPES];
 	u8 gen;
 	u8 ring_mask; /* Rings supported by the HW */
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+	/* Register offsets for the various display pipes and transcoders */
+	int pipe_offsets[I915_MAX_TRANSCODERS];
+	int trans_offsets[I915_MAX_TRANSCODERS];
+	int dpll_offsets[I915_MAX_PIPES];
+	int dpll_md_offsets[I915_MAX_PIPES];
+	int palette_offsets[I915_MAX_PIPES];
 };
 
 #undef DEFINE_FLAG
@@ -524,6 +574,57 @@ enum i915_cache_level {
 
 typedef uint32_t gen6_gtt_pte_t;
 
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head mm_list;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+
+	/** This vma's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_vma)(struct i915_vma *vma);
+	/* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+	void (*bind_vma)(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags);
+};
+
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_device *dev;
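With the move of pin_count from the object into struct i915_vma, the comment's worst-case arithmetic now applies per binding. A standalone sketch checking that the 4-bit field comfortably holds the worst case the comment describes:

#include <assert.h>

/* The worst case from the comment above: pwrite/pread (1) + pin_ioctl (1) +
 * execbuffer (1) + two buffers pinned per crtc while pageflipping (2*2)
 * = 7 holders, which already fills 3 bits; hence the 4-bit field. */
struct vma_sketch {
	unsigned int pin_count:4;
};
#define MAX_PIN_COUNT 0xf

int main(void)
{
	struct vma_sketch v = { .pin_count = 1 + 1 + 1 + 2 * 2 };

	assert(v.pin_count == 7);
	assert(v.pin_count <= MAX_PIN_COUNT);
	return 0;
}
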
@@ -564,12 +665,12 @@ struct i915_address_space {
 			      enum i915_cache_level level,
 			      bool valid); /* Create a valid PTE */
 	void (*clear_range)(struct i915_address_space *vm,
-			    unsigned int first_entry,
-			    unsigned int num_entries,
+			    uint64_t start,
+			    uint64_t length,
 			    bool use_scratch);
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
-			       unsigned int first_entry,
+			       uint64_t start,
 			       enum i915_cache_level cache_level);
 	void (*cleanup)(struct i915_address_space *vm);
 };
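clear_range()/insert_entries() switch from PTE indices (first_entry, num_entries) to a byte range (start, length); the two conventions differ by a page shift. A standalone sketch of the conversion (a PAGE_SHIFT of 12 is assumed for illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* The old interface counted PTEs; the new one takes a byte range.
	 * The two are related by the page size. */
	unsigned int first_entry = 256, num_entries = 16;
	uint64_t start  = (uint64_t)first_entry << PAGE_SHIFT;
	uint64_t length = (uint64_t)num_entries << PAGE_SHIFT;

	printf("entries [%u,%u) == bytes [0x%llx, 0x%llx)\n",
	       first_entry, first_entry + num_entries,
	       (unsigned long long)start,
	       (unsigned long long)(start + length));
	return 0;
}
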
@@ -603,55 +704,34 @@ struct i915_gtt {
 };
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
+#define GEN8_LEGACY_PDPS 4
 struct i915_hw_ppgtt {
 	struct i915_address_space base;
+	struct kref ref;
+	struct drm_mm_node node;
 	unsigned num_pd_entries;
+	unsigned num_pd_pages; /* gen8+ */
 	union {
 		struct page **pt_pages;
-		struct page *gen8_pt_pages;
+		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
 	};
 	struct page *pd_pages;
-	int num_pd_pages;
-	int num_pt_pages;
 	union {
 		uint32_t pd_offset;
-		dma_addr_t pd_dma_addr[4];
+		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
 	};
 	union {
 		dma_addr_t *pt_dma_addr;
 		dma_addr_t *gen8_pt_dma_addr[4];
 	};
-	int (*enable)(struct drm_device *dev);
-};
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-	struct drm_mm_node node;
-	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
-
-	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
 
-	struct list_head vma_link; /* Link in the object's VMA list */
-
-	/** This vma's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
-
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
+	struct i915_hw_context *ctx;
 
+	int (*enable)(struct i915_hw_ppgtt *ppgtt);
+	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+			 struct intel_ring_buffer *ring,
+			 bool synchronous);
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
 struct i915_ctx_hang_stats {
@@ -676,9 +756,10 @@ struct i915_hw_context {
 	bool is_initialized;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct intel_ring_buffer *ring;
+	struct intel_ring_buffer *last_ring;
 	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
+	struct i915_address_space *vm;
 
 	struct list_head link;
 };
@@ -831,11 +912,7 @@ struct i915_suspend_saved_registers {
 	u32 savePFIT_CONTROL;
 	u32 save_palette_a[256];
 	u32 save_palette_b[256];
-	u32 saveDPFC_CB_BASE;
-	u32 saveFBC_CFB_BASE;
-	u32 saveFBC_LL_BASE;
 	u32 saveFBC_CONTROL;
-	u32 saveFBC_CONTROL2;
 	u32 saveIER;
 	u32 saveIIR;
 	u32 saveIMR;
@@ -905,15 +982,24 @@ struct intel_gen6_power_mgmt {
 	struct work_struct work;
 	u32 pm_iir;
 
-	/* The below variables an all the rps hw state are protected by
-	 * dev->struct mutext. */
-	u8 cur_delay;
-	u8 min_delay;
-	u8 max_delay;
-	u8 rpe_delay;
-	u8 rp1_delay;
-	u8 rp0_delay;
-	u8 hw_max;
+	/* Frequencies are stored in potentially platform dependent multiples.
+	 * In other words, *_freq needs to be multiplied by X to be interesting.
+	 * Soft limits are those which are used for the dynamic reclocking done
+	 * by the driver (raise frequencies under heavy loads, and lower for
+	 * lighter loads). Hard limits are those imposed by the hardware.
+	 *
+	 * A distinction is made for overclocking, which is never enabled by
+	 * default, and is considered to be above the hard limit if it's
+	 * possible at all.
+	 */
+	u8 cur_freq; /* Current frequency (cached, may not == HW) */
+	u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+	u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+	u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+	u8 min_freq; /* AKA RPn. Minimum frequency */
+	u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq; /* "less than" RP0 power/freqency */
+	u8 rp0_freq; /* Non-overclocked max frequency. */
 
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
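The *_freq fields hold hardware ratio values, not MHz. A standalone sketch of the conversion; the 50 MHz unit matches the driver's GT_FREQUENCY_MULTIPLIER on SandyBridge-era parts, while Valleyview converts differently (hence "platform dependent multiples" above). The register-style values below are illustrative:

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50	/* 50 MHz per hardware unit (SNB-era) */

int main(void)
{
	int rp0 = 22, rp1 = 15, rpn = 7;	/* made-up ratio values */

	printf("rp0 %d -> %d MHz\n", rp0, rp0 * GT_FREQUENCY_MULTIPLIER);
	printf("rp1 %d -> %d MHz\n", rp1, rp1 * GT_FREQUENCY_MULTIPLIER);
	printf("rpn %d -> %d MHz\n", rpn, rpn * GT_FREQUENCY_MULTIPLIER);
	return 0;
}
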
@@ -953,6 +1039,36 @@ struct intel_ilk_power_mgmt {
 	struct drm_i915_gem_object *renderctx;
 };
 
+struct drm_i915_private;
+struct i915_power_well;
+
+struct i915_power_well_ops {
+	/*
+	 * Synchronize the well's hw state to match the current sw state, for
+	 * example enable/disable it based on the current refcount. Called
+	 * during driver init and resume time, possibly after first calling
+	 * the enable/disable handlers.
+	 */
+	void (*sync_hw)(struct drm_i915_private *dev_priv,
+			struct i915_power_well *power_well);
+	/*
+	 * Enable the well and resources that depend on it (for example
+	 * interrupts located on the well). Called after the 0->1 refcount
+	 * transition.
+	 */
+	void (*enable)(struct drm_i915_private *dev_priv,
+		       struct i915_power_well *power_well);
+	/*
+	 * Disable the well and resources that depend on it. Called after
+	 * the 1->0 refcount transition.
+	 */
+	void (*disable)(struct drm_i915_private *dev_priv,
+			struct i915_power_well *power_well);
+	/* Returns the hw enabled state. */
+	bool (*is_enabled)(struct drm_i915_private *dev_priv,
+			   struct i915_power_well *power_well);
+};
+
 /* Power well structure for haswell */
 struct i915_power_well {
 	const char *name;
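The old set/is_enabled callbacks give way to an ops vtable driven by refcount transitions: enable fires on 0->1, disable on 1->0. A standalone toy model of that contract (all names are local to this example, not the driver's):

#include <stdio.h>
#include <stdbool.h>

struct well;
struct well_ops {
	void (*enable)(struct well *w);
	void (*disable)(struct well *w);
	bool (*is_enabled)(struct well *w);
};

struct well {
	const char *name;
	int count;		/* usage count, as in i915_power_well */
	bool hw_on;
	const struct well_ops *ops;
};

static void toy_enable(struct well *w)  { w->hw_on = true; }
static void toy_disable(struct well *w) { w->hw_on = false; }
static bool toy_is_enabled(struct well *w) { return w->hw_on; }

static const struct well_ops toy_ops = {
	.enable = toy_enable,
	.disable = toy_disable,
	.is_enabled = toy_is_enabled,
};

static void well_get(struct well *w)
{
	if (w->count++ == 0)	/* 0 -> 1: turn the hardware on */
		w->ops->enable(w);
}

static void well_put(struct well *w)
{
	if (--w->count == 0)	/* 1 -> 0: turn it back off */
		w->ops->disable(w);
}

int main(void)
{
	struct well w = { "display", 0, false, &toy_ops };

	well_get(&w);
	printf("%s on: %d\n", w.name, w.ops->is_enabled(&w));
	well_put(&w);
	printf("%s on: %d\n", w.name, w.ops->is_enabled(&w));
	return 0;
}
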
@@ -960,11 +1076,8 @@ struct i915_power_well {
 	/* power well enable/disable usage count */
 	int count;
 	unsigned long domains;
-	void *data;
-	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
-		    bool enable);
-	bool (*is_enabled)(struct drm_device *dev,
-			   struct i915_power_well *power_well);
+	unsigned long data;
+	const struct i915_power_well_ops *ops;
 };
 
 struct i915_power_domains {
@@ -1061,6 +1174,14 @@ struct i915_gem_mm {
 	 */
 	bool interruptible;
 
+	/**
+	 * Is the GPU currently considered idle, or busy executing userspace
+	 * requests? Whilst idle, we attempt to power down the hardware and
+	 * display clocks. In order to reduce the effect on performance, there
+	 * is a slight delay before we do so.
+	 */
+	bool busy;
+
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
 	/** Bit 6 swizzling required for Y tiling */
@@ -1226,44 +1347,19 @@ struct ilk_wm_values {
 };
 
 /*
- * This struct tracks the state needed for the Package C8+ feature.
- *
- * Package states C8 and deeper are really deep PC states that can only be
- * reached when all the devices on the system allow it, so even if the graphics
- * device allows PC8+, it doesn't mean the system will actually get to these
- * states.
- *
- * Our driver only allows PC8+ when all the outputs are disabled, the power well
- * is disabled and the GPU is idle. When these conditions are met, we manually
- * do the other conditions: disable the interrupts, clocks and switch LCPLL
- * refclk to Fclk.
- *
- * When we really reach PC8 or deeper states (not just when we allow it) we lose
- * the state of some registers, so when we come back from PC8+ we need to
- * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
- * need to take care of the registers kept by RC6.
+ * This struct helps tracking the state needed for runtime PM, which puts the
+ * device in PCI D3 state. Notice that when this happens, nothing on the
+ * graphics device works, even register access, so we don't get interrupts nor
+ * anything else.
  *
- * The interrupt disabling is part of the requirements. We can only leave the
- * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
- * can lock the machine.
+ * Every piece of our code that needs to actually touch the hardware needs to
+ * either call intel_runtime_pm_get or call intel_display_power_get with the
+ * appropriate power domain.
  *
- * Ideally every piece of our code that needs PC8+ disabled would call
- * hsw_disable_package_c8, which would increment disable_count and prevent the
- * system from reaching PC8+. But we don't have a symmetric way to do this for
- * everything, so we have the requirements_met and gpu_idle variables. When we
- * switch requirements_met or gpu_idle to true we decrease disable_count, and
- * increase it in the opposite case. The requirements_met variable is true when
- * all the CRTCs, encoders and the power well are disabled. The gpu_idle
- * variable is true when the GPU is idle.
- *
- * In addition to everything, we only actually enable PC8+ if disable_count
- * stays at zero for at least some seconds. This is implemented with the
- * enable_work variable. We do this so we don't enable/disable PC8 dozens of
- * consecutive times when all screens are disabled and some background app
- * queries the state of our connectors, or we have some application constantly
- * waking up to use the GPU. Only after the enable_work function actually
- * enables PC8+ the "enable" variable will become true, which means that it can
- * be false even if disable_count is 0.
+ * Our driver uses the autosuspend delay feature, which means we'll only really
+ * suspend if we stay with zero refcount for a certain amount of time. The
+ * default value is currently very conservative (see intel_init_runtime_pm), but
+ * it can be changed with the standard runtime PM files from sysfs.
  *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
@@ -1273,17 +1369,11 @@ struct ilk_wm_values {
 * inside struct regsave so when we restore the IRQs they will contain the
 * latest expected values.
  *
- * For more, read "Display Sequences for Package C8" on our documentation.
+ * For more, read the Documentation/power/runtime_pm.txt.
  */
-struct i915_package_c8 {
-	bool requirements_met;
-	bool gpu_idle;
+struct i915_runtime_pm {
+	bool suspended;
 	bool irqs_disabled;
-	/* Only true after the delayed work task actually enables it. */
-	bool enabled;
-	int disable_count;
-	struct mutex lock;
-	struct delayed_work enable_work;
 
 	struct {
 		uint32_t deimr;
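A standalone sketch of the usage pattern the comment describes: every hardware access sits between a get and a put, and the device only really suspends once the refcount has stayed at zero for the autosuspend delay. The stubs below stand in for intel_runtime_pm_get()/intel_runtime_pm_put():

#include <stdio.h>

static int rpm_refcount;

static void rpm_get(void)
{
	if (rpm_refcount++ == 0)
		printf("resume device\n");	/* wake before touching registers */
}

static void rpm_put(void)
{
	if (--rpm_refcount == 0)
		printf("idle; autosuspend timer armed\n");
}

int main(void)
{
	rpm_get();
	printf("safe to touch registers\n");
	rpm_put();
	return 0;
}
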
@@ -1294,10 +1384,6 @@ struct i915_package_c8 {
 	} regsave;
 };
 
-struct i915_runtime_pm {
-	bool suspended;
-};
-
 enum intel_pipe_crc_source {
 	INTEL_PIPE_CRC_SOURCE_NONE,
 	INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1332,7 +1418,7 @@ typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
 
-	const struct intel_device_info *info;
+	const struct intel_device_info info;
 
 	int relative_constants_mode;
 
@@ -1361,11 +1447,11 @@ typedef struct drm_i915_private {
 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;
 
-	atomic_t irq_received;
-
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
+	bool display_irqs_enabled;
+
 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
 	struct pm_qos_request pm_qos;
 
@@ -1379,6 +1465,8 @@ typedef struct drm_i915_private {
 	};
 	u32 gt_irq_mask;
 	u32 pm_irq_mask;
+	u32 pm_rps_events;
+	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
 	struct work_struct hotplug_work;
 	bool enable_hotplug_processing;
@@ -1394,8 +1482,6 @@ typedef struct drm_i915_private {
 	u32 hpd_event_bits;
 	struct timer_list hotplug_reenable_timer;
 
-	int num_plane;
-
 	struct i915_fbc fbc;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
@@ -1445,8 +1531,8 @@ typedef struct drm_i915_private {
 
 	struct sdvo_device_mapping sdvo_mappings[2];
 
-	struct drm_crtc *plane_to_crtc_mapping[3];
-	struct drm_crtc *pipe_to_crtc_mapping[3];
+	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 	wait_queue_head_t pending_flip_queue;
 
 #ifdef CONFIG_DEBUG_FS
@@ -1506,6 +1592,7 @@ typedef struct drm_i915_private {
 
 	u32 fdi_rx_config;
 
+	u32 suspend_count;
 	struct i915_suspend_saved_registers regfile;
 
 	struct {
@@ -1525,8 +1612,6 @@ typedef struct drm_i915_private {
 		struct ilk_wm_values hw;
 	} wm;
 
-	struct i915_package_c8 pc8;
-
 	struct i915_runtime_pm pm;
 
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
@@ -1627,18 +1712,6 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int fence_dirty:1;
 
-	/** How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
 	/**
 	 * Is the object at the current location in the gtt mappable and
 	 * fenceable? Used to avoid costly recalculations.
@@ -1697,7 +1770,6 @@ struct drm_i915_gem_object {
 	/** for phy allocated objects */
 	struct drm_i915_gem_phys_object *phys_obj;
 };
-#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
@@ -1743,6 +1815,7 @@ struct drm_i915_gem_request {
 
 struct drm_i915_file_private {
 	struct drm_i915_private *dev_priv;
+	struct drm_file *file;
 
 	struct {
 		spinlock_t lock;
@@ -1751,11 +1824,95 @@
 	} mm;
 	struct idr context_idr;
 
-	struct i915_ctx_hang_stats hang_stats;
+	struct i915_hw_context *private_default_ctx;
 	atomic_t rps_wait_boost;
 };
 
-#define INTEL_INFO(dev) (to_i915(dev)->info)
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+	/*
+	 * Flags describing how the command parser processes the command.
+	 *
+	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
+	 *                 a length mask if not set
+	 * CMD_DESC_SKIP: The command is allowed but does not follow the
+	 *                standard length encoding for the opcode range in
+	 *                which it falls
+	 * CMD_DESC_REJECT: The command is never allowed
+	 * CMD_DESC_REGISTER: The command should be checked against the
+	 *                    register whitelist for the appropriate ring
+	 * CMD_DESC_MASTER: The command is allowed if the submitting process
+	 *                  is the DRM master
+	 */
+	u32 flags;
+#define CMD_DESC_FIXED    (1<<0)
+#define CMD_DESC_SKIP     (1<<1)
+#define CMD_DESC_REJECT   (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK  (1<<4)
+#define CMD_DESC_MASTER   (1<<5)
+
+	/*
+	 * The command's unique identification bits and the bitmask to get them.
+	 * This isn't strictly the opcode field as defined in the spec and may
+	 * also include type, subtype, and/or subop fields.
+	 */
+	struct {
+		u32 value;
+		u32 mask;
+	} cmd;
+
+	/*
+	 * The command's length. The command is either fixed length (i.e. does
+	 * not include a length field) or has a length field mask. The flag
+	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+	 * a length mask. All command entries in a command table must include
+	 * length information.
+	 */
+	union {
+		u32 fixed;
+		u32 mask;
+	} length;
+
+	/*
+	 * Describes where to find a register address in the command to check
+	 * against the ring's register whitelist. Only valid if flags has the
+	 * CMD_DESC_REGISTER bit set.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+	} reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+	/*
+	 * Describes command checks where a particular dword is masked and
+	 * compared against an expected value. If the command does not match
+	 * the expected value, the parser rejects it. Only valid if flags has
+	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+	 * are valid.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 expected;
+	} bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each ring has an array of tables. Each table consists of an array of command
+ * descriptors, which must be sorted with command opcodes in ascending order.
+ */
+struct drm_i915_cmd_table {
+	const struct drm_i915_cmd_descriptor *table;
+	int count;
+};
+
+#define INTEL_INFO(dev) (&to_i915(dev)->info)
 
 #define IS_I830(dev) ((dev)->pdev->device == 0x3577)
 #define IS_845G(dev) ((dev)->pdev->device == 0x2562)
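An illustrative descriptor and table built from the structures above; the opcode value, mask, and bit check are invented for illustration, not taken from any real i915 command table:

/* A fixed-length command (two dwords, no length field) whose dword 1 must
 * have bit 0 clear, enforced via the bits[] mask/expected mechanism. */
static const struct drm_i915_cmd_descriptor example_desc = {
	.flags = CMD_DESC_FIXED | CMD_DESC_BITMASK,
	.cmd = { .value = 0x18800000, .mask = 0xffff0000 },	/* hypothetical */
	.length = { .fixed = 2 },
	.bits = {
		{ .offset = 1, .mask = 0x1, .expected = 0 },
	},
};

static const struct drm_i915_cmd_table example_table = {
	.table = &example_desc,
	.count = 1,
};
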
@@ -1824,7 +1981,11 @@ struct drm_i915_file_private {
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
+			&& !IS_BROADWELL(dev))
+#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
+#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
 
 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1887,32 +2048,40 @@ struct drm_i915_file_private {
 
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc __always_unused;
-extern int i915_panel_ignore_lid __read_mostly;
-extern unsigned int i915_powersave __read_mostly;
-extern int i915_semaphores __read_mostly;
-extern unsigned int i915_lvds_downclock __read_mostly;
-extern int i915_lvds_channel_mode __read_mostly;
-extern int i915_panel_use_ssc __read_mostly;
-extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern int i915_enable_rc6 __read_mostly;
-extern int i915_enable_fbc __read_mostly;
-extern bool i915_enable_hangcheck __read_mostly;
-extern int i915_enable_ppgtt __read_mostly;
-extern int i915_enable_psr __read_mostly;
-extern unsigned int i915_preliminary_hw_support __read_mostly;
-extern int i915_disable_power_well __read_mostly;
-extern int i915_enable_ips __read_mostly;
-extern bool i915_fastboot __read_mostly;
-extern int i915_enable_pc8 __read_mostly;
-extern int i915_pc8_timeout __read_mostly;
-extern bool i915_prefault_disable __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
+/* i915_params.c */
+struct i915_params {
+	int modeset;
+	int panel_ignore_lid;
+	unsigned int powersave;
+	int semaphores;
+	unsigned int lvds_downclock;
+	int lvds_channel_mode;
+	int panel_use_ssc;
+	int vbt_sdvo_panel_type;
+	int enable_rc6;
+	int enable_fbc;
+	int enable_ppgtt;
+	int enable_psr;
+	unsigned int preliminary_hw_support;
+	int disable_power_well;
+	int enable_ips;
+	int invert_brightness;
+	int enable_cmd_parser;
+	/* leave bools at the end to not create holes */
+	bool enable_hangcheck;
+	bool fastboot;
+	bool prefault_disable;
+	bool reset;
+	bool disable_display;
+};
+extern struct i915_params i915 __read_mostly;
+
 /* i915_dma.c */
 void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
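Call sites now read i915.<field> from one global struct instead of a per-parameter extern. A standalone sketch of the pattern, including the usual i915 tristate idiom where a negative value means "use the per-chip default" (struct and values local to this example):

#include <stdio.h>
#include <stdbool.h>

struct i915_params_sketch { int enable_fbc; bool enable_hangcheck; };
static struct i915_params_sketch i915 = { .enable_fbc = -1, .enable_hangcheck = true };

int main(void)
{
	if (i915.enable_fbc < 0)
		i915.enable_fbc = 0;	/* fall back to a per-chip default */

	printf("fbc=%d hangcheck=%d\n", i915.enable_fbc, i915.enable_hangcheck);
	return 0;
}
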
@@ -1943,8 +2112,12 @@ extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
-void i915_handle_error(struct drm_device *dev, bool wedged);
+__printf(3, 4)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+		       const char *fmt, ...);
 
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+		      int new_delay);
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 
@@ -1955,10 +2128,15 @@ extern void intel_uncore_check_errors(struct drm_device *dev);
 extern void intel_uncore_fini(struct drm_device *dev);
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		     u32 status_mask);
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		      u32 status_mask);
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -2014,22 +2192,27 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
+void i915_init_vm(struct drm_i915_private *dev_priv,
+		  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
+#define PIN_GLOBAL 0x4
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
-				     bool map_and_fenceable,
-				     bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+				     unsigned flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
-int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
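The (map_and_fenceable, nonblocking) bool pair becomes a flags word built from the new PIN_* bits; the GGTT pin helper later in this diff additionally ORs in PIN_GLOBAL. A standalone sketch of combining and testing the bits:

#include <stdio.h>

#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL   0x4

static unsigned ggtt_pin_flags(unsigned flags)
{
	return flags | PIN_GLOBAL;	/* mirrors i915_gem_obj_ggtt_pin() */
}

int main(void)
{
	unsigned flags = ggtt_pin_flags(PIN_MAPPABLE | PIN_NONBLOCK);

	printf("mappable=%d nonblock=%d global=%d\n",
	       !!(flags & PIN_MAPPABLE),
	       !!(flags & PIN_NONBLOCK),
	       !!(flags & PIN_GLOBAL));
	return 0;
}
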
@@ -2096,8 +2279,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 	}
 }
 
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_ring_buffer *ring);
+
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2186,6 +2371,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 				  struct i915_address_space *vm);
 
 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			return true;
+	return false;
+}
 
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
@@ -2217,54 +2409,69 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
-		      bool map_and_fenceable,
-		      bool nonblocking)
+		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-				   map_and_fenceable, nonblocking);
+	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
 }
 
+static inline int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
+}
+
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+
 /* i915_gem_context.c */
+#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file, int to_id);
+			struct drm_file *file, struct i915_hw_context *to);
+struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
 {
-	kref_get(&ctx->ref);
+	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+		kref_get(&ctx->ref);
 }
 
 static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 {
-	kref_put(&ctx->ref, i915_gem_context_free);
+	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+		kref_put(&ctx->ref, i915_gem_context_free);
+}
+
+static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+{
+	return c->id == DEFAULT_CONTEXT_ID;
 }
 
-struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-				struct drm_file *file,
-				u32 id);
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file);
 
-/* i915_gem_gtt.c */
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj);
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+					  struct i915_address_space *vm,
+					  int min_size,
+					  unsigned alignment,
+					  unsigned cache_level,
+					  unsigned flags);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_everything(struct drm_device *dev);
 
+/* i915_gem_gtt.c */
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-			      enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@ -2275,18 +2482,8 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev) | |||
2275 | if (INTEL_INFO(dev)->gen < 6) | 2482 | if (INTEL_INFO(dev)->gen < 6) |
2276 | intel_gtt_chipset_flush(); | 2483 | intel_gtt_chipset_flush(); |
2277 | } | 2484 | } |
2278 | 2485 | int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); | |
2279 | 2486 | bool intel_enable_ppgtt(struct drm_device *dev, bool full); | |
2280 | /* i915_gem_evict.c */ | ||
2281 | int __must_check i915_gem_evict_something(struct drm_device *dev, | ||
2282 | struct i915_address_space *vm, | ||
2283 | int min_size, | ||
2284 | unsigned alignment, | ||
2285 | unsigned cache_level, | ||
2286 | bool mappable, | ||
2287 | bool nonblock); | ||
2288 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); | ||
2289 | int i915_gem_evict_everything(struct drm_device *dev); | ||
2290 | 2487 | ||
2291 | /* i915_gem_stolen.c */ | 2488 | /* i915_gem_stolen.c */ |
2292 | int i915_gem_init_stolen(struct drm_device *dev); | 2489 | int i915_gem_init_stolen(struct drm_device *dev); |
@@ -2305,7 +2502,7 @@ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); | |||
2305 | /* i915_gem_tiling.c */ | 2502 | /* i915_gem_tiling.c */ |
2306 | static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) | 2503 | static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
2307 | { | 2504 | { |
2308 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; | 2505 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2309 | 2506 | ||
2310 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 2507 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
2311 | obj->tiling_mode != I915_TILING_NONE; | 2508 | obj->tiling_mode != I915_TILING_NONE; |
@@ -2343,7 +2540,8 @@ static inline void i915_error_state_buf_release( | |||
2343 | { | 2540 | { |
2344 | kfree(eb->buf); | 2541 | kfree(eb->buf); |
2345 | } | 2542 | } |
2346 | void i915_capture_error_state(struct drm_device *dev); | 2543 | void i915_capture_error_state(struct drm_device *dev, bool wedge, |
2544 | const char *error_msg); | ||
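i915_capture_error_state() now takes a flag and a message alongside the device. A hedged one-liner showing the shape of a call site, assuming the flag marks whether the error wedged the GPU and the string is a human-readable reason (both inferred from the parameter names):

	/* Sketch: snapshot hardware state when a hang is detected. */
	i915_capture_error_state(dev, true /* wedged */, "ring hang detected");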
2347 | void i915_error_state_get(struct drm_device *dev, | 2545 | void i915_error_state_get(struct drm_device *dev, |
2348 | struct i915_error_state_file_priv *error_priv); | 2546 | struct i915_error_state_file_priv *error_priv); |
2349 | void i915_error_state_put(struct i915_error_state_file_priv *error_priv); | 2547 | void i915_error_state_put(struct i915_error_state_file_priv *error_priv); |
@@ -2352,6 +2550,14 @@ void i915_destroy_error_state(struct drm_device *dev); | |||
2352 | void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); | 2550 | void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); |
2353 | const char *i915_cache_level_str(int type); | 2551 | const char *i915_cache_level_str(int type); |
2354 | 2552 | ||
2553 | /* i915_cmd_parser.c */ | ||
2554 | void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring); | ||
2555 | bool i915_needs_cmd_parser(struct intel_ring_buffer *ring); | ||
2556 | int i915_parse_cmds(struct intel_ring_buffer *ring, | ||
2557 | struct drm_i915_gem_object *batch_obj, | ||
2558 | u32 batch_start_offset, | ||
2559 | bool is_master); | ||
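The new command parser exposes a three-call protocol: per-ring setup once at init, a cheap predicate asking whether parsing is needed at all, and the scan itself just before submission. A hedged sketch of how an execbuffer path might thread them together (everything except the three prototypes above is illustrative):

	/* Sketch: i915_cmd_parser_init_ring(ring) is assumed to have run
	 * once during ring initialization.  Reject the batch on failure. */
	static int example_submit_batch(struct intel_ring_buffer *ring,
					struct drm_i915_gem_object *batch_obj,
					u32 batch_start_offset, bool is_master)
	{
		if (i915_needs_cmd_parser(ring)) {
			int ret = i915_parse_cmds(ring, batch_obj,
						  batch_start_offset, is_master);
			if (ret)
				return ret;	/* unsafe commands: refuse to run */
		}

		/* ... dispatch batch_obj to the ring as usual ... */
		return 0;
	}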
2560 | |||
2355 | /* i915_suspend.c */ | 2561 | /* i915_suspend.c */ |
2356 | extern int i915_save_state(struct drm_device *dev); | 2562 | extern int i915_save_state(struct drm_device *dev); |
2357 | extern int i915_restore_state(struct drm_device *dev); | 2563 | extern int i915_restore_state(struct drm_device *dev); |
@@ -2425,10 +2631,12 @@ extern void intel_modeset_suspend_hw(struct drm_device *dev); | |||
2425 | extern void intel_modeset_init(struct drm_device *dev); | 2631 | extern void intel_modeset_init(struct drm_device *dev); |
2426 | extern void intel_modeset_gem_init(struct drm_device *dev); | 2632 | extern void intel_modeset_gem_init(struct drm_device *dev); |
2427 | extern void intel_modeset_cleanup(struct drm_device *dev); | 2633 | extern void intel_modeset_cleanup(struct drm_device *dev); |
2634 | extern void intel_connector_unregister(struct intel_connector *); | ||
2428 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | 2635 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
2429 | extern void intel_modeset_setup_hw_state(struct drm_device *dev, | 2636 | extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
2430 | bool force_restore); | 2637 | bool force_restore); |
2431 | extern void i915_redisable_vga(struct drm_device *dev); | 2638 | extern void i915_redisable_vga(struct drm_device *dev); |
2639 | extern void i915_redisable_vga_power_on(struct drm_device *dev); | ||
2432 | extern bool intel_fbc_enabled(struct drm_device *dev); | 2640 | extern bool intel_fbc_enabled(struct drm_device *dev); |
2433 | extern void intel_disable_fbc(struct drm_device *dev); | 2641 | extern void intel_disable_fbc(struct drm_device *dev); |
2434 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 2642 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
@@ -2463,6 +2671,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, | |||
2463 | */ | 2671 | */ |
2464 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); | 2672 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); |
2465 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); | 2673 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); |
2674 | void assert_force_wake_inactive(struct drm_i915_private *dev_priv); | ||
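The force-wake pair brackets register accesses that need the GT powered up, and the new assert_force_wake_inactive() lets teardown paths verify that every get was balanced by a put. A minimal sketch, assuming FORCEWAKE_ALL as the engine selector (defined elsewhere in the driver, not in this hunk):

	/* Sketch: keep the GT awake across a register read. */
	static u32 example_read_awake(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
		val = I915_READ(reg);		/* GT guaranteed awake here */
		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

		return val;
	}

A quiescent point such as suspend would then call assert_force_wake_inactive(dev_priv) to catch any leaked reference.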
2466 | 2675 | ||
2467 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); | 2676 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); |
2468 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); | 2677 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); |
@@ -2525,9 +2734,26 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); | |||
2525 | #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) | 2734 | #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) |
2526 | #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) | 2735 | #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) |
2527 | 2736 | ||
2737 | /* Be very careful with read/write 64-bit values. On 32-bit machines, they | ||
2738 | * will be implemented using 2 32-bit writes in an arbitrary order with | ||
2739 | * an arbitrary delay between them. This can cause the hardware to | ||
2740 | * act upon the intermediate value, possibly leading to corruption and | ||
2741 | * machine death. You have been warned. | ||
2742 | */ | ||
2528 | #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) | 2743 | #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) |
2529 | #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) | 2744 | #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) |
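To make the warning concrete: on a 32-bit kernel I915_WRITE64() degenerates into two independent dword writes, and the device may latch the register between them. When a register has a safe parking value, the robust pattern is to disable it first and program the halves in an explicit order. A hedged illustration; HYPOTHETICAL_REG64 is not a real register:

	static void example_write64_safely(struct drm_i915_private *dev_priv,
					   u64 val)
	{
		/* UNSAFE on 32-bit: the halves may land in either order,
		 * so the hardware can observe (new upper, old lower) or
		 * the reverse:
		 *	I915_WRITE64(HYPOTHETICAL_REG64, val);
		 */

		/* Safer: park the register, then write the halves in a
		 * known order so no torn value is ever live. */
		I915_WRITE(HYPOTHETICAL_REG64, 0);			/* disable */
		POSTING_READ(HYPOTHETICAL_REG64);
		I915_WRITE(HYPOTHETICAL_REG64 + 4, upper_32_bits(val));	/* high dword */
		I915_WRITE(HYPOTHETICAL_REG64, lower_32_bits(val));	/* low, re-arm */
	}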
2530 | 2745 | ||
2746 | #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ | ||
2747 | u32 upper = I915_READ(upper_reg); \ | ||
2748 | u32 lower = I915_READ(lower_reg); \ | ||
2749 | u32 tmp = I915_READ(upper_reg); \ | ||
2750 | if (upper != tmp) { \ | ||
2751 | upper = tmp; \ | ||
2752 | lower = I915_READ(lower_reg); \ | ||
2753 | WARN_ON(I915_READ(upper_reg) != upper); \ | ||
2754 | } \ | ||
2755 | (u64)upper << 32 | lower; }) | ||
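I915_READ64_2x32 is the classic split-counter read: sample the upper half, then the lower, then the upper again; if the upper half moved, the lower half wrapped mid-read and both are re-sampled, and the WARN_ON flags the (assumed-impossible) case of the upper half changing twice in quick succession. A hedged usage sketch with illustrative register names:

	/* Sketch: read a free-running 64-bit counter exposed as two
	 * 32-bit registers.  The _LO/_HI names are illustrative, not
	 * taken from i915_reg.h. */
	static u64 example_read_counter(struct drm_i915_private *dev_priv)
	{
		return I915_READ64_2x32(HYPOTHETICAL_COUNTER_LO,
					HYPOTHETICAL_COUNTER_HI);
	}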
2756 | |||
2531 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) | 2757 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
2532 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) | 2758 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
2533 | 2759 | ||
@@ -2566,4 +2792,31 @@ timespec_to_jiffies_timeout(const struct timespec *value) | |||
2566 | return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); | 2792 | return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); |
2567 | } | 2793 | } |
2568 | 2794 | ||
2795 | /* | ||
2796 | * If you need to guarantee at least X milliseconds between events A and B, | ||
2797 | * but B occurs at some variable time after A: record the timestamp (in | ||
2798 | * jiffies) when A happens, then just before B call this function with that | ||
2799 | * timestamp and X; it sleeps only for the portion of X not yet elapsed. | ||
2800 | */ | ||
2801 | static inline void | ||
2802 | wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) | ||
2803 | { | ||
2804 | unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; | ||
2805 | |||
2806 | /* | ||
2807 | * Don't re-read the value of "jiffies" every time since it may change | ||
2808 | * behind our back and break the math. | ||
2809 | */ | ||
2810 | tmp_jiffies = jiffies; | ||
2811 | target_jiffies = timestamp_jiffies + | ||
2812 | msecs_to_jiffies_timeout(to_wait_ms); | ||
2813 | |||
2814 | if (time_after(target_jiffies, tmp_jiffies)) { | ||
2815 | remaining_jiffies = target_jiffies - tmp_jiffies; | ||
2816 | while (remaining_jiffies) | ||
2817 | remaining_jiffies = | ||
2818 | schedule_timeout_uninterruptible(remaining_jiffies); | ||
2819 | } | ||
2820 | } | ||
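A natural consumer is panel power sequencing, where a minimum off-to-on delay is mandated but the power-on request arrives at an unpredictable time. A hedged sketch; the struct, the helpers, and the 500 ms figure are all illustrative:

	/* Sketch: enforce >= 500 ms between power-off (event A) and
	 * power-on (event B), sleeping only for the time not already
	 * elapsed between the two. */
	struct example_panel {
		unsigned long power_off_stamp;	/* jiffies at event A */
	};

	static void example_panel_power_off(struct example_panel *p)
	{
		p->power_off_stamp = jiffies;	/* record event A */
		/* ... switch the panel off ... */
	}

	static void example_panel_power_on(struct example_panel *p)
	{
		/* just before event B: make up the rest of the delay */
		wait_remaining_ms_from_jiffies(p->power_off_stamp, 500);
		/* ... switch the panel on ... */
	}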
2821 | |||
2569 | #endif | 2822 | #endif |